From 97697403ccf1f0fc244b82f8cee234f0a964358e Mon Sep 17 00:00:00 2001 From: Arnold <40414978+PatriceJiang@users.noreply.github.com> Date: Tue, 3 Aug 2021 10:45:04 +0800 Subject: [PATCH 1/2] add tbb --- ohos/CMakeLists.txt | 23 + ohos/arm64-v8a/include/tbb/aggregator.h | 204 + ohos/arm64-v8a/include/tbb/aligned_space.h | 60 + ohos/arm64-v8a/include/tbb/atomic.h | 586 ++ ohos/arm64-v8a/include/tbb/blocked_range.h | 168 + ohos/arm64-v8a/include/tbb/blocked_range2d.h | 104 + ohos/arm64-v8a/include/tbb/blocked_range3d.h | 123 + ohos/arm64-v8a/include/tbb/blocked_rangeNd.h | 150 + .../include/tbb/cache_aligned_allocator.h | 209 + ohos/arm64-v8a/include/tbb/combinable.h | 88 + .../include/tbb/compat/condition_variable | 489 ++ ohos/arm64-v8a/include/tbb/compat/ppl.h | 75 + ohos/arm64-v8a/include/tbb/compat/thread | 73 + ohos/arm64-v8a/include/tbb/compat/tuple | 501 ++ .../include/tbb/concurrent_hash_map.h | 1650 ++++++ .../include/tbb/concurrent_lru_cache.h | 290 + ohos/arm64-v8a/include/tbb/concurrent_map.h | 389 ++ .../include/tbb/concurrent_priority_queue.h | 552 ++ ohos/arm64-v8a/include/tbb/concurrent_queue.h | 479 ++ ohos/arm64-v8a/include/tbb/concurrent_set.h | 304 ++ .../include/tbb/concurrent_unordered_map.h | 492 ++ .../include/tbb/concurrent_unordered_set.h | 448 ++ .../arm64-v8a/include/tbb/concurrent_vector.h | 1396 +++++ ohos/arm64-v8a/include/tbb/critical_section.h | 147 + .../include/tbb/enumerable_thread_specific.h | 1173 ++++ ohos/arm64-v8a/include/tbb/flow_graph.h | 4735 +++++++++++++++++ .../include/tbb/flow_graph_abstractions.h | 53 + .../include/tbb/flow_graph_opencl_node.h | 1504 ++++++ ohos/arm64-v8a/include/tbb/global_control.h | 78 + ohos/arm64-v8a/include/tbb/index.html | 29 + ohos/arm64-v8a/include/tbb/info.h | 52 + .../include/tbb/internal/_aggregator_impl.h | 180 + .../include/tbb/internal/_allocator_traits.h | 156 + .../tbb/internal/_concurrent_queue_impl.h | 1081 ++++ .../tbb/internal/_concurrent_skip_list_impl.h | 1085 ++++ .../tbb/internal/_concurrent_unordered_impl.h | 1684 ++++++ .../_deprecated_header_message_guard.h | 69 + .../tbb/internal/_flow_graph_async_msg_impl.h | 153 + .../tbb/internal/_flow_graph_body_impl.h | 449 ++ .../tbb/internal/_flow_graph_cache_impl.h | 592 +++ .../include/tbb/internal/_flow_graph_impl.h | 547 ++ .../tbb/internal/_flow_graph_indexer_impl.h | 480 ++ .../internal/_flow_graph_item_buffer_impl.h | 283 + .../tbb/internal/_flow_graph_join_impl.h | 2002 +++++++ .../tbb/internal/_flow_graph_node_impl.h | 971 ++++ .../tbb/internal/_flow_graph_node_set_impl.h | 269 + .../internal/_flow_graph_nodes_deduction.h | 270 + .../tbb/internal/_flow_graph_streaming_node.h | 743 +++ .../internal/_flow_graph_tagged_buffer_impl.h | 249 + .../tbb/internal/_flow_graph_trace_impl.h | 364 ++ .../tbb/internal/_flow_graph_types_impl.h | 723 +++ .../include/tbb/internal/_mutex_padding.h | 98 + .../include/tbb/internal/_node_handle_impl.h | 168 + .../include/tbb/internal/_range_iterator.h | 66 + .../tbb/internal/_tbb_hash_compare_impl.h | 105 + .../include/tbb/internal/_tbb_strings.h | 79 + .../include/tbb/internal/_tbb_trace_impl.h | 55 + .../include/tbb/internal/_tbb_windef.h | 69 + .../include/tbb/internal/_template_helpers.h | 284 + .../_warning_suppress_disable_notice.h | 27 + .../_warning_suppress_enable_notice.h | 32 + .../tbb/internal/_x86_eliding_mutex_impl.h | 144 + .../tbb/internal/_x86_rtm_rw_mutex_impl.h | 223 + ohos/arm64-v8a/include/tbb/iterators.h | 326 ++ ohos/arm64-v8a/include/tbb/machine/gcc_arm.h | 216 + 
.../include/tbb/machine/gcc_generic.h | 233 + .../include/tbb/machine/gcc_ia32_common.h | 109 + ohos/arm64-v8a/include/tbb/machine/gcc_itsx.h | 119 + .../arm64-v8a/include/tbb/machine/ibm_aix51.h | 66 + .../include/tbb/machine/icc_generic.h | 258 + .../include/tbb/machine/linux_common.h | 105 + .../include/tbb/machine/linux_ia32.h | 228 + .../include/tbb/machine/linux_ia64.h | 177 + .../include/tbb/machine/linux_intel64.h | 92 + ohos/arm64-v8a/include/tbb/machine/mac_ppc.h | 309 ++ .../include/tbb/machine/macos_common.h | 129 + .../include/tbb/machine/mic_common.h | 53 + .../include/tbb/machine/msvc_armv7.h | 167 + .../include/tbb/machine/msvc_ia32_common.h | 275 + .../include/tbb/machine/sunos_sparc.h | 199 + .../include/tbb/machine/windows_api.h | 65 + .../include/tbb/machine/windows_ia32.h | 105 + .../include/tbb/machine/windows_intel64.h | 70 + ohos/arm64-v8a/include/tbb/memory_pool.h | 275 + ohos/arm64-v8a/include/tbb/mutex.h | 246 + ohos/arm64-v8a/include/tbb/null_mutex.h | 50 + ohos/arm64-v8a/include/tbb/null_rw_mutex.h | 52 + ohos/arm64-v8a/include/tbb/parallel_do.h | 553 ++ ohos/arm64-v8a/include/tbb/parallel_for.h | 425 ++ .../arm64-v8a/include/tbb/parallel_for_each.h | 133 + ohos/arm64-v8a/include/tbb/parallel_invoke.h | 460 ++ ohos/arm64-v8a/include/tbb/parallel_reduce.h | 657 +++ ohos/arm64-v8a/include/tbb/parallel_scan.h | 416 ++ ohos/arm64-v8a/include/tbb/parallel_sort.h | 257 + ohos/arm64-v8a/include/tbb/parallel_while.h | 188 + ohos/arm64-v8a/include/tbb/partitioner.h | 681 +++ ohos/arm64-v8a/include/tbb/pipeline.h | 682 +++ ohos/arm64-v8a/include/tbb/queuing_mutex.h | 113 + ohos/arm64-v8a/include/tbb/queuing_rw_mutex.h | 154 + .../include/tbb/reader_writer_lock.h | 246 + ohos/arm64-v8a/include/tbb/recursive_mutex.h | 248 + ohos/arm64-v8a/include/tbb/runtime_loader.h | 193 + .../include/tbb/scalable_allocator.h | 388 ++ ohos/arm64-v8a/include/tbb/spin_mutex.h | 214 + ohos/arm64-v8a/include/tbb/spin_rw_mutex.h | 252 + ohos/arm64-v8a/include/tbb/task.h | 1189 +++++ ohos/arm64-v8a/include/tbb/task_arena.h | 511 ++ ohos/arm64-v8a/include/tbb/task_group.h | 366 ++ .../include/tbb/task_scheduler_init.h | 174 + .../include/tbb/task_scheduler_observer.h | 166 + ohos/arm64-v8a/include/tbb/tbb.h | 97 + ohos/arm64-v8a/include/tbb/tbb_allocator.h | 203 + ohos/arm64-v8a/include/tbb/tbb_config.h | 873 +++ .../include/tbb/tbb_disable_exceptions.h | 31 + ohos/arm64-v8a/include/tbb/tbb_exception.h | 362 ++ ohos/arm64-v8a/include/tbb/tbb_machine.h | 978 ++++ ohos/arm64-v8a/include/tbb/tbb_profiling.h | 355 ++ ohos/arm64-v8a/include/tbb/tbb_stddef.h | 565 ++ ohos/arm64-v8a/include/tbb/tbb_thread.h | 345 ++ ohos/arm64-v8a/include/tbb/tbbmalloc_proxy.h | 65 + ohos/arm64-v8a/include/tbb/tick_count.h | 136 + ohos/arm64-v8a/lib/libtbb_static.a | Bin 0 -> 556120 bytes .../arm64-v8a/lib/libtbbmalloc_proxy_static.a | Bin 0 -> 16244 bytes ohos/arm64-v8a/lib/libtbbmalloc_static.a | Bin 0 -> 179614 bytes 124 files changed, 46924 insertions(+) create mode 100644 ohos/arm64-v8a/include/tbb/aggregator.h create mode 100644 ohos/arm64-v8a/include/tbb/aligned_space.h create mode 100644 ohos/arm64-v8a/include/tbb/atomic.h create mode 100644 ohos/arm64-v8a/include/tbb/blocked_range.h create mode 100644 ohos/arm64-v8a/include/tbb/blocked_range2d.h create mode 100644 ohos/arm64-v8a/include/tbb/blocked_range3d.h create mode 100644 ohos/arm64-v8a/include/tbb/blocked_rangeNd.h create mode 100644 ohos/arm64-v8a/include/tbb/cache_aligned_allocator.h create mode 100644 ohos/arm64-v8a/include/tbb/combinable.h create 
mode 100644 ohos/arm64-v8a/include/tbb/compat/condition_variable create mode 100644 ohos/arm64-v8a/include/tbb/compat/ppl.h create mode 100644 ohos/arm64-v8a/include/tbb/compat/thread create mode 100644 ohos/arm64-v8a/include/tbb/compat/tuple create mode 100644 ohos/arm64-v8a/include/tbb/concurrent_hash_map.h create mode 100644 ohos/arm64-v8a/include/tbb/concurrent_lru_cache.h create mode 100644 ohos/arm64-v8a/include/tbb/concurrent_map.h create mode 100644 ohos/arm64-v8a/include/tbb/concurrent_priority_queue.h create mode 100644 ohos/arm64-v8a/include/tbb/concurrent_queue.h create mode 100644 ohos/arm64-v8a/include/tbb/concurrent_set.h create mode 100644 ohos/arm64-v8a/include/tbb/concurrent_unordered_map.h create mode 100644 ohos/arm64-v8a/include/tbb/concurrent_unordered_set.h create mode 100644 ohos/arm64-v8a/include/tbb/concurrent_vector.h create mode 100644 ohos/arm64-v8a/include/tbb/critical_section.h create mode 100644 ohos/arm64-v8a/include/tbb/enumerable_thread_specific.h create mode 100644 ohos/arm64-v8a/include/tbb/flow_graph.h create mode 100644 ohos/arm64-v8a/include/tbb/flow_graph_abstractions.h create mode 100644 ohos/arm64-v8a/include/tbb/flow_graph_opencl_node.h create mode 100644 ohos/arm64-v8a/include/tbb/global_control.h create mode 100644 ohos/arm64-v8a/include/tbb/index.html create mode 100644 ohos/arm64-v8a/include/tbb/info.h create mode 100644 ohos/arm64-v8a/include/tbb/internal/_aggregator_impl.h create mode 100644 ohos/arm64-v8a/include/tbb/internal/_allocator_traits.h create mode 100644 ohos/arm64-v8a/include/tbb/internal/_concurrent_queue_impl.h create mode 100644 ohos/arm64-v8a/include/tbb/internal/_concurrent_skip_list_impl.h create mode 100644 ohos/arm64-v8a/include/tbb/internal/_concurrent_unordered_impl.h create mode 100644 ohos/arm64-v8a/include/tbb/internal/_deprecated_header_message_guard.h create mode 100644 ohos/arm64-v8a/include/tbb/internal/_flow_graph_async_msg_impl.h create mode 100644 ohos/arm64-v8a/include/tbb/internal/_flow_graph_body_impl.h create mode 100644 ohos/arm64-v8a/include/tbb/internal/_flow_graph_cache_impl.h create mode 100644 ohos/arm64-v8a/include/tbb/internal/_flow_graph_impl.h create mode 100644 ohos/arm64-v8a/include/tbb/internal/_flow_graph_indexer_impl.h create mode 100644 ohos/arm64-v8a/include/tbb/internal/_flow_graph_item_buffer_impl.h create mode 100644 ohos/arm64-v8a/include/tbb/internal/_flow_graph_join_impl.h create mode 100644 ohos/arm64-v8a/include/tbb/internal/_flow_graph_node_impl.h create mode 100644 ohos/arm64-v8a/include/tbb/internal/_flow_graph_node_set_impl.h create mode 100644 ohos/arm64-v8a/include/tbb/internal/_flow_graph_nodes_deduction.h create mode 100644 ohos/arm64-v8a/include/tbb/internal/_flow_graph_streaming_node.h create mode 100644 ohos/arm64-v8a/include/tbb/internal/_flow_graph_tagged_buffer_impl.h create mode 100644 ohos/arm64-v8a/include/tbb/internal/_flow_graph_trace_impl.h create mode 100644 ohos/arm64-v8a/include/tbb/internal/_flow_graph_types_impl.h create mode 100644 ohos/arm64-v8a/include/tbb/internal/_mutex_padding.h create mode 100644 ohos/arm64-v8a/include/tbb/internal/_node_handle_impl.h create mode 100644 ohos/arm64-v8a/include/tbb/internal/_range_iterator.h create mode 100644 ohos/arm64-v8a/include/tbb/internal/_tbb_hash_compare_impl.h create mode 100644 ohos/arm64-v8a/include/tbb/internal/_tbb_strings.h create mode 100644 ohos/arm64-v8a/include/tbb/internal/_tbb_trace_impl.h create mode 100644 ohos/arm64-v8a/include/tbb/internal/_tbb_windef.h create mode 100644 
ohos/arm64-v8a/include/tbb/internal/_template_helpers.h create mode 100644 ohos/arm64-v8a/include/tbb/internal/_warning_suppress_disable_notice.h create mode 100644 ohos/arm64-v8a/include/tbb/internal/_warning_suppress_enable_notice.h create mode 100644 ohos/arm64-v8a/include/tbb/internal/_x86_eliding_mutex_impl.h create mode 100644 ohos/arm64-v8a/include/tbb/internal/_x86_rtm_rw_mutex_impl.h create mode 100644 ohos/arm64-v8a/include/tbb/iterators.h create mode 100644 ohos/arm64-v8a/include/tbb/machine/gcc_arm.h create mode 100644 ohos/arm64-v8a/include/tbb/machine/gcc_generic.h create mode 100644 ohos/arm64-v8a/include/tbb/machine/gcc_ia32_common.h create mode 100644 ohos/arm64-v8a/include/tbb/machine/gcc_itsx.h create mode 100644 ohos/arm64-v8a/include/tbb/machine/ibm_aix51.h create mode 100644 ohos/arm64-v8a/include/tbb/machine/icc_generic.h create mode 100644 ohos/arm64-v8a/include/tbb/machine/linux_common.h create mode 100644 ohos/arm64-v8a/include/tbb/machine/linux_ia32.h create mode 100644 ohos/arm64-v8a/include/tbb/machine/linux_ia64.h create mode 100644 ohos/arm64-v8a/include/tbb/machine/linux_intel64.h create mode 100644 ohos/arm64-v8a/include/tbb/machine/mac_ppc.h create mode 100644 ohos/arm64-v8a/include/tbb/machine/macos_common.h create mode 100644 ohos/arm64-v8a/include/tbb/machine/mic_common.h create mode 100644 ohos/arm64-v8a/include/tbb/machine/msvc_armv7.h create mode 100644 ohos/arm64-v8a/include/tbb/machine/msvc_ia32_common.h create mode 100644 ohos/arm64-v8a/include/tbb/machine/sunos_sparc.h create mode 100644 ohos/arm64-v8a/include/tbb/machine/windows_api.h create mode 100644 ohos/arm64-v8a/include/tbb/machine/windows_ia32.h create mode 100644 ohos/arm64-v8a/include/tbb/machine/windows_intel64.h create mode 100644 ohos/arm64-v8a/include/tbb/memory_pool.h create mode 100644 ohos/arm64-v8a/include/tbb/mutex.h create mode 100644 ohos/arm64-v8a/include/tbb/null_mutex.h create mode 100644 ohos/arm64-v8a/include/tbb/null_rw_mutex.h create mode 100644 ohos/arm64-v8a/include/tbb/parallel_do.h create mode 100644 ohos/arm64-v8a/include/tbb/parallel_for.h create mode 100644 ohos/arm64-v8a/include/tbb/parallel_for_each.h create mode 100644 ohos/arm64-v8a/include/tbb/parallel_invoke.h create mode 100644 ohos/arm64-v8a/include/tbb/parallel_reduce.h create mode 100644 ohos/arm64-v8a/include/tbb/parallel_scan.h create mode 100644 ohos/arm64-v8a/include/tbb/parallel_sort.h create mode 100644 ohos/arm64-v8a/include/tbb/parallel_while.h create mode 100644 ohos/arm64-v8a/include/tbb/partitioner.h create mode 100644 ohos/arm64-v8a/include/tbb/pipeline.h create mode 100644 ohos/arm64-v8a/include/tbb/queuing_mutex.h create mode 100644 ohos/arm64-v8a/include/tbb/queuing_rw_mutex.h create mode 100644 ohos/arm64-v8a/include/tbb/reader_writer_lock.h create mode 100644 ohos/arm64-v8a/include/tbb/recursive_mutex.h create mode 100644 ohos/arm64-v8a/include/tbb/runtime_loader.h create mode 100644 ohos/arm64-v8a/include/tbb/scalable_allocator.h create mode 100644 ohos/arm64-v8a/include/tbb/spin_mutex.h create mode 100644 ohos/arm64-v8a/include/tbb/spin_rw_mutex.h create mode 100644 ohos/arm64-v8a/include/tbb/task.h create mode 100644 ohos/arm64-v8a/include/tbb/task_arena.h create mode 100644 ohos/arm64-v8a/include/tbb/task_group.h create mode 100644 ohos/arm64-v8a/include/tbb/task_scheduler_init.h create mode 100644 ohos/arm64-v8a/include/tbb/task_scheduler_observer.h create mode 100644 ohos/arm64-v8a/include/tbb/tbb.h create mode 100644 ohos/arm64-v8a/include/tbb/tbb_allocator.h create mode 100644 
ohos/arm64-v8a/include/tbb/tbb_config.h create mode 100644 ohos/arm64-v8a/include/tbb/tbb_disable_exceptions.h create mode 100644 ohos/arm64-v8a/include/tbb/tbb_exception.h create mode 100644 ohos/arm64-v8a/include/tbb/tbb_machine.h create mode 100644 ohos/arm64-v8a/include/tbb/tbb_profiling.h create mode 100644 ohos/arm64-v8a/include/tbb/tbb_stddef.h create mode 100644 ohos/arm64-v8a/include/tbb/tbb_thread.h create mode 100644 ohos/arm64-v8a/include/tbb/tbbmalloc_proxy.h create mode 100644 ohos/arm64-v8a/include/tbb/tick_count.h create mode 100644 ohos/arm64-v8a/lib/libtbb_static.a create mode 100644 ohos/arm64-v8a/lib/libtbbmalloc_proxy_static.a create mode 100644 ohos/arm64-v8a/lib/libtbbmalloc_static.a diff --git a/ohos/CMakeLists.txt b/ohos/CMakeLists.txt index f70ee7b1..fa8aa3d2 100644 --- a/ohos/CMakeLists.txt +++ b/ohos/CMakeLists.txt @@ -143,6 +143,23 @@ set_target_properties(glslang-default-resource-limits PROPERTIES set(glslang_libs_name glslang OGLCompiler OSDependent SPIRV glslang-default-resource-limits) +############################# TBB ############################# +if(USE_JOB_SYSTEM_TASKFLOW) + add_library(tbb STATIC IMPORTED GLOBAL) + set_target_properties(tbb PROPERTIES + IMPORTED_LOCATION ${ohos_lib_dir}/libtbb_static.a + ) + add_library(tbbmalloc STATIC IMPORTED GLOBAL) + set_target_properties(tbbmalloc PROPERTIES + IMPORTED_LOCATION ${ohos_lib_dir}/libtbbmalloc_static.a + ) + add_library(tbbmalloc_proxy STATIC IMPORTED GLOBAL) + set_target_properties(tbbmalloc_proxy PROPERTIES + IMPORTED_LOCATION ${ohos_lib_dir}/libtbbmalloc_proxy_static.a + ) + set(tbb_libs_name tbbmalloc_proxy tbbmalloc tbb) +endif() + list(APPEND CC_EXTERNAL_LIBS freetype jpeg @@ -155,6 +172,12 @@ list(APPEND CC_EXTERNAL_LIBS mpg123 ) +if(USE_JOB_SYSTEM_TASKFLOW) + list(APPEND CC_EXTERNAL_LIBS + ${tbb_libs_name} + ) +endif() + set(ZLIB z) if(NOT USE_MODULES) list(APPEND CC_EXTERNAL_LIBS ${ZLIB}) diff --git a/ohos/arm64-v8a/include/tbb/aggregator.h b/ohos/arm64-v8a/include/tbb/aggregator.h new file mode 100644 index 00000000..786c52c8 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/aggregator.h @@ -0,0 +1,204 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +#ifndef __TBB__aggregator_H +#define __TBB__aggregator_H + +#define __TBB_aggregator_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + +#if !TBB_PREVIEW_AGGREGATOR +#error Set TBB_PREVIEW_AGGREGATOR before including aggregator.h +#endif + +#include "atomic.h" +#include "tbb_profiling.h" + +namespace tbb { +namespace interface6 { + +using namespace tbb::internal; + +class aggregator_operation { + template friend class aggregator_ext; + uintptr_t status; + aggregator_operation* my_next; +public: + enum aggregator_operation_status { agg_waiting=0, agg_finished }; + aggregator_operation() : status(agg_waiting), my_next(NULL) {} + /// Call start before handling this operation + void start() { call_itt_notify(acquired, &status); } + /// Call finish when done handling this operation + /** The operation will be released to its originating thread, and possibly deleted. */ + void finish() { itt_store_word_with_release(status, uintptr_t(agg_finished)); } + aggregator_operation* next() { return itt_hide_load_word(my_next);} + void set_next(aggregator_operation* n) { itt_hide_store_word(my_next, n); } +}; + +namespace internal { + +class basic_operation_base : public aggregator_operation { + friend class basic_handler; + virtual void apply_body() = 0; +public: + basic_operation_base() : aggregator_operation() {} + virtual ~basic_operation_base() {} +}; + +template +class basic_operation : public basic_operation_base, no_assign { + const Body& my_body; + void apply_body() __TBB_override { my_body(); } +public: + basic_operation(const Body& b) : basic_operation_base(), my_body(b) {} +}; + +class basic_handler { +public: + basic_handler() {} + void operator()(aggregator_operation* op_list) const { + while (op_list) { + // ITT note: &(op_list->status) tag is used to cover accesses to the operation data. + // The executing thread "acquires" the tag (see start()) and then performs + // the associated operation w/o triggering a race condition diagnostics. + // A thread that created the operation is waiting for its status (see execute_impl()), + // so when this thread is done with the operation, it will "release" the tag + // and update the status (see finish()) to give control back to the waiting thread. + basic_operation_base& request = static_cast(*op_list); + // IMPORTANT: need to advance op_list to op_list->next() before calling request.finish() + op_list = op_list->next(); + request.start(); + request.apply_body(); + request.finish(); + } + } +}; + +} // namespace internal + +//! Aggregator base class and expert interface +/** An aggregator for collecting operations coming from multiple sources and executing + them serially on a single thread. */ +template +class aggregator_ext : tbb::internal::no_copy { +public: + aggregator_ext(const handler_type& h) : handler_busy(0), handle_operations(h) { mailbox = NULL; } + + //! EXPERT INTERFACE: Enter a user-made operation into the aggregator's mailbox. + /** Details of user-made operations must be handled by user-provided handler */ + void process(aggregator_operation *op) { execute_impl(*op); } + +protected: + /** Place operation in mailbox, then either handle mailbox or wait for the operation + to be completed by a different thread. */ + void execute_impl(aggregator_operation& op) { + aggregator_operation* res; + + // ITT note: &(op.status) tag is used to cover accesses to this operation. 
This + // thread has created the operation, and now releases it so that the handler + // thread may handle the associated operation w/o triggering a race condition; + // thus this tag will be acquired just before the operation is handled in the + // handle_operations functor. + call_itt_notify(releasing, &(op.status)); + // insert the operation into the list + do { + // ITT may flag the following line as a race; it is a false positive: + // This is an atomic read; we don't provide itt_hide_load_word for atomics + op.my_next = res = mailbox; // NOT A RACE + } while (mailbox.compare_and_swap(&op, res) != res); + if (!res) { // first in the list; handle the operations + // ITT note: &mailbox tag covers access to the handler_busy flag, which this + // waiting handler thread will try to set before entering handle_operations. + call_itt_notify(acquired, &mailbox); + start_handle_operations(); + __TBB_ASSERT(op.status, NULL); + } + else { // not first; wait for op to be ready + call_itt_notify(prepare, &(op.status)); + spin_wait_while_eq(op.status, uintptr_t(aggregator_operation::agg_waiting)); + itt_load_word_with_acquire(op.status); + } + } + + +private: + //! An atomically updated list (aka mailbox) of aggregator_operations + atomic mailbox; + + //! Controls thread access to handle_operations + /** Behaves as boolean flag where 0=false, 1=true */ + uintptr_t handler_busy; + + handler_type handle_operations; + + //! Trigger the handling of operations when the handler is free + void start_handle_operations() { + aggregator_operation *pending_operations; + + // ITT note: &handler_busy tag covers access to mailbox as it is passed + // between active and waiting handlers. Below, the waiting handler waits until + // the active handler releases, and the waiting handler acquires &handler_busy as + // it becomes the active_handler. The release point is at the end of this + // function, when all operations in mailbox have been handled by the + // owner of this aggregator. + call_itt_notify(prepare, &handler_busy); + // get handler_busy: only one thread can possibly spin here at a time + spin_wait_until_eq(handler_busy, uintptr_t(0)); + call_itt_notify(acquired, &handler_busy); + // acquire fence not necessary here due to causality rule and surrounding atomics + __TBB_store_with_release(handler_busy, uintptr_t(1)); + + // ITT note: &mailbox tag covers access to the handler_busy flag itself. + // Capturing the state of the mailbox signifies that handler_busy has been + // set and a new active handler will now process that list's operations. + call_itt_notify(releasing, &mailbox); + // grab pending_operations + pending_operations = mailbox.fetch_and_store(NULL); + + // handle all the operations + handle_operations(pending_operations); + + // release the handler + itt_store_word_with_release(handler_busy, uintptr_t(0)); + } +}; + +//! Basic aggregator interface +class aggregator : private aggregator_ext { +public: + aggregator() : aggregator_ext(internal::basic_handler()) {} + //! BASIC INTERFACE: Enter a function for exclusive execution by the aggregator. 
+ /** The calling thread stores the function object in a basic_operation and + places the operation in the aggregator's mailbox */ + template + void execute(const Body& b) { + internal::basic_operation op(b); + this->execute_impl(op); + } +}; + +} // namespace interface6 + +using interface6::aggregator; +using interface6::aggregator_ext; +using interface6::aggregator_operation; + +} // namespace tbb + +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_aggregator_H_include_area + +#endif // __TBB__aggregator_H diff --git a/ohos/arm64-v8a/include/tbb/aligned_space.h b/ohos/arm64-v8a/include/tbb/aligned_space.h new file mode 100644 index 00000000..1b047f97 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/aligned_space.h @@ -0,0 +1,60 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#include "internal/_deprecated_header_message_guard.h" + +#if !defined(__TBB_show_deprecation_message_aligned_space_H) && defined(__TBB_show_deprecated_header_message) +#define __TBB_show_deprecation_message_aligned_space_H +#pragma message("TBB Warning: tbb/aligned_space.h is deprecated. For details, please see Deprecated Features appendix in the TBB reference manual.") +#endif + +#if defined(__TBB_show_deprecated_header_message) +#undef __TBB_show_deprecated_header_message +#endif + +#ifndef __TBB_aligned_space_H +#define __TBB_aligned_space_H + +#define __TBB_aligned_space_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + +#include "tbb_stddef.h" +#include "tbb_machine.h" + +namespace tbb { + +//! Block of space aligned sufficiently to construct an array T with N elements. +/** The elements are not constructed or destroyed by this class. + @ingroup memory_allocation */ +template +class __TBB_DEPRECATED_IN_VERBOSE_MODE_MSG("tbb::aligned_space is deprecated, use std::aligned_storage") aligned_space { +private: + typedef __TBB_TypeWithAlignmentAtLeastAsStrict(T) element_type; + element_type array[(sizeof(T)*N+sizeof(element_type)-1)/sizeof(element_type)]; +public: + //! Pointer to beginning of array + T* begin() const {return internal::punned_cast(this);} + + //! Pointer to one past last element in array. + T* end() const {return begin()+N;} +}; + +} // namespace tbb + +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_aligned_space_H_include_area + +#endif /* __TBB_aligned_space_H */ diff --git a/ohos/arm64-v8a/include/tbb/atomic.h b/ohos/arm64-v8a/include/tbb/atomic.h new file mode 100644 index 00000000..e602306f --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/atomic.h @@ -0,0 +1,586 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#include "internal/_deprecated_header_message_guard.h" + +#if !defined(__TBB_show_deprecation_message_atomic_H) && defined(__TBB_show_deprecated_header_message) +#define __TBB_show_deprecation_message_atomic_H +#pragma message("TBB Warning: tbb/atomic.h is deprecated. For details, please see Deprecated Features appendix in the TBB reference manual.") +#endif + +#if defined(__TBB_show_deprecated_header_message) +#undef __TBB_show_deprecated_header_message +#endif + +#ifndef __TBB_atomic_H +#define __TBB_atomic_H + +#define __TBB_atomic_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + +#include + +#if _MSC_VER +#define __TBB_LONG_LONG __int64 +#else +#define __TBB_LONG_LONG long long +#endif /* _MSC_VER */ + +#include "tbb_machine.h" + +#if _MSC_VER && !__INTEL_COMPILER + // Suppress overzealous compiler warnings till the end of the file + #pragma warning (push) + #pragma warning (disable: 4244 4267 4512) +#endif + +namespace tbb { + +//! Specifies memory semantics. +enum memory_semantics { + //! Sequential consistency + full_fence, + //! Acquire + acquire, + //! Release + release, + //! No ordering + relaxed +}; + +//! @cond INTERNAL +namespace internal { + +#if __TBB_ALIGNAS_PRESENT + #define __TBB_DECL_ATOMIC_FIELD(t,f,a) alignas(a) t f; +#elif __TBB_ATTRIBUTE_ALIGNED_PRESENT + #define __TBB_DECL_ATOMIC_FIELD(t,f,a) t f __attribute__ ((aligned(a))); +#elif __TBB_DECLSPEC_ALIGN_PRESENT + #define __TBB_DECL_ATOMIC_FIELD(t,f,a) __declspec(align(a)) t f; +#else + #error Do not know syntax for forcing alignment. +#endif + +template +struct atomic_rep; // Primary template declared, but never defined. 
+ +template<> +struct atomic_rep<1> { // Specialization + typedef int8_t word; +}; +template<> +struct atomic_rep<2> { // Specialization + typedef int16_t word; +}; +template<> +struct atomic_rep<4> { // Specialization +#if _MSC_VER && !_WIN64 + // Work-around that avoids spurious /Wp64 warnings + typedef intptr_t word; +#else + typedef int32_t word; +#endif +}; +#if __TBB_64BIT_ATOMICS +template<> +struct atomic_rep<8> { // Specialization + typedef int64_t word; +}; +#endif + +template +struct aligned_storage; + +//the specializations are needed to please MSVC syntax of __declspec(align()) which accept _literal_ constants only +#if __TBB_ATOMIC_CTORS + #define ATOMIC_STORAGE_PARTIAL_SPECIALIZATION(S) \ + template \ + struct aligned_storage { \ + __TBB_DECL_ATOMIC_FIELD(value_type,my_value,S) \ + aligned_storage() = default ; \ + constexpr aligned_storage(value_type value):my_value(value){} \ + }; \ + +#else + #define ATOMIC_STORAGE_PARTIAL_SPECIALIZATION(S) \ + template \ + struct aligned_storage { \ + __TBB_DECL_ATOMIC_FIELD(value_type,my_value,S) \ + }; \ + +#endif + +template +struct aligned_storage { + value_type my_value; +#if __TBB_ATOMIC_CTORS + aligned_storage() = default ; + constexpr aligned_storage(value_type value):my_value(value){} +#endif +}; + +ATOMIC_STORAGE_PARTIAL_SPECIALIZATION(2) +ATOMIC_STORAGE_PARTIAL_SPECIALIZATION(4) +#if __TBB_64BIT_ATOMICS +ATOMIC_STORAGE_PARTIAL_SPECIALIZATION(8) +#endif + +template +struct atomic_traits; // Primary template declared, but not defined. + +#define __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(S,M) \ + template<> struct atomic_traits { \ + typedef atomic_rep::word word; \ + inline static word compare_and_swap( volatile void* location, word new_value, word comparand ) { \ + return __TBB_machine_cmpswp##S##M(location,new_value,comparand); \ + } \ + inline static word fetch_and_add( volatile void* location, word addend ) { \ + return __TBB_machine_fetchadd##S##M(location,addend); \ + } \ + inline static word fetch_and_store( volatile void* location, word value ) { \ + return __TBB_machine_fetchstore##S##M(location,value); \ + } \ + }; + +#define __TBB_DECL_ATOMIC_PRIMITIVES(S) \ + template \ + struct atomic_traits { \ + typedef atomic_rep::word word; \ + inline static word compare_and_swap( volatile void* location, word new_value, word comparand ) { \ + return __TBB_machine_cmpswp##S(location,new_value,comparand); \ + } \ + inline static word fetch_and_add( volatile void* location, word addend ) { \ + return __TBB_machine_fetchadd##S(location,addend); \ + } \ + inline static word fetch_and_store( volatile void* location, word value ) { \ + return __TBB_machine_fetchstore##S(location,value); \ + } \ + }; + +template +struct atomic_load_store_traits; // Primary template declaration + +#define __TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(M) \ + template<> struct atomic_load_store_traits { \ + template \ + inline static T load( const volatile T& location ) { \ + return __TBB_load_##M( location ); \ + } \ + template \ + inline static void store( volatile T& location, T value ) { \ + __TBB_store_##M( location, value ); \ + } \ + } + +#if __TBB_USE_FENCED_ATOMICS +__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,full_fence) +__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,full_fence) +__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,full_fence) +__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,acquire) +__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,acquire) +__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,acquire) +__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,release) +__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,release) 
+__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,release) +__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,relaxed) +__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,relaxed) +__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,relaxed) +#if __TBB_64BIT_ATOMICS +__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,full_fence) +__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,acquire) +__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,release) +__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,relaxed) +#endif +#else /* !__TBB_USE_FENCED_ATOMICS */ +__TBB_DECL_ATOMIC_PRIMITIVES(1) +__TBB_DECL_ATOMIC_PRIMITIVES(2) +__TBB_DECL_ATOMIC_PRIMITIVES(4) +#if __TBB_64BIT_ATOMICS +__TBB_DECL_ATOMIC_PRIMITIVES(8) +#endif +#endif /* !__TBB_USE_FENCED_ATOMICS */ + +__TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(full_fence); +__TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(acquire); +__TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(release); +__TBB_DECL_ATOMIC_LOAD_STORE_PRIMITIVES(relaxed); + +//! Additive inverse of 1 for type T. +/** Various compilers issue various warnings if -1 is used with various integer types. + The baroque expression below avoids all the warnings (we hope). */ +#define __TBB_MINUS_ONE(T) (T(T(0)-T(1))) + +//! Base class that provides basic functionality for atomic without fetch_and_add. +/** Works for any type T that has the same size as an integral type, has a trivial constructor/destructor, + and can be copied/compared by memcpy/memcmp. */ +template +struct atomic_impl { +protected: + aligned_storage my_storage; +private: + //TODO: rechecks on recent versions of gcc if union is still the _only_ way to do a conversion without warnings + //! Union type used to convert type T to underlying integral type. + template + union converter { + typedef typename atomic_rep::word bits_type; + converter(){} + converter(value_type a_value) : value(a_value) {} + value_type value; + bits_type bits; + }; + + template + static typename converter::bits_type to_bits(value_t value){ + return converter(value).bits; + } + template + static value_t to_value(typename converter::bits_type bits){ + converter u; + u.bits = bits; + return u.value; + } + + template + union ptr_converter; //Primary template declared, but never defined. 
+ + template + union ptr_converter { + ptr_converter(){} + ptr_converter(value_t* a_value) : value(a_value) {} + value_t* value; + uintptr_t bits; + }; + //TODO: check if making to_bits accepting reference (thus unifying it with to_bits_ref) + //does not hurt performance + template + static typename converter::bits_type & to_bits_ref(value_t& value){ + //TODO: this #ifdef is temporary workaround, as union conversion seems to fail + //on suncc for 64 bit types for 32 bit target + #if !__SUNPRO_CC + return *(typename converter::bits_type*)ptr_converter(&value).bits; + #else + return *(typename converter::bits_type*)(&value); + #endif + } + + +public: + typedef T value_type; + +#if __TBB_ATOMIC_CTORS + atomic_impl() = default ; + constexpr atomic_impl(value_type value):my_storage(value){} +#endif + template + value_type fetch_and_store( value_type value ) { + return to_value( + internal::atomic_traits::fetch_and_store( &my_storage.my_value, to_bits(value) ) + ); + } + + value_type fetch_and_store( value_type value ) { + return fetch_and_store(value); + } + + template + value_type compare_and_swap( value_type value, value_type comparand ) { + return to_value( + internal::atomic_traits::compare_and_swap( &my_storage.my_value, to_bits(value), to_bits(comparand) ) + ); + } + + value_type compare_and_swap( value_type value, value_type comparand ) { + return compare_and_swap(value,comparand); + } + + operator value_type() const volatile { // volatile qualifier here for backwards compatibility + return to_value( + __TBB_load_with_acquire( to_bits_ref(my_storage.my_value) ) + ); + } + + template + value_type load () const { + return to_value( + internal::atomic_load_store_traits::load( to_bits_ref(my_storage.my_value) ) + ); + } + + value_type load () const { + return load(); + } + + template + void store ( value_type value ) { + internal::atomic_load_store_traits::store( to_bits_ref(my_storage.my_value), to_bits(value)); + } + + void store ( value_type value ) { + store( value ); + } + +protected: + value_type store_with_release( value_type rhs ) { + //TODO: unify with store + __TBB_store_with_release( to_bits_ref(my_storage.my_value), to_bits(rhs) ); + return rhs; + } +}; + +//! Base class that provides basic functionality for atomic with fetch_and_add. +/** I is the underlying type. + D is the difference type. + StepType should be char if I is an integral type, and T if I is a T*. */ +template +struct atomic_impl_with_arithmetic: atomic_impl { +public: + typedef I value_type; +#if __TBB_ATOMIC_CTORS + atomic_impl_with_arithmetic() = default ; + constexpr atomic_impl_with_arithmetic(value_type value): atomic_impl(value){} +#endif + template + value_type fetch_and_add( D addend ) { + return value_type(internal::atomic_traits::fetch_and_add( &this->my_storage.my_value, addend*sizeof(StepType) )); + } + + value_type fetch_and_add( D addend ) { + return fetch_and_add(addend); + } + + template + value_type fetch_and_increment() { + return fetch_and_add(1); + } + + value_type fetch_and_increment() { + return fetch_and_add(1); + } + + template + value_type fetch_and_decrement() { + return fetch_and_add(__TBB_MINUS_ONE(D)); + } + + value_type fetch_and_decrement() { + return fetch_and_add(__TBB_MINUS_ONE(D)); + } + +public: + value_type operator+=( D value ) { + return fetch_and_add(value)+value; + } + + value_type operator-=( D value ) { + // Additive inverse of value computed using binary minus, + // instead of unary minus, for sake of avoiding compiler warnings. 
+ return operator+=(D(0)-value); + } + + value_type operator++() { + return fetch_and_add(1)+1; + } + + value_type operator--() { + return fetch_and_add(__TBB_MINUS_ONE(D))-1; + } + + value_type operator++(int) { + return fetch_and_add(1); + } + + value_type operator--(int) { + return fetch_and_add(__TBB_MINUS_ONE(D)); + } +}; + +} /* Internal */ +//! @endcond + +//! Primary template for atomic. +/** See the Reference for details. + @ingroup synchronization */ +template +struct __TBB_DEPRECATED_IN_VERBOSE_MODE_MSG("tbb::atomic is deprecated, use std::atomic") +atomic: internal::atomic_impl { +#if __TBB_ATOMIC_CTORS + atomic() = default; + constexpr atomic(T arg): internal::atomic_impl(arg) {} + constexpr atomic(const atomic& rhs): internal::atomic_impl(rhs) {} +#endif + T operator=( T rhs ) { + // "this" required here in strict ISO C++ because store_with_release is a dependent name + return this->store_with_release(rhs); + } + atomic& operator=( const atomic& rhs ) {this->store_with_release(rhs); return *this;} +}; + +#if __TBB_ATOMIC_CTORS + #define __TBB_DECL_ATOMIC(T) \ + template<> struct __TBB_DEPRECATED_IN_VERBOSE_MODE_MSG("tbb::atomic is deprecated, use std::atomic") \ + atomic: internal::atomic_impl_with_arithmetic { \ + atomic() = default; \ + constexpr atomic(T arg): internal::atomic_impl_with_arithmetic(arg) {} \ + constexpr atomic(const atomic& rhs): \ + internal::atomic_impl_with_arithmetic(rhs) {} \ + \ + T operator=( T rhs ) {return store_with_release(rhs);} \ + atomic& operator=( const atomic& rhs ) {store_with_release(rhs); return *this;} \ + }; +#else + #define __TBB_DECL_ATOMIC(T) \ + template<> struct __TBB_DEPRECATED_IN_VERBOSE_MODE_MSG("tbb::atomic is deprecated, use std::atomic") \ + atomic: internal::atomic_impl_with_arithmetic { \ + T operator=( T rhs ) {return store_with_release(rhs);} \ + atomic& operator=( const atomic& rhs ) {store_with_release(rhs); return *this;} \ + }; +#endif + +#if __TBB_64BIT_ATOMICS +//TODO: consider adding non-default (and atomic) copy constructor for 32bit platform +__TBB_DECL_ATOMIC(__TBB_LONG_LONG) +__TBB_DECL_ATOMIC(unsigned __TBB_LONG_LONG) +#else +// test_atomic will verify that sizeof(long long)==8 +#endif +__TBB_DECL_ATOMIC(long) +__TBB_DECL_ATOMIC(unsigned long) + +#if _MSC_VER && !_WIN64 +#if __TBB_ATOMIC_CTORS +/* Special version of __TBB_DECL_ATOMIC that avoids gratuitous warnings from cl /Wp64 option. + It is identical to __TBB_DECL_ATOMIC(unsigned) except that it replaces operator=(T) + with an operator=(U) that explicitly converts the U to a T. Types T and U should be + type synonyms on the platform. Type U should be the wider variant of T from the + perspective of /Wp64. 
*/ +#define __TBB_DECL_ATOMIC_ALT(T,U) \ + template<> struct __TBB_DEPRECATED_IN_VERBOSE_MODE_MSG("tbb::atomic is deprecated, use std::atomic") \ + atomic: internal::atomic_impl_with_arithmetic { \ + atomic() = default ; \ + constexpr atomic(T arg): internal::atomic_impl_with_arithmetic(arg) {} \ + constexpr atomic(const atomic& rhs): \ + internal::atomic_impl_with_arithmetic(rhs) {} \ + \ + T operator=( U rhs ) {return store_with_release(T(rhs));} \ + atomic& operator=( const atomic& rhs ) {store_with_release(rhs); return *this;} \ + }; +#else +#define __TBB_DECL_ATOMIC_ALT(T,U) \ + template<> struct __TBB_DEPRECATED_IN_VERBOSE_MODE_MSG("tbb::atomic is deprecated, use std::atomic") \ + atomic: internal::atomic_impl_with_arithmetic { \ + T operator=( U rhs ) {return store_with_release(T(rhs));} \ + atomic& operator=( const atomic& rhs ) {store_with_release(rhs); return *this;} \ + }; +#endif +__TBB_DECL_ATOMIC_ALT(unsigned,size_t) +__TBB_DECL_ATOMIC_ALT(int,ptrdiff_t) +#else +__TBB_DECL_ATOMIC(unsigned) +__TBB_DECL_ATOMIC(int) +#endif /* _MSC_VER && !_WIN64 */ + +__TBB_DECL_ATOMIC(unsigned short) +__TBB_DECL_ATOMIC(short) +__TBB_DECL_ATOMIC(char) +__TBB_DECL_ATOMIC(signed char) +__TBB_DECL_ATOMIC(unsigned char) + +#if !_MSC_VER || defined(_NATIVE_WCHAR_T_DEFINED) +__TBB_DECL_ATOMIC(wchar_t) +#endif /* _MSC_VER||!defined(_NATIVE_WCHAR_T_DEFINED) */ + +//! Specialization for atomic with arithmetic and operator->. +template struct __TBB_DEPRECATED_IN_VERBOSE_MODE_MSG("tbb::atomic is deprecated, use std::atomic") +atomic: internal::atomic_impl_with_arithmetic { +#if __TBB_ATOMIC_CTORS + atomic() = default ; + constexpr atomic(T* arg): internal::atomic_impl_with_arithmetic(arg) {} + constexpr atomic(const atomic& rhs): internal::atomic_impl_with_arithmetic(rhs) {} +#endif + T* operator=( T* rhs ) { + // "this" required here in strict ISO C++ because store_with_release is a dependent name + return this->store_with_release(rhs); + } + atomic& operator=( const atomic& rhs ) { + this->store_with_release(rhs); return *this; + } + T* operator->() const { + return (*this); + } +}; + +//! Specialization for atomic, for sake of not allowing arithmetic or operator->. +template<> struct __TBB_DEPRECATED_IN_VERBOSE_MODE_MSG("tbb::atomic is deprecated, use std::atomic") +atomic: internal::atomic_impl { +#if __TBB_ATOMIC_CTORS + atomic() = default ; + constexpr atomic(void* arg): internal::atomic_impl(arg) {} + constexpr atomic(const atomic& rhs): internal::atomic_impl(rhs) {} +#endif + void* operator=( void* rhs ) { + // "this" required here in strict ISO C++ because store_with_release is a dependent name + return this->store_with_release(rhs); + } + atomic& operator=( const atomic& rhs ) { + this->store_with_release(rhs); return *this; + } +}; + +// Helpers to workaround ugly syntax of calling template member function of a +// template class with template argument dependent on template parameters. + +template +T load ( const atomic& a ) { return a.template load(); } + +template +void store ( atomic& a, T value ) { a.template store(value); } + +namespace interface6{ +//! Make an atomic for use in an initialization (list), as an alternative to zero-initialization or normal assignment. 
+template +atomic make_atomic(T t) { + atomic a; + store(a,t); + return a; +} +} +using interface6::make_atomic; + +namespace internal { +template +void swap(atomic & lhs, atomic & rhs){ + T tmp = load(lhs); + store(lhs,load(rhs)); + store(rhs,tmp); +} + +// only to aid in the gradual conversion of ordinary variables to proper atomics +template +inline atomic& as_atomic( T& t ) { + return (atomic&)t; +} +} // namespace tbb::internal + +} // namespace tbb + +#if _MSC_VER && !__INTEL_COMPILER + #pragma warning (pop) +#endif // warnings are restored + +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_atomic_H_include_area + +#endif /* __TBB_atomic_H */ diff --git a/ohos/arm64-v8a/include/tbb/blocked_range.h b/ohos/arm64-v8a/include/tbb/blocked_range.h new file mode 100644 index 00000000..b77e7e0a --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/blocked_range.h @@ -0,0 +1,168 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_blocked_range_H +#define __TBB_blocked_range_H + +#include "tbb_stddef.h" + +namespace tbb { + +namespace internal { + +// blocked_rangeNd_impl forward declaration in tbb::internal namespace to +// name it as a friend for a tbb::blocked_range. +template +class blocked_rangeNd_impl; + +} // namespace internal + +/** \page range_req Requirements on range concept + Class \c R implementing the concept of range must define: + - \code R::R( const R& ); \endcode Copy constructor + - \code R::~R(); \endcode Destructor + - \code bool R::is_divisible() const; \endcode True if range can be partitioned into two subranges + - \code bool R::empty() const; \endcode True if range is empty + - \code R::R( R& r, split ); \endcode Split range \c r into two subranges. +**/ + +//! A range over which to iterate. +/** @ingroup algorithms */ +template +class blocked_range { +public: + //! Type of a value + /** Called a const_iterator for sake of algorithms that need to treat a blocked_range + as an STL container. */ + typedef Value const_iterator; + + //! Type for size of a range + typedef std::size_t size_type; + +#if __TBB_DEPRECATED_BLOCKED_RANGE_DEFAULT_CTOR + //! Construct range with default-constructed values for begin, end, and grainsize. + /** Requires that Value have a default constructor. */ + blocked_range() : my_end(), my_begin(), my_grainsize() {} +#endif + + //! Construct range over half-open interval [begin,end), with the given grainsize. + blocked_range( Value begin_, Value end_, size_type grainsize_=1 ) : + my_end(end_), my_begin(begin_), my_grainsize(grainsize_) + { + __TBB_ASSERT( my_grainsize>0, "grainsize must be positive" ); + } + + //! Beginning of range. + const_iterator begin() const {return my_begin;} + + //! One past last value in range. + const_iterator end() const {return my_end;} + + //! 
Size of the range + /** Unspecified if end() + friend class blocked_range2d; + + template + friend class blocked_range3d; + + template + friend class internal::blocked_rangeNd_impl; +}; + +} // namespace tbb + +#endif /* __TBB_blocked_range_H */ diff --git a/ohos/arm64-v8a/include/tbb/blocked_range2d.h b/ohos/arm64-v8a/include/tbb/blocked_range2d.h new file mode 100644 index 00000000..2498e046 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/blocked_range2d.h @@ -0,0 +1,104 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_blocked_range2d_H +#define __TBB_blocked_range2d_H + +#include "tbb_stddef.h" +#include "blocked_range.h" + +namespace tbb { + +//! A 2-dimensional range that models the Range concept. +/** @ingroup algorithms */ +template +class blocked_range2d { +public: + //! Type for size of an iteration range + typedef blocked_range row_range_type; + typedef blocked_range col_range_type; + +private: + row_range_type my_rows; + col_range_type my_cols; + +public: + + blocked_range2d( RowValue row_begin, RowValue row_end, typename row_range_type::size_type row_grainsize, + ColValue col_begin, ColValue col_end, typename col_range_type::size_type col_grainsize ) : + my_rows(row_begin,row_end,row_grainsize), + my_cols(col_begin,col_end,col_grainsize) + {} + + blocked_range2d( RowValue row_begin, RowValue row_end, + ColValue col_begin, ColValue col_end ) : + my_rows(row_begin,row_end), + my_cols(col_begin,col_end) + {} + + //! True if range is empty + bool empty() const { + // Range is empty if at least one dimension is empty. + return my_rows.empty() || my_cols.empty(); + } + + //! True if range is divisible into two pieces. + bool is_divisible() const { + return my_rows.is_divisible() || my_cols.is_divisible(); + } + + blocked_range2d( blocked_range2d& r, split ) : + my_rows(r.my_rows), + my_cols(r.my_cols) + { + split split_obj; + do_split(r, split_obj); + } + +#if __TBB_USE_PROPORTIONAL_SPLIT_IN_BLOCKED_RANGES + //! Static field to support proportional split + static const bool is_splittable_in_proportion = true; + + blocked_range2d( blocked_range2d& r, proportional_split& proportion ) : + my_rows(r.my_rows), + my_cols(r.my_cols) + { + do_split(r, proportion); + } +#endif /* __TBB_USE_PROPORTIONAL_SPLIT_IN_BLOCKED_RANGES */ + + //! The rows of the iteration space + const row_range_type& rows() const {return my_rows;} + + //! 
The columns of the iteration space + const col_range_type& cols() const {return my_cols;} + +private: + + template + void do_split( blocked_range2d& r, Split& split_obj ) + { + if( my_rows.size()*double(my_cols.grainsize()) < my_cols.size()*double(my_rows.grainsize()) ) { + my_cols.my_begin = col_range_type::do_split(r.my_cols, split_obj); + } else { + my_rows.my_begin = row_range_type::do_split(r.my_rows, split_obj); + } + } +}; + +} // namespace tbb + +#endif /* __TBB_blocked_range2d_H */ diff --git a/ohos/arm64-v8a/include/tbb/blocked_range3d.h b/ohos/arm64-v8a/include/tbb/blocked_range3d.h new file mode 100644 index 00000000..15f93130 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/blocked_range3d.h @@ -0,0 +1,123 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_blocked_range3d_H +#define __TBB_blocked_range3d_H + +#include "tbb_stddef.h" +#include "blocked_range.h" + +namespace tbb { + +//! A 3-dimensional range that models the Range concept. +/** @ingroup algorithms */ +template +class blocked_range3d { +public: + //! Type for size of an iteration range + typedef blocked_range page_range_type; + typedef blocked_range row_range_type; + typedef blocked_range col_range_type; + +private: + page_range_type my_pages; + row_range_type my_rows; + col_range_type my_cols; + +public: + + blocked_range3d( PageValue page_begin, PageValue page_end, + RowValue row_begin, RowValue row_end, + ColValue col_begin, ColValue col_end ) : + my_pages(page_begin,page_end), + my_rows(row_begin,row_end), + my_cols(col_begin,col_end) + {} + + blocked_range3d( PageValue page_begin, PageValue page_end, typename page_range_type::size_type page_grainsize, + RowValue row_begin, RowValue row_end, typename row_range_type::size_type row_grainsize, + ColValue col_begin, ColValue col_end, typename col_range_type::size_type col_grainsize ) : + my_pages(page_begin,page_end,page_grainsize), + my_rows(row_begin,row_end,row_grainsize), + my_cols(col_begin,col_end,col_grainsize) + {} + + //! True if range is empty + bool empty() const { + // Range is empty if at least one dimension is empty. + return my_pages.empty() || my_rows.empty() || my_cols.empty(); + } + + //! True if range is divisible into two pieces. + bool is_divisible() const { + return my_pages.is_divisible() || my_rows.is_divisible() || my_cols.is_divisible(); + } + + blocked_range3d( blocked_range3d& r, split ) : + my_pages(r.my_pages), + my_rows(r.my_rows), + my_cols(r.my_cols) + { + split split_obj; + do_split(r, split_obj); + } + +#if __TBB_USE_PROPORTIONAL_SPLIT_IN_BLOCKED_RANGES + //! Static field to support proportional split + static const bool is_splittable_in_proportion = true; + + blocked_range3d( blocked_range3d& r, proportional_split& proportion ) : + my_pages(r.my_pages), + my_rows(r.my_rows), + my_cols(r.my_cols) + { + do_split(r, proportion); + } +#endif /* __TBB_USE_PROPORTIONAL_SPLIT_IN_BLOCKED_RANGES */ + + //! 
The pages of the iteration space + const page_range_type& pages() const {return my_pages;} + + //! The rows of the iteration space + const row_range_type& rows() const {return my_rows;} + + //! The columns of the iteration space + const col_range_type& cols() const {return my_cols;} + +private: + + template + void do_split( blocked_range3d& r, Split& split_obj) + { + if ( my_pages.size()*double(my_rows.grainsize()) < my_rows.size()*double(my_pages.grainsize()) ) { + if ( my_rows.size()*double(my_cols.grainsize()) < my_cols.size()*double(my_rows.grainsize()) ) { + my_cols.my_begin = col_range_type::do_split(r.my_cols, split_obj); + } else { + my_rows.my_begin = row_range_type::do_split(r.my_rows, split_obj); + } + } else { + if ( my_pages.size()*double(my_cols.grainsize()) < my_cols.size()*double(my_pages.grainsize()) ) { + my_cols.my_begin = col_range_type::do_split(r.my_cols, split_obj); + } else { + my_pages.my_begin = page_range_type::do_split(r.my_pages, split_obj); + } + } + } +}; + +} // namespace tbb + +#endif /* __TBB_blocked_range3d_H */ diff --git a/ohos/arm64-v8a/include/tbb/blocked_rangeNd.h b/ohos/arm64-v8a/include/tbb/blocked_rangeNd.h new file mode 100644 index 00000000..922c77c6 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/blocked_rangeNd.h @@ -0,0 +1,150 @@ +/* + Copyright (c) 2017-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_blocked_rangeNd_H +#define __TBB_blocked_rangeNd_H + +#if ! TBB_PREVIEW_BLOCKED_RANGE_ND + #error Set TBB_PREVIEW_BLOCKED_RANGE_ND to include blocked_rangeNd.h +#endif + +#include "tbb_config.h" + +// tbb::blocked_rangeNd requires C++11 support +#if __TBB_CPP11_PRESENT && __TBB_CPP11_ARRAY_PRESENT && __TBB_CPP11_TEMPLATE_ALIASES_PRESENT + +#include "internal/_template_helpers.h" // index_sequence, make_index_sequence + +#include +#include // std::any_of +#include // std::is_same, std::enable_if + +#include "tbb/blocked_range.h" + +namespace tbb { +namespace internal { + +/* + The blocked_rangeNd_impl uses make_index_sequence to automatically generate a ctor with + exactly N arguments of the type tbb::blocked_range. Such ctor provides an opportunity + to use braced-init-list parameters to initialize each dimension. + Use of parameters, whose representation is a braced-init-list, but they're not + std::initializer_list or a reference to one, produces a non-deduced context + within template argument deduction. + + NOTE: blocked_rangeNd must be exactly a templated alias to the blocked_rangeNd_impl + (and not e.g. a derived class), otherwise it would need to declare its own ctor + facing the same problem that the impl class solves. +*/ + +template> +class blocked_rangeNd_impl; + +template +class blocked_rangeNd_impl> { +public: + //! Type of a value. + using value_type = Value; + +private: + + //! Helper type to construct range with N tbb::blocked_range objects. + template + using dim_type_helper = tbb::blocked_range; + +public: + blocked_rangeNd_impl() = delete; + + //! 
Constructs N-dimensional range over N half-open intervals each represented as tbb::blocked_range. + blocked_rangeNd_impl(const dim_type_helper&... args) : my_dims{ {args...} } {} + + //! Dimensionality of a range. + static constexpr unsigned int ndims() { return N; } + + //! Range in certain dimension. + const tbb::blocked_range& dim(unsigned int dimension) const { + __TBB_ASSERT(dimension < N, "out of bound"); + return my_dims[dimension]; + } + + //------------------------------------------------------------------------ + // Methods that implement Range concept + //------------------------------------------------------------------------ + + //! True if at least one dimension is empty. + bool empty() const { + return std::any_of(my_dims.begin(), my_dims.end(), [](const tbb::blocked_range& d) { + return d.empty(); + }); + } + + //! True if at least one dimension is divisible. + bool is_divisible() const { + return std::any_of(my_dims.begin(), my_dims.end(), [](const tbb::blocked_range& d) { + return d.is_divisible(); + }); + } + +#if __TBB_USE_PROPORTIONAL_SPLIT_IN_BLOCKED_RANGES + //! Static field to support proportional split. + static const bool is_splittable_in_proportion = true; + + blocked_rangeNd_impl(blocked_rangeNd_impl& r, proportional_split proportion) : my_dims(r.my_dims) { + do_split(r, proportion); + } +#endif + + blocked_rangeNd_impl(blocked_rangeNd_impl& r, split proportion) : my_dims(r.my_dims) { + do_split(r, proportion); + } + +private: + __TBB_STATIC_ASSERT(N != 0, "zero dimensional blocked_rangeNd can't be constructed"); + + //! Ranges in each dimension. + std::array, N> my_dims; + + template + void do_split(blocked_rangeNd_impl& r, split_type proportion) { + __TBB_STATIC_ASSERT((is_same_type::value + || is_same_type::value), + "type of split object is incorrect"); + __TBB_ASSERT(r.is_divisible(), "can't split not divisible range"); + + auto my_it = std::max_element(my_dims.begin(), my_dims.end(), [](const tbb::blocked_range& first, const tbb::blocked_range& second) { + return (first.size() * second.grainsize() < second.size() * first.grainsize()); + }); + + auto r_it = r.my_dims.begin() + (my_it - my_dims.begin()); + + my_it->my_begin = tbb::blocked_range::do_split(*r_it, proportion); + + // (!(my_it->my_begin < r_it->my_end) && !(r_it->my_end < my_it->my_begin)) equals to + // (my_it->my_begin == r_it->my_end), but we can't use operator== due to Value concept + __TBB_ASSERT(!(my_it->my_begin < r_it->my_end) && !(r_it->my_end < my_it->my_begin), + "blocked_range has been split incorrectly"); + } +}; + +} // namespace internal + +template +using blocked_rangeNd = internal::blocked_rangeNd_impl; + +} // namespace tbb + +#endif /* __TBB_CPP11_PRESENT && __TBB_CPP11_ARRAY_PRESENT && __TBB_CPP11_TEMPLATE_ALIASES_PRESENT */ +#endif /* __TBB_blocked_rangeNd_H */ diff --git a/ohos/arm64-v8a/include/tbb/cache_aligned_allocator.h b/ohos/arm64-v8a/include/tbb/cache_aligned_allocator.h new file mode 100644 index 00000000..5b4897c4 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/cache_aligned_allocator.h @@ -0,0 +1,209 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
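As a quick illustration of how the multidimensional ranges added above are typically consumed, here is a minimal sketch pairing tbb::blocked_range3d with tbb::parallel_for; the fill_volume function, the raw array and its extents are hypothetical and only serve to show the pages()/rows()/cols() loop nest (the preview blocked_rangeNd follows the same pattern through dim()).

    #include "tbb/parallel_for.h"
    #include "tbb/blocked_range3d.h"

    // Illustrative only: zero a hypothetical D x H x W volume in parallel.
    // The partitioner may split the range along pages, rows or columns.
    void fill_volume(float* a, int D, int H, int W) {
        tbb::parallel_for(
            tbb::blocked_range3d<int>(0, D, 0, H, 0, W),
            [=](const tbb::blocked_range3d<int>& r) {
                for (int p = r.pages().begin(); p != r.pages().end(); ++p)
                    for (int i = r.rows().begin(); i != r.rows().end(); ++i)
                        for (int j = r.cols().begin(); j != r.cols().end(); ++j)
                            a[(p * H + i) * W + j] = 0.0f;
            });
    }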
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_cache_aligned_allocator_H +#define __TBB_cache_aligned_allocator_H + +#include +#include "tbb_stddef.h" +#if __TBB_ALLOCATOR_CONSTRUCT_VARIADIC +#include // std::forward +#endif + +#if __TBB_CPP17_MEMORY_RESOURCE_PRESENT +#include +#endif + +namespace tbb { + +//! @cond INTERNAL +namespace internal { + //! Cache/sector line size. + /** @ingroup memory_allocation */ + size_t __TBB_EXPORTED_FUNC NFS_GetLineSize(); + + //! Allocate memory on cache/sector line boundary. + /** @ingroup memory_allocation */ + void* __TBB_EXPORTED_FUNC NFS_Allocate( size_t n_element, size_t element_size, void* hint ); + + //! Free memory allocated by NFS_Allocate. + /** Freeing a NULL pointer is allowed, but has no effect. + @ingroup memory_allocation */ + void __TBB_EXPORTED_FUNC NFS_Free( void* ); +} +//! @endcond + +#if _MSC_VER && !defined(__INTEL_COMPILER) + // Workaround for erroneous "unreferenced parameter" warning in method destroy. + #pragma warning (push) + #pragma warning (disable: 4100) +#endif + +//! Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5 +/** The members are ordered the same way they are in section 20.4.1 + of the ISO C++ standard. + @ingroup memory_allocation */ +template +class cache_aligned_allocator { +public: + typedef typename internal::allocator_type::value_type value_type; + typedef value_type* pointer; + typedef const value_type* const_pointer; + typedef value_type& reference; + typedef const value_type& const_reference; + typedef size_t size_type; + typedef ptrdiff_t difference_type; + template struct rebind { + typedef cache_aligned_allocator other; + }; + cache_aligned_allocator() throw() {} + cache_aligned_allocator( const cache_aligned_allocator& ) throw() {} + template cache_aligned_allocator(const cache_aligned_allocator&) throw() {} + + pointer address(reference x) const {return &x;} + const_pointer address(const_reference x) const {return &x;} + + //! Allocate space for n objects, starting on a cache/sector line. + pointer allocate( size_type n, const void* hint=0 ) { + // The "hint" argument is always ignored in NFS_Allocate thus const_cast shouldn't hurt + return pointer(internal::NFS_Allocate( n, sizeof(value_type), const_cast(hint) )); + } + + //! Free block of memory that starts on a cache line + void deallocate( pointer p, size_type ) { + internal::NFS_Free(p); + } + + //! Largest value for which method allocate might succeed. + size_type max_size() const throw() { + return (~size_t(0)-internal::NFS_MaxLineSize)/sizeof(value_type); + } + + //! Copy-construct value at location pointed to by p. +#if __TBB_ALLOCATOR_CONSTRUCT_VARIADIC + template + void construct(U *p, Args&&... args) + { ::new((void *)p) U(std::forward(args)...); } +#else // __TBB_ALLOCATOR_CONSTRUCT_VARIADIC +#if __TBB_CPP11_RVALUE_REF_PRESENT + void construct( pointer p, value_type&& value ) {::new((void*)(p)) value_type(std::move(value));} +#endif + void construct( pointer p, const value_type& value ) {::new((void*)(p)) value_type(value);} +#endif // __TBB_ALLOCATOR_CONSTRUCT_VARIADIC + + //! Destroy value at location pointed to by p. 
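For context, a minimal sketch of how this allocator is usually dropped into a standard container; the counters vector is hypothetical. The intent of the underlying NFS ("no false sharing") allocation is that the backing store begins on a cache/sector line boundary and does not share its lines with unrelated allocations.

    #include <vector>
    #include "tbb/cache_aligned_allocator.h"

    // The vector's backing storage starts on a cache/sector line boundary,
    // so it is not falsely shared with neighboring allocations.
    std::vector<int, tbb::cache_aligned_allocator<int> > counters(16, 0);

The cache_aligned_resource defined further down plays the same role for C++17 std::pmr containers, forwarding to an upstream memory_resource while fixing up alignment.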
+ void destroy( pointer p ) {p->~value_type();} +}; + +#if _MSC_VER && !defined(__INTEL_COMPILER) + #pragma warning (pop) +#endif // warning 4100 is back + +//! Analogous to std::allocator, as defined in ISO C++ Standard, Section 20.4.1 +/** @ingroup memory_allocation */ +template<> +class cache_aligned_allocator { +public: + typedef void* pointer; + typedef const void* const_pointer; + typedef void value_type; + template struct rebind { + typedef cache_aligned_allocator other; + }; +}; + +template +inline bool operator==( const cache_aligned_allocator&, const cache_aligned_allocator& ) {return true;} + +template +inline bool operator!=( const cache_aligned_allocator&, const cache_aligned_allocator& ) {return false;} + +#if __TBB_CPP17_MEMORY_RESOURCE_PRESENT + +//! C++17 memory resource wrapper to ensure cache line size alignment +class cache_aligned_resource : public std::pmr::memory_resource { +public: + cache_aligned_resource() : cache_aligned_resource(std::pmr::get_default_resource()) {} + explicit cache_aligned_resource(std::pmr::memory_resource* upstream) : m_upstream(upstream) {} + + std::pmr::memory_resource* upstream_resource() const { + return m_upstream; + } + +private: + //! We don't know what memory resource set. Use padding to guarantee alignment + void* do_allocate(size_t bytes, size_t alignment) override { + size_t cache_line_alignment = correct_alignment(alignment); + uintptr_t base = (uintptr_t)m_upstream->allocate(correct_size(bytes) + cache_line_alignment); + __TBB_ASSERT(base != 0, "Upstream resource returned NULL."); +#if _MSC_VER && !defined(__INTEL_COMPILER) + // unary minus operator applied to unsigned type, result still unsigned + #pragma warning(push) + #pragma warning(disable: 4146 4706) +#endif + // Round up to the next cache line (align the base address) + uintptr_t result = (base + cache_line_alignment) & -cache_line_alignment; +#if _MSC_VER && !defined(__INTEL_COMPILER) + #pragma warning(pop) +#endif + // Record where block actually starts. + ((uintptr_t*)result)[-1] = base; + return (void*)result; + } + + void do_deallocate(void* ptr, size_t bytes, size_t alignment) override { + if (ptr) { + // Recover where block actually starts + uintptr_t base = ((uintptr_t*)ptr)[-1]; + m_upstream->deallocate((void*)base, correct_size(bytes) + correct_alignment(alignment)); + } + } + + bool do_is_equal(const std::pmr::memory_resource& other) const noexcept override { + if (this == &other) { return true; } +#if __TBB_USE_OPTIONAL_RTTI + const cache_aligned_resource* other_res = dynamic_cast(&other); + return other_res && (this->upstream_resource() == other_res->upstream_resource()); +#else + return false; +#endif + } + + size_t correct_alignment(size_t alignment) { + __TBB_ASSERT(tbb::internal::is_power_of_two(alignment), "Alignment is not a power of 2"); +#if __TBB_CPP17_HW_INTERFERENCE_SIZE_PRESENT + size_t cache_line_size = std::hardware_destructive_interference_size; +#else + size_t cache_line_size = internal::NFS_GetLineSize(); +#endif + return alignment < cache_line_size ? cache_line_size : alignment; + } + + size_t correct_size(size_t bytes) { + // To handle the case, when small size requested. There could be not + // enough space to store the original pointer. + return bytes < sizeof(uintptr_t) ? 
sizeof(uintptr_t) : bytes; + } + + std::pmr::memory_resource* m_upstream; +}; + +#endif /* __TBB_CPP17_MEMORY_RESOURCE_PRESENT */ + +} // namespace tbb + +#endif /* __TBB_cache_aligned_allocator_H */ + diff --git a/ohos/arm64-v8a/include/tbb/combinable.h b/ohos/arm64-v8a/include/tbb/combinable.h new file mode 100644 index 00000000..aa8d24b1 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/combinable.h @@ -0,0 +1,88 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_combinable_H +#define __TBB_combinable_H + +#define __TBB_combinable_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + +#include "enumerable_thread_specific.h" +#include "cache_aligned_allocator.h" + +namespace tbb { +/** \name combinable + **/ +//@{ +//! Thread-local storage with optional reduction +/** @ingroup containers */ + template + class combinable { + + private: + typedef typename tbb::cache_aligned_allocator my_alloc; + typedef typename tbb::enumerable_thread_specific my_ets_type; + my_ets_type my_ets; + + public: + + combinable() { } + + template + explicit combinable( finit _finit) : my_ets(_finit) { } + + //! destructor + ~combinable() { } + + combinable( const combinable& other) : my_ets(other.my_ets) { } + +#if __TBB_ETS_USE_CPP11 + combinable( combinable&& other) : my_ets( std::move(other.my_ets)) { } +#endif + + combinable & operator=( const combinable & other) { + my_ets = other.my_ets; + return *this; + } + +#if __TBB_ETS_USE_CPP11 + combinable & operator=( combinable && other) { + my_ets=std::move(other.my_ets); + return *this; + } +#endif + + void clear() { my_ets.clear(); } + + T& local() { return my_ets.local(); } + + T& local(bool & exists) { return my_ets.local(exists); } + + // combine_func_t has signature T(T,T) or T(const T&, const T&) + template + T combine(combine_func_t f_combine) { return my_ets.combine(f_combine); } + + // combine_func_t has signature void(T) or void(const T&) + template + void combine_each(combine_func_t f_combine) { my_ets.combine_each(f_combine); } + + }; +} // namespace tbb + +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_combinable_H_include_area + +#endif /* __TBB_combinable_H */ diff --git a/ohos/arm64-v8a/include/tbb/compat/condition_variable b/ohos/arm64-v8a/include/tbb/compat/condition_variable new file mode 100644 index 00000000..a6967817 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/compat/condition_variable @@ -0,0 +1,489 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
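A minimal sketch of the combinable pattern added above: each thread accumulates into its own local() copy and the partials are merged once at the end. The parallel_sum function and the data array are hypothetical; any parallel loop would do.

    #include "tbb/combinable.h"
    #include "tbb/parallel_for.h"

    int parallel_sum(const int* data, int n) {
        tbb::combinable<int> partial([] { return 0; });   // per-thread value, initialized to 0
        tbb::parallel_for(0, n, [&](int i) { partial.local() += data[i]; });
        // Reduce the thread-local partials into a single result.
        return partial.combine([](int a, int b) { return a + b; });
    }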
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +#include "../internal/_deprecated_header_message_guard.h" + +#if !defined(__TBB_show_deprecation_message_condition_variable_H) && defined(__TBB_show_deprecated_header_message) +#define __TBB_show_deprecation_message_condition_variable_H +#pragma message("TBB Warning: tbb/compat/condition_variable is deprecated. For details, please see Deprecated Features appendix in the TBB reference manual.") +#endif + +#if defined(__TBB_show_deprecated_header_message) +#undef __TBB_show_deprecated_header_message +#endif + +#ifndef __TBB_condition_variable_H +#define __TBB_condition_variable_H + +#define __TBB_condition_variable_H_include_area +#include "../internal/_warning_suppress_enable_notice.h" + +#if _WIN32||_WIN64 +#include "../machine/windows_api.h" + +namespace tbb { +namespace interface5 { +namespace internal { +struct condition_variable_using_event +{ + //! Event for blocking waiting threads. + HANDLE event; + //! Protects invariants involving n_waiters, release_count, and epoch. + CRITICAL_SECTION mutex; + //! Number of threads waiting on this condition variable + int n_waiters; + //! Number of threads remaining that should no longer wait on this condition variable. + int release_count; + //! To keep threads from waking up prematurely with earlier signals. + unsigned epoch; +}; +}}} // namespace tbb::interface5::internal + +#ifndef CONDITION_VARIABLE_INIT +typedef void* CONDITION_VARIABLE; +typedef CONDITION_VARIABLE* PCONDITION_VARIABLE; +#endif + +#else /* if not _WIN32||_WIN64 */ +#include // some systems need it for ETIMEDOUT +#include +#if __linux__ +#include +#else /* generic Unix */ +#include +#endif +#endif /* _WIN32||_WIN64 */ + +#include "../tbb_stddef.h" +#include "../mutex.h" +#include "../tbb_thread.h" +#include "../tbb_exception.h" +#include "../tbb_profiling.h" + +namespace tbb { + +namespace interface5 { + +// C++0x standard working draft 30.4.3 +// Lock tag types +struct __TBB_DEPRECATED_IN_VERBOSE_MODE defer_lock_t { }; //! do not acquire ownership of the mutex +struct __TBB_DEPRECATED_IN_VERBOSE_MODE try_to_lock_t { }; //! try to acquire ownership of the mutex without blocking +struct __TBB_DEPRECATED_IN_VERBOSE_MODE adopt_lock_t { }; //! assume the calling thread has already +__TBB_DEPRECATED_IN_VERBOSE_MODE const defer_lock_t defer_lock = {}; +__TBB_DEPRECATED_IN_VERBOSE_MODE const try_to_lock_t try_to_lock = {}; +__TBB_DEPRECATED_IN_VERBOSE_MODE const adopt_lock_t adopt_lock = {}; + +// C++0x standard working draft 30.4.3.1 +//! lock_guard +template +class __TBB_DEPRECATED_IN_VERBOSE_MODE lock_guard : tbb::internal::no_copy { +public: + //! mutex type + typedef M mutex_type; + + //! Constructor + /** precondition: If mutex_type is not a recursive mutex, the calling thread + does not own the mutex m. */ + explicit lock_guard(mutex_type& m) : pm(m) {m.lock();} + + //! Adopt_lock constructor + /** precondition: the calling thread owns the mutex m. */ + lock_guard(mutex_type& m, adopt_lock_t) : pm(m) {} + + //! Destructor + ~lock_guard() { pm.unlock(); } +private: + mutex_type& pm; +}; + +// C++0x standard working draft 30.4.3.2 +//! unique_lock +template +class __TBB_DEPRECATED_IN_VERBOSE_MODE unique_lock : tbb::internal::no_copy { + friend class condition_variable; +public: + typedef M mutex_type; + + // 30.4.3.2.1 construct/copy/destroy + // NB: Without constructors that take an r-value reference to a unique_lock, the following constructor is of little use. + //! 
Constructor + /** postcondition: pm==0 && owns==false */ + unique_lock() : pm(NULL), owns(false) {} + + //! Constructor + /** precondition: if mutex_type is not a recursive mutex, the calling thread + does not own the mutex m. If the precondition is not met, a deadlock occurs. + postcondition: pm==&m and owns==true */ + explicit unique_lock(mutex_type& m) : pm(&m) {m.lock(); owns=true;} + + //! Defer_lock constructor + /** postcondition: pm==&m and owns==false */ + unique_lock(mutex_type& m, defer_lock_t) : pm(&m), owns(false) {} + + //! Try_to_lock constructor + /** precondition: if mutex_type is not a recursive mutex, the calling thread + does not own the mutex m. If the precondition is not met, a deadlock occurs. + postcondition: pm==&m and owns==res where res is the value returned by + the call to m.try_lock(). */ + unique_lock(mutex_type& m, try_to_lock_t) : pm(&m) {owns = m.try_lock();} + + //! Adopt_lock constructor + /** precondition: the calling thread owns the mutex. If it does not, mutex->unlock() would fail. + postcondition: pm==&m and owns==true */ + unique_lock(mutex_type& m, adopt_lock_t) : pm(&m), owns(true) {} + + //! Timed unique_lock acquisition. + /** To avoid requiring support for namespace chrono, this method deviates from the working draft in that + it uses tbb::tick_count::interval_t to specify the time duration. */ + unique_lock(mutex_type& m, const tick_count::interval_t &i) : pm(&m) {owns = try_lock_for( i );} + +#if __TBB_CPP11_RVALUE_REF_PRESENT + //! Move constructor + /** postconditions: pm == src_p.pm and owns == src_p.owns (where src_p is the state of src just prior to this + construction), src.pm == 0 and src.owns == false. */ + unique_lock(unique_lock && src): pm(NULL), owns(false) {this->swap(src);} + + //! Move assignment + /** effects: If owns calls pm->unlock(). + Postconditions: pm == src_p.pm and owns == src_p.owns (where src_p is the state of src just prior to this + assignment), src.pm == 0 and src.owns == false. */ + unique_lock& operator=(unique_lock && src) { + if (owns) + this->unlock(); + pm = NULL; + this->swap(src); + return *this; + } +#endif // __TBB_CPP11_RVALUE_REF_PRESENT + + //! Destructor + ~unique_lock() { if( owns ) pm->unlock(); } + + // 30.4.3.2.2 locking + //! Lock the mutex and own it. + void lock() { + if( pm ) { + if( !owns ) { + pm->lock(); + owns = true; + } else + throw_exception_v4( tbb::internal::eid_possible_deadlock ); + } else + throw_exception_v4( tbb::internal::eid_operation_not_permitted ); + __TBB_ASSERT( owns, NULL ); + } + + //! Try to lock the mutex. + /** If successful, note that this lock owns it. Otherwise, set it false. */ + bool try_lock() { + if( pm ) { + if( !owns ) + owns = pm->try_lock(); + else + throw_exception_v4( tbb::internal::eid_possible_deadlock ); + } else + throw_exception_v4( tbb::internal::eid_operation_not_permitted ); + return owns; + } + + //! Try to lock the mutex. + bool try_lock_for( const tick_count::interval_t &i ); + + //! Unlock the mutex + /** And note that this lock no longer owns it. */ + void unlock() { + if( owns ) { + pm->unlock(); + owns = false; + } else + throw_exception_v4( tbb::internal::eid_operation_not_permitted ); + __TBB_ASSERT( !owns, NULL ); + } + + // 30.4.3.2.3 modifiers + //! Swap the two unique locks + void swap(unique_lock& u) { + mutex_type* t_pm = u.pm; u.pm = pm; pm = t_pm; + bool t_owns = u.owns; u.owns = owns; owns = t_owns; + } + + //! Release control over the mutex. 
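A minimal sketch of the two compat lock wrappers in use over tbb::mutex; the shared counter and both functions are hypothetical. lock_guard is the plain RAII form, while unique_lock adds deferred/try/timed acquisition and is what condition_variable::wait expects.

    #include "tbb/mutex.h"
    #include "tbb/compat/condition_variable"   // provides lock_guard and unique_lock

    tbb::mutex m;
    long shared_counter = 0;                   // illustrative shared state

    void bump() {
        // Locks in the constructor, unlocks on scope exit.
        tbb::interface5::lock_guard<tbb::mutex> guard(m);
        ++shared_counter;
    }

    bool try_bump() {
        // Non-blocking attempt via the try_to_lock tag.
        tbb::interface5::unique_lock<tbb::mutex> lk(m, tbb::interface5::try_to_lock);
        if (!lk.owns_lock()) return false;
        ++shared_counter;
        return true;
    }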
+ mutex_type* release() { + mutex_type* o_pm = pm; + pm = NULL; + owns = false; + return o_pm; + } + + // 30.4.3.2.4 observers + //! Does this lock own the mutex? + bool owns_lock() const { return owns; } + + // TODO: Un-comment 'explicit' when the last non-C++0x compiler support is dropped + //! Does this lock own the mutex? + /*explicit*/ operator bool() const { return owns; } + + //! Return the mutex that this lock currently has. + mutex_type* mutex() const { return pm; } + +private: + mutex_type* pm; + bool owns; +}; + +template +__TBB_DEPRECATED_IN_VERBOSE_MODE bool unique_lock::try_lock_for( const tick_count::interval_t &i) +{ + const int unique_lock_tick = 100; /* microseconds; 0.1 milliseconds */ + // the smallest wait-time is 0.1 milliseconds. + bool res = pm->try_lock(); + int duration_in_micro; + if( !res && (duration_in_micro=int(i.seconds()*1e6))>unique_lock_tick ) { + tick_count::interval_t i_100( double(unique_lock_tick)/1e6 /* seconds */); // 100 microseconds = 0.1*10E-3 + do { + this_tbb_thread::sleep(i_100); // sleep for 100 micro seconds + duration_in_micro -= unique_lock_tick; + res = pm->try_lock(); + } while( !res && duration_in_micro>unique_lock_tick ); + } + return (owns=res); +} + +//! Swap the two unique locks that have the mutexes of same type +template +void swap(unique_lock& x, unique_lock& y) { x.swap( y ); } + +namespace internal { + +#if _WIN32||_WIN64 +union condvar_impl_t { + condition_variable_using_event cv_event; + CONDITION_VARIABLE cv_native; +}; +void __TBB_EXPORTED_FUNC internal_initialize_condition_variable( condvar_impl_t& cv ); +void __TBB_EXPORTED_FUNC internal_destroy_condition_variable( condvar_impl_t& cv ); +void __TBB_EXPORTED_FUNC internal_condition_variable_notify_one( condvar_impl_t& cv ); +void __TBB_EXPORTED_FUNC internal_condition_variable_notify_all( condvar_impl_t& cv ); +bool __TBB_EXPORTED_FUNC internal_condition_variable_wait( condvar_impl_t& cv, mutex* mtx, const tick_count::interval_t* i = NULL ); + +#else /* if !(_WIN32||_WIN64), i.e., POSIX threads */ +typedef pthread_cond_t condvar_impl_t; +#endif + +} // namespace internal + +//! cv_status +/** C++0x standard working draft 30.5 */ +enum cv_status { no_timeout, timeout }; + +//! condition variable +/** C++0x standard working draft 30.5.1 + @ingroup synchronization */ +class __TBB_DEPRECATED_IN_VERBOSE_MODE condition_variable : tbb::internal::no_copy { +public: + //! Constructor + condition_variable() { +#if _WIN32||_WIN64 + internal_initialize_condition_variable( my_cv ); +#else + pthread_cond_init( &my_cv, NULL ); +#endif + } + + //! Destructor + ~condition_variable() { + //precondition: There shall be no thread blocked on *this. +#if _WIN32||_WIN64 + internal_destroy_condition_variable( my_cv ); +#else + pthread_cond_destroy( &my_cv ); +#endif + } + + //! Notify one thread and wake it up + void notify_one() { +#if _WIN32||_WIN64 + internal_condition_variable_notify_one( my_cv ); +#else + pthread_cond_signal( &my_cv ); +#endif + } + + //! Notify all threads + void notify_all() { +#if _WIN32||_WIN64 + internal_condition_variable_notify_all( my_cv ); +#else + pthread_cond_broadcast( &my_cv ); +#endif + } + + //! Release the mutex associated with the lock and wait on this condition variable + void wait(unique_lock& lock); + + //! Wait on this condition variable while pred is false + template + void wait(unique_lock& lock, Predicate pred) { + while( !pred() ) + wait( lock ); + } + + //! 
Timed version of wait() + cv_status wait_for(unique_lock& lock, const tick_count::interval_t &i ); + + //! Timed version of the predicated wait + /** The loop terminates when pred() returns true or when the time duration specified by rel_time (i) has elapsed. */ + template + bool wait_for(unique_lock& lock, const tick_count::interval_t &i, Predicate pred) + { + while( !pred() ) { + cv_status st = wait_for( lock, i ); + if( st==timeout ) + return pred(); + } + return true; + } + + // C++0x standard working draft. 30.2.3 + typedef internal::condvar_impl_t* native_handle_type; + + native_handle_type native_handle() { return (native_handle_type) &my_cv; } + +private: + internal::condvar_impl_t my_cv; +}; + + +#if _WIN32||_WIN64 +inline void condition_variable::wait( unique_lock& lock ) +{ + __TBB_ASSERT( lock.owns, NULL ); + lock.owns = false; + if( !internal_condition_variable_wait( my_cv, lock.mutex() ) ) { + int ec = GetLastError(); + // on Windows 7, SleepConditionVariableCS() may return ERROR_TIMEOUT while the doc says it returns WAIT_TIMEOUT + __TBB_ASSERT_EX( ec!=WAIT_TIMEOUT&&ec!=ERROR_TIMEOUT, NULL ); + lock.owns = true; + throw_exception_v4( tbb::internal::eid_condvar_wait_failed ); + } + lock.owns = true; +} + +inline cv_status condition_variable::wait_for( unique_lock& lock, const tick_count::interval_t& i ) +{ + cv_status rc = no_timeout; + __TBB_ASSERT( lock.owns, NULL ); + lock.owns = false; + // condvar_wait could be SleepConditionVariableCS (or SleepConditionVariableSRW) or our own pre-vista cond_var_wait() + if( !internal_condition_variable_wait( my_cv, lock.mutex(), &i ) ) { + int ec = GetLastError(); + if( ec==WAIT_TIMEOUT || ec==ERROR_TIMEOUT ) + rc = timeout; + else { + lock.owns = true; + throw_exception_v4( tbb::internal::eid_condvar_wait_failed ); + } + } + lock.owns = true; + return rc; +} + +#else /* !(_WIN32||_WIN64) */ +inline void condition_variable::wait( unique_lock& lock ) +{ + __TBB_ASSERT( lock.owns, NULL ); + lock.owns = false; + if( pthread_cond_wait( &my_cv, lock.mutex()->native_handle() ) ) { + lock.owns = true; + throw_exception_v4( tbb::internal::eid_condvar_wait_failed ); + } + // upon successful return, the mutex has been locked and is owned by the calling thread. 
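To round out the interface above, a minimal producer/consumer sketch using the predicated wait; the ready flag and the two functions are hypothetical. Note that this compat condition_variable pairs specifically with unique_lock over tbb::mutex.

    #include "tbb/mutex.h"
    #include "tbb/compat/condition_variable"

    tbb::mutex mtx;
    tbb::interface5::condition_variable cv;
    bool ready = false;                        // illustrative condition

    void consumer() {
        tbb::interface5::unique_lock<tbb::mutex> lk(mtx);
        cv.wait(lk, [] { return ready; });     // re-checks the predicate after each wakeup
        // ... consume the data published by the producer ...
    }

    void producer() {
        {
            tbb::interface5::lock_guard<tbb::mutex> guard(mtx);
            ready = true;
        }
        cv.notify_one();                       // wake one waiter; notify_all wakes them all
    }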
+ lock.owns = true; +} + +inline cv_status condition_variable::wait_for( unique_lock& lock, const tick_count::interval_t& i ) +{ +#if __linux__ + struct timespec req; + double sec = i.seconds(); + clock_gettime( CLOCK_REALTIME, &req ); + req.tv_sec += static_cast(sec); + req.tv_nsec += static_cast( (sec - static_cast(sec))*1e9 ); +#else /* generic Unix */ + struct timeval tv; + struct timespec req; + double sec = i.seconds(); + int status = gettimeofday(&tv, NULL); + __TBB_ASSERT_EX( status==0, "gettimeofday failed" ); + req.tv_sec = tv.tv_sec + static_cast(sec); + req.tv_nsec = tv.tv_usec*1000 + static_cast( (sec - static_cast(sec))*1e9 ); +#endif /*(choice of OS) */ + if( req.tv_nsec>=1e9 ) { + req.tv_sec += 1; + req.tv_nsec -= static_cast(1e9); + } + __TBB_ASSERT( 0<=req.tv_nsec && req.tv_nsec<1e9, NULL ); + + int ec; + cv_status rc = no_timeout; + __TBB_ASSERT( lock.owns, NULL ); + lock.owns = false; + if( ( ec=pthread_cond_timedwait( &my_cv, lock.mutex()->native_handle(), &req ) ) ) { + if( ec==ETIMEDOUT ) + rc = timeout; + else { + __TBB_ASSERT( lock.try_lock()==false, NULL ); + lock.owns = true; + throw_exception_v4( tbb::internal::eid_condvar_wait_failed ); + } + } + lock.owns = true; + return rc; +} +#endif /* !(_WIN32||_WIN64) */ + +} // namespace interface5 + +__TBB_DEFINE_PROFILING_SET_NAME(interface5::condition_variable) + +} // namespace tbb + +#if TBB_IMPLEMENT_CPP0X + +namespace std { + +using tbb::interface5::defer_lock_t; +using tbb::interface5::try_to_lock_t; +using tbb::interface5::adopt_lock_t; +using tbb::interface5::defer_lock; +using tbb::interface5::try_to_lock; +using tbb::interface5::adopt_lock; +using tbb::interface5::lock_guard; +using tbb::interface5::unique_lock; +using tbb::interface5::swap; /* this is for void std::swap(unique_lock&,unique_lock&) */ +using tbb::interface5::condition_variable; +using tbb::interface5::cv_status; +using tbb::interface5::timeout; +using tbb::interface5::no_timeout; + +} // namespace std + +#endif /* TBB_IMPLEMENT_CPP0X */ + +#include "../internal/_warning_suppress_disable_notice.h" +#undef __TBB_condition_variable_H_include_area + +#endif /* __TBB_condition_variable_H */ diff --git a/ohos/arm64-v8a/include/tbb/compat/ppl.h b/ohos/arm64-v8a/include/tbb/compat/ppl.h new file mode 100644 index 00000000..f441b038 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/compat/ppl.h @@ -0,0 +1,75 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#include "../internal/_deprecated_header_message_guard.h" + +#if !defined(__TBB_show_deprecation_message_ppl_H) && defined(__TBB_show_deprecated_header_message) +#define __TBB_show_deprecation_message_ppl_H +#pragma message("TBB Warning: tbb/compat/ppl.h is deprecated. 
For details, please see Deprecated Features appendix in the TBB reference manual.") +#endif + +#if defined(__TBB_show_deprecated_header_message) +#undef __TBB_show_deprecated_header_message +#endif + +#ifndef __TBB_compat_ppl_H +#define __TBB_compat_ppl_H + +#define __TBB_ppl_H_include_area +#include "../internal/_warning_suppress_enable_notice.h" + +#include "../task_group.h" +#include "../parallel_invoke.h" +#include "../parallel_for_each.h" +#include "../parallel_for.h" +#include "../tbb_exception.h" +#include "../critical_section.h" +#include "../reader_writer_lock.h" +#include "../combinable.h" + +namespace Concurrency { + +#if __TBB_TASK_GROUP_CONTEXT + using tbb::task_handle; + using tbb::task_group_status; + using tbb::task_group; + using tbb::structured_task_group; + using tbb::invalid_multiple_scheduling; + using tbb::missing_wait; + using tbb::make_task; + + using tbb::not_complete; + using tbb::complete; + using tbb::canceled; + + using tbb::is_current_task_group_canceling; +#endif /* __TBB_TASK_GROUP_CONTEXT */ + + using tbb::parallel_invoke; + using tbb::strict_ppl::parallel_for; + using tbb::parallel_for_each; + using tbb::critical_section; + using tbb::reader_writer_lock; + using tbb::combinable; + + using tbb::improper_lock; + +} // namespace Concurrency + +#include "../internal/_warning_suppress_disable_notice.h" +#undef __TBB_ppl_H_include_area + +#endif /* __TBB_compat_ppl_H */ diff --git a/ohos/arm64-v8a/include/tbb/compat/thread b/ohos/arm64-v8a/include/tbb/compat/thread new file mode 100644 index 00000000..8b8a13d7 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/compat/thread @@ -0,0 +1,73 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#include "../internal/_deprecated_header_message_guard.h" + +#if !defined(__TBB_show_deprecation_message_thread_H) && defined(__TBB_show_deprecated_header_message) +#define __TBB_show_deprecation_message_thread_H +#pragma message("TBB Warning: tbb/compat/thread is deprecated. 
For details, please see Deprecated Features appendix in the TBB reference manual.") +#endif + +#if defined(__TBB_show_deprecated_header_message) +#undef __TBB_show_deprecated_header_message +#endif + +#ifndef __TBB_thread_H +#define __TBB_thread_H + +#define __TBB_thread_H_include_area +#include "../internal/_warning_suppress_enable_notice.h" + +#include "../tbb_config.h" + +#if TBB_IMPLEMENT_CPP0X + +#include "../tbb_thread.h" + +namespace std { + +typedef tbb::tbb_thread thread; + +namespace this_thread { + using tbb::this_tbb_thread::get_id; + using tbb::this_tbb_thread::yield; + + __TBB_DEPRECATED_IN_VERBOSE_MODE inline void sleep_for(const tbb::tick_count::interval_t& rel_time) { + tbb::internal::thread_sleep_v3( rel_time ); + } +} + +} // namespace std + +#else /* TBB_IMPLEMENT_CPP0X */ + +#define __TBB_COMPAT_THREAD_RECURSION_PROTECTOR 1 +#include +#undef __TBB_COMPAT_THREAD_RECURSION_PROTECTOR + +#endif /* TBB_IMPLEMENT_CPP0X */ + +#include "../internal/_warning_suppress_disable_notice.h" +#undef __TBB_thread_H_include_area + +#else /* __TBB_thread_H */ + +#if __TBB_COMPAT_THREAD_RECURSION_PROTECTOR +#error The tbb/compat/thread header attempts to include itself. \ + Please make sure that {TBBROOT}/include/tbb/compat is NOT in include paths. +#endif + +#endif /* __TBB_thread_H */ diff --git a/ohos/arm64-v8a/include/tbb/compat/tuple b/ohos/arm64-v8a/include/tbb/compat/tuple new file mode 100644 index 00000000..c568ef3d --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/compat/tuple @@ -0,0 +1,501 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#include "../internal/_deprecated_header_message_guard.h" + +#if !defined(__TBB_show_deprecation_message_tuple_H) && defined(__TBB_show_deprecated_header_message) +#define __TBB_show_deprecation_message_tuple_H +#pragma message("TBB Warning: tbb/compat/tuple is deprecated. For details, please see Deprecated Features appendix in the TBB reference manual.") +#endif + +#if defined(__TBB_show_deprecated_header_message) +#undef __TBB_show_deprecated_header_message +#endif + +#ifndef __TBB_tuple_H +#define __TBB_tuple_H + +#define __TBB_tuple_H_include_area +#include "../internal/_warning_suppress_enable_notice.h" + +#include +#include "../tbb_stddef.h" + +// build preprocessor variables for varying number of arguments +// Need the leading comma so the empty __TBB_T_PACK will not cause a syntax error. 
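Before the macro machinery below, a minimal sketch of what the compat tuple looks like from user code; the element types and values are arbitrary. Elements are read through the free get() defined near the end of this header, and tbb::flow picks these types up when no std::tuple is available.

    #include "tbb/compat/tuple"

    tbb::interface5::tuple<int, double, const char*> t(1, 2.5, "abc");
    int         i = tbb::interface5::get<0>(t);   // 1
    double      d = tbb::interface5::get<1>(t);   // 2.5
    const char* s = tbb::interface5::get<2>(t);   // "abc"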
+#if __TBB_VARIADIC_MAX <= 5 +#define __TBB_T_PACK +#define __TBB_U_PACK +#define __TBB_TYPENAME_T_PACK +#define __TBB_TYPENAME_U_PACK +#define __TBB_NULL_TYPE_PACK +#define __TBB_REF_T_PARAM_PACK +#define __TBB_CONST_REF_T_PARAM_PACK +#define __TBB_T_PARAM_LIST_PACK +#define __TBB_CONST_NULL_REF_PACK +// +#elif __TBB_VARIADIC_MAX == 6 +#define __TBB_T_PACK ,__T5 +#define __TBB_U_PACK ,__U5 +#define __TBB_TYPENAME_T_PACK , typename __T5 +#define __TBB_TYPENAME_U_PACK , typename __U5 +#define __TBB_NULL_TYPE_PACK , null_type +#define __TBB_REF_T_PARAM_PACK ,__T5& t5 +#define __TBB_CONST_REF_T_PARAM_PACK ,const __T5& t5 +#define __TBB_T_PARAM_LIST_PACK ,t5 +#define __TBB_CONST_NULL_REF_PACK , const null_type& +// +#elif __TBB_VARIADIC_MAX == 7 +#define __TBB_T_PACK ,__T5, __T6 +#define __TBB_U_PACK ,__U5, __U6 +#define __TBB_TYPENAME_T_PACK , typename __T5 , typename __T6 +#define __TBB_TYPENAME_U_PACK , typename __U5 , typename __U6 +#define __TBB_NULL_TYPE_PACK , null_type, null_type +#define __TBB_REF_T_PARAM_PACK ,__T5& t5, __T6& t6 +#define __TBB_CONST_REF_T_PARAM_PACK ,const __T5& t5, const __T6& t6 +#define __TBB_T_PARAM_LIST_PACK ,t5 ,t6 +#define __TBB_CONST_NULL_REF_PACK , const null_type&, const null_type& +// +#elif __TBB_VARIADIC_MAX == 8 +#define __TBB_T_PACK ,__T5, __T6, __T7 +#define __TBB_U_PACK ,__U5, __U6, __U7 +#define __TBB_TYPENAME_T_PACK , typename __T5 , typename __T6, typename __T7 +#define __TBB_TYPENAME_U_PACK , typename __U5 , typename __U6, typename __U7 +#define __TBB_NULL_TYPE_PACK , null_type, null_type, null_type +#define __TBB_REF_T_PARAM_PACK ,__T5& t5, __T6& t6, __T7& t7 +#define __TBB_CONST_REF_T_PARAM_PACK , const __T5& t5, const __T6& t6, const __T7& t7 +#define __TBB_T_PARAM_LIST_PACK ,t5 ,t6 ,t7 +#define __TBB_CONST_NULL_REF_PACK , const null_type&, const null_type&, const null_type& +// +#elif __TBB_VARIADIC_MAX == 9 +#define __TBB_T_PACK ,__T5, __T6, __T7, __T8 +#define __TBB_U_PACK ,__U5, __U6, __U7, __U8 +#define __TBB_TYPENAME_T_PACK , typename __T5, typename __T6, typename __T7, typename __T8 +#define __TBB_TYPENAME_U_PACK , typename __U5, typename __U6, typename __U7, typename __U8 +#define __TBB_NULL_TYPE_PACK , null_type, null_type, null_type, null_type +#define __TBB_REF_T_PARAM_PACK ,__T5& t5, __T6& t6, __T7& t7, __T8& t8 +#define __TBB_CONST_REF_T_PARAM_PACK , const __T5& t5, const __T6& t6, const __T7& t7, const __T8& t8 +#define __TBB_T_PARAM_LIST_PACK ,t5 ,t6 ,t7 ,t8 +#define __TBB_CONST_NULL_REF_PACK , const null_type&, const null_type&, const null_type&, const null_type& +// +#elif __TBB_VARIADIC_MAX >= 10 +#define __TBB_T_PACK ,__T5, __T6, __T7, __T8, __T9 +#define __TBB_U_PACK ,__U5, __U6, __U7, __U8, __U9 +#define __TBB_TYPENAME_T_PACK , typename __T5, typename __T6, typename __T7, typename __T8, typename __T9 +#define __TBB_TYPENAME_U_PACK , typename __U5, typename __U6, typename __U7, typename __U8, typename __U9 +#define __TBB_NULL_TYPE_PACK , null_type, null_type, null_type, null_type, null_type +#define __TBB_REF_T_PARAM_PACK ,__T5& t5, __T6& t6, __T7& t7, __T8& t8, __T9& t9 +#define __TBB_CONST_REF_T_PARAM_PACK , const __T5& t5, const __T6& t6, const __T7& t7, const __T8& t8, const __T9& t9 +#define __TBB_T_PARAM_LIST_PACK ,t5 ,t6 ,t7 ,t8 ,t9 +#define __TBB_CONST_NULL_REF_PACK , const null_type&, const null_type&, const null_type&, const null_type&, const null_type& +#endif + + + +namespace tbb { +namespace interface5 { + +namespace internal { +struct null_type { }; +} +using internal::null_type; + +// tuple forward 
declaration +template = 6 +, typename __T5=null_type +#if __TBB_VARIADIC_MAX >= 7 +, typename __T6=null_type +#if __TBB_VARIADIC_MAX >= 8 +, typename __T7=null_type +#if __TBB_VARIADIC_MAX >= 9 +, typename __T8=null_type +#if __TBB_VARIADIC_MAX >= 10 +, typename __T9=null_type +#endif +#endif +#endif +#endif +#endif +> +class tuple; + +namespace internal { + +// const null_type temp +inline const null_type cnull() { return null_type(); } + +// cons forward declaration +template struct cons; + +// type of a component of the cons +template +struct component { + typedef typename __T::tail_type next; + typedef typename component<__N-1,next>::type type; +}; + +template +struct component<0,__T> { + typedef typename __T::head_type type; +}; + +template<> +struct component<0,null_type> { + typedef null_type type; +}; + +// const version of component + +template +struct component<__N, const __T> +{ + typedef typename __T::tail_type next; + typedef const typename component<__N-1,next>::type type; +}; + +template +struct component<0, const __T> +{ + typedef const typename __T::head_type type; +}; + + +// helper class for getting components of cons +template< int __N> +struct get_helper { +template +inline static typename component<__N, cons<__HT,__TT> >::type& get(cons<__HT,__TT>& ti) { + return get_helper<__N-1>::get(ti.tail); +} +template +inline static typename component<__N, cons<__HT,__TT> >::type const& get(const cons<__HT,__TT>& ti) { + return get_helper<__N-1>::get(ti.tail); +} +}; + +template<> +struct get_helper<0> { +template +inline static typename component<0, cons<__HT,__TT> >::type& get(cons<__HT,__TT>& ti) { + return ti.head; +} +template +inline static typename component<0, cons<__HT,__TT> >::type const& get(const cons<__HT,__TT>& ti) { + return ti.head; +} +}; + +// traits adaptor +template +struct tuple_traits { + typedef cons <__T0, typename tuple_traits<__T1, __T2, __T3, __T4 __TBB_T_PACK , null_type>::U > U; +}; + +template +struct tuple_traits<__T0, null_type, null_type, null_type, null_type __TBB_NULL_TYPE_PACK > { + typedef cons<__T0, null_type> U; +}; + +template<> +struct tuple_traits { + typedef null_type U; +}; + + +// core cons defs +template +struct cons{ + + typedef __HT head_type; + typedef __TT tail_type; + + head_type head; + tail_type tail; + + static const int length = 1 + tail_type::length; + + // default constructors + explicit cons() : head(), tail() { } + + // non-default constructors + cons(head_type& h, const tail_type& t) : head(h), tail(t) { } + + template + cons(const __T0& t0, const __T1& t1, const __T2& t2, const __T3& t3, const __T4& t4 __TBB_CONST_REF_T_PARAM_PACK) : + head(t0), tail(t1, t2, t3, t4 __TBB_T_PARAM_LIST_PACK, cnull()) { } + + template + cons(__T0& t0, __T1& t1, __T2& t2, __T3& t3, __T4& t4 __TBB_REF_T_PARAM_PACK) : + head(t0), tail(t1, t2, t3, t4 __TBB_T_PARAM_LIST_PACK , cnull()) { } + + template + cons(const cons<__HT1,__TT1>& other) : head(other.head), tail(other.tail) { } + + cons& operator=(const cons& other) { head = other.head; tail = other.tail; return *this; } + + friend bool operator==(const cons& me, const cons& other) { + return me.head == other.head && me.tail == other.tail; + } + friend bool operator<(const cons& me, const cons& other) { + return me.head < other.head || (!(other.head < me.head) && me.tail < other.tail); + } + friend bool operator>(const cons& me, const cons& other) { return other=(const cons& me, const cons& other) { return !(meother); } + + template + friend bool operator==(const cons<__HT,__TT>& me, const 
cons<__HT1,__TT1>& other) { + return me.head == other.head && me.tail == other.tail; + } + + template + friend bool operator<(const cons<__HT,__TT>& me, const cons<__HT1,__TT1>& other) { + return me.head < other.head || (!(other.head < me.head) && me.tail < other.tail); + } + + template + friend bool operator>(const cons<__HT,__TT>& me, const cons<__HT1,__TT1>& other) { return other + friend bool operator!=(const cons<__HT,__TT>& me, const cons<__HT1,__TT1>& other) { return !(me==other); } + + template + friend bool operator>=(const cons<__HT,__TT>& me, const cons<__HT1,__TT1>& other) { return !(me + friend bool operator<=(const cons<__HT,__TT>& me, const cons<__HT1,__TT1>& other) { return !(me>other); } + + +}; // cons + + +template +struct cons<__HT,null_type> { + + typedef __HT head_type; + typedef null_type tail_type; + + head_type head; + + static const int length = 1; + + // default constructor + cons() : head() { /*std::cout << "default constructor 1\n";*/ } + + cons(const null_type&, const null_type&, const null_type&, const null_type&, const null_type& __TBB_CONST_NULL_REF_PACK) : head() { /*std::cout << "default constructor 2\n";*/ } + + // non-default constructor + template + cons(__T1& t1, const null_type&, const null_type&, const null_type&, const null_type& __TBB_CONST_NULL_REF_PACK) : head(t1) { /*std::cout << "non-default a1, t1== " << t1 << "\n";*/} + + cons(head_type& h, const null_type& = null_type() ) : head(h) { } + cons(const head_type& t0, const null_type&, const null_type&, const null_type&, const null_type& __TBB_CONST_NULL_REF_PACK) : head(t0) { } + + // converting constructor + template + cons(__HT1 h1, const null_type&, const null_type&, const null_type&, const null_type& __TBB_CONST_NULL_REF_PACK) : head(h1) { } + + // copy constructor + template + cons( const cons<__HT1, null_type>& other) : head(other.head) { } + + // assignment operator + cons& operator=(const cons& other) { head = other.head; return *this; } + + friend bool operator==(const cons& me, const cons& other) { return me.head == other.head; } + friend bool operator<(const cons& me, const cons& other) { return me.head < other.head; } + friend bool operator>(const cons& me, const cons& other) { return otherother); } + friend bool operator>=(const cons& me, const cons& other) {return !(me + friend bool operator==(const cons<__HT,null_type>& me, const cons<__HT1,null_type>& other) { + return me.head == other.head; + } + + template + friend bool operator<(const cons<__HT,null_type>& me, const cons<__HT1,null_type>& other) { + return me.head < other.head; + } + + template + friend bool operator>(const cons<__HT,null_type>& me, const cons<__HT1,null_type>& other) { return other + friend bool operator!=(const cons<__HT,null_type>& me, const cons<__HT1,null_type>& other) { return !(me==other); } + + template + friend bool operator<=(const cons<__HT,null_type>& me, const cons<__HT1,null_type>& other) { return !(me>other); } + + template + friend bool operator>=(const cons<__HT,null_type>& me, const cons<__HT1,null_type>& other) { return !(me +struct cons { typedef null_type tail_type; static const int length = 0; }; + +// wrapper for default constructor +template +inline const __T wrap_dcons(__T*) { return __T(); } + +} // namespace internal + +// tuple definition +template +class __TBB_DEPRECATED_IN_VERBOSE_MODE tuple : public internal::tuple_traits<__T0, __T1, __T2, __T3, __T4 __TBB_T_PACK >::U { + // friends + template friend class tuple_size; + template friend struct tuple_element; + + // stl components 
+ typedef tuple<__T0,__T1,__T2,__T3,__T4 __TBB_T_PACK > value_type; + typedef value_type *pointer; + typedef const value_type *const_pointer; + typedef value_type &reference; + typedef const value_type &const_reference; + typedef size_t size_type; + + typedef typename internal::tuple_traits<__T0,__T1,__T2,__T3, __T4 __TBB_T_PACK >::U my_cons; + +public: + __TBB_DEPRECATED_IN_VERBOSE_MODE tuple(const __T0& t0=internal::wrap_dcons((__T0*)NULL) + ,const __T1& t1=internal::wrap_dcons((__T1*)NULL) + ,const __T2& t2=internal::wrap_dcons((__T2*)NULL) + ,const __T3& t3=internal::wrap_dcons((__T3*)NULL) + ,const __T4& t4=internal::wrap_dcons((__T4*)NULL) +#if __TBB_VARIADIC_MAX >= 6 + ,const __T5& t5=internal::wrap_dcons((__T5*)NULL) +#if __TBB_VARIADIC_MAX >= 7 + ,const __T6& t6=internal::wrap_dcons((__T6*)NULL) +#if __TBB_VARIADIC_MAX >= 8 + ,const __T7& t7=internal::wrap_dcons((__T7*)NULL) +#if __TBB_VARIADIC_MAX >= 9 + ,const __T8& t8=internal::wrap_dcons((__T8*)NULL) +#if __TBB_VARIADIC_MAX >= 10 + ,const __T9& t9=internal::wrap_dcons((__T9*)NULL) +#endif +#endif +#endif +#endif +#endif + ) : + my_cons(t0,t1,t2,t3,t4 __TBB_T_PARAM_LIST_PACK) { } + + template + struct internal_tuple_element { + typedef typename internal::component<__N,my_cons>::type type; + }; + + template + typename internal_tuple_element<__N>::type& get() { return internal::get_helper<__N>::get(*this); } + + template + typename internal_tuple_element<__N>::type const& get() const { return internal::get_helper<__N>::get(*this); } + + template + tuple& operator=(const internal::cons<__U1,__U2>& other) { + my_cons::operator=(other); + return *this; + } + + template + tuple& operator=(const std::pair<__U1,__U2>& other) { + // __TBB_ASSERT(tuple_size::value == 2, "Invalid size for pair to tuple assignment"); + this->head = other.first; + this->tail.head = other.second; + return *this; + } + + friend bool operator==(const tuple& me, const tuple& other) {return static_cast(me)==(other);} + friend bool operator<(const tuple& me, const tuple& other) {return static_cast(me)<(other);} + friend bool operator>(const tuple& me, const tuple& other) {return static_cast(me)>(other);} + friend bool operator!=(const tuple& me, const tuple& other) {return static_cast(me)!=(other);} + friend bool operator>=(const tuple& me, const tuple& other) {return static_cast(me)>=(other);} + friend bool operator<=(const tuple& me, const tuple& other) {return static_cast(me)<=(other);} + +}; // tuple + +// empty tuple +template<> +class __TBB_DEPRECATED_IN_VERBOSE_MODE tuple : public null_type { +}; + +// helper classes + +template < typename __T> +class tuple_size { +public: + static const size_t value = 1 + tuple_size::value; +}; + +template <> +class tuple_size > { +public: + static const size_t value = 0; +}; + +template <> +class tuple_size { +public: + static const size_t value = 0; +}; + +template +struct tuple_element { + typedef typename internal::component<__N, typename __T::my_cons>::type type; +}; + +template +inline static typename tuple_element<__N,tuple<__T0,__T1,__T2,__T3,__T4 __TBB_T_PACK > >::type& + get(tuple<__T0,__T1,__T2,__T3,__T4 __TBB_T_PACK >& t) { return internal::get_helper<__N>::get(t); } + +template +inline static typename tuple_element<__N,tuple<__T0,__T1,__T2,__T3,__T4 __TBB_T_PACK > >::type const& + get(const tuple<__T0,__T1,__T2,__T3,__T4 __TBB_T_PACK >& t) { return internal::get_helper<__N>::get(t); } + +} // interface5 +} // tbb + +#if !__TBB_CPP11_TUPLE_PRESENT +namespace tbb { + namespace flow { + using 
tbb::interface5::tuple; + using tbb::interface5::tuple_size; + using tbb::interface5::tuple_element; + using tbb::interface5::get; + } +} +#endif + +#undef __TBB_T_PACK +#undef __TBB_U_PACK +#undef __TBB_TYPENAME_T_PACK +#undef __TBB_TYPENAME_U_PACK +#undef __TBB_NULL_TYPE_PACK +#undef __TBB_REF_T_PARAM_PACK +#undef __TBB_CONST_REF_T_PARAM_PACK +#undef __TBB_T_PARAM_LIST_PACK +#undef __TBB_CONST_NULL_REF_PACK + +#include "../internal/_warning_suppress_disable_notice.h" +#undef __TBB_tuple_H_include_area + +#endif /* __TBB_tuple_H */ diff --git a/ohos/arm64-v8a/include/tbb/concurrent_hash_map.h b/ohos/arm64-v8a/include/tbb/concurrent_hash_map.h new file mode 100644 index 00000000..80bad97b --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/concurrent_hash_map.h @@ -0,0 +1,1650 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_concurrent_hash_map_H +#define __TBB_concurrent_hash_map_H + +#define __TBB_concurrent_hash_map_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + +#include "tbb_stddef.h" +#include +#include // Need std::pair +#include // Need std::memset +#include __TBB_STD_SWAP_HEADER + +#include "tbb_allocator.h" +#include "spin_rw_mutex.h" +#include "atomic.h" +#include "tbb_exception.h" +#include "tbb_profiling.h" +#include "aligned_space.h" +#include "internal/_tbb_hash_compare_impl.h" +#include "internal/_template_helpers.h" +#include "internal/_allocator_traits.h" +#if __TBB_INITIALIZER_LISTS_PRESENT +#include +#endif +#if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS +#include +#endif +#if __TBB_STATISTICS +#include +#endif +#if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_TUPLE_PRESENT +// Definition of __TBB_CPP11_RVALUE_REF_PRESENT includes __TBB_CPP11_TUPLE_PRESENT +// for most of platforms, tuple present macro was added for logical correctness +#include +#endif + +namespace tbb { + +namespace interface5 { + + template, typename A = tbb_allocator > > + class concurrent_hash_map; + + //! @cond INTERNAL + namespace internal { + using namespace tbb::internal; + + + //! Type of a hash code. + typedef size_t hashcode_t; + //! Node base type + struct hash_map_node_base : tbb::internal::no_copy { + //! Mutex type + typedef spin_rw_mutex mutex_t; + //! Scoped lock type for mutex + typedef mutex_t::scoped_lock scoped_t; + //! Next node in chain + hash_map_node_base *next; + mutex_t mutex; + }; + //! Incompleteness flag value + static hash_map_node_base *const rehash_req = reinterpret_cast(size_t(3)); + //! Rehashed empty bucket flag + static hash_map_node_base *const empty_rehashed = reinterpret_cast(size_t(0)); + //! base class of concurrent_hash_map + class hash_map_base { + public: + //! Size type + typedef size_t size_type; + //! Type of a hash code. + typedef size_t hashcode_t; + //! Segment index type + typedef size_t segment_index_t; + //! Node base type + typedef hash_map_node_base node_base; + //! Bucket type + struct bucket : tbb::internal::no_copy { + //! 
Mutex type for buckets + typedef spin_rw_mutex mutex_t; + //! Scoped lock type for mutex + typedef mutex_t::scoped_lock scoped_t; + mutex_t mutex; + node_base *node_list; + }; + //! Count of segments in the first block + static size_type const embedded_block = 1; + //! Count of segments in the first block + static size_type const embedded_buckets = 1< my_mask; + //! Segment pointers table. Also prevents false sharing between my_mask and my_size + segments_table_t my_table; + //! Size of container in stored items + atomic my_size; // It must be in separate cache line from my_mask due to performance effects + //! Zero segment + bucket my_embedded_segment[embedded_buckets]; +#if __TBB_STATISTICS + atomic my_info_resizes; // concurrent ones + mutable atomic my_info_restarts; // race collisions + atomic my_info_rehashes; // invocations of rehash_bucket +#endif + //! Constructor + hash_map_base() { + std::memset(my_table, 0, sizeof(my_table)); + my_mask = 0; + my_size = 0; + std::memset(my_embedded_segment, 0, sizeof(my_embedded_segment)); + for( size_type i = 0; i < embedded_block; i++ ) // fill the table + my_table[i] = my_embedded_segment + segment_base(i); + my_mask = embedded_buckets - 1; + __TBB_ASSERT( embedded_block <= first_block, "The first block number must include embedded blocks"); +#if __TBB_STATISTICS + my_info_resizes = 0; // concurrent ones + my_info_restarts = 0; // race collisions + my_info_rehashes = 0; // invocations of rehash_bucket +#endif + } + + //! @return segment index of given index in the array + static segment_index_t segment_index_of( size_type index ) { + return segment_index_t( __TBB_Log2( index|1 ) ); + } + + //! @return the first array index of given segment + static segment_index_t segment_base( segment_index_t k ) { + return (segment_index_t(1)<(ptr) > uintptr_t(63); + } + + //! Initialize buckets + static void init_buckets( segment_ptr_t ptr, size_type sz, bool is_initial ) { + if( is_initial ) std::memset( static_cast(ptr), 0, sz*sizeof(bucket) ); + else for(size_type i = 0; i < sz; i++, ptr++) { + *reinterpret_cast(&ptr->mutex) = 0; + ptr->node_list = rehash_req; + } + } + + //! Add node @arg n to bucket @arg b + static void add_to_bucket( bucket *b, node_base *n ) { + __TBB_ASSERT(b->node_list != rehash_req, NULL); + n->next = b->node_list; + b->node_list = n; // its under lock and flag is set + } + + //! Exception safety helper + struct enable_segment_failsafe : tbb::internal::no_copy { + segment_ptr_t *my_segment_ptr; + enable_segment_failsafe(segments_table_t &table, segment_index_t k) : my_segment_ptr(&table[k]) {} + ~enable_segment_failsafe() { + if( my_segment_ptr ) *my_segment_ptr = 0; // indicate no allocation in progress + } + }; + + //! 
Enable segment + template + void enable_segment( segment_index_t k, const Allocator& allocator, bool is_initial = false ) { + typedef typename tbb::internal::allocator_rebind::type bucket_allocator_type; + typedef tbb::internal::allocator_traits bucket_allocator_traits; + bucket_allocator_type bucket_allocator(allocator); + __TBB_ASSERT( k, "Zero segment must be embedded" ); + enable_segment_failsafe watchdog( my_table, k ); + size_type sz; + __TBB_ASSERT( !is_valid(my_table[k]), "Wrong concurrent assignment"); + if( k >= first_block ) { + sz = segment_size( k ); + segment_ptr_t ptr = bucket_allocator_traits::allocate(bucket_allocator, sz); + init_buckets( ptr, sz, is_initial ); + itt_hide_store_word( my_table[k], ptr ); + sz <<= 1;// double it to get entire capacity of the container + } else { // the first block + __TBB_ASSERT( k == embedded_block, "Wrong segment index" ); + sz = segment_size( first_block ); + segment_ptr_t ptr = bucket_allocator_traits::allocate(bucket_allocator, sz - embedded_buckets); + init_buckets( ptr, sz - embedded_buckets, is_initial ); + ptr -= segment_base(embedded_block); + for(segment_index_t i = embedded_block; i < first_block; i++) // calc the offsets + itt_hide_store_word( my_table[i], ptr + segment_base(i) ); + } + itt_store_word_with_release( my_mask, sz-1 ); + watchdog.my_segment_ptr = 0; + } + + template + void delete_segment(segment_index_t s, const Allocator& allocator) { + typedef typename tbb::internal::allocator_rebind::type bucket_allocator_type; + typedef tbb::internal::allocator_traits bucket_allocator_traits; + bucket_allocator_type bucket_allocator(allocator); + segment_ptr_t buckets_ptr = my_table[s]; + size_type sz = segment_size( s ? s : 1 ); + + if( s >= first_block) // the first segment or the next + bucket_allocator_traits::deallocate(bucket_allocator, buckets_ptr, sz); + else if( s == embedded_block && embedded_block != first_block ) + bucket_allocator_traits::deallocate(bucket_allocator, buckets_ptr, + segment_size(first_block) - embedded_buckets); + if( s >= embedded_block ) my_table[s] = 0; + } + + //! Get bucket by (masked) hashcode + bucket *get_bucket( hashcode_t h ) const throw() { // TODO: add throw() everywhere? + segment_index_t s = segment_index_of( h ); + h -= segment_base(s); + segment_ptr_t seg = my_table[s]; + __TBB_ASSERT( is_valid(seg), "hashcode must be cut by valid mask for allocated segments" ); + return &seg[h]; + } + + // internal serial rehashing helper + void mark_rehashed_levels( hashcode_t h ) throw () { + segment_index_t s = segment_index_of( h ); + while( segment_ptr_t seg = my_table[++s] ) + if( seg[h].node_list == rehash_req ) { + seg[h].node_list = empty_rehashed; + mark_rehashed_levels( h + ((hashcode_t)1<node_list) != rehash_req ) + { +#if __TBB_STATISTICS + my_info_restarts++; // race collisions +#endif + return true; + } + } + return false; + } + + //! Insert a node and check for load factor. @return segment index to enable. 
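Stepping back from the bucket-level machinery, a minimal sketch of the accessor-based interface this base class ultimately supports; the word-count functions and the key/value types are hypothetical. An accessor holds exclusive access to its element for as long as it is alive, a const_accessor holds shared access.

    #include <string>
    #include "tbb/concurrent_hash_map.h"

    typedef tbb::concurrent_hash_map<std::string, int> string_table;

    void count_word(string_table& table, const std::string& word) {
        string_table::accessor a;      // exclusive access to one element
        table.insert(a, word);         // inserts {word, 0} if the key is absent
        a->second += 1;                // accessor acts like a pointer to the pair
    }                                  // element lock released when 'a' goes out of scope

    bool lookup(const string_table& table, const std::string& word, int& out) {
        string_table::const_accessor a;          // shared (read) access
        if (!table.find(a, word)) return false;
        out = a->second;
        return true;
    }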
+ segment_index_t insert_new_node( bucket *b, node_base *n, hashcode_t mask ) { + size_type sz = ++my_size; // prefix form is to enforce allocation after the first item inserted + add_to_bucket( b, n ); + // check load factor + if( sz >= mask ) { // TODO: add custom load_factor + segment_index_t new_seg = __TBB_Log2( mask+1 ); //optimized segment_index_of + __TBB_ASSERT( is_valid(my_table[new_seg-1]), "new allocations must not publish new mask until segment has allocated"); + static const segment_ptr_t is_allocating = (segment_ptr_t)2; + if( !itt_hide_load_word(my_table[new_seg]) + && as_atomic(my_table[new_seg]).compare_and_swap(is_allocating, NULL) == NULL ) + return new_seg; // The value must be processed + } + return 0; + } + + //! Prepare enough segments for number of buckets + template + void reserve(size_type buckets, const Allocator& allocator) { + if( !buckets-- ) return; + bool is_initial = !my_size; + for( size_type m = my_mask; buckets > m; m = my_mask ) + enable_segment( segment_index_of( m+1 ), allocator, is_initial ); + } + //! Swap hash_map_bases + void internal_swap(hash_map_base &table) { + using std::swap; + swap(this->my_mask, table.my_mask); + swap(this->my_size, table.my_size); + for(size_type i = 0; i < embedded_buckets; i++) + swap(this->my_embedded_segment[i].node_list, table.my_embedded_segment[i].node_list); + for(size_type i = embedded_block; i < pointers_per_table; i++) + swap(this->my_table[i], table.my_table[i]); + } + +#if __TBB_CPP11_RVALUE_REF_PRESENT + void internal_move(hash_map_base&& other) { + my_mask = other.my_mask; + other.my_mask = embedded_buckets - 1; + my_size = other.my_size; + other.my_size = 0; + + for(size_type i = 0; i < embedded_buckets; ++i) { + my_embedded_segment[i].node_list = other.my_embedded_segment[i].node_list; + other.my_embedded_segment[i].node_list = NULL; + } + + for(size_type i = embedded_block; i < pointers_per_table; ++i) { + my_table[i] = other.my_table[i]; + other.my_table[i] = NULL; + } + } +#endif // __TBB_CPP11_RVALUE_REF_PRESENT + }; + + template + class hash_map_range; + + //! Meets requirements of a forward iterator for STL */ + /** Value is either the T or const T type of the container. + @ingroup containers */ + template + class hash_map_iterator + : public std::iterator + { + typedef Container map_type; + typedef typename Container::node node; + typedef hash_map_base::node_base node_base; + typedef hash_map_base::bucket bucket; + + template + friend bool operator==( const hash_map_iterator& i, const hash_map_iterator& j ); + + template + friend bool operator!=( const hash_map_iterator& i, const hash_map_iterator& j ); + + template + friend ptrdiff_t operator-( const hash_map_iterator& i, const hash_map_iterator& j ); + + template + friend class hash_map_iterator; + + template + friend class hash_map_range; + + void advance_to_next_bucket() { // TODO?: refactor to iterator_base class + size_t k = my_index+1; + __TBB_ASSERT( my_bucket, "advancing an invalid iterator?"); + while( k <= my_map->my_mask ) { + // Following test uses 2's-complement wizardry + if( k&(k-2) ) // not the beginning of a segment + ++my_bucket; + else my_bucket = my_map->get_bucket( k ); + my_node = static_cast( my_bucket->node_list ); + if( hash_map_base::is_valid(my_node) ) { + my_index = k; return; + } + ++k; + } + my_bucket = 0; my_node = 0; my_index = k; // the end + } +#if !defined(_MSC_VER) || defined(__INTEL_COMPILER) + template + friend class interface5::concurrent_hash_map; +#else + public: // workaround +#endif + //! 
concurrent_hash_map over which we are iterating. + const Container *my_map; + + //! Index in hash table for current item + size_t my_index; + + //! Pointer to bucket + const bucket *my_bucket; + + //! Pointer to node that has current item + node *my_node; + + hash_map_iterator( const Container &map, size_t index, const bucket *b, node_base *n ); + + public: + //! Construct undefined iterator + hash_map_iterator(): my_map(), my_index(), my_bucket(), my_node() {} + hash_map_iterator( const hash_map_iterator &other ) : + my_map(other.my_map), + my_index(other.my_index), + my_bucket(other.my_bucket), + my_node(other.my_node) + {} + + hash_map_iterator& operator=( const hash_map_iterator &other ) { + my_map = other.my_map; + my_index = other.my_index; + my_bucket = other.my_bucket; + my_node = other.my_node; + return *this; + } + Value& operator*() const { + __TBB_ASSERT( hash_map_base::is_valid(my_node), "iterator uninitialized or at end of container?" ); + return my_node->value(); + } + Value* operator->() const {return &operator*();} + hash_map_iterator& operator++(); + + //! Post increment + hash_map_iterator operator++(int) { + hash_map_iterator old(*this); + operator++(); + return old; + } + }; + + template + hash_map_iterator::hash_map_iterator( const Container &map, size_t index, const bucket *b, node_base *n ) : + my_map(&map), + my_index(index), + my_bucket(b), + my_node( static_cast(n) ) + { + if( b && !hash_map_base::is_valid(n) ) + advance_to_next_bucket(); + } + + template + hash_map_iterator& hash_map_iterator::operator++() { + my_node = static_cast( my_node->next ); + if( !my_node ) advance_to_next_bucket(); + return *this; + } + + template + bool operator==( const hash_map_iterator& i, const hash_map_iterator& j ) { + return i.my_node == j.my_node && i.my_map == j.my_map; + } + + template + bool operator!=( const hash_map_iterator& i, const hash_map_iterator& j ) { + return i.my_node != j.my_node || i.my_map != j.my_map; + } + + //! Range class used with concurrent_hash_map + /** @ingroup containers */ + template + class hash_map_range { + typedef typename Iterator::map_type map_type; + Iterator my_begin; + Iterator my_end; + mutable Iterator my_midpoint; + size_t my_grainsize; + //! Set my_midpoint to point approximately half way between my_begin and my_end. + void set_midpoint() const; + template friend class hash_map_range; + public: + //! Type for size of a range + typedef std::size_t size_type; + typedef typename Iterator::value_type value_type; + typedef typename Iterator::reference reference; + typedef typename Iterator::difference_type difference_type; + typedef Iterator iterator; + + //! True if range is empty. + bool empty() const {return my_begin==my_end;} + + //! True if range can be partitioned into two subranges. + bool is_divisible() const { + return my_midpoint!=my_end; + } + //! Split range. + hash_map_range( hash_map_range& r, split ) : + my_end(r.my_end), + my_grainsize(r.my_grainsize) + { + r.my_end = my_begin = r.my_midpoint; + __TBB_ASSERT( !empty(), "Splitting despite the range is not divisible" ); + __TBB_ASSERT( !r.empty(), "Splitting despite the range is not divisible" ); + set_midpoint(); + r.set_midpoint(); + } + //! type conversion + template + hash_map_range( hash_map_range& r) : + my_begin(r.my_begin), + my_end(r.my_end), + my_midpoint(r.my_midpoint), + my_grainsize(r.my_grainsize) + {} + //! 
Init range with container and grainsize specified + hash_map_range( const map_type &map, size_type grainsize_ = 1 ) : + my_begin( Iterator( map, 0, map.my_embedded_segment, map.my_embedded_segment->node_list ) ), + my_end( Iterator( map, map.my_mask + 1, 0, 0 ) ), + my_grainsize( grainsize_ ) + { + __TBB_ASSERT( grainsize_>0, "grainsize must be positive" ); + set_midpoint(); + } + const Iterator& begin() const {return my_begin;} + const Iterator& end() const {return my_end;} + //! The grain size for this range. + size_type grainsize() const {return my_grainsize;} + }; + + template + void hash_map_range::set_midpoint() const { + // Split by groups of nodes + size_t m = my_end.my_index-my_begin.my_index; + if( m > my_grainsize ) { + m = my_begin.my_index + m/2u; + hash_map_base::bucket *b = my_begin.my_map->get_bucket(m); + my_midpoint = Iterator(*my_begin.my_map,m,b,b->node_list); + } else { + my_midpoint = my_end; + } + __TBB_ASSERT( my_begin.my_index <= my_midpoint.my_index, + "my_begin is after my_midpoint" ); + __TBB_ASSERT( my_midpoint.my_index <= my_end.my_index, + "my_midpoint is after my_end" ); + __TBB_ASSERT( my_begin != my_midpoint || my_begin == my_end, + "[my_begin, my_midpoint) range should not be empty" ); + } + + } // internal +//! @endcond + +#if _MSC_VER && !defined(__INTEL_COMPILER) + // Suppress "conditional expression is constant" warning. + #pragma warning( push ) + #pragma warning( disable: 4127 ) +#endif + +//! Unordered map from Key to T. +/** concurrent_hash_map is associative container with concurrent access. + +@par Compatibility + The class meets all Container Requirements from C++ Standard (See ISO/IEC 14882:2003(E), clause 23.1). + +@par Exception Safety + - Hash function is not permitted to throw an exception. User-defined types Key and T are forbidden from throwing an exception in destructors. + - If exception happens during insert() operations, it has no effect (unless exception raised by HashCompare::hash() function during grow_segment). + - If exception happens during operator=() operation, the container can have a part of source items, and methods size() and empty() can return wrong results. + +@par Changes since TBB 2.1 + - Replaced internal algorithm and data structure. Patent is pending. 
+ - Added buckets number argument for constructor + +@par Changes since TBB 2.0 + - Fixed exception-safety + - Added template argument for allocator + - Added allocator argument in constructors + - Added constructor from a range of iterators + - Added several new overloaded insert() methods + - Added get_allocator() + - Added swap() + - Added count() + - Added overloaded erase(accessor &) and erase(const_accessor&) + - Added equal_range() [const] + - Added [const_]pointer, [const_]reference, and allocator_type types + - Added global functions: operator==(), operator!=(), and swap() + + @ingroup containers */ +template +class concurrent_hash_map : protected internal::hash_map_base { + template + friend class internal::hash_map_iterator; + + template + friend class internal::hash_map_range; + +public: + typedef Key key_type; + typedef T mapped_type; + typedef std::pair value_type; + typedef hash_map_base::size_type size_type; + typedef ptrdiff_t difference_type; + typedef value_type *pointer; + typedef const value_type *const_pointer; + typedef value_type &reference; + typedef const value_type &const_reference; + typedef internal::hash_map_iterator iterator; + typedef internal::hash_map_iterator const_iterator; + typedef internal::hash_map_range range_type; + typedef internal::hash_map_range const_range_type; + typedef Allocator allocator_type; + +protected: + friend class const_accessor; + class node; + typedef typename tbb::internal::allocator_rebind::type node_allocator_type; + typedef tbb::internal::allocator_traits node_allocator_traits; + node_allocator_type my_allocator; + HashCompare my_hash_compare; + + class node : public node_base { + tbb::aligned_space my_value; + public: + value_type* storage() { return my_value.begin(); } + value_type& value() { return *storage(); } + }; + + void delete_node( node_base *n ) { + node_allocator_traits::destroy(my_allocator, static_cast(n)->storage()); + node_allocator_traits::destroy(my_allocator, static_cast(n)); + node_allocator_traits::deallocate(my_allocator, static_cast(n), 1); + } + + struct node_scoped_guard : tbb::internal::no_copy { + node* my_node; + node_allocator_type& my_alloc; + + node_scoped_guard(node* n, node_allocator_type& alloc) : my_node(n), my_alloc(alloc) {} + ~node_scoped_guard() { + if(my_node) { + node_allocator_traits::destroy(my_alloc, my_node); + node_allocator_traits::deallocate(my_alloc, my_node, 1); + } + } + void dismiss() { my_node = NULL; } + }; + +#if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT + template + static node* create_node(node_allocator_type& allocator, Args&&... 
args) +#else + template + static node* create_node(node_allocator_type& allocator, __TBB_FORWARDING_REF(Arg1) arg1, __TBB_FORWARDING_REF(Arg2) arg2) +#endif + { + node* node_ptr = node_allocator_traits::allocate(allocator, 1); + node_scoped_guard guard(node_ptr, allocator); + node_allocator_traits::construct(allocator, node_ptr); +#if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT + node_allocator_traits::construct(allocator, node_ptr->storage(), std::forward(args)...); +#else + node_allocator_traits::construct(allocator, node_ptr->storage(), tbb::internal::forward(arg1), tbb::internal::forward(arg2)); +#endif + guard.dismiss(); + return node_ptr; + } + + static node* allocate_node_copy_construct(node_allocator_type& allocator, const Key &key, const T * t){ + return create_node(allocator, key, *t); + } + +#if __TBB_CPP11_RVALUE_REF_PRESENT + static node* allocate_node_move_construct(node_allocator_type& allocator, const Key &key, const T * t){ + return create_node(allocator, key, std::move(*const_cast(t))); + } +#endif + + static node* allocate_node_default_construct(node_allocator_type& allocator, const Key &key, const T * ){ +#if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_TUPLE_PRESENT + // Emplace construct an empty T object inside the pair + return create_node(allocator, std::piecewise_construct, + std::forward_as_tuple(key), std::forward_as_tuple()); +#else + // Use of a temporary object is impossible, because create_node takes a non-const reference. + // copy-initialization is possible because T is already required to be CopyConstructible. + T obj = T(); + return create_node(allocator, key, tbb::internal::move(obj)); +#endif + } + + static node* do_not_allocate_node(node_allocator_type& , const Key &, const T * ){ + __TBB_ASSERT(false,"this dummy function should not be called"); + return NULL; + } + + node *search_bucket( const key_type &key, bucket *b ) const { + node *n = static_cast( b->node_list ); + while( is_valid(n) && !my_hash_compare.equal(key, n->value().first) ) + n = static_cast( n->next ); + __TBB_ASSERT(n != internal::rehash_req, "Search can be executed only for rehashed bucket"); + return n; + } + + //! bucket accessor is to find, rehash, acquire a lock, and access a bucket + class bucket_accessor : public bucket::scoped_t { + bucket *my_b; + public: + bucket_accessor( concurrent_hash_map *base, const hashcode_t h, bool writer = false ) { acquire( base, h, writer ); } + //! find a bucket by masked hashcode, optionally rehash, and acquire the lock + inline void acquire( concurrent_hash_map *base, const hashcode_t h, bool writer = false ) { + my_b = base->get_bucket( h ); + // TODO: actually, notification is unnecessary here, just hiding double-check + if( itt_load_word_with_acquire(my_b->node_list) == internal::rehash_req + && try_acquire( my_b->mutex, /*write=*/true ) ) + { + if( my_b->node_list == internal::rehash_req ) base->rehash_bucket( my_b, h ); //recursive rehashing + } + else bucket::scoped_t::acquire( my_b->mutex, writer ); + __TBB_ASSERT( my_b->node_list != internal::rehash_req, NULL); + } + //! check whether bucket is locked for write + bool is_writer() { return bucket::scoped_t::is_writer; } + //! 
get bucket pointer + bucket *operator() () { return my_b; } + }; + + // TODO refactor to hash_base + void rehash_bucket( bucket *b_new, const hashcode_t h ) { + __TBB_ASSERT( *(intptr_t*)(&b_new->mutex), "b_new must be locked (for write)"); + __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" ); + __TBB_store_with_release(b_new->node_list, internal::empty_rehashed); // mark rehashed + hashcode_t mask = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit +#if __TBB_STATISTICS + my_info_rehashes++; // invocations of rehash_bucket +#endif + + bucket_accessor b_old( this, h & mask ); + + mask = (mask<<1) | 1; // get full mask for new bucket + __TBB_ASSERT( (mask&(mask+1))==0 && (h & mask) == h, NULL ); + restart: + for( node_base **p = &b_old()->node_list, *n = __TBB_load_with_acquire(*p); is_valid(n); n = *p ) { + hashcode_t c = my_hash_compare.hash( static_cast(n)->value().first ); +#if TBB_USE_ASSERT + hashcode_t bmask = h & (mask>>1); + bmask = bmask==0? 1 : ( 1u<<(__TBB_Log2( bmask )+1 ) ) - 1; // minimal mask of parent bucket + __TBB_ASSERT( (c & bmask) == (h & bmask), "hash() function changed for key in table" ); +#endif + if( (c & mask) == h ) { + if( !b_old.is_writer() ) + if( !b_old.upgrade_to_writer() ) { + goto restart; // node ptr can be invalid due to concurrent erase + } + *p = n->next; // exclude from b_old + add_to_bucket( b_new, n ); + } else p = &n->next; // iterate to next item + } + } + + struct call_clear_on_leave { + concurrent_hash_map* my_ch_map; + call_clear_on_leave( concurrent_hash_map* a_ch_map ) : my_ch_map(a_ch_map) {} + void dismiss() {my_ch_map = 0;} + ~call_clear_on_leave(){ + if (my_ch_map){ + my_ch_map->clear(); + } + } + }; +public: + + class accessor; + //! Combines data access, locking, and garbage collection. + class const_accessor : private node::scoped_t /*which derived from no_copy*/ { + friend class concurrent_hash_map; + friend class accessor; + public: + //! Type of value + typedef const typename concurrent_hash_map::value_type value_type; + + //! True if result is empty. + bool empty() const { return !my_node; } + + //! Set to null + void release() { + if( my_node ) { + node::scoped_t::release(); + my_node = 0; + } + } + + //! Return reference to associated value in hash table. + const_reference operator*() const { + __TBB_ASSERT( my_node, "attempt to dereference empty accessor" ); + return my_node->value(); + } + + //! Return pointer to associated value in hash table. + const_pointer operator->() const { + return &operator*(); + } + + //! Create empty result + const_accessor() : my_node(NULL) {} + + //! Destroy result after releasing the underlying reference. + ~const_accessor() { + my_node = NULL; // scoped lock's release() is called in its destructor + } + protected: + bool is_writer() { return node::scoped_t::is_writer; } + node *my_node; + hashcode_t my_hash; + }; + + //! Allows write access to elements and combines data access, locking, and garbage collection. + class accessor: public const_accessor { + public: + //! Type of value + typedef typename concurrent_hash_map::value_type value_type; + + //! Return reference to associated value in hash table. + reference operator*() const { + __TBB_ASSERT( this->my_node, "attempt to dereference empty accessor" ); + return this->my_node->value(); + } + + //! Return pointer to associated value in hash table. + pointer operator->() const { + return &operator*(); + } + }; + + //! Construct empty table. 
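// --- Illustrative usage sketch (not part of the header): the accessor /
// const_accessor types declared above combine element lookup with a per-element
// lock that is held for the accessor's lifetime.  The include path, type alias,
// and word-count example are assumptions made purely for illustration.
#include "tbb/concurrent_hash_map.h"
#include <string>

typedef tbb::concurrent_hash_map<std::string, int> counter_table_t;

// Insert-or-increment under a write lock (accessor).
inline void count_word( counter_table_t &table, const std::string &word ) {
    counter_table_t::accessor a;        // grants write access while alive
    table.insert( a, word );            // value-initializes the int if the key is new
    ++a->second;                        // operator-> yields std::pair<const Key, T>*
}                                       // lock released when 'a' is destroyed

// Read-only lookup under a read lock (const_accessor).
inline bool lookup_word( const counter_table_t &table, const std::string &word, int &out ) {
    counter_table_t::const_accessor ca; // read lock only
    if( !table.find( ca, word ) )
        return false;                   // ca stays empty(); nothing is locked
    out = ca->second;
    return true;
}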
+ explicit concurrent_hash_map( const allocator_type &a = allocator_type() ) + : internal::hash_map_base(), my_allocator(a) + {} + + explicit concurrent_hash_map( const HashCompare& compare, const allocator_type& a = allocator_type() ) + : internal::hash_map_base(), my_allocator(a), my_hash_compare(compare) + {} + + //! Construct empty table with n preallocated buckets. This number serves also as initial concurrency level. + concurrent_hash_map( size_type n, const allocator_type &a = allocator_type() ) + : internal::hash_map_base(), my_allocator(a) + { + reserve( n, my_allocator ); + } + + concurrent_hash_map( size_type n, const HashCompare& compare, const allocator_type& a = allocator_type() ) + : internal::hash_map_base(), my_allocator(a), my_hash_compare(compare) + { + reserve( n, my_allocator ); + } + + //! Copy constructor + concurrent_hash_map( const concurrent_hash_map &table ) + : internal::hash_map_base(), + my_allocator(node_allocator_traits::select_on_container_copy_construction(table.get_allocator())) + { + call_clear_on_leave scope_guard(this); + internal_copy(table); + scope_guard.dismiss(); + } + + concurrent_hash_map( const concurrent_hash_map &table, const allocator_type &a) + : internal::hash_map_base(), my_allocator(a) + { + call_clear_on_leave scope_guard(this); + internal_copy(table); + scope_guard.dismiss(); + } + +#if __TBB_CPP11_RVALUE_REF_PRESENT + //! Move constructor + concurrent_hash_map( concurrent_hash_map &&table ) + : internal::hash_map_base(), my_allocator(std::move(table.get_allocator())) + { + internal_move(std::move(table)); + } + + //! Move constructor + concurrent_hash_map( concurrent_hash_map &&table, const allocator_type &a ) + : internal::hash_map_base(), my_allocator(a) + { + if (a == table.get_allocator()){ + internal_move(std::move(table)); + }else{ + call_clear_on_leave scope_guard(this); + internal_copy(std::make_move_iterator(table.begin()), std::make_move_iterator(table.end()), table.size()); + scope_guard.dismiss(); + } + } +#endif //__TBB_CPP11_RVALUE_REF_PRESENT + + //! Construction with copying iteration range and given allocator instance + template + concurrent_hash_map( I first, I last, const allocator_type &a = allocator_type() ) + : internal::hash_map_base(), my_allocator(a) + { + call_clear_on_leave scope_guard(this); + internal_copy(first, last, std::distance(first, last)); + scope_guard.dismiss(); + } + + template + concurrent_hash_map( I first, I last, const HashCompare& compare, const allocator_type& a = allocator_type() ) + : internal::hash_map_base(), my_allocator(a), my_hash_compare(compare) + { + call_clear_on_leave scope_guard(this); + internal_copy(first, last, std::distance(first, last)); + scope_guard.dismiss(); + } + +#if __TBB_INITIALIZER_LISTS_PRESENT + //! Construct empty table with n preallocated buckets. This number serves also as initial concurrency level. + concurrent_hash_map( std::initializer_list il, const allocator_type &a = allocator_type() ) + : internal::hash_map_base(), my_allocator(a) + { + call_clear_on_leave scope_guard(this); + internal_copy(il.begin(), il.end(), il.size()); + scope_guard.dismiss(); + } + + concurrent_hash_map( std::initializer_list il, const HashCompare& compare, const allocator_type& a = allocator_type() ) + : internal::hash_map_base(), my_allocator(a), my_hash_compare(compare) + { + call_clear_on_leave scope_guard(this); + internal_copy(il.begin(), il.end(), il.size()); + scope_guard.dismiss(); + } + +#endif //__TBB_INITIALIZER_LISTS_PRESENT + + //! 
Assignment + concurrent_hash_map& operator=( const concurrent_hash_map &table ) { + if( this!=&table ) { + typedef typename node_allocator_traits::propagate_on_container_copy_assignment pocca_type; + clear(); + tbb::internal::allocator_copy_assignment(my_allocator, table.my_allocator, pocca_type()); + internal_copy(table); + } + return *this; + } + +#if __TBB_CPP11_RVALUE_REF_PRESENT + //! Move Assignment + concurrent_hash_map& operator=( concurrent_hash_map &&table ) { + if(this != &table) { + typedef typename node_allocator_traits::propagate_on_container_move_assignment pocma_type; + internal_move_assign(std::move(table), pocma_type()); + } + return *this; + } +#endif //__TBB_CPP11_RVALUE_REF_PRESENT + +#if __TBB_INITIALIZER_LISTS_PRESENT + //! Assignment + concurrent_hash_map& operator=( std::initializer_list il ) { + clear(); + internal_copy(il.begin(), il.end(), il.size()); + return *this; + } +#endif //__TBB_INITIALIZER_LISTS_PRESENT + + + //! Rehashes and optionally resizes the whole table. + /** Useful to optimize performance before or after concurrent operations. + Also enables using of find() and count() concurrent methods in serial context. */ + void rehash(size_type n = 0); + + //! Clear table + void clear(); + + //! Clear table and destroy it. + ~concurrent_hash_map() { clear(); } + + //------------------------------------------------------------------------ + // Parallel algorithm support + //------------------------------------------------------------------------ + range_type range( size_type grainsize=1 ) { + return range_type( *this, grainsize ); + } + const_range_type range( size_type grainsize=1 ) const { + return const_range_type( *this, grainsize ); + } + + //------------------------------------------------------------------------ + // STL support - not thread-safe methods + //------------------------------------------------------------------------ + iterator begin() { return iterator( *this, 0, my_embedded_segment, my_embedded_segment->node_list ); } + iterator end() { return iterator( *this, 0, 0, 0 ); } + const_iterator begin() const { return const_iterator( *this, 0, my_embedded_segment, my_embedded_segment->node_list ); } + const_iterator end() const { return const_iterator( *this, 0, 0, 0 ); } + std::pair equal_range( const Key& key ) { return internal_equal_range( key, end() ); } + std::pair equal_range( const Key& key ) const { return internal_equal_range( key, end() ); } + + //! Number of items in table. + size_type size() const { return my_size; } + + //! True if size()==0. + bool empty() const { return my_size == 0; } + + //! Upper bound on size. + size_type max_size() const {return (~size_type(0))/sizeof(node);} + + //! Returns the current number of buckets + size_type bucket_count() const { return my_mask+1; } + + //! return allocator object + allocator_type get_allocator() const { return this->my_allocator; } + + //! swap two instances. Iterators are invalidated + void swap( concurrent_hash_map &table ); + + //------------------------------------------------------------------------ + // concurrent map operations + //------------------------------------------------------------------------ + + //! Return count of items (0 or 1) + size_type count( const Key &key ) const { + return const_cast(this)->lookup(/*insert*/false, key, NULL, NULL, /*write=*/false, &do_not_allocate_node ); + } + + //! Find item and acquire a read lock on the item. + /** Return true if item is found, false otherwise. 
*/ + bool find( const_accessor &result, const Key &key ) const { + result.release(); + return const_cast(this)->lookup(/*insert*/false, key, NULL, &result, /*write=*/false, &do_not_allocate_node ); + } + + //! Find item and acquire a write lock on the item. + /** Return true if item is found, false otherwise. */ + bool find( accessor &result, const Key &key ) { + result.release(); + return lookup(/*insert*/false, key, NULL, &result, /*write=*/true, &do_not_allocate_node ); + } + + //! Insert item (if not already present) and acquire a read lock on the item. + /** Returns true if item is new. */ + bool insert( const_accessor &result, const Key &key ) { + result.release(); + return lookup(/*insert*/true, key, NULL, &result, /*write=*/false, &allocate_node_default_construct ); + } + + //! Insert item (if not already present) and acquire a write lock on the item. + /** Returns true if item is new. */ + bool insert( accessor &result, const Key &key ) { + result.release(); + return lookup(/*insert*/true, key, NULL, &result, /*write=*/true, &allocate_node_default_construct ); + } + + //! Insert item by copying if there is no such key present already and acquire a read lock on the item. + /** Returns true if item is new. */ + bool insert( const_accessor &result, const value_type &value ) { + result.release(); + return lookup(/*insert*/true, value.first, &value.second, &result, /*write=*/false, &allocate_node_copy_construct ); + } + + //! Insert item by copying if there is no such key present already and acquire a write lock on the item. + /** Returns true if item is new. */ + bool insert( accessor &result, const value_type &value ) { + result.release(); + return lookup(/*insert*/true, value.first, &value.second, &result, /*write=*/true, &allocate_node_copy_construct ); + } + + //! Insert item by copying if there is no such key present already + /** Returns true if item is inserted. */ + bool insert( const value_type &value ) { + return lookup(/*insert*/true, value.first, &value.second, NULL, /*write=*/false, &allocate_node_copy_construct ); + } + +#if __TBB_CPP11_RVALUE_REF_PRESENT + //! Insert item by copying if there is no such key present already and acquire a read lock on the item. + /** Returns true if item is new. */ + bool insert( const_accessor &result, value_type && value ) { + return generic_move_insert(result, std::move(value)); + } + + //! Insert item by copying if there is no such key present already and acquire a write lock on the item. + /** Returns true if item is new. */ + bool insert( accessor &result, value_type && value ) { + return generic_move_insert(result, std::move(value)); + } + + //! Insert item by copying if there is no such key present already + /** Returns true if item is inserted. */ + bool insert( value_type && value ) { + return generic_move_insert(accessor_not_used(), std::move(value)); + } + +#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT + //! Insert item by copying if there is no such key present already and acquire a read lock on the item. + /** Returns true if item is new. */ + template + bool emplace( const_accessor &result, Args&&... args ) { + return generic_emplace(result, std::forward(args)...); + } + + //! Insert item by copying if there is no such key present already and acquire a write lock on the item. + /** Returns true if item is new. */ + template + bool emplace( accessor &result, Args&&... args ) { + return generic_emplace(result, std::forward(args)...); + } + + //! 
Insert item by copying if there is no such key present already + /** Returns true if item is inserted. */ + template + bool emplace( Args&&... args ) { + return generic_emplace(accessor_not_used(), std::forward(args)...); + } +#endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT +#endif //__TBB_CPP11_RVALUE_REF_PRESENT + + //! Insert range [first, last) + template + void insert( I first, I last ) { + for ( ; first != last; ++first ) + insert( *first ); + } + +#if __TBB_INITIALIZER_LISTS_PRESENT + //! Insert initializer list + void insert( std::initializer_list il ) { + insert( il.begin(), il.end() ); + } +#endif //__TBB_INITIALIZER_LISTS_PRESENT + + //! Erase item. + /** Return true if item was erased by particularly this call. */ + bool erase( const Key& key ); + + //! Erase item by const_accessor. + /** Return true if item was erased by particularly this call. */ + bool erase( const_accessor& item_accessor ) { + return exclude( item_accessor ); + } + + //! Erase item by accessor. + /** Return true if item was erased by particularly this call. */ + bool erase( accessor& item_accessor ) { + return exclude( item_accessor ); + } + +protected: + //! Insert or find item and optionally acquire a lock on the item. + bool lookup(bool op_insert, const Key &key, const T *t, const_accessor *result, bool write, node* (*allocate_node)(node_allocator_type& , const Key &, const T * ), node *tmp_n = 0 ) ; + + struct accessor_not_used { void release(){}}; + friend const_accessor* accessor_location( accessor_not_used const& ){ return NULL;} + friend const_accessor* accessor_location( const_accessor & a ) { return &a;} + + friend bool is_write_access_needed( accessor const& ) { return true;} + friend bool is_write_access_needed( const_accessor const& ) { return false;} + friend bool is_write_access_needed( accessor_not_used const& ) { return false;} + +#if __TBB_CPP11_RVALUE_REF_PRESENT + template + bool generic_move_insert( Accessor && result, value_type && value ) { + result.release(); + return lookup(/*insert*/true, value.first, &value.second, accessor_location(result), is_write_access_needed(result), &allocate_node_move_construct ); + } + +#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT + template + bool generic_emplace( Accessor && result, Args &&... args ) { + result.release(); + node * node_ptr = create_node(my_allocator, std::forward(args)...); + return lookup(/*insert*/true, node_ptr->value().first, NULL, accessor_location(result), is_write_access_needed(result), &do_not_allocate_node, node_ptr ); + } +#endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT +#endif //__TBB_CPP11_RVALUE_REF_PRESENT + + //! delete item by accessor + bool exclude( const_accessor &item_accessor ); + + //! Returns an iterator for an item defined by the key, or for the next item after it (if upper==true) + template + std::pair internal_equal_range( const Key& key, I end ) const; + + //! Copy "source" to *this, where *this must start out empty. 
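// --- Illustrative usage sketch (not part of the header): the insert / emplace /
// erase overloads declared above.  emplace() requires C++11; the type alias and
// sample data below are assumptions made purely for illustration.
#include "tbb/concurrent_hash_map.h"
#include <string>
#include <utility>
#include <vector>

typedef tbb::concurrent_hash_map<int, std::string> name_table_t;

inline void exercise_modifiers( name_table_t &m ) {
    m.insert( name_table_t::value_type( 1, "one" ) );  // copy-insert, no accessor held
    m.emplace( 2, "two" );                             // constructs the pair in place

    std::vector< std::pair<int, std::string> > batch;
    batch.push_back( std::make_pair( 3, std::string("three") ) );
    m.insert( batch.begin(), batch.end() );            // range insert

    m.erase( 1 );                                      // erase by key

    name_table_t::accessor a;
    if( m.find( a, 2 ) )
        m.erase( a );                                  // erase while holding the write lock
}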
+ void internal_copy( const concurrent_hash_map& source ); + + template + void internal_copy( I first, I last, size_type reserve_size ); + +#if __TBB_CPP11_RVALUE_REF_PRESENT + // A compile-time dispatch to allow move assignment of containers with non-movable value_type if POCMA is true_type + void internal_move_assign(concurrent_hash_map&& other, tbb::internal::traits_true_type) { + tbb::internal::allocator_move_assignment(my_allocator, other.my_allocator, tbb::internal::traits_true_type()); + internal_move(std::move(other)); + } + + void internal_move_assign(concurrent_hash_map&& other, tbb::internal::traits_false_type) { + if (this->my_allocator == other.my_allocator) { + internal_move(std::move(other)); + } else { + //do per element move + internal_copy(std::make_move_iterator(other.begin()), std::make_move_iterator(other.end()), other.size()); + } + } +#endif + + //! Fast find when no concurrent erasure is used. For internal use inside TBB only! + /** Return pointer to item with given key, or NULL if no such item exists. + Must not be called concurrently with erasure operations. */ + const_pointer internal_fast_find( const Key& key ) const { + hashcode_t h = my_hash_compare.hash( key ); + hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask ); + node *n; + restart: + __TBB_ASSERT((m&(m+1))==0, "data structure is invalid"); + bucket *b = get_bucket( h & m ); + // TODO: actually, notification is unnecessary here, just hiding double-check + if( itt_load_word_with_acquire(b->node_list) == internal::rehash_req ) + { + bucket::scoped_t lock; + if( lock.try_acquire( b->mutex, /*write=*/true ) ) { + if( b->node_list == internal::rehash_req) + const_cast(this)->rehash_bucket( b, h & m ); //recursive rehashing + } + else lock.acquire( b->mutex, /*write=*/false ); + __TBB_ASSERT(b->node_list!=internal::rehash_req,NULL); + } + n = search_bucket( key, b ); + if( n ) + return n->storage(); + else if( check_mask_race( h, m ) ) + goto restart; + return 0; + } +}; + +#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT +namespace internal { +using namespace tbb::internal; + +template typename Map, typename Key, typename T, typename... Args> +using hash_map_t = Map< + Key, T, + std::conditional_t< (sizeof...(Args)>0) && !is_allocator_v< pack_element_t<0, Args...> >, + pack_element_t<0, Args...>, tbb_hash_compare >, + std::conditional_t< (sizeof...(Args)>0) && is_allocator_v< pack_element_t >, + pack_element_t, tbb_allocator > > +>; +} + +// Deduction guide for the constructor from two iterators and hash_compare/ allocator +template +concurrent_hash_map(I, I, Args...) 
+-> internal::hash_map_t,internal::iterator_mapped_t, Args...>; + +// Deduction guide for the constructor from an initializer_list and hash_compare/ allocator +// Deduction guide for an initializer_list, hash_compare and allocator is implicit +template +concurrent_hash_map(std::initializer_list>, CompareOrAllocator) +-> internal::hash_map_t; + +#endif /* __TBB_CPP17_DEDUCTION_GUIDES_PRESENT */ + +template +bool concurrent_hash_map::lookup( bool op_insert, const Key &key, const T *t, const_accessor *result, bool write, node* (*allocate_node)(node_allocator_type& , const Key&, const T*), node *tmp_n ) { + __TBB_ASSERT( !result || !result->my_node, NULL ); + bool return_value; + hashcode_t const h = my_hash_compare.hash( key ); + hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask ); + segment_index_t grow_segment = 0; + node *n; + restart: + {//lock scope + __TBB_ASSERT((m&(m+1))==0, "data structure is invalid"); + return_value = false; + // get bucket + bucket_accessor b( this, h & m ); + + // find a node + n = search_bucket( key, b() ); + if( op_insert ) { + // [opt] insert a key + if( !n ) { + if( !tmp_n ) { + tmp_n = allocate_node(my_allocator, key, t); + } + if( !b.is_writer() && !b.upgrade_to_writer() ) { // TODO: improved insertion + // Rerun search_list, in case another thread inserted the item during the upgrade. + n = search_bucket( key, b() ); + if( is_valid(n) ) { // unfortunately, it did + b.downgrade_to_reader(); + goto exists; + } + } + if( check_mask_race(h, m) ) + goto restart; // b.release() is done in ~b(). + // insert and set flag to grow the container + grow_segment = insert_new_node( b(), n = tmp_n, m ); + tmp_n = 0; + return_value = true; + } + } else { // find or count + if( !n ) { + if( check_mask_race( h, m ) ) + goto restart; // b.release() is done in ~b(). TODO: replace by continue + return false; + } + return_value = true; + } + exists: + if( !result ) goto check_growth; + // TODO: the following seems as generic/regular operation + // acquire the item + if( !result->try_acquire( n->mutex, write ) ) { + for( tbb::internal::atomic_backoff backoff(true);; ) { + if( result->try_acquire( n->mutex, write ) ) break; + if( !backoff.bounded_pause() ) { + // the wait takes really long, restart the operation + b.release(); + __TBB_ASSERT( !op_insert || !return_value, "Can't acquire new item in locked bucket?" 
); + __TBB_Yield(); + m = (hashcode_t) itt_load_word_with_acquire( my_mask ); + goto restart; + } + } + } + }//lock scope + result->my_node = n; + result->my_hash = h; +check_growth: + // [opt] grow the container + if( grow_segment ) { +#if __TBB_STATISTICS + my_info_resizes++; // concurrent ones +#endif + enable_segment( grow_segment, my_allocator ); + } + if( tmp_n ) // if op_insert only + delete_node( tmp_n ); + return return_value; +} + +template +template +std::pair concurrent_hash_map::internal_equal_range( const Key& key, I end_ ) const { + hashcode_t h = my_hash_compare.hash( key ); + hashcode_t m = my_mask; + __TBB_ASSERT((m&(m+1))==0, "data structure is invalid"); + h &= m; + bucket *b = get_bucket( h ); + while( b->node_list == internal::rehash_req ) { + m = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit + b = get_bucket( h &= m ); + } + node *n = search_bucket( key, b ); + if( !n ) + return std::make_pair(end_, end_); + iterator lower(*this, h, b, n), upper(lower); + return std::make_pair(lower, ++upper); +} + +template +bool concurrent_hash_map::exclude( const_accessor &item_accessor ) { + __TBB_ASSERT( item_accessor.my_node, NULL ); + node_base *const n = item_accessor.my_node; + hashcode_t const h = item_accessor.my_hash; + hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask ); + do { + // get bucket + bucket_accessor b( this, h & m, /*writer=*/true ); + node_base **p = &b()->node_list; + while( *p && *p != n ) + p = &(*p)->next; + if( !*p ) { // someone else was first + if( check_mask_race( h, m ) ) + continue; + item_accessor.release(); + return false; + } + __TBB_ASSERT( *p == n, NULL ); + *p = n->next; // remove from container + my_size--; + break; + } while(true); + if( !item_accessor.is_writer() ) // need to get exclusive lock + item_accessor.upgrade_to_writer(); // return value means nothing here + item_accessor.release(); + delete_node( n ); // Only one thread can delete it + return true; +} + +template +bool concurrent_hash_map::erase( const Key &key ) { + node_base *n; + hashcode_t const h = my_hash_compare.hash( key ); + hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask ); +restart: + {//lock scope + // get bucket + bucket_accessor b( this, h & m ); + search: + node_base **p = &b()->node_list; + n = *p; + while( is_valid(n) && !my_hash_compare.equal(key, static_cast(n)->value().first ) ) { + p = &n->next; + n = *p; + } + if( !n ) { // not found, but mask could be changed + if( check_mask_race( h, m ) ) + goto restart; + return false; + } + else if( !b.is_writer() && !b.upgrade_to_writer() ) { + if( check_mask_race( h, m ) ) // contended upgrade, check mask + goto restart; + goto search; + } + *p = n->next; + my_size--; + } + { + typename node::scoped_t item_locker( n->mutex, /*write=*/true ); + } + // note: there should be no threads pretending to acquire this mutex again, do not try to upgrade const_accessor! 
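    // The empty scope above takes (and immediately drops) a write lock on the
    // erased node's own mutex.  Because the node was already unlinked from its
    // bucket under the bucket write lock, no new accessor can reach it; taking
    // the node lock once more simply waits out any accessor that was granted
    // the node before the unlink, which is what makes the delete below safe.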
+ delete_node( n ); // Only one thread can delete it due to write lock on the bucket + return true; +} + +template +void concurrent_hash_map::swap(concurrent_hash_map &table) { + typedef typename node_allocator_traits::propagate_on_container_swap pocs_type; + if (this != &table && (pocs_type::value || my_allocator == table.my_allocator)) { + using std::swap; + tbb::internal::allocator_swap(this->my_allocator, table.my_allocator, pocs_type()); + swap(this->my_hash_compare, table.my_hash_compare); + internal_swap(table); + } +} + +template +void concurrent_hash_map::rehash(size_type sz) { + reserve( sz, my_allocator ); // TODO: add reduction of number of buckets as well + hashcode_t mask = my_mask; + hashcode_t b = (mask+1)>>1; // size or first index of the last segment + __TBB_ASSERT((b&(b-1))==0, NULL); // zero or power of 2 + bucket *bp = get_bucket( b ); // only the last segment should be scanned for rehashing + for(; b <= mask; b++, bp++ ) { + node_base *n = bp->node_list; + __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed || n == internal::rehash_req, "Broken internal structure" ); + __TBB_ASSERT( *reinterpret_cast(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during rehash() execution" ); + if( n == internal::rehash_req ) { // rehash bucket, conditional because rehashing of a previous bucket may affect this one + hashcode_t h = b; bucket *b_old = bp; + do { + __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" ); + hashcode_t m = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit + b_old = get_bucket( h &= m ); + } while( b_old->node_list == internal::rehash_req ); + // now h - is index of the root rehashed bucket b_old + mark_rehashed_levels( h ); // mark all non-rehashed children recursively across all segments + for( node_base **p = &b_old->node_list, *q = *p; is_valid(q); q = *p ) { + hashcode_t c = my_hash_compare.hash( static_cast(q)->value().first ); + if( (c & mask) != h ) { // should be rehashed + *p = q->next; // exclude from b_old + bucket *b_new = get_bucket( c & mask ); + __TBB_ASSERT( b_new->node_list != internal::rehash_req, "hash() function changed for key in table or internal error" ); + add_to_bucket( b_new, q ); + } else p = &q->next; // iterate to next item + } + } + } +#if TBB_USE_PERFORMANCE_WARNINGS + int current_size = int(my_size), buckets = int(mask)+1, empty_buckets = 0, overpopulated_buckets = 0; // usage statistics + static bool reported = false; +#endif +#if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS + for( b = 0; b <= mask; b++ ) {// only last segment should be scanned for rehashing + if( b & (b-2) ) ++bp; // not the beginning of a segment + else bp = get_bucket( b ); + node_base *n = bp->node_list; + __TBB_ASSERT( *reinterpret_cast(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during rehash() execution" ); + __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed, "Broken internal structure" ); +#if TBB_USE_PERFORMANCE_WARNINGS + if( n == internal::empty_rehashed ) empty_buckets++; + else if( n->next ) overpopulated_buckets++; +#endif +#if TBB_USE_ASSERT + for( ; is_valid(n); n = n->next ) { + hashcode_t h = my_hash_compare.hash( static_cast(n)->value().first ) & mask; + __TBB_ASSERT( h == b, "hash() function changed for key in table or internal error" ); + } +#endif + } +#endif // TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS +#if TBB_USE_PERFORMANCE_WARNINGS + if( buckets > current_size) empty_buckets -= buckets - current_size; + else overpopulated_buckets -= 
current_size - buckets; // TODO: load_factor? + if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) { + tbb::internal::runtime_warning( + "Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d Empties: %d Overlaps: %d", +#if __TBB_USE_OPTIONAL_RTTI + typeid(*this).name(), +#else + "concurrent_hash_map", +#endif + current_size, empty_buckets, overpopulated_buckets ); + reported = true; + } +#endif +} + +template +void concurrent_hash_map::clear() { + hashcode_t m = my_mask; + __TBB_ASSERT((m&(m+1))==0, "data structure is invalid"); +#if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS +#if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS + int current_size = int(my_size), buckets = int(m)+1, empty_buckets = 0, overpopulated_buckets = 0; // usage statistics + static bool reported = false; +#endif + bucket *bp = 0; + // check consistency + for( segment_index_t b = 0; b <= m; b++ ) { + if( b & (b-2) ) ++bp; // not the beginning of a segment + else bp = get_bucket( b ); + node_base *n = bp->node_list; + __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed || n == internal::rehash_req, "Broken internal structure" ); + __TBB_ASSERT( *reinterpret_cast(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during clear() execution" ); +#if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS + if( n == internal::empty_rehashed ) empty_buckets++; + else if( n == internal::rehash_req ) buckets--; + else if( n->next ) overpopulated_buckets++; +#endif +#if __TBB_EXTRA_DEBUG + for(; is_valid(n); n = n->next ) { + hashcode_t h = my_hash_compare.hash( static_cast(n)->value().first ); + h &= m; + __TBB_ASSERT( h == b || get_bucket(h)->node_list == internal::rehash_req, "hash() function changed for key in table or internal error" ); + } +#endif + } +#if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS +#if __TBB_STATISTICS + printf( "items=%d buckets: capacity=%d rehashed=%d empty=%d overpopulated=%d" + " concurrent: resizes=%u rehashes=%u restarts=%u\n", + current_size, int(m+1), buckets, empty_buckets, overpopulated_buckets, + unsigned(my_info_resizes), unsigned(my_info_rehashes), unsigned(my_info_restarts) ); + my_info_resizes = 0; // concurrent ones + my_info_restarts = 0; // race collisions + my_info_rehashes = 0; // invocations of rehash_bucket +#endif + if( buckets > current_size) empty_buckets -= buckets - current_size; + else overpopulated_buckets -= current_size - buckets; // TODO: load_factor? + if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) { + tbb::internal::runtime_warning( + "Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d Empties: %d Overlaps: %d", +#if __TBB_USE_OPTIONAL_RTTI + typeid(*this).name(), +#else + "concurrent_hash_map", +#endif + current_size, empty_buckets, overpopulated_buckets ); + reported = true; + } +#endif +#endif // TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS + my_size = 0; + segment_index_t s = segment_index_of( m ); + __TBB_ASSERT( s+1 == pointers_per_table || !my_table[s+1], "wrong mask or concurrent grow" ); + do { + __TBB_ASSERT( is_valid( my_table[s] ), "wrong mask or concurrent grow" ); + segment_ptr_t buckets_ptr = my_table[s]; + size_type sz = segment_size( s ? 
s : 1 ); + for( segment_index_t i = 0; i < sz; i++ ) + for( node_base *n = buckets_ptr[i].node_list; is_valid(n); n = buckets_ptr[i].node_list ) { + buckets_ptr[i].node_list = n->next; + delete_node( n ); + } + delete_segment(s, my_allocator); + } while(s-- > 0); + my_mask = embedded_buckets - 1; +} + +template +void concurrent_hash_map::internal_copy( const concurrent_hash_map& source ) { + hashcode_t mask = source.my_mask; + if( my_mask == mask ) { // optimized version + reserve( source.my_size, my_allocator ); // TODO: load_factor? + bucket *dst = 0, *src = 0; + bool rehash_required = false; + for( hashcode_t k = 0; k <= mask; k++ ) { + if( k & (k-2) ) ++dst,src++; // not the beginning of a segment + else { dst = get_bucket( k ); src = source.get_bucket( k ); } + __TBB_ASSERT( dst->node_list != internal::rehash_req, "Invalid bucket in destination table"); + node *n = static_cast( src->node_list ); + if( n == internal::rehash_req ) { // source is not rehashed, items are in previous buckets + rehash_required = true; + dst->node_list = internal::rehash_req; + } else for(; n; n = static_cast( n->next ) ) { + node* node_ptr = create_node(my_allocator, n->value().first, n->value().second); + add_to_bucket( dst, node_ptr); + ++my_size; // TODO: replace by non-atomic op + } + } + if( rehash_required ) rehash(); + } else internal_copy( source.begin(), source.end(), source.my_size ); +} + +template +template +void concurrent_hash_map::internal_copy(I first, I last, size_type reserve_size) { + reserve( reserve_size, my_allocator ); // TODO: load_factor? + hashcode_t m = my_mask; + for(; first != last; ++first) { + hashcode_t h = my_hash_compare.hash( (*first).first ); + bucket *b = get_bucket( h & m ); + __TBB_ASSERT( b->node_list != internal::rehash_req, "Invalid bucket in destination table"); + node* node_ptr = create_node(my_allocator, (*first).first, (*first).second); + add_to_bucket( b, node_ptr ); + ++my_size; // TODO: replace by non-atomic op + } +} + +} // namespace interface5 + +using interface5::concurrent_hash_map; + + +template +inline bool operator==(const concurrent_hash_map &a, const concurrent_hash_map &b) { + if(a.size() != b.size()) return false; + typename concurrent_hash_map::const_iterator i(a.begin()), i_end(a.end()); + typename concurrent_hash_map::const_iterator j, j_end(b.end()); + for(; i != i_end; ++i) { + j = b.equal_range(i->first).first; + if( j == j_end || !(i->second == j->second) ) return false; + } + return true; +} + +template +inline bool operator!=(const concurrent_hash_map &a, const concurrent_hash_map &b) +{ return !(a == b); } + +template +inline void swap(concurrent_hash_map &a, concurrent_hash_map &b) +{ a.swap( b ); } + +#if _MSC_VER && !defined(__INTEL_COMPILER) + #pragma warning( pop ) +#endif // warning 4127 is back + +} // namespace tbb + +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_concurrent_hash_map_H_include_area + +#endif /* __TBB_concurrent_hash_map_H */ diff --git a/ohos/arm64-v8a/include/tbb/concurrent_lru_cache.h b/ohos/arm64-v8a/include/tbb/concurrent_lru_cache.h new file mode 100644 index 00000000..a18dbf29 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/concurrent_lru_cache.h @@ -0,0 +1,290 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_concurrent_lru_cache_H +#define __TBB_concurrent_lru_cache_H + +#define __TBB_concurrent_lru_cache_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + +#if ! TBB_PREVIEW_CONCURRENT_LRU_CACHE + #error Set TBB_PREVIEW_CONCURRENT_LRU_CACHE to include concurrent_lru_cache.h +#endif + +#include "tbb_stddef.h" + +#include +#include +#include // std::find +#if __TBB_CPP11_RVALUE_REF_PRESENT +#include // std::move +#endif + +#include "atomic.h" +#include "internal/_aggregator_impl.h" + +namespace tbb{ +namespace interface6 { + + +template +class concurrent_lru_cache : internal::no_assign{ +private: + typedef concurrent_lru_cache self_type; + typedef value_functor_type value_function_type; + typedef std::size_t ref_counter_type; + struct map_value_type; + typedef std::map map_storage_type; + typedef std::list lru_list_type; + struct map_value_type { + value_type my_value; + ref_counter_type my_ref_counter; + typename lru_list_type::iterator my_lru_list_iterator; + bool my_is_ready; + + map_value_type (value_type const& a_value, ref_counter_type a_ref_counter, typename lru_list_type::iterator a_lru_list_iterator, bool a_is_ready) + : my_value(a_value), my_ref_counter(a_ref_counter), my_lru_list_iterator (a_lru_list_iterator), my_is_ready(a_is_ready) + {} + }; + + class handle_object; + + struct aggregator_operation; + typedef aggregator_operation aggregated_operation_type; + typedef tbb::internal::aggregating_functor aggregator_function_type; + friend class tbb::internal::aggregating_functor; + typedef tbb::internal::aggregator aggregator_type; + +private: + value_function_type my_value_function; + std::size_t const my_number_of_lru_history_items; + map_storage_type my_map_storage; + lru_list_type my_lru_list; + aggregator_type my_aggregator; + +public: + typedef handle_object handle; + +public: + concurrent_lru_cache(value_function_type f, std::size_t number_of_lru_history_items) + : my_value_function(f),my_number_of_lru_history_items(number_of_lru_history_items) + { + my_aggregator.initialize_handler(aggregator_function_type(this)); + } + + handle_object operator[](key_type k){ + retrieve_aggregator_operation op(k); + my_aggregator.execute(&op); + if (op.is_new_value_needed()){ + op.result().second.my_value = my_value_function(k); + __TBB_store_with_release(op.result().second.my_is_ready, true); + }else{ + tbb::internal::spin_wait_while_eq(op.result().second.my_is_ready,false); + } + return handle_object(*this,op.result()); + } +private: + void signal_end_of_usage(typename map_storage_type::reference value_ref){ + signal_end_of_usage_aggregator_operation op(value_ref); + my_aggregator.execute(&op); + } + +private: +#if !__TBB_CPP11_RVALUE_REF_PRESENT + struct handle_move_t:no_assign{ + concurrent_lru_cache & my_cache_ref; + typename map_storage_type::reference my_map_record_ref; + handle_move_t(concurrent_lru_cache & cache_ref, typename map_storage_type::reference value_ref):my_cache_ref(cache_ref),my_map_record_ref(value_ref) {}; + }; +#endif + class handle_object { + concurrent_lru_cache * my_cache_pointer; + typename map_storage_type::pointer 
my_map_record_ptr; + public: + handle_object() : my_cache_pointer(), my_map_record_ptr() {} + handle_object(concurrent_lru_cache& cache_ref, typename map_storage_type::reference value_ref) : my_cache_pointer(&cache_ref), my_map_record_ptr(&value_ref) {} + operator bool() const { + return (my_cache_pointer && my_map_record_ptr); + } +#if __TBB_CPP11_RVALUE_REF_PRESENT + // TODO: add check for double moved objects by special dedicated field + handle_object(handle_object&& src) : my_cache_pointer(src.my_cache_pointer), my_map_record_ptr(src.my_map_record_ptr) { + __TBB_ASSERT((src.my_cache_pointer && src.my_map_record_ptr) || (!src.my_cache_pointer && !src.my_map_record_ptr), "invalid state of moving object?"); + src.my_cache_pointer = NULL; + src.my_map_record_ptr = NULL; + } + handle_object& operator=(handle_object&& src) { + __TBB_ASSERT((src.my_cache_pointer && src.my_map_record_ptr) || (!src.my_cache_pointer && !src.my_map_record_ptr), "invalid state of moving object?"); + if (my_cache_pointer) { + my_cache_pointer->signal_end_of_usage(*my_map_record_ptr); + } + my_cache_pointer = src.my_cache_pointer; + my_map_record_ptr = src.my_map_record_ptr; + src.my_cache_pointer = NULL; + src.my_map_record_ptr = NULL; + return *this; + } +#else + handle_object(handle_move_t m) : my_cache_pointer(&m.my_cache_ref), my_map_record_ptr(&m.my_map_record_ref) {} + handle_object& operator=(handle_move_t m) { + if (my_cache_pointer) { + my_cache_pointer->signal_end_of_usage(*my_map_record_ptr); + } + my_cache_pointer = &m.my_cache_ref; + my_map_record_ptr = &m.my_map_record_ref; + return *this; + } + operator handle_move_t(){ + return move(*this); + } +#endif // __TBB_CPP11_RVALUE_REF_PRESENT + value_type& value(){ + __TBB_ASSERT(my_cache_pointer,"get value from already moved object?"); + __TBB_ASSERT(my_map_record_ptr,"get value from an invalid or already moved object?"); + return my_map_record_ptr->second.my_value; + } + ~handle_object(){ + if (my_cache_pointer){ + my_cache_pointer->signal_end_of_usage(*my_map_record_ptr); + } + } + private: +#if __TBB_CPP11_RVALUE_REF_PRESENT + // For source compatibility with C++03 + friend handle_object&& move(handle_object& h){ + return std::move(h); + } +#else + friend handle_move_t move(handle_object& h){ + return handle_object::move(h); + } + // TODO: add check for double moved objects by special dedicated field + static handle_move_t move(handle_object& h){ + __TBB_ASSERT((h.my_cache_pointer && h.my_map_record_ptr) || (!h.my_cache_pointer && !h.my_map_record_ptr), "invalid state of moving object?"); + concurrent_lru_cache * cache_pointer = h.my_cache_pointer; + typename map_storage_type::pointer map_record_ptr = h.my_map_record_ptr; + h.my_cache_pointer = NULL; + h.my_map_record_ptr = NULL; + return handle_move_t(*cache_pointer, *map_record_ptr); + } +#endif // __TBB_CPP11_RVALUE_REF_PRESENT + private: + void operator=(handle_object&); +#if __SUNPRO_CC + // Presumably due to a compiler error, private copy constructor + // breaks expressions like handle h = cache[key]; + public: +#endif + handle_object(handle_object &); + }; +private: + //TODO: looks like aggregator_operation is a perfect match for statically typed variant type + struct aggregator_operation : tbb::internal::aggregated_operation{ + enum e_op_type {op_retive, op_signal_end_of_usage}; + //TODO: try to use pointer to function apply_visitor here + //TODO: try virtual functions and measure the difference + e_op_type my_operation_type; + aggregator_operation(e_op_type operation_type): 
my_operation_type(operation_type) {} + void cast_and_handle(self_type& container ){ + if (my_operation_type==op_retive){ + static_cast(this)->handle(container); + }else{ + static_cast(this)->handle(container); + } + } + }; + struct retrieve_aggregator_operation : aggregator_operation, private internal::no_assign { + key_type my_key; + typename map_storage_type::pointer my_result_map_record_pointer; + bool my_is_new_value_needed; + retrieve_aggregator_operation(key_type key):aggregator_operation(aggregator_operation::op_retive),my_key(key),my_is_new_value_needed(false){} + void handle(self_type& container ){ + my_result_map_record_pointer = & container.retrieve_serial(my_key,my_is_new_value_needed); + } + typename map_storage_type::reference result(){ return * my_result_map_record_pointer; } + bool is_new_value_needed(){return my_is_new_value_needed;} + }; + struct signal_end_of_usage_aggregator_operation : aggregator_operation, private internal::no_assign { + typename map_storage_type::reference my_map_record_ref; + signal_end_of_usage_aggregator_operation(typename map_storage_type::reference map_record_ref):aggregator_operation(aggregator_operation::op_signal_end_of_usage),my_map_record_ref(map_record_ref){} + void handle(self_type& container ){ + container.signal_end_of_usage_serial(my_map_record_ref); + } + }; + +private: + void handle_operations(aggregator_operation* op_list){ + while(op_list){ + op_list->cast_and_handle(*this); + aggregator_operation* tmp = op_list; + op_list=op_list->next; + tbb::internal::itt_store_word_with_release(tmp->status, uintptr_t(1)); + } + } + +private: + typename map_storage_type::reference retrieve_serial(key_type k, bool& is_new_value_needed){ + typename map_storage_type::iterator it = my_map_storage.find(k); + if (it == my_map_storage.end()){ + it = my_map_storage.insert(it,std::make_pair(k,map_value_type(value_type(),0,my_lru_list.end(),false))); + is_new_value_needed = true; + }else { + typename lru_list_type::iterator list_it = it->second.my_lru_list_iterator; + if (list_it!=my_lru_list.end()) { + __TBB_ASSERT(!it->second.my_ref_counter,"item to be evicted should not have a live references"); + //item is going to be used. Therefore it is not a subject for eviction + //so - remove it from LRU history. + my_lru_list.erase(list_it); + it->second.my_lru_list_iterator= my_lru_list.end(); + } + } + ++(it->second.my_ref_counter); + return *it; + } + + void signal_end_of_usage_serial(typename map_storage_type::reference map_record_ref){ + typename map_storage_type::iterator it = my_map_storage.find(map_record_ref.first); + __TBB_ASSERT(it!=my_map_storage.end(),"cache should not return past-end iterators to outer world"); + __TBB_ASSERT(&(*it) == &map_record_ref,"dangling reference has been returned to outside world? data race ?"); + __TBB_ASSERT( my_lru_list.end()== std::find(my_lru_list.begin(),my_lru_list.end(),it), + "object in use should not be in list of unused objects "); + if (! 
--(it->second.my_ref_counter)){ + //it was the last reference so put it to the LRU history + if (my_lru_list.size()>=my_number_of_lru_history_items){ + //evict items in order to get a space + size_t number_of_elements_to_evict = 1 + my_lru_list.size() - my_number_of_lru_history_items; + for (size_t i=0; isecond.my_ref_counter,"item to be evicted should not have a live references"); + my_lru_list.pop_back(); + my_map_storage.erase(it_to_evict); + } + } + my_lru_list.push_front(it); + it->second.my_lru_list_iterator = my_lru_list.begin(); + } + } +}; +} // namespace interface6 + +using interface6::concurrent_lru_cache; + +} // namespace tbb + +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_concurrent_lru_cache_H_include_area + +#endif //__TBB_concurrent_lru_cache_H diff --git a/ohos/arm64-v8a/include/tbb/concurrent_map.h b/ohos/arm64-v8a/include/tbb/concurrent_map.h new file mode 100644 index 00000000..32fbe684 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/concurrent_map.h @@ -0,0 +1,389 @@ +/* + Copyright (c) 2019-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_concurrent_map_H +#define __TBB_concurrent_map_H + +#define __TBB_concurrent_map_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + +#if !TBB_PREVIEW_CONCURRENT_ORDERED_CONTAINERS +#error Set TBB_PREVIEW_CONCURRENT_ORDERED_CONTAINERS to include concurrent_map.h +#endif + +#include "tbb_config.h" + +// concurrent_map requires C++11 support +#if __TBB_CONCURRENT_ORDERED_CONTAINERS_PRESENT + +#include "internal/_concurrent_skip_list_impl.h" + +namespace tbb { + +namespace interface10 { + +template +class map_traits { +public: + static constexpr size_t MAX_LEVEL = MAX_LEVELS; + using random_level_generator_type = RandomGenerator; + using key_type = Key; + using mapped_type = Value; + using compare_type = KeyCompare; + using value_type = std::pair; + using reference = value_type&; + using const_reference = const value_type&; + using allocator_type = Allocator; + using mutex_type = tbb::spin_mutex; + using node_type = tbb::internal::node_handle, allocator_type>; + + static const bool allow_multimapping = AllowMultimapping; + + class value_compare { + public: + // TODO: these member types are deprecated in C++17, do we need to let them + using result_type = bool; + using first_argument_type = value_type; + using second_argument_type = value_type; + + bool operator()(const value_type& lhs, const value_type& rhs) const { + return comp(lhs.first, rhs.first); + } + + protected: + value_compare(compare_type c) : comp(c) {} + + friend class map_traits; + + compare_type comp; + }; + + static value_compare value_comp(compare_type comp) { return value_compare(comp); } + + static const key_type& get_key(const_reference val) { + return val.first; + } +}; // class map_traits + +template +class concurrent_multimap; + +template , typename Allocator = tbb_allocator>> +class concurrent_map + : public internal::concurrent_skip_list, 64, Allocator, false>> { + using traits_type = 
map_traits, 64, Allocator, false>; + using base_type = internal::concurrent_skip_list; +#if __TBB_EXTRA_DEBUG +public: +#endif + using base_type::allow_multimapping; +public: + using key_type = Key; + using mapped_type = Value; + using value_type = typename traits_type::value_type; + using size_type = typename base_type::size_type; + using difference_type = typename base_type::difference_type; + using key_compare = Comp; + using value_compare = typename base_type::value_compare; + using allocator_type = Allocator; + + using reference = typename base_type::reference; + using const_reference = typename base_type::const_reference; + using pointer = typename base_type::pointer; + using const_pointer = typename base_type::pointer; + + using iterator = typename base_type::iterator; + using const_iterator = typename base_type::const_iterator; + using reverse_iterator = typename base_type::reverse_iterator; + using const_reverse_iterator = typename base_type::const_reverse_iterator; + + using node_type = typename base_type::node_type; + + using base_type::end; + using base_type::find; + using base_type::emplace; + using base_type::insert; + + concurrent_map() = default; + + explicit concurrent_map(const key_compare& comp, const allocator_type& alloc = allocator_type()) : base_type(comp, alloc) {} + + explicit concurrent_map(const allocator_type& alloc) : base_type(key_compare(), alloc) {} + + template< class InputIt > + concurrent_map(InputIt first, InputIt last, const key_compare& comp = Comp(), const allocator_type& alloc = allocator_type()) + : base_type(first, last, comp, alloc) {} + + template< class InputIt > + concurrent_map(InputIt first, InputIt last, const allocator_type& alloc) : base_type(first, last, key_compare(), alloc) {} + + /** Copy constructor */ + concurrent_map(const concurrent_map&) = default; + + concurrent_map(const concurrent_map& other, const allocator_type& alloc) : base_type(other, alloc) {} + + concurrent_map(concurrent_map&&) = default; + + concurrent_map(concurrent_map&& other, const allocator_type& alloc) : base_type(std::move(other), alloc) {} + + concurrent_map(std::initializer_list init, const key_compare& comp = Comp(), const allocator_type& alloc = allocator_type()) + : base_type(comp, alloc) { + insert(init); + } + + concurrent_map(std::initializer_list init, const allocator_type& alloc) + : base_type(key_compare(), alloc) { + insert(init); + } + + concurrent_map& operator=(const concurrent_map& other) { + return static_cast(base_type::operator=(other)); + } + + concurrent_map& operator=(concurrent_map&& other) { + return static_cast(base_type::operator=(std::move(other))); + } + + mapped_type& at(const key_type& key) { + iterator it = find(key); + + if (it == end()) { + tbb::internal::throw_exception(tbb::internal::eid_invalid_key); + } + + return it->second; + } + + const mapped_type& at(const key_type& key) const { + const_iterator it = find(key); + + if (it == end()) { + tbb::internal::throw_exception(tbb::internal::eid_invalid_key); + } + + return it->second; + } + + mapped_type& operator[](const key_type& key) { + iterator it = find(key); + + if (it == end()) { + it = emplace(std::piecewise_construct, std::forward_as_tuple(key), std::tuple<>()).first; + } + + return it->second; + } + + mapped_type& operator[](key_type&& key) { + iterator it = find(key); + + if (it == end()) { + it = emplace(std::piecewise_construct, std::forward_as_tuple(std::move(key)), std::tuple<>()).first; + } + + return it->second; + } + + template::value>::type> + std::pair 
insert(P&& value) { + return emplace(std::forward<P>
(value)); + } + + template<typename P, typename = typename std::enable_if<std::is_constructible<value_type, P&&>::value>::type> + iterator insert(const_iterator hint, P&& value) { + return emplace_hint(hint, std::forward<P>
(value)); + return end(); + } + + template + void merge(concurrent_map& source) { + this->internal_merge(source); + } + + template + void merge(concurrent_map&& source) { + this->internal_merge(std::move(source)); + } + + template + void merge(concurrent_multimap& source) { + this->internal_merge(source); + } + + template + void merge(concurrent_multimap&& source) { + this->internal_merge(std::move(source)); + } +}; // class concurrent_map + +#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT + +namespace internal { + +using namespace tbb::internal; + +template typename Map, typename Key, typename T, typename... Args> +using c_map_t = Map 0) && !is_allocator_v >, + pack_element_t<0, Args...>, std::less >, + std::conditional_t< (sizeof...(Args) > 0) && is_allocator_v >, + pack_element_t, tbb_allocator > > >; +} // namespace internal + +template +concurrent_map(It, It, Args...) +-> internal::c_map_t, internal::iterator_mapped_t, Args...>; + +template +concurrent_map(std::initializer_list>, Args...) +-> internal::c_map_t; + +#endif // __TBB_CPP17_DEDUCTION_GUIDES_PRESENT + +template , typename Allocator = tbb_allocator>> +class concurrent_multimap + : public internal::concurrent_skip_list, 64, Allocator, true>> { + using traits_type = map_traits, 64, Allocator, true>; + using base_type = internal::concurrent_skip_list; +#if __TBB_EXTRA_DEBUG +public: +#endif + using base_type::allow_multimapping; +public: + using key_type = Key; + using mapped_type = Value; + using value_type = typename traits_type::value_type; + using size_type = typename base_type::size_type; + using difference_type = typename base_type::difference_type; + using key_compare = Comp; + using value_compare = typename base_type::value_compare; + using allocator_type = Allocator; + + using reference = typename base_type::reference; + using const_reference = typename base_type::const_reference; + using pointer = typename base_type::pointer; + using const_pointer = typename base_type::pointer; + + using iterator = typename base_type::iterator; + using const_iterator = typename base_type::const_iterator; + using reverse_iterator = typename base_type::reverse_iterator; + using const_reverse_iterator = typename base_type::const_reverse_iterator; + + using node_type = typename base_type::node_type; + + using base_type::end; + using base_type::find; + using base_type::emplace; + using base_type::insert; + + concurrent_multimap() = default; + + explicit concurrent_multimap(const key_compare& comp, const allocator_type& alloc = allocator_type()) : base_type(comp, alloc) {} + + explicit concurrent_multimap(const allocator_type& alloc) : base_type(key_compare(), alloc) {} + + template< class InputIt > + concurrent_multimap(InputIt first, InputIt last, const key_compare& comp = Comp(), const allocator_type& alloc = allocator_type()) + : base_type(first, last, comp, alloc) {} + + template< class InputIt > + concurrent_multimap(InputIt first, InputIt last, const allocator_type& alloc) : base_type(first, last, key_compare(), alloc) {} + + /** Copy constructor */ + concurrent_multimap(const concurrent_multimap&) = default; + + concurrent_multimap(const concurrent_multimap& other, const allocator_type& alloc) : base_type(other, alloc) {} + + concurrent_multimap(concurrent_multimap&&) = default; + + concurrent_multimap(concurrent_multimap&& other, const allocator_type& alloc) : base_type(std::move(other), alloc) {} + + concurrent_multimap(std::initializer_list init, const key_compare& comp = Comp(), const allocator_type& alloc = allocator_type()) + : 
base_type(comp, alloc) { + insert(init); + } + + concurrent_multimap(std::initializer_list init, const allocator_type& alloc) + : base_type(key_compare(), alloc) { + insert(init); + } + + concurrent_multimap& operator=(const concurrent_multimap& other) { + return static_cast(base_type::operator=(other)); + } + + concurrent_multimap& operator=(concurrent_multimap&& other) { + return static_cast(base_type::operator=(std::move(other))); + } + + template::value>::type> + std::pair insert(P&& value) { + return emplace(std::forward
<P>
(value)); + } + + template<typename P, typename = typename std::enable_if<std::is_constructible<value_type, P&&>::value>::type> + iterator insert(const_iterator hint, P&& value) { + return emplace_hint(hint, std::forward<P>
(value)); + return end(); + } + + template + void merge(concurrent_multimap& source) { + this->internal_merge(source); + } + + template + void merge(concurrent_multimap&& source) { + this->internal_merge(std::move(source)); + } + + template + void merge(concurrent_map& source) { + this->internal_merge(source); + } + + template + void merge(concurrent_map&& source) { + this->internal_merge(std::move(source)); + } + +}; // class concurrent_multimap + +#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT + +template +concurrent_multimap(It, It, Args...) +-> internal::c_map_t, internal::iterator_mapped_t, Args...>; + +template +concurrent_multimap(std::initializer_list>, Args...) +-> internal::c_map_t; + +#endif // __TBB_CPP17_DEDUCTION_GUIDES_PRESENT + +} // namespace interface10 + +using interface10::concurrent_map; +using interface10::concurrent_multimap; + +} // namespace tbb + +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_concurrent_map_H_include_area + +#endif // __TBB_CONCURRENT_ORDERED_CONTAINERS_PRESENT +#endif // __TBB_concurrent_map_H diff --git a/ohos/arm64-v8a/include/tbb/concurrent_priority_queue.h b/ohos/arm64-v8a/include/tbb/concurrent_priority_queue.h new file mode 100644 index 00000000..9c70098b --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/concurrent_priority_queue.h @@ -0,0 +1,552 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_concurrent_priority_queue_H +#define __TBB_concurrent_priority_queue_H + +#define __TBB_concurrent_priority_queue_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + +#include "atomic.h" +#include "cache_aligned_allocator.h" +#include "tbb_exception.h" +#include "tbb_stddef.h" +#include "tbb_profiling.h" +#include "internal/_aggregator_impl.h" +#include "internal/_template_helpers.h" +#include "internal/_allocator_traits.h" +#include +#include +#include +#include __TBB_STD_SWAP_HEADER + +#if __TBB_INITIALIZER_LISTS_PRESENT + #include +#endif + +#if __TBB_CPP11_IS_COPY_CONSTRUCTIBLE_PRESENT + #include +#endif + +namespace tbb { +namespace interface5 { +namespace internal { +#if __TBB_CPP11_IS_COPY_CONSTRUCTIBLE_PRESENT + template::value> + struct use_element_copy_constructor { + typedef tbb::internal::true_type type; + }; + template + struct use_element_copy_constructor { + typedef tbb::internal::false_type type; + }; +#else + template + struct use_element_copy_constructor { + typedef tbb::internal::true_type type; + }; +#endif +} // namespace internal + +using namespace tbb::internal; + +//! Concurrent priority queue +template , typename A=cache_aligned_allocator > +class concurrent_priority_queue { + public: + //! Element type in the queue. + typedef T value_type; + + //! Reference type + typedef T& reference; + + //! Const reference type + typedef const T& const_reference; + + //! Integral type for representing size of the queue. + typedef size_t size_type; + + //! Difference type for iterator + typedef ptrdiff_t difference_type; + + //! 
Allocator type + typedef A allocator_type; + + //! Constructs a new concurrent_priority_queue with default capacity + explicit concurrent_priority_queue(const allocator_type& a = allocator_type()) : mark(0), my_size(0), compare(), data(a) + { + my_aggregator.initialize_handler(my_functor_t(this)); + } + + //! Constructs a new concurrent_priority_queue with default capacity + explicit concurrent_priority_queue(const Compare& c, const allocator_type& a = allocator_type()) : mark(0), my_size(0), compare(c), data(a) + { + my_aggregator.initialize_handler(my_functor_t(this)); + } + + //! Constructs a new concurrent_priority_queue with init_sz capacity + explicit concurrent_priority_queue(size_type init_capacity, const allocator_type& a = allocator_type()) : + mark(0), my_size(0), compare(), data(a) + { + data.reserve(init_capacity); + my_aggregator.initialize_handler(my_functor_t(this)); + } + + //! Constructs a new concurrent_priority_queue with init_sz capacity + explicit concurrent_priority_queue(size_type init_capacity, const Compare& c, const allocator_type& a = allocator_type()) : + mark(0), my_size(0), compare(c), data(a) + { + data.reserve(init_capacity); + my_aggregator.initialize_handler(my_functor_t(this)); + } + + //! [begin,end) constructor + template + concurrent_priority_queue(InputIterator begin, InputIterator end, const allocator_type& a = allocator_type()) : + mark(0), compare(), data(begin, end, a) + { + my_aggregator.initialize_handler(my_functor_t(this)); + heapify(); + my_size = data.size(); + } + + //! [begin,end) constructor + template + concurrent_priority_queue(InputIterator begin, InputIterator end, const Compare& c, const allocator_type& a = allocator_type()) : + mark(0), compare(c), data(begin, end, a) + { + my_aggregator.initialize_handler(my_functor_t(this)); + heapify(); + my_size = data.size(); + } + +#if __TBB_INITIALIZER_LISTS_PRESENT + //! Constructor from std::initializer_list + concurrent_priority_queue(std::initializer_list init_list, const allocator_type &a = allocator_type()) : + mark(0), compare(), data(init_list.begin(), init_list.end(), a) + { + my_aggregator.initialize_handler(my_functor_t(this)); + heapify(); + my_size = data.size(); + } + + //! Constructor from std::initializer_list + concurrent_priority_queue(std::initializer_list init_list, const Compare& c, const allocator_type &a = allocator_type()) : + mark(0), compare(c), data(init_list.begin(), init_list.end(), a) + { + my_aggregator.initialize_handler(my_functor_t(this)); + heapify(); + my_size = data.size(); + } +#endif //# __TBB_INITIALIZER_LISTS_PRESENT + + //! Copy constructor + /** This operation is unsafe if there are pending concurrent operations on the src queue. */ + concurrent_priority_queue(const concurrent_priority_queue& src) : mark(src.mark), + my_size(src.my_size), data(src.data.begin(), src.data.end(), src.data.get_allocator()) + { + my_aggregator.initialize_handler(my_functor_t(this)); + heapify(); + } + + //! Copy constructor with specific allocator + /** This operation is unsafe if there are pending concurrent operations on the src queue. */ + concurrent_priority_queue(const concurrent_priority_queue& src, const allocator_type& a) : mark(src.mark), + my_size(src.my_size), data(src.data.begin(), src.data.end(), a) + { + my_aggregator.initialize_handler(my_functor_t(this)); + heapify(); + } + + //! Assignment operator + /** This operation is unsafe if there are pending concurrent operations on the src queue. 
*/ + concurrent_priority_queue& operator=(const concurrent_priority_queue& src) { + if (this != &src) { + vector_t(src.data.begin(), src.data.end(), src.data.get_allocator()).swap(data); + mark = src.mark; + my_size = src.my_size; + } + return *this; + } + +#if __TBB_CPP11_RVALUE_REF_PRESENT + //! Move constructor + /** This operation is unsafe if there are pending concurrent operations on the src queue. */ + concurrent_priority_queue(concurrent_priority_queue&& src) : mark(src.mark), + my_size(src.my_size), data(std::move(src.data)) + { + my_aggregator.initialize_handler(my_functor_t(this)); + } + + //! Move constructor with specific allocator + /** This operation is unsafe if there are pending concurrent operations on the src queue. */ + concurrent_priority_queue(concurrent_priority_queue&& src, const allocator_type& a) : mark(src.mark), + my_size(src.my_size), +#if __TBB_ALLOCATOR_TRAITS_PRESENT + data(std::move(src.data), a) +#else + // Some early version of C++11 STL vector does not have a constructor of vector(vector&& , allocator). + // It seems that the reason is absence of support of allocator_traits (stateful allocators). + data(a) +#endif //__TBB_ALLOCATOR_TRAITS_PRESENT + { + my_aggregator.initialize_handler(my_functor_t(this)); +#if !__TBB_ALLOCATOR_TRAITS_PRESENT + if (a != src.data.get_allocator()){ + data.reserve(src.data.size()); + data.assign(std::make_move_iterator(src.data.begin()), std::make_move_iterator(src.data.end())); + }else{ + data = std::move(src.data); + } +#endif //!__TBB_ALLOCATOR_TRAITS_PRESENT + } + + //! Move assignment operator + /** This operation is unsafe if there are pending concurrent operations on the src queue. */ + concurrent_priority_queue& operator=( concurrent_priority_queue&& src) { + if (this != &src) { + mark = src.mark; + my_size = src.my_size; +#if !__TBB_ALLOCATOR_TRAITS_PRESENT + if (data.get_allocator() != src.data.get_allocator()){ + vector_t(std::make_move_iterator(src.data.begin()), std::make_move_iterator(src.data.end()), data.get_allocator()).swap(data); + }else +#endif //!__TBB_ALLOCATOR_TRAITS_PRESENT + { + data = std::move(src.data); + } + } + return *this; + } +#endif //__TBB_CPP11_RVALUE_REF_PRESENT + + //! Assign the queue from [begin,end) range, not thread-safe + template + void assign(InputIterator begin, InputIterator end) { + vector_t(begin, end, data.get_allocator()).swap(data); + mark = 0; + my_size = data.size(); + heapify(); + } + +#if __TBB_INITIALIZER_LISTS_PRESENT + //! Assign the queue from std::initializer_list, not thread-safe + void assign(std::initializer_list il) { this->assign(il.begin(), il.end()); } + + //! Assign from std::initializer_list, not thread-safe + concurrent_priority_queue& operator=(std::initializer_list il) { + this->assign(il.begin(), il.end()); + return *this; + } +#endif //# __TBB_INITIALIZER_LISTS_PRESENT + + //! Returns true if empty, false otherwise + /** Returned value may not reflect results of pending operations. + This operation reads shared data and will trigger a race condition. */ + bool empty() const { return size()==0; } + + //! Returns the current number of elements contained in the queue + /** Returned value may not reflect results of pending operations. + This operation reads shared data and will trigger a race condition. */ + size_type size() const { return __TBB_load_with_acquire(my_size); } + + //! Pushes elem onto the queue, increasing capacity of queue if necessary + /** This operation can be safely used concurrently with other push, try_pop or emplace operations. 
*/ + void push(const_reference elem) { +#if __TBB_CPP11_IS_COPY_CONSTRUCTIBLE_PRESENT + __TBB_STATIC_ASSERT( std::is_copy_constructible::value, "The type is not copy constructible. Copying push operation is impossible." ); +#endif + cpq_operation op_data(elem, PUSH_OP); + my_aggregator.execute(&op_data); + if (op_data.status == FAILED) // exception thrown + throw_exception(eid_bad_alloc); + } + +#if __TBB_CPP11_RVALUE_REF_PRESENT + //! Pushes elem onto the queue, increasing capacity of queue if necessary + /** This operation can be safely used concurrently with other push, try_pop or emplace operations. */ + void push(value_type &&elem) { + cpq_operation op_data(elem, PUSH_RVALUE_OP); + my_aggregator.execute(&op_data); + if (op_data.status == FAILED) // exception thrown + throw_exception(eid_bad_alloc); + } + +#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT + //! Constructs a new element using args as the arguments for its construction and pushes it onto the queue */ + /** This operation can be safely used concurrently with other push, try_pop or emplace operations. */ + template + void emplace(Args&&... args) { + push(value_type(std::forward(args)...)); + } +#endif /* __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT */ +#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */ + + //! Gets a reference to and removes highest priority element + /** If a highest priority element was found, sets elem and returns true, + otherwise returns false. + This operation can be safely used concurrently with other push, try_pop or emplace operations. */ + bool try_pop(reference elem) { + cpq_operation op_data(POP_OP); + op_data.elem = &elem; + my_aggregator.execute(&op_data); + return op_data.status==SUCCEEDED; + } + + //! Clear the queue; not thread-safe + /** This operation is unsafe if there are pending concurrent operations on the queue. + Resets size, effectively emptying queue; does not free space. + May not clear elements added in pending operations. */ + void clear() { + data.clear(); + mark = 0; + my_size = 0; + } + + //! Swap this queue with another; not thread-safe + /** This operation is unsafe if there are pending concurrent operations on the queue. */ + void swap(concurrent_priority_queue& q) { + using std::swap; + data.swap(q.data); + swap(mark, q.mark); + swap(my_size, q.my_size); + } + + //! Return allocator object + allocator_type get_allocator() const { return data.get_allocator(); } + + private: + enum operation_type {INVALID_OP, PUSH_OP, POP_OP, PUSH_RVALUE_OP}; + enum operation_status { WAIT=0, SUCCEEDED, FAILED }; + + class cpq_operation : public aggregated_operation { + public: + operation_type type; + union { + value_type *elem; + size_type sz; + }; + cpq_operation(const_reference e, operation_type t) : + type(t), elem(const_cast(&e)) {} + cpq_operation(operation_type t) : type(t) {} + }; + + class my_functor_t { + concurrent_priority_queue *cpq; + public: + my_functor_t() {} + my_functor_t(concurrent_priority_queue *cpq_) : cpq(cpq_) {} + void operator()(cpq_operation* op_list) { + cpq->handle_operations(op_list); + } + }; + + typedef tbb::internal::aggregator< my_functor_t, cpq_operation > aggregator_t; + aggregator_t my_aggregator; + //! Padding added to avoid false sharing + char padding1[NFS_MaxLineSize - sizeof(aggregator_t)]; + //! The point at which unsorted elements begin + size_type mark; + __TBB_atomic size_type my_size; + Compare compare; + //! Padding added to avoid false sharing + char padding2[NFS_MaxLineSize - (2*sizeof(size_type)) - sizeof(Compare)]; + //! 
Storage for the heap of elements in queue, plus unheapified elements + /** data has the following structure: + + binary unheapified + heap elements + ____|_______|____ + | | | + v v v + [_|...|_|_|...|_| |...| ] + 0 ^ ^ ^ + | | |__capacity + | |__my_size + |__mark + + Thus, data stores the binary heap starting at position 0 through + mark-1 (it may be empty). Then there are 0 or more elements + that have not yet been inserted into the heap, in positions + mark through my_size-1. */ + typedef std::vector vector_t; + vector_t data; + + void handle_operations(cpq_operation *op_list) { + cpq_operation *tmp, *pop_list=NULL; + + __TBB_ASSERT(mark == data.size(), NULL); + + // First pass processes all constant (amortized; reallocation may happen) time pushes and pops. + while (op_list) { + // ITT note: &(op_list->status) tag is used to cover accesses to op_list + // node. This thread is going to handle the operation, and so will acquire it + // and perform the associated operation w/o triggering a race condition; the + // thread that created the operation is waiting on the status field, so when + // this thread is done with the operation, it will perform a + // store_with_release to give control back to the waiting thread in + // aggregator::insert_operation. + call_itt_notify(acquired, &(op_list->status)); + __TBB_ASSERT(op_list->type != INVALID_OP, NULL); + tmp = op_list; + op_list = itt_hide_load_word(op_list->next); + if (tmp->type == POP_OP) { + if (mark < data.size() && + compare(data[0], data[data.size()-1])) { + // there are newly pushed elems and the last one + // is higher than top + *(tmp->elem) = tbb::internal::move(data[data.size()-1]); + __TBB_store_with_release(my_size, my_size-1); + itt_store_word_with_release(tmp->status, uintptr_t(SUCCEEDED)); + data.pop_back(); + __TBB_ASSERT(mark<=data.size(), NULL); + } + else { // no convenient item to pop; postpone + itt_hide_store_word(tmp->next, pop_list); + pop_list = tmp; + } + } else { // PUSH_OP or PUSH_RVALUE_OP + __TBB_ASSERT(tmp->type == PUSH_OP || tmp->type == PUSH_RVALUE_OP, "Unknown operation" ); + __TBB_TRY{ + if (tmp->type == PUSH_OP) { + push_back_helper(*(tmp->elem), typename internal::use_element_copy_constructor::type()); + } else { + data.push_back(tbb::internal::move(*(tmp->elem))); + } + __TBB_store_with_release(my_size, my_size + 1); + itt_store_word_with_release(tmp->status, uintptr_t(SUCCEEDED)); + } __TBB_CATCH(...) 
{ + itt_store_word_with_release(tmp->status, uintptr_t(FAILED)); + } + } + } + + // second pass processes pop operations + while (pop_list) { + tmp = pop_list; + pop_list = itt_hide_load_word(pop_list->next); + __TBB_ASSERT(tmp->type == POP_OP, NULL); + if (data.empty()) { + itt_store_word_with_release(tmp->status, uintptr_t(FAILED)); + } + else { + __TBB_ASSERT(mark<=data.size(), NULL); + if (mark < data.size() && + compare(data[0], data[data.size()-1])) { + // there are newly pushed elems and the last one is + // higher than top + *(tmp->elem) = tbb::internal::move(data[data.size()-1]); + __TBB_store_with_release(my_size, my_size-1); + itt_store_word_with_release(tmp->status, uintptr_t(SUCCEEDED)); + data.pop_back(); + } + else { // extract top and push last element down heap + *(tmp->elem) = tbb::internal::move(data[0]); + __TBB_store_with_release(my_size, my_size-1); + itt_store_word_with_release(tmp->status, uintptr_t(SUCCEEDED)); + reheap(); + } + } + } + + // heapify any leftover pushed elements before doing the next + // batch of operations + if (mark0) mark = 1; + for (; mark>1; + if (!compare(data[parent], to_place)) break; + data[cur_pos] = tbb::internal::move(data[parent]); + cur_pos = parent; + } while( cur_pos ); + data[cur_pos] = tbb::internal::move(to_place); + } + } + + //! Re-heapify after an extraction + /** Re-heapify by pushing last element down the heap from the root. */ + void reheap() { + size_type cur_pos=0, child=1; + + while (child < mark) { + size_type target = child; + if (child+1 < mark && compare(data[child], data[child+1])) + ++target; + // target now has the higher priority child + if (compare(data[target], data[data.size()-1])) break; + data[cur_pos] = tbb::internal::move(data[target]); + cur_pos = target; + child = (cur_pos<<1)+1; + } + if (cur_pos != data.size()-1) + data[cur_pos] = tbb::internal::move(data[data.size()-1]); + data.pop_back(); + if (mark > data.size()) mark = data.size(); + } + + void push_back_helper(const T& t, tbb::internal::true_type) { + data.push_back(t); + } + + void push_back_helper(const T&, tbb::internal::false_type) { + __TBB_ASSERT( false, "The type is not copy constructible. Copying push operation is impossible." ); + } +}; + +#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT +namespace internal { + +template +using priority_queue_t = concurrent_priority_queue< + T, + std::conditional_t< (sizeof...(Args)>0) && !is_allocator_v< pack_element_t<0, Args...> >, + pack_element_t<0, Args...>, std::less >, + std::conditional_t< (sizeof...(Args)>0) && is_allocator_v< pack_element_t >, + pack_element_t, cache_aligned_allocator > +>; +} + +// Deduction guide for the constructor from two iterators +template::value_type, + typename... Args +> concurrent_priority_queue(InputIterator, InputIterator, Args...) 
+-> internal::priority_queue_t; + +template +concurrent_priority_queue(std::initializer_list init_list, CompareOrAllocalor) +-> internal::priority_queue_t; + +#endif /* __TBB_CPP17_DEDUCTION_GUIDES_PRESENT */ +} // namespace interface5 + +using interface5::concurrent_priority_queue; + +} // namespace tbb + +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_concurrent_priority_queue_H_include_area + +#endif /* __TBB_concurrent_priority_queue_H */ diff --git a/ohos/arm64-v8a/include/tbb/concurrent_queue.h b/ohos/arm64-v8a/include/tbb/concurrent_queue.h new file mode 100644 index 00000000..122f98e5 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/concurrent_queue.h @@ -0,0 +1,479 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_concurrent_queue_H +#define __TBB_concurrent_queue_H + +#define __TBB_concurrent_queue_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + +#include "internal/_concurrent_queue_impl.h" +#include "internal/_allocator_traits.h" + +namespace tbb { + +namespace strict_ppl { + +//! A high-performance thread-safe non-blocking concurrent queue. +/** Multiple threads may each push and pop concurrently. + Assignment construction is not allowed. + @ingroup containers */ +template > +class concurrent_queue: public internal::concurrent_queue_base_v3 { + template friend class internal::concurrent_queue_iterator; + + //! Allocator type + typedef typename tbb::internal::allocator_rebind::type page_allocator_type; + page_allocator_type my_allocator; + + //! Allocates a block of size n (bytes) + virtual void *allocate_block( size_t n ) __TBB_override { + void *b = reinterpret_cast(my_allocator.allocate( n )); + if( !b ) + internal::throw_exception(internal::eid_bad_alloc); + return b; + } + + //! Deallocates block created by allocate_block. + virtual void deallocate_block( void *b, size_t n ) __TBB_override { + my_allocator.deallocate( reinterpret_cast(b), n ); + } + + static void copy_construct_item(T* location, const void* src){ + new (location) T(*static_cast(src)); + } + +#if __TBB_CPP11_RVALUE_REF_PRESENT + static void move_construct_item(T* location, const void* src) { + new (location) T( std::move(*static_cast(const_cast(src))) ); + } +#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */ +public: + //! Element type in the queue. + typedef T value_type; + + //! Reference type + typedef T& reference; + + //! Const reference type + typedef const T& const_reference; + + //! Integral type for representing size of the queue. + typedef size_t size_type; + + //! Difference type for iterator + typedef ptrdiff_t difference_type; + + //! Allocator type + typedef A allocator_type; + + //! Construct empty queue + explicit concurrent_queue(const allocator_type& a = allocator_type()) : + my_allocator( a ) + { + } + + //! 
[begin,end) constructor + template + concurrent_queue( InputIterator begin, InputIterator end, const allocator_type& a = allocator_type()) : + my_allocator( a ) + { + for( ; begin != end; ++begin ) + this->push(*begin); + } + + //! Copy constructor + concurrent_queue( const concurrent_queue& src, const allocator_type& a = allocator_type()) : + internal::concurrent_queue_base_v3(), my_allocator( a ) + { + this->assign( src, copy_construct_item ); + } + +#if __TBB_CPP11_RVALUE_REF_PRESENT + //! Move constructors + concurrent_queue( concurrent_queue&& src ) : + internal::concurrent_queue_base_v3(), my_allocator( std::move(src.my_allocator) ) + { + this->internal_swap( src ); + } + + concurrent_queue( concurrent_queue&& src, const allocator_type& a ) : + internal::concurrent_queue_base_v3(), my_allocator( a ) + { + // checking that memory allocated by one instance of allocator can be deallocated + // with another + if( my_allocator == src.my_allocator) { + this->internal_swap( src ); + } else { + // allocators are different => performing per-element move + this->assign( src, move_construct_item ); + src.clear(); + } + } +#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */ + + //! Destroy queue + ~concurrent_queue(); + + //! Enqueue an item at tail of queue. + void push( const T& source ) { + this->internal_push( &source, copy_construct_item ); + } + +#if __TBB_CPP11_RVALUE_REF_PRESENT + void push( T&& source ) { + this->internal_push( &source, move_construct_item ); + } + +#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT + template + void emplace( Arguments&&... args ) { + push( T(std::forward( args )...) ); + } +#endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT +#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */ + + //! Attempt to dequeue an item from head of queue. + /** Does not wait for item to become available. + Returns true if successful; false otherwise. */ + bool try_pop( T& result ) { + return this->internal_try_pop( &result ); + } + + //! Return the number of items in the queue; thread unsafe + size_type unsafe_size() const {return this->internal_size();} + + //! Equivalent to size()==0. + bool empty() const {return this->internal_empty();} + + //! Clear the queue. not thread-safe. + void clear() ; + + //! Return allocator object + allocator_type get_allocator() const { return this->my_allocator; } + + typedef internal::concurrent_queue_iterator iterator; + typedef internal::concurrent_queue_iterator const_iterator; + + //------------------------------------------------------------------------ + // The iterators are intended only for debugging. They are slow and not thread safe. + //------------------------------------------------------------------------ + iterator unsafe_begin() {return iterator(*this);} + iterator unsafe_end() {return iterator();} + const_iterator unsafe_begin() const {return const_iterator(*this);} + const_iterator unsafe_end() const {return const_iterator();} +} ; + +#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT +// Deduction guide for the constructor from two iterators +template::value_type, + typename A = cache_aligned_allocator +> concurrent_queue(InputIterator, InputIterator, const A& = A()) +-> concurrent_queue; +#endif /* __TBB_CPP17_DEDUCTION_GUIDES_PRESENT */ + +template +concurrent_queue::~concurrent_queue() { + clear(); + this->internal_finish_clear(); +} + +template +void concurrent_queue::clear() { + T value; + while( !empty() ) try_pop(value); +} + +} // namespace strict_ppl + +//! A high-performance thread-safe blocking concurrent bounded queue. 
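A minimal usage sketch of the non-blocking queue defined just above (illustrative only, not part of the vendored header): push() may be called from many threads at once and try_pop() polls without blocking, as its comments state. It assumes the headers added by this patch are on the include path and the program is linked against the bundled static TBB library; the file name and counts are illustrative.

    // usage_sketch_concurrent_queue.cpp -- illustrative sketch, not part of the patch.
    #include <cstddef>
    #include <cstdio>
    #include <thread>
    #include <vector>
    #include "tbb/concurrent_queue.h"

    int main() {
        tbb::concurrent_queue<int> q;            // unbounded, non-blocking queue
        std::vector<std::thread> producers;
        for (int t = 0; t < 4; ++t) {
            producers.emplace_back([&q, t] {
                for (int i = 0; i < 1000; ++i)
                    q.push(t * 1000 + i);        // safe to call concurrently with other push/try_pop
            });
        }
        for (std::thread& p : producers) p.join();

        int item = 0;
        std::size_t popped = 0;
        while (q.try_pop(item))                  // returns false once the queue is drained
            ++popped;
        std::printf("popped %zu items\n", popped);   // expected: 4000
        return popped == 4000 ? 0 : 1;
    }
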
+/** This is the pre-PPL TBB concurrent queue which supports boundedness and blocking semantics. + Note that method names agree with the PPL-style concurrent queue. + Multiple threads may each push and pop concurrently. + Assignment construction is not allowed. + @ingroup containers */ +template > +class concurrent_bounded_queue: public internal::concurrent_queue_base_v8 { + template friend class internal::concurrent_queue_iterator; + typedef typename tbb::internal::allocator_rebind::type page_allocator_type; + + //! Allocator type + page_allocator_type my_allocator; + + typedef typename concurrent_queue_base_v3::padded_page padded_page; + typedef typename concurrent_queue_base_v3::copy_specifics copy_specifics; + + //! Class used to ensure exception-safety of method "pop" + class destroyer: internal::no_copy { + T& my_value; + public: + destroyer( T& value ) : my_value(value) {} + ~destroyer() {my_value.~T();} + }; + + T& get_ref( page& p, size_t index ) { + __TBB_ASSERT( index(static_cast(&p))->last)[index]; + } + + virtual void copy_item( page& dst, size_t index, const void* src ) __TBB_override { + new( &get_ref(dst,index) ) T(*static_cast(src)); + } + +#if __TBB_CPP11_RVALUE_REF_PRESENT + virtual void move_item( page& dst, size_t index, const void* src ) __TBB_override { + new( &get_ref(dst,index) ) T( std::move(*static_cast(const_cast(src))) ); + } +#else + virtual void move_item( page&, size_t, const void* ) __TBB_override { + __TBB_ASSERT( false, "Unreachable code" ); + } +#endif + + virtual void copy_page_item( page& dst, size_t dindex, const page& src, size_t sindex ) __TBB_override { + new( &get_ref(dst,dindex) ) T( get_ref( const_cast(src), sindex ) ); + } + +#if __TBB_CPP11_RVALUE_REF_PRESENT + virtual void move_page_item( page& dst, size_t dindex, const page& src, size_t sindex ) __TBB_override { + new( &get_ref(dst,dindex) ) T( std::move(get_ref( const_cast(src), sindex )) ); + } +#else + virtual void move_page_item( page&, size_t, const page&, size_t ) __TBB_override { + __TBB_ASSERT( false, "Unreachable code" ); + } +#endif + + virtual void assign_and_destroy_item( void* dst, page& src, size_t index ) __TBB_override { + T& from = get_ref(src,index); + destroyer d(from); + *static_cast(dst) = tbb::internal::move( from ); + } + + virtual page *allocate_page() __TBB_override { + size_t n = sizeof(padded_page) + (items_per_page-1)*sizeof(T); + page *p = reinterpret_cast(my_allocator.allocate( n )); + if( !p ) + internal::throw_exception(internal::eid_bad_alloc); + return p; + } + + virtual void deallocate_page( page *p ) __TBB_override { + size_t n = sizeof(padded_page) + (items_per_page-1)*sizeof(T); + my_allocator.deallocate( reinterpret_cast(p), n ); + } + +public: + //! Element type in the queue. + typedef T value_type; + + //! Allocator type + typedef A allocator_type; + + //! Reference type + typedef T& reference; + + //! Const reference type + typedef const T& const_reference; + + //! Integral type for representing size of the queue. + /** Note that the size_type is a signed integral type. + This is because the size can be negative if there are pending pops without corresponding pushes. */ + typedef std::ptrdiff_t size_type; + + //! Difference type for iterator + typedef std::ptrdiff_t difference_type; + + //! Construct empty queue + explicit concurrent_bounded_queue(const allocator_type& a = allocator_type()) : + concurrent_queue_base_v8( sizeof(T) ), my_allocator( a ) + { + } + + //! 
Copy constructor + concurrent_bounded_queue( const concurrent_bounded_queue& src, const allocator_type& a = allocator_type()) + : concurrent_queue_base_v8( sizeof(T) ), my_allocator( a ) + { + assign( src ); + } + +#if __TBB_CPP11_RVALUE_REF_PRESENT + //! Move constructors + concurrent_bounded_queue( concurrent_bounded_queue&& src ) + : concurrent_queue_base_v8( sizeof(T) ), my_allocator( std::move(src.my_allocator) ) + { + internal_swap( src ); + } + + concurrent_bounded_queue( concurrent_bounded_queue&& src, const allocator_type& a ) + : concurrent_queue_base_v8( sizeof(T) ), my_allocator( a ) + { + // checking that memory allocated by one instance of allocator can be deallocated + // with another + if( my_allocator == src.my_allocator) { + this->internal_swap( src ); + } else { + // allocators are different => performing per-element move + this->move_content( src ); + src.clear(); + } + } +#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */ + + //! [begin,end) constructor + template + concurrent_bounded_queue( InputIterator begin, InputIterator end, + const allocator_type& a = allocator_type()) + : concurrent_queue_base_v8( sizeof(T) ), my_allocator( a ) + { + for( ; begin != end; ++begin ) + internal_push_if_not_full(&*begin); + } + + //! Destroy queue + ~concurrent_bounded_queue(); + + //! Enqueue an item at tail of queue. + void push( const T& source ) { + internal_push( &source ); + } + +#if __TBB_CPP11_RVALUE_REF_PRESENT + //! Move an item at tail of queue. + void push( T&& source ) { + internal_push_move( &source ); + } + +#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT + template + void emplace( Arguments&&... args ) { + push( T(std::forward( args )...) ); + } +#endif /* __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT */ +#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */ + + //! Dequeue item from head of queue. + /** Block until an item becomes available, and then dequeue it. */ + void pop( T& destination ) { + internal_pop( &destination ); + } + +#if TBB_USE_EXCEPTIONS + //! Abort all pending queue operations + void abort() { + internal_abort(); + } +#endif + + //! Enqueue an item at tail of queue if queue is not already full. + /** Does not wait for queue to become not full. + Returns true if item is pushed; false if queue was already full. */ + bool try_push( const T& source ) { + return internal_push_if_not_full( &source ); + } + +#if __TBB_CPP11_RVALUE_REF_PRESENT + //! Move an item at tail of queue if queue is not already full. + /** Does not wait for queue to become not full. + Returns true if item is pushed; false if queue was already full. */ + bool try_push( T&& source ) { + return internal_push_move_if_not_full( &source ); + } +#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT + template + bool try_emplace( Arguments&&... args ) { + return try_push( T(std::forward( args )...) ); + } +#endif /* __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT */ +#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */ + + //! Attempt to dequeue an item from head of queue. + /** Does not wait for item to become available. + Returns true if successful; false otherwise. */ + bool try_pop( T& destination ) { + return internal_pop_if_present( &destination ); + } + + //! Return number of pushes minus number of pops. + /** Note that the result can be negative if there are pops waiting for the + corresponding pushes. The result can also exceed capacity() if there + are push operations in flight. */ + size_type size() const {return internal_size();} + + //! Equivalent to size()<=0. + bool empty() const {return internal_empty();} + + //! 
Maximum number of allowed elements + size_type capacity() const { + return my_capacity; + } + + //! Set the capacity + /** Setting the capacity to 0 causes subsequent try_push operations to always fail, + and subsequent push operations to block forever. */ + void set_capacity( size_type new_capacity ) { + internal_set_capacity( new_capacity, sizeof(T) ); + } + + //! return allocator object + allocator_type get_allocator() const { return this->my_allocator; } + + //! clear the queue. not thread-safe. + void clear() ; + + typedef internal::concurrent_queue_iterator iterator; + typedef internal::concurrent_queue_iterator const_iterator; + + //------------------------------------------------------------------------ + // The iterators are intended only for debugging. They are slow and not thread safe. + //------------------------------------------------------------------------ + iterator unsafe_begin() {return iterator(*this);} + iterator unsafe_end() {return iterator();} + const_iterator unsafe_begin() const {return const_iterator(*this);} + const_iterator unsafe_end() const {return const_iterator();} + +}; + +#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT +// guide for concurrent_bounded_queue(InputIterator, InputIterator, ...) +template::value_type, + typename A = cache_aligned_allocator +> concurrent_bounded_queue(InputIterator, InputIterator, const A& = A()) +-> concurrent_bounded_queue; +#endif /* __TBB_CPP17_DEDUCTION_GUIDES_PRESENT */ + +template +concurrent_bounded_queue::~concurrent_bounded_queue() { + clear(); + internal_finish_clear(); +} + +template +void concurrent_bounded_queue::clear() { + T value; + while( try_pop(value) ) /*noop*/; +} + +using strict_ppl::concurrent_queue; + +} // namespace tbb + +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_concurrent_queue_H_include_area + +#endif /* __TBB_concurrent_queue_H */ diff --git a/ohos/arm64-v8a/include/tbb/concurrent_set.h b/ohos/arm64-v8a/include/tbb/concurrent_set.h new file mode 100644 index 00000000..ecb21624 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/concurrent_set.h @@ -0,0 +1,304 @@ +/* + Copyright (c) 2019-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +#ifndef __TBB_concurrent_set_H +#define __TBB_concurrent_set_H + +#define __TBB_concurrent_set_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + +#if !TBB_PREVIEW_CONCURRENT_ORDERED_CONTAINERS +#error Set TBB_PREVIEW_CONCURRENT_ORDERED_CONTAINERS to include concurrent_set.h +#endif + +#include "tbb/tbb_config.h" + +// concurrent_set requires C++11 support +#if __TBB_CONCURRENT_ORDERED_CONTAINERS_PRESENT + +#include "internal/_concurrent_skip_list_impl.h" + +namespace tbb { +namespace interface10 { + +// TODO: test this class +template +class set_traits { +public: + static constexpr size_t MAX_LEVEL = MAX_LEVELS; + using random_level_generator_type = RandomGenerator; + using key_type = Key; + using value_type = key_type; + using compare_type = KeyCompare; + using value_compare = compare_type; + using reference = value_type & ; + using const_reference = const value_type&; + using allocator_type = Allocator; + using mutex_type = tbb::spin_mutex; + using node_type = tbb::internal::node_handle, allocator_type>; + + static const bool allow_multimapping = AllowMultimapping; + + static const key_type& get_key(const_reference val) { + return val; + } + + static value_compare value_comp(compare_type comp) { return comp; } +}; + +template +class concurrent_multiset; + +template , typename Allocator = tbb_allocator> +class concurrent_set + : public internal::concurrent_skip_list, 64, Allocator, false>> { + using traits_type = set_traits, 64, Allocator, false>; + using base_type = internal::concurrent_skip_list; +#if __TBB_EXTRA_DEBUG +public: +#endif + using base_type::allow_multimapping; +public: + using key_type = Key; + using value_type = typename traits_type::value_type; + using size_type = typename base_type::size_type; + using difference_type = typename base_type::difference_type; + using key_compare = Comp; + using value_compare = typename base_type::value_compare; + using allocator_type = Allocator; + + using reference = typename base_type::reference; + using const_reference = typename base_type::const_reference; + using pointer = typename base_type::pointer; + using const_pointer = typename base_type::pointer; + + using iterator = typename base_type::iterator; + using const_iterator = typename base_type::const_iterator; + using reverse_iterator = typename base_type::reverse_iterator; + using const_reverse_iterator = typename base_type::const_reverse_iterator; + + using node_type = typename base_type::node_type; + + using base_type::insert; + + concurrent_set() = default; + + explicit concurrent_set(const key_compare& comp, const allocator_type& alloc = allocator_type()) : base_type(comp, alloc) {} + + explicit concurrent_set(const allocator_type& alloc) : base_type(key_compare(), alloc) {} + + template< class InputIt > + concurrent_set(InputIt first, InputIt last, const key_compare& comp = Comp(), const allocator_type& alloc = allocator_type()) + : base_type(first, last, comp, alloc) {} + + template< class InputIt > + concurrent_set(InputIt first, InputIt last, const allocator_type& alloc) : base_type(first, last, key_compare(), alloc) {} + + /** Copy constructor */ + concurrent_set(const concurrent_set&) = default; + + concurrent_set(const concurrent_set& other, const allocator_type& alloc) : base_type(other, alloc) {} + + concurrent_set(concurrent_set&&) = default; + + concurrent_set(concurrent_set&& other, const allocator_type& alloc) : base_type(std::move(other), alloc) {} + + concurrent_set(std::initializer_list init, const key_compare& comp = Comp(), const 
allocator_type& alloc = allocator_type()) + : base_type(comp, alloc) { + insert(init); + } + + concurrent_set(std::initializer_list init, const allocator_type& alloc) + : base_type(key_compare(), alloc) { + insert(init); + } + + concurrent_set& operator=(const concurrent_set& other) { + return static_cast(base_type::operator=(other)); + } + + concurrent_set& operator=(concurrent_set&& other) { + return static_cast(base_type::operator=(std::move(other))); + } + + template + void merge(concurrent_set& source) { + this->internal_merge(source); + } + + template + void merge(concurrent_set&& source) { + this->internal_merge(std::move(source)); + } + + template + void merge(concurrent_multiset& source) { + this->internal_merge(source); + } + + template + void merge(concurrent_multiset&& source) { + this->internal_merge(std::move(source)); + } +}; // class concurrent_set + +#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT + +namespace internal { + +using namespace tbb::internal; + +template typename Set, typename Key, typename... Args> +using c_set_t = Set 0) && !is_allocator_v >, + pack_element_t<0, Args...>, std::less >, + std::conditional_t< (sizeof...(Args) > 0) && is_allocator_v >, + pack_element_t, tbb_allocator > >; +} // namespace internal + +template +concurrent_set(It, It, Args...) +-> internal::c_set_t, Args...>; + +template +concurrent_set(std::initializer_list, Args...) +-> internal::c_set_t; + +#endif // __TBB_CPP17_DEDUCTION_GUIDES_PRESENT + +template , typename Allocator = tbb_allocator> +class concurrent_multiset + : public internal::concurrent_skip_list, 64, Allocator, true>> { + using traits_type = set_traits, 64, Allocator, true>; + using base_type = internal::concurrent_skip_list; +#if __TBB_EXTRA_DEBUG +public: +#endif + using base_type::allow_multimapping; +public: + using key_type = Key; + using value_type = typename traits_type::value_type; + using size_type = typename base_type::size_type; + using difference_type = typename base_type::difference_type; + using key_compare = Comp; + using value_compare = typename base_type::value_compare; + using allocator_type = Allocator; + + using reference = typename base_type::reference; + using const_reference = typename base_type::const_reference; + using pointer = typename base_type::pointer; + using const_pointer = typename base_type::pointer; + + using iterator = typename base_type::iterator; + using const_iterator = typename base_type::const_iterator; + using reverse_iterator = typename base_type::reverse_iterator; + using const_reverse_iterator = typename base_type::const_reverse_iterator; + + using node_type = typename base_type::node_type; + + using base_type::insert; + + concurrent_multiset() = default; + + explicit concurrent_multiset(const key_compare& comp, const allocator_type& alloc = allocator_type()) : base_type(comp, alloc) {} + + explicit concurrent_multiset(const allocator_type& alloc) : base_type(key_compare(), alloc) {} + + template< class InputIt > + concurrent_multiset(InputIt first, InputIt last, const key_compare& comp = Comp(), const allocator_type& alloc = allocator_type()) + : base_type(comp, alloc) { + insert(first, last); + } + + template< class InputIt > + concurrent_multiset(InputIt first, InputIt last, const allocator_type& alloc) : base_type(key_compare(), alloc) { + insert(first, last); + } + + /** Copy constructor */ + concurrent_multiset(const concurrent_multiset&) = default; + + concurrent_multiset(const concurrent_multiset& other, const allocator_type& alloc) : base_type(other, alloc) {} + + 
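A minimal usage sketch of the ordered preview containers in this header (illustrative only, not part of the vendored code): the preview macro has to be defined before the include, as the guard at the top of the file requires, and a C++11 (or later) toolchain is assumed since the whole header is compiled out otherwise.

    // usage_sketch_concurrent_set.cpp -- illustrative sketch, not part of the patch.
    #define TBB_PREVIEW_CONCURRENT_ORDERED_CONTAINERS 1
    #include <cstdio>
    #include "tbb/concurrent_set.h"

    int main() {
        tbb::concurrent_set<int> unique_keys;
        unique_keys.insert(3);
        unique_keys.insert(1);
        unique_keys.insert(3);                       // duplicate key is ignored; keys stay unique

        tbb::concurrent_multiset<int> all_keys{3, 3, 1};   // multiset keeps the duplicate

        std::printf("%zu %zu\n", unique_keys.size(), all_keys.size());  // expected: 2 3
        return 0;
    }
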
concurrent_multiset(concurrent_multiset&&) = default; + + concurrent_multiset(concurrent_multiset&& other, const allocator_type& alloc) : base_type(std::move(other), alloc) {} + + concurrent_multiset(std::initializer_list init, const key_compare& comp = Comp(), const allocator_type& alloc = allocator_type()) + : base_type(comp, alloc) { + insert(init); + } + + concurrent_multiset(std::initializer_list init, const allocator_type& alloc) + : base_type(key_compare(), alloc) { + insert(init); + } + + concurrent_multiset& operator=(const concurrent_multiset& other) { + return static_cast(base_type::operator=(other)); + } + + concurrent_multiset& operator=(concurrent_multiset&& other) { + return static_cast(base_type::operator=(std::move(other))); + } + + template + void merge(concurrent_set& source) { + this->internal_merge(source); + } + + template + void merge(concurrent_set&& source) { + this->internal_merge(std::move(source)); + } + + template + void merge(concurrent_multiset& source) { + this->internal_merge(source); + } + + template + void merge(concurrent_multiset&& source) { + this->internal_merge(std::move(source)); + } +}; // class concurrent_multiset + +#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT + + +template +concurrent_multiset(It, It, Args...) +-> internal::c_set_t, Args...>; + +template +concurrent_multiset(std::initializer_list, Args...) +-> internal::c_set_t; + +#endif // __TBB_CPP17_DEDUCTION_GUIDES_PRESENT + +} // namespace interface10 + +using interface10::concurrent_set; +using interface10::concurrent_multiset; + +} // namespace tbb + +#endif // __TBB_CONCURRENT_ORDERED_CONTAINERS_PRESENT + +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_concurrent_set_H_include_area + +#endif // __TBB_concurrent_set_H diff --git a/ohos/arm64-v8a/include/tbb/concurrent_unordered_map.h b/ohos/arm64-v8a/include/tbb/concurrent_unordered_map.h new file mode 100644 index 00000000..a9d8df8a --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/concurrent_unordered_map.h @@ -0,0 +1,492 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* Container implementations in this header are based on PPL implementations + provided by Microsoft. 
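A minimal usage sketch for the concurrent_set / concurrent_multiset preview containers defined above, before the unordered map header resumes. This is a sketch, not part of upstream TBB: it assumes TBB_PREVIEW_CONCURRENT_ORDERED_CONTAINERS is defined before the include (as the #error guard above requires), a C++11 toolchain so __TBB_CONCURRENT_ORDERED_CONTAINERS_PRESENT is set, and linkage against the bundled libtbb_static.a. Key values and loop counts are illustrative.

#define TBB_PREVIEW_CONCURRENT_ORDERED_CONTAINERS 1
#include "tbb/concurrent_set.h"
#include "tbb/parallel_for.h"
#include <cstdio>

int main() {
    tbb::concurrent_set<int> unique_keys;     // ordered (skip-list based), duplicates rejected
    tbb::concurrent_multiset<int> all_keys;   // same structure, duplicates kept
    tbb::parallel_for(0, 1000, [&](int i) {
        unique_keys.insert(i % 100);          // thread-safe; repeated keys are no-ops
        all_keys.insert(i % 100);             // thread-safe; every insertion is kept
    });
    std::printf("unique=%zu total=%zu\n", unique_keys.size(), all_keys.size());
    return 0;                                 // expected: unique=100 total=1000
}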
*/ + +#ifndef __TBB_concurrent_unordered_map_H +#define __TBB_concurrent_unordered_map_H + +#define __TBB_concurrent_unordered_map_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + +#include "internal/_concurrent_unordered_impl.h" + +namespace tbb +{ + +namespace interface5 { + +// Template class for hash map traits +template +class concurrent_unordered_map_traits +{ +protected: + typedef std::pair value_type; + typedef Key key_type; + typedef Hash_compare hash_compare; + typedef typename tbb::internal::allocator_rebind::type allocator_type; +#if __TBB_UNORDERED_NODE_HANDLE_PRESENT + typedef tbb::internal::node_handle::node, + allocator_type> node_type; +#endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT + + enum { allow_multimapping = Allow_multimapping }; + + concurrent_unordered_map_traits() : my_hash_compare() {} + concurrent_unordered_map_traits(const hash_compare& hc) : my_hash_compare(hc) {} + + template + static const Key& get_key(const std::pair& value) { + return (value.first); + } + + hash_compare my_hash_compare; // the comparator predicate for keys +}; + +template +class concurrent_unordered_multimap; + +template , typename Key_equality = std::equal_to, + typename Allocator = tbb::tbb_allocator > > +class concurrent_unordered_map : + public internal::concurrent_unordered_base< concurrent_unordered_map_traits, Allocator, false> > +{ + // Base type definitions + typedef internal::hash_compare hash_compare; + typedef concurrent_unordered_map_traits traits_type; + typedef internal::concurrent_unordered_base< traits_type > base_type; +#if __TBB_EXTRA_DEBUG +public: +#endif + using traits_type::allow_multimapping; +public: + using base_type::end; + using base_type::find; + using base_type::insert; + + // Type definitions + typedef Key key_type; + typedef typename base_type::value_type value_type; + typedef T mapped_type; + typedef Hasher hasher; + typedef Key_equality key_equal; + typedef hash_compare key_compare; + + typedef typename base_type::allocator_type allocator_type; + typedef typename base_type::pointer pointer; + typedef typename base_type::const_pointer const_pointer; + typedef typename base_type::reference reference; + typedef typename base_type::const_reference const_reference; + + typedef typename base_type::size_type size_type; + typedef typename base_type::difference_type difference_type; + + typedef typename base_type::iterator iterator; + typedef typename base_type::const_iterator const_iterator; + typedef typename base_type::iterator local_iterator; + typedef typename base_type::const_iterator const_local_iterator; +#if __TBB_UNORDERED_NODE_HANDLE_PRESENT + typedef typename base_type::node_type node_type; +#endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT + + // Construction/destruction/copying + explicit concurrent_unordered_map(size_type n_of_buckets = base_type::initial_bucket_number, + const hasher& a_hasher = hasher(), const key_equal& a_keyeq = key_equal(), + const allocator_type& a = allocator_type()) + : base_type(n_of_buckets, key_compare(a_hasher, a_keyeq), a) + {} + + concurrent_unordered_map(size_type n_of_buckets, const allocator_type& a) + : base_type(n_of_buckets, key_compare(hasher(), key_equal()), a) + {} + + concurrent_unordered_map(size_type n_of_buckets, const hasher& a_hasher, const allocator_type& a) + : base_type(n_of_buckets, key_compare(a_hasher, key_equal()), a) + {} + + explicit concurrent_unordered_map(const Allocator& a) : base_type(base_type::initial_bucket_number, key_compare(), a) + {} + + template + 
concurrent_unordered_map(Iterator first, Iterator last, size_type n_of_buckets = base_type::initial_bucket_number, + const hasher& a_hasher = hasher(), const key_equal& a_keyeq = key_equal(), + const allocator_type& a = allocator_type()) + : base_type(n_of_buckets, key_compare(a_hasher, a_keyeq), a) + { + insert(first, last); + } + + template + concurrent_unordered_map(Iterator first, Iterator last, size_type n_of_buckets, const allocator_type& a) + : base_type(n_of_buckets, key_compare(hasher(), key_equal()), a) + { + insert(first, last); + } + + template + concurrent_unordered_map(Iterator first, Iterator last, size_type n_of_buckets, const hasher& a_hasher, + const allocator_type& a) + : base_type(n_of_buckets, key_compare(a_hasher, key_equal()), a) + { + insert(first, last); + } + +#if __TBB_INITIALIZER_LISTS_PRESENT + //! Constructor from initializer_list + concurrent_unordered_map(std::initializer_list il, size_type n_of_buckets = base_type::initial_bucket_number, + const hasher& a_hasher = hasher(), const key_equal& a_keyeq = key_equal(), + const allocator_type& a = allocator_type()) + : base_type(n_of_buckets, key_compare(a_hasher, a_keyeq), a) + { + insert(il.begin(),il.end()); + } + + concurrent_unordered_map(std::initializer_list il, size_type n_of_buckets, const allocator_type& a) + : base_type(n_of_buckets, key_compare(hasher(), key_equal()), a) + { + insert(il.begin(), il.end()); + } + + concurrent_unordered_map(std::initializer_list il, size_type n_of_buckets, const hasher& a_hasher, + const allocator_type& a) + : base_type(n_of_buckets, key_compare(a_hasher, key_equal()), a) + { + insert(il.begin(), il.end()); + } + +#endif //# __TBB_INITIALIZER_LISTS_PRESENT + + +#if __TBB_CPP11_RVALUE_REF_PRESENT && !__TBB_IMPLICIT_MOVE_PRESENT + concurrent_unordered_map(const concurrent_unordered_map& table) + : base_type(table) + {} + + concurrent_unordered_map& operator=(const concurrent_unordered_map& table) + { + return static_cast(base_type::operator=(table)); + } + + concurrent_unordered_map(concurrent_unordered_map&& table) + : base_type(std::move(table)) + {} + + concurrent_unordered_map& operator=(concurrent_unordered_map&& table) + { + return static_cast(base_type::operator=(std::move(table))); + } +#endif //__TBB_CPP11_RVALUE_REF_PRESENT && !__TBB_IMPLICIT_MOVE_PRESENT + +#if __TBB_CPP11_RVALUE_REF_PRESENT + concurrent_unordered_map(concurrent_unordered_map&& table, const Allocator& a) : base_type(std::move(table), a) + {} +#endif /*__TBB_CPP11_RVALUE_REF_PRESENT*/ + +#if __TBB_UNORDERED_NODE_HANDLE_PRESENT + template + void merge(concurrent_unordered_map& source) + { this->internal_merge(source); } + + template + void merge(concurrent_unordered_map&& source) + { this->internal_merge(source); } + + template + void merge(concurrent_unordered_multimap& source) + { this->internal_merge(source); } + + template + void merge(concurrent_unordered_multimap&& source) + { this->internal_merge(source); } + +#endif //__TBB_UNORDERED_NODE_HANDLE_PRESENT + + concurrent_unordered_map(const concurrent_unordered_map& table, const Allocator& a) + : base_type(table, a) + {} + + // Observers + mapped_type& operator[](const key_type& key) + { + iterator where = find(key); + + if (where == end()) + { + where = insert(std::pair(key, mapped_type())).first; + } + + return ((*where).second); + } + + mapped_type& at(const key_type& key) + { + iterator where = find(key); + + if (where == end()) + { + tbb::internal::throw_exception(tbb::internal::eid_invalid_key); + } + + return ((*where).second); + } + + 
const mapped_type& at(const key_type& key) const + { + const_iterator where = find(key); + + if (where == end()) + { + tbb::internal::throw_exception(tbb::internal::eid_invalid_key); + } + + return ((*where).second); + } +}; + +#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT + +namespace internal { +using namespace tbb::internal; + +template typename Map, typename Key, typename Element, typename... Args> +using cu_map_t = Map< + Key, Element, + std::conditional_t< (sizeof...(Args)>0) && !is_allocator_v< pack_element_t<0, Args...> >, + pack_element_t<0, Args...>, tbb_hash >, + std::conditional_t< (sizeof...(Args)>1) && !is_allocator_v< pack_element_t<1, Args...> >, + pack_element_t<1, Args...>, std::equal_to >, + std::conditional_t< (sizeof...(Args)>0) && is_allocator_v< pack_element_t >, + pack_element_t, tbb_allocator > > +>; +} + +// Deduction guide for the constructor from two iterators +template +concurrent_unordered_map (I, I) +-> internal::cu_map_t, internal::iterator_mapped_t>; + +// Deduction guide for the constructor from two iterators and hasher/equality/allocator +template +concurrent_unordered_map(I, I, size_t, Args...) +-> internal::cu_map_t, internal::iterator_mapped_t, Args...>; + +// Deduction guide for the constructor from an initializer_list +template +concurrent_unordered_map(std::initializer_list>) +-> internal::cu_map_t; + +// Deduction guide for the constructor from an initializer_list and hasher/equality/allocator +template +concurrent_unordered_map(std::initializer_list>, size_t, Args...) +-> internal::cu_map_t; + +#endif /* __TBB_CPP17_DEDUCTION_GUIDES_PRESENT */ + +template < typename Key, typename T, typename Hasher = tbb::tbb_hash, typename Key_equality = std::equal_to, + typename Allocator = tbb::tbb_allocator > > +class concurrent_unordered_multimap : + public internal::concurrent_unordered_base< concurrent_unordered_map_traits< Key, T, + internal::hash_compare, Allocator, true> > +{ + // Base type definitions + typedef internal::hash_compare hash_compare; + typedef concurrent_unordered_map_traits traits_type; + typedef internal::concurrent_unordered_base base_type; +#if __TBB_EXTRA_DEBUG +public: +#endif + using traits_type::allow_multimapping; +public: + using base_type::insert; + + // Type definitions + typedef Key key_type; + typedef typename base_type::value_type value_type; + typedef T mapped_type; + typedef Hasher hasher; + typedef Key_equality key_equal; + typedef hash_compare key_compare; + + typedef typename base_type::allocator_type allocator_type; + typedef typename base_type::pointer pointer; + typedef typename base_type::const_pointer const_pointer; + typedef typename base_type::reference reference; + typedef typename base_type::const_reference const_reference; + + typedef typename base_type::size_type size_type; + typedef typename base_type::difference_type difference_type; + + typedef typename base_type::iterator iterator; + typedef typename base_type::const_iterator const_iterator; + typedef typename base_type::iterator local_iterator; + typedef typename base_type::const_iterator const_local_iterator; +#if __TBB_UNORDERED_NODE_HANDLE_PRESENT + typedef typename base_type::node_type node_type; +#endif //__TBB_UNORDERED_NODE_HANDLE_PRESENT + + // Construction/destruction/copying + explicit concurrent_unordered_multimap(size_type n_of_buckets = base_type::initial_bucket_number, + const hasher& a_hasher = hasher(), const key_equal& a_keyeq = key_equal(), + const allocator_type& a = allocator_type()) + : base_type(n_of_buckets, key_compare(a_hasher, 
a_keyeq), a) + {} + + concurrent_unordered_multimap(size_type n_of_buckets, const allocator_type& a) + : base_type(n_of_buckets, key_compare(hasher(), key_equal()), a) + {} + + concurrent_unordered_multimap(size_type n_of_buckets, const hasher& a_hasher, const allocator_type& a) + : base_type(n_of_buckets, key_compare(a_hasher, key_equal()), a) + {} + + explicit concurrent_unordered_multimap(const Allocator& a) : base_type(base_type::initial_bucket_number, key_compare(), a) + {} + + template + concurrent_unordered_multimap(Iterator first, Iterator last, size_type n_of_buckets = base_type::initial_bucket_number, + const hasher& a_hasher = hasher(), const key_equal& a_keyeq = key_equal(), + const allocator_type& a = allocator_type()) + : base_type(n_of_buckets,key_compare(a_hasher,a_keyeq), a) + { + insert(first, last); + } + + template + concurrent_unordered_multimap(Iterator first, Iterator last, size_type n_of_buckets, const allocator_type& a) + : base_type(n_of_buckets, key_compare(hasher(), key_equal()), a) + { + insert(first, last); + } + + template + concurrent_unordered_multimap(Iterator first, Iterator last, size_type n_of_buckets, const hasher& a_hasher, + const allocator_type& a) + : base_type(n_of_buckets, key_compare(a_hasher, key_equal()), a) + { + insert(first, last); + } + +#if __TBB_INITIALIZER_LISTS_PRESENT + //! Constructor from initializer_list + concurrent_unordered_multimap(std::initializer_list il, size_type n_of_buckets = base_type::initial_bucket_number, + const hasher& a_hasher = hasher(), const key_equal& a_keyeq = key_equal(), + const allocator_type& a = allocator_type()) + : base_type(n_of_buckets, key_compare(a_hasher, a_keyeq), a) + { + insert(il.begin(),il.end()); + } + + concurrent_unordered_multimap(std::initializer_list il, size_type n_of_buckets, const allocator_type& a) + : base_type(n_of_buckets, key_compare(hasher(), key_equal()), a) + { + insert(il.begin(), il.end()); + } + + concurrent_unordered_multimap(std::initializer_list il, size_type n_of_buckets, const hasher& a_hasher, + const allocator_type& a) + : base_type(n_of_buckets, key_compare(a_hasher, key_equal()), a) + { + insert(il.begin(), il.end()); + } + +#endif //# __TBB_INITIALIZER_LISTS_PRESENT + +#if __TBB_CPP11_RVALUE_REF_PRESENT && !__TBB_IMPLICIT_MOVE_PRESENT + concurrent_unordered_multimap(const concurrent_unordered_multimap& table) + : base_type(table) + {} + + concurrent_unordered_multimap& operator=(const concurrent_unordered_multimap& table) + { + return static_cast(base_type::operator=(table)); + } + + concurrent_unordered_multimap(concurrent_unordered_multimap&& table) + : base_type(std::move(table)) + {} + + concurrent_unordered_multimap& operator=(concurrent_unordered_multimap&& table) + { + return static_cast(base_type::operator=(std::move(table))); + } +#endif //__TBB_CPP11_RVALUE_REF_PRESENT && !__TBB_IMPLICIT_MOVE_PRESENT + +#if __TBB_CPP11_RVALUE_REF_PRESENT + concurrent_unordered_multimap(concurrent_unordered_multimap&& table, const Allocator& a) : base_type(std::move(table), a) + {} +#endif /*__TBB_CPP11_RVALUE_REF_PRESENT*/ + +#if __TBB_UNORDERED_NODE_HANDLE_PRESENT + template + void merge(concurrent_unordered_map& source) + { this->internal_merge(source); } + + template + void merge(concurrent_unordered_map&& source) + { this->internal_merge(source); } + + template + void merge(concurrent_unordered_multimap& source) + { this->internal_merge(source); } + + template + void merge(concurrent_unordered_multimap&& source) + { this->internal_merge(source); } + +#endif 
//__TBB_UNORDERED_NODE_HANDLE_PRESENT + + concurrent_unordered_multimap(const concurrent_unordered_multimap& table, const Allocator& a) + : base_type(table, a) + {} +}; + +#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT + +// Deduction guide for the constructor from two iterators +template +concurrent_unordered_multimap (I, I) +-> internal::cu_map_t, internal::iterator_mapped_t>; + +// Deduction guide for the constructor from two iterators and hasher/equality/allocator +template +concurrent_unordered_multimap(I, I, size_t, Args...) +-> internal::cu_map_t, internal::iterator_mapped_t, Args...>; + +// Deduction guide for the constructor from an initializer_list +template +concurrent_unordered_multimap(std::initializer_list>) +-> internal::cu_map_t; + +// Deduction guide for the constructor from an initializer_list and hasher/equality/allocator +template +concurrent_unordered_multimap(std::initializer_list>, size_t, Args...) +-> internal::cu_map_t; + +#endif /* __TBB_CPP17_DEDUCTION_GUIDES_PRESENT */ +} // namespace interface5 + +using interface5::concurrent_unordered_map; +using interface5::concurrent_unordered_multimap; + +} // namespace tbb + +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_concurrent_unordered_map_H_include_area + +#endif// __TBB_concurrent_unordered_map_H diff --git a/ohos/arm64-v8a/include/tbb/concurrent_unordered_set.h b/ohos/arm64-v8a/include/tbb/concurrent_unordered_set.h new file mode 100644 index 00000000..edb02565 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/concurrent_unordered_set.h @@ -0,0 +1,448 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* Container implementations in this header are based on PPL implementations + provided by Microsoft. 
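A brief sketch of the concurrent_unordered_map added above, used as a parallel word counter, before the set header resumes. Concurrent insert, find, operator[] and traversal are safe; concurrent erase is not. Using tbb::atomic<int> as the mapped type is one possible pattern, not something the header mandates, and the input data is made up.

#include "tbb/concurrent_unordered_map.h"
#include "tbb/parallel_for_each.h"
#include "tbb/atomic.h"
#include <string>
#include <vector>
#include <cstdio>

int main() {
    std::vector<std::string> words = {"tbb", "ohos", "tbb", "arm64", "tbb"};
    // Atomic mapped values let several threads bump the same counter safely.
    tbb::concurrent_unordered_map<std::string, tbb::atomic<int> > counts;
    tbb::parallel_for_each(words.begin(), words.end(),
                           [&](const std::string& w) {
        ++counts[w];   // operator[] inserts a zero-initialized counter on first use
    });
    for (const auto& kv : counts)
        std::printf("%s -> %d\n", kv.first.c_str(), int(kv.second));
    return 0;
}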
*/ + +#ifndef __TBB_concurrent_unordered_set_H +#define __TBB_concurrent_unordered_set_H + +#define __TBB_concurrent_unordered_set_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + +#include "internal/_concurrent_unordered_impl.h" + +namespace tbb +{ + +namespace interface5 { + +// Template class for hash set traits +template +class concurrent_unordered_set_traits +{ +protected: + typedef Key value_type; + typedef Key key_type; + typedef Hash_compare hash_compare; + typedef typename tbb::internal::allocator_rebind::type allocator_type; +#if __TBB_UNORDERED_NODE_HANDLE_PRESENT + typedef tbb::internal::node_handle::node, + allocator_type> node_type; +#endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT + + enum { allow_multimapping = Allow_multimapping }; + + concurrent_unordered_set_traits() : my_hash_compare() {} + concurrent_unordered_set_traits(const hash_compare& hc) : my_hash_compare(hc) {} + + static const Key& get_key(const value_type& value) { + return value; + } + + hash_compare my_hash_compare; // the comparator predicate for keys +}; + +template +class concurrent_unordered_multiset; + +template , typename Key_equality = std::equal_to, typename Allocator = tbb::tbb_allocator > +class concurrent_unordered_set : public internal::concurrent_unordered_base< concurrent_unordered_set_traits, Allocator, false> > +{ + // Base type definitions + typedef internal::hash_compare hash_compare; + typedef concurrent_unordered_set_traits traits_type; + typedef internal::concurrent_unordered_base< traits_type > base_type; +#if __TBB_EXTRA_DEBUG +public: +#endif + using traits_type::allow_multimapping; +public: + using base_type::insert; + + // Type definitions + typedef Key key_type; + typedef typename base_type::value_type value_type; + typedef Key mapped_type; + typedef Hasher hasher; + typedef Key_equality key_equal; + typedef hash_compare key_compare; + + typedef typename base_type::allocator_type allocator_type; + typedef typename base_type::pointer pointer; + typedef typename base_type::const_pointer const_pointer; + typedef typename base_type::reference reference; + typedef typename base_type::const_reference const_reference; + + typedef typename base_type::size_type size_type; + typedef typename base_type::difference_type difference_type; + + typedef typename base_type::iterator iterator; + typedef typename base_type::const_iterator const_iterator; + typedef typename base_type::iterator local_iterator; + typedef typename base_type::const_iterator const_local_iterator; +#if __TBB_UNORDERED_NODE_HANDLE_PRESENT + typedef typename base_type::node_type node_type; +#endif /*__TBB_UNORDERED_NODE_HANDLE_PRESENT*/ + + // Construction/destruction/copying + explicit concurrent_unordered_set(size_type n_of_buckets = base_type::initial_bucket_number, const hasher& a_hasher = hasher(), + const key_equal& a_keyeq = key_equal(), const allocator_type& a = allocator_type()) + : base_type(n_of_buckets, key_compare(a_hasher, a_keyeq), a) + {} + + concurrent_unordered_set(size_type n_of_buckets, const allocator_type& a) + : base_type(n_of_buckets, key_compare(hasher(), key_equal()), a) + {} + + concurrent_unordered_set(size_type n_of_buckets, const hasher& a_hasher, const allocator_type& a) + : base_type(n_of_buckets, key_compare(a_hasher, key_equal()), a) + {} + + explicit concurrent_unordered_set(const Allocator& a) : base_type(base_type::initial_bucket_number, key_compare(), a) + {} + + template + concurrent_unordered_set(Iterator first, Iterator last, size_type n_of_buckets = 
base_type::initial_bucket_number, + const hasher& a_hasher = hasher(), const key_equal& a_keyeq = key_equal(), const allocator_type& a = allocator_type()) + : base_type(n_of_buckets, key_compare(a_hasher, a_keyeq), a) + { + insert(first, last); + } + + template + concurrent_unordered_set(Iterator first, Iterator last, size_type n_of_buckets, const allocator_type& a) + : base_type(n_of_buckets, key_compare(hasher(), key_equal()), a) + { + insert(first, last); + } + + template + concurrent_unordered_set(Iterator first, Iterator last, size_type n_of_buckets, const hasher& a_hasher, const allocator_type& a) + : base_type(n_of_buckets, key_compare(a_hasher, key_equal()), a) + { + insert(first, last); + } + +#if __TBB_INITIALIZER_LISTS_PRESENT + //! Constructor from initializer_list + concurrent_unordered_set(std::initializer_list il, size_type n_of_buckets = base_type::initial_bucket_number, const hasher& a_hasher = hasher(), + const key_equal& a_keyeq = key_equal(), const allocator_type& a = allocator_type()) + : base_type(n_of_buckets, key_compare(a_hasher, a_keyeq), a) + { + insert(il.begin(),il.end()); + } + + concurrent_unordered_set(std::initializer_list il, size_type n_of_buckets, const allocator_type& a) + : base_type(n_of_buckets, key_compare(hasher(), key_equal()), a) + { + insert(il.begin(), il.end()); + } + + concurrent_unordered_set(std::initializer_list il, size_type n_of_buckets, const hasher& a_hasher, const allocator_type& a) + : base_type(n_of_buckets, key_compare(a_hasher, key_equal()), a) + { + insert(il.begin(), il.end()); + } + +#endif //# __TBB_INITIALIZER_LISTS_PRESENT + +#if __TBB_CPP11_RVALUE_REF_PRESENT && !__TBB_IMPLICIT_MOVE_PRESENT + concurrent_unordered_set(const concurrent_unordered_set& table) + : base_type(table) + {} + + concurrent_unordered_set& operator=(const concurrent_unordered_set& table) + { + return static_cast(base_type::operator=(table)); + } + + concurrent_unordered_set(concurrent_unordered_set&& table) + : base_type(std::move(table)) + {} + + concurrent_unordered_set& operator=(concurrent_unordered_set&& table) + { + return static_cast(base_type::operator=(std::move(table))); + } +#endif //__TBB_CPP11_RVALUE_REF_PRESENT && !__TBB_IMPLICIT_MOVE_PRESENT + +#if __TBB_CPP11_RVALUE_REF_PRESENT + concurrent_unordered_set(concurrent_unordered_set&& table, const Allocator& a) + : base_type(std::move(table), a) + {} +#endif /*__TBB_CPP11_RVALUE_REF_PRESENT*/ + +#if __TBB_UNORDERED_NODE_HANDLE_PRESENT + template + void merge(concurrent_unordered_set& source) + { this->internal_merge(source); } + + template + void merge(concurrent_unordered_set&& source) + { this->internal_merge(source); } + + template + void merge(concurrent_unordered_multiset& source) + { this->internal_merge(source); } + + template + void merge(concurrent_unordered_multiset&& source) + { this->internal_merge(source); } + +#endif //__TBB_UNORDERED_NODE_HANDLE_PRESENT + + concurrent_unordered_set(const concurrent_unordered_set& table, const Allocator& a) + : base_type(table, a) + {} + +}; + +#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT + +namespace internal { +using namespace tbb::internal; + +template typename Set, typename T, typename... 
Args> +using cu_set_t = Set < + T, + std::conditional_t< (sizeof...(Args)>0) && !is_allocator_v< pack_element_t<0, Args...> >, + pack_element_t<0, Args...>, tbb_hash >, + std::conditional_t< (sizeof...(Args)>1) && !is_allocator_v< pack_element_t<1, Args...> >, + pack_element_t<1, Args...>, std::equal_to >, + std::conditional_t< (sizeof...(Args)>0) && is_allocator_v< pack_element_t >, + pack_element_t, tbb_allocator > +>; +} + +// Deduction guide for the constructor from two iterators +template +concurrent_unordered_set(I, I) +-> internal::cu_set_t>; + +// Deduction guide for the constructor from two iterators and hasher/equality/allocator +template +concurrent_unordered_set(I, I, size_t, Args...) +-> internal::cu_set_t, Args...>; + +// Deduction guide for the constructor from an initializer_list +template +concurrent_unordered_set(std::initializer_list) +-> internal::cu_set_t; + +// Deduction guide for the constructor from an initializer_list and hasher/equality/allocator +template +concurrent_unordered_set(std::initializer_list, size_t, Args...) +-> internal::cu_set_t; + +#endif /*__TBB_CPP17_DEDUCTION_GUIDES_PRESENT */ + +template , typename Key_equality = std::equal_to, + typename Allocator = tbb::tbb_allocator > +class concurrent_unordered_multiset : + public internal::concurrent_unordered_base< concurrent_unordered_set_traits, Allocator, true> > +{ + // Base type definitions + typedef internal::hash_compare hash_compare; + typedef concurrent_unordered_set_traits traits_type; + typedef internal::concurrent_unordered_base< traits_type > base_type; +#if __TBB_EXTRA_DEBUG +public: +#endif + using traits_type::allow_multimapping; +public: + using base_type::insert; + + // Type definitions + typedef Key key_type; + typedef typename base_type::value_type value_type; + typedef Key mapped_type; + typedef Hasher hasher; + typedef Key_equality key_equal; + typedef hash_compare key_compare; + + typedef typename base_type::allocator_type allocator_type; + typedef typename base_type::pointer pointer; + typedef typename base_type::const_pointer const_pointer; + typedef typename base_type::reference reference; + typedef typename base_type::const_reference const_reference; + + typedef typename base_type::size_type size_type; + typedef typename base_type::difference_type difference_type; + + typedef typename base_type::iterator iterator; + typedef typename base_type::const_iterator const_iterator; + typedef typename base_type::iterator local_iterator; + typedef typename base_type::const_iterator const_local_iterator; +#if __TBB_UNORDERED_NODE_HANDLE_PRESENT + typedef typename base_type::node_type node_type; +#endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT + + // Construction/destruction/copying + explicit concurrent_unordered_multiset(size_type n_of_buckets = base_type::initial_bucket_number, + const hasher& a_hasher = hasher(), const key_equal& a_keyeq = key_equal(), + const allocator_type& a = allocator_type()) + : base_type(n_of_buckets, key_compare(a_hasher, a_keyeq), a) + {} + + concurrent_unordered_multiset(size_type n_of_buckets, const allocator_type& a) + : base_type(n_of_buckets, key_compare(hasher(), key_equal()), a) + {} + + concurrent_unordered_multiset(size_type n_of_buckets, const hasher& a_hasher, + const allocator_type& a) + : base_type(n_of_buckets, key_compare(a_hasher, key_equal()), a) + {} + + explicit concurrent_unordered_multiset(const Allocator& a) : base_type(base_type::initial_bucket_number, key_compare(), a) + {} + + template + concurrent_unordered_multiset(Iterator first, 
Iterator last, size_type n_of_buckets = base_type::initial_bucket_number, + const hasher& a_hasher = hasher(), const key_equal& a_keyeq = key_equal(), + const allocator_type& a = allocator_type()) + : base_type(n_of_buckets, key_compare(a_hasher, a_keyeq), a) + { + insert(first, last); + } + + template + concurrent_unordered_multiset(Iterator first, Iterator last, size_type n_of_buckets, const allocator_type& a) + : base_type(n_of_buckets, key_compare(hasher(), key_equal()), a) + { + insert(first, last); + } + + template + concurrent_unordered_multiset(Iterator first, Iterator last, size_type n_of_buckets, const hasher& a_hasher, + const allocator_type& a) + : base_type(n_of_buckets, key_compare(a_hasher, key_equal()), a) + { + insert(first, last); + } + +#if __TBB_INITIALIZER_LISTS_PRESENT + //! Constructor from initializer_list + concurrent_unordered_multiset(std::initializer_list il, size_type n_of_buckets = base_type::initial_bucket_number, + const hasher& a_hasher = hasher(), const key_equal& a_keyeq = key_equal(), const allocator_type& a = allocator_type()) + : base_type(n_of_buckets, key_compare(a_hasher, a_keyeq), a) + { + insert(il.begin(),il.end()); + } + + concurrent_unordered_multiset(std::initializer_list il, size_type n_of_buckets, const allocator_type& a) + : base_type(n_of_buckets, key_compare(hasher(), key_equal()), a) + { + insert(il.begin(), il.end()); + } + + concurrent_unordered_multiset(std::initializer_list il, size_type n_of_buckets, const hasher& a_hasher, + const allocator_type& a) + : base_type(n_of_buckets, key_compare(a_hasher, key_equal()), a) + { + insert(il.begin(), il.end()); + } + +#endif //# __TBB_INITIALIZER_LISTS_PRESENT + + +#if __TBB_CPP11_RVALUE_REF_PRESENT && !__TBB_IMPLICIT_MOVE_PRESENT + concurrent_unordered_multiset(const concurrent_unordered_multiset& table) + : base_type(table) + {} + + concurrent_unordered_multiset& operator=(const concurrent_unordered_multiset& table) + { + return static_cast(base_type::operator=(table)); + } + + concurrent_unordered_multiset(concurrent_unordered_multiset&& table) + : base_type(std::move(table)) + {} + + concurrent_unordered_multiset& operator=(concurrent_unordered_multiset&& table) + { + return static_cast(base_type::operator=(std::move(table))); + } +#endif //__TBB_CPP11_RVALUE_REF_PRESENT && !__TBB_IMPLICIT_MOVE_PRESENT + +#if __TBB_CPP11_RVALUE_REF_PRESENT + concurrent_unordered_multiset(concurrent_unordered_multiset&& table, const Allocator& a) + : base_type(std::move(table), a) + { + } +#endif /*__TBB_CPP11_RVALUE_REF_PRESENT*/ + +#if __TBB_UNORDERED_NODE_HANDLE_PRESENT + template + void merge(concurrent_unordered_set& source) + { this->internal_merge(source); } + + template + void merge(concurrent_unordered_set&& source) + { this->internal_merge(source); } + + template + void merge(concurrent_unordered_multiset& source) + { this->internal_merge(source); } + + template + void merge(concurrent_unordered_multiset&& source) + { this->internal_merge(source); } + +#endif //__TBB_UNORDERED_NODE_HANDLE_PRESENT + + concurrent_unordered_multiset(const concurrent_unordered_multiset& table, const Allocator& a) + : base_type(table, a) + {} +}; + +#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT + +// Deduction guide for the constructor from two iterators +template +concurrent_unordered_multiset(I, I) +-> internal::cu_set_t>; + +// Deduction guide for the constructor from two iterators and hasher/equality/allocator +template +concurrent_unordered_multiset(I, I, size_t, Args...) 
+-> internal::cu_set_t, Args...>; + +// Deduction guide for the constructor from an initializer_list +template +concurrent_unordered_multiset(std::initializer_list) +-> internal::cu_set_t; + +// Deduction guide for the constructor from an initializer_list and hasher/equality/allocator +template +concurrent_unordered_multiset(std::initializer_list, size_t, Args...) +-> internal::cu_set_t; + +#endif /* __TBB_CPP17_DEDUCTION_GUIDES_PRESENT */ +} // namespace interface5 + +using interface5::concurrent_unordered_set; +using interface5::concurrent_unordered_multiset; + +} // namespace tbb + +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_concurrent_unordered_set_H_include_area + +#endif// __TBB_concurrent_unordered_set_H diff --git a/ohos/arm64-v8a/include/tbb/concurrent_vector.h b/ohos/arm64-v8a/include/tbb/concurrent_vector.h new file mode 100644 index 00000000..a4988aaa --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/concurrent_vector.h @@ -0,0 +1,1396 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_concurrent_vector_H +#define __TBB_concurrent_vector_H + +#define __TBB_concurrent_vector_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + +#include "tbb_stddef.h" +#include "tbb_exception.h" +#include "atomic.h" +#include "cache_aligned_allocator.h" +#include "blocked_range.h" +#include "tbb_machine.h" +#include "tbb_profiling.h" +#include +#include // for memset() +#include __TBB_STD_SWAP_HEADER +#include +#include + +#include "internal/_allocator_traits.h" + +#if _MSC_VER==1500 && !__INTEL_COMPILER + // VS2008/VC9 seems to have an issue; limits pull in math.h + #pragma warning( push ) + #pragma warning( disable: 4985 ) +#endif +#include /* std::numeric_limits */ +#if _MSC_VER==1500 && !__INTEL_COMPILER + #pragma warning( pop ) +#endif + +#if __TBB_INITIALIZER_LISTS_PRESENT + #include +#endif + +#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) + // Workaround for overzealous compiler warnings in /Wp64 mode + #pragma warning (push) +#if defined(_Wp64) + #pragma warning (disable: 4267) +#endif + #pragma warning (disable: 4127) //warning C4127: conditional expression is constant +#endif + +namespace tbb { + +template > +class concurrent_vector; + +//! @cond INTERNAL +namespace internal { + + template + class vector_iterator; + + //! Bad allocation marker + static void *const vector_allocation_error_flag = reinterpret_cast(size_t(63)); + + //! Exception helper function + template + void handle_unconstructed_elements(T* array, size_t n_of_elements){ + std::memset( static_cast(array), 0, n_of_elements * sizeof( T ) ); + } + + //! Base class of concurrent vector implementation. 
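A short sketch of the concurrent_unordered_set container added above, used as a thread-safe de-duplication filter, before the concurrent_vector internals are defined. insert() returns a std::pair<iterator, bool>; the bool is true only for the thread that actually inserted the key. Key ranges and task counts are illustrative.

#include "tbb/concurrent_unordered_set.h"
#include "tbb/parallel_for.h"
#include "tbb/atomic.h"
#include <cstdio>

int main() {
    tbb::concurrent_unordered_set<int> seen;
    tbb::atomic<int> first_time;
    first_time = 0;
    tbb::parallel_for(0, 10000, [&](int i) {
        // insert() is thread-safe; .second is true only for the one thread
        // that actually inserted this key, so duplicate work can be skipped.
        if (seen.insert(i % 256).second)
            ++first_time;
    });
    std::printf("distinct=%zu first_insertions=%d\n", seen.size(), int(first_time));
    return 0;   // expected: distinct=256, first_insertions=256
}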
+ /** @ingroup containers */ + class concurrent_vector_base_v3 { + protected: + + // Basic types declarations + typedef size_t segment_index_t; + typedef size_t size_type; + + // Using enumerations due to Mac linking problems of static const variables + enum { + // Size constants + default_initial_segments = 1, // 2 initial items + //! Number of slots for segment pointers inside the class + pointers_per_short_table = 3, // to fit into 8 words of entire structure + pointers_per_long_table = sizeof(segment_index_t) * 8 // one segment per bit + }; + + struct segment_not_used {}; + struct segment_allocated {}; + struct segment_allocation_failed {}; + + class segment_t; + class segment_value_t { + void* array; + private: + //TODO: More elegant way to grant access to selected functions _only_? + friend class segment_t; + explicit segment_value_t(void* an_array):array(an_array) {} + public: + friend bool operator==(segment_value_t const& lhs, segment_not_used ) { return lhs.array == 0;} + friend bool operator==(segment_value_t const& lhs, segment_allocated) { return lhs.array > internal::vector_allocation_error_flag;} + friend bool operator==(segment_value_t const& lhs, segment_allocation_failed) { return lhs.array == internal::vector_allocation_error_flag;} + template + friend bool operator!=(segment_value_t const& lhs, argument_type arg) { return ! (lhs == arg);} + + template + T* pointer() const { return static_cast(const_cast(array)); } + }; + + friend void enforce_segment_allocated(segment_value_t const& s, internal::exception_id exception = eid_bad_last_alloc){ + if(s != segment_allocated()){ + internal::throw_exception(exception); + } + } + + // Segment pointer. + class segment_t { + atomic array; + public: + segment_t(){ store(segment_not_used());} + //Copy ctor and assignment operator are defined to ease using of stl algorithms. + //These algorithms usually not a synchronization point, so, semantic is + //intentionally relaxed here. + segment_t(segment_t const& rhs ){ array.store(rhs.array.load());} + + void swap(segment_t & rhs ){ + tbb::internal::swap(array, rhs.array); + } + + segment_t& operator=(segment_t const& rhs ){ + array.store(rhs.array.load()); + return *this; + } + + template + segment_value_t load() const { return segment_value_t(array.load());} + + template + void store(segment_not_used) { + array.store(0); + } + + template + void store(segment_allocation_failed) { + __TBB_ASSERT(load() != segment_allocated(),"transition from \"allocated\" to \"allocation failed\" state looks non-logical"); + array.store(internal::vector_allocation_error_flag); + } + + template + void store(void* allocated_segment_pointer) __TBB_NOEXCEPT(true) { + __TBB_ASSERT(segment_value_t(allocated_segment_pointer) == segment_allocated(), + "other overloads of store should be used for marking segment as not_used or allocation_failed" ); + array.store(allocated_segment_pointer); + } + +#if TBB_USE_ASSERT + ~segment_t() { + __TBB_ASSERT(load() != segment_allocated(), "should have been freed by clear" ); + } +#endif /* TBB_USE_ASSERT */ + }; + friend void swap(segment_t & , segment_t & ) __TBB_NOEXCEPT(true); + + // Data fields + + //! allocator function pointer + void* (*vector_allocator_ptr)(concurrent_vector_base_v3 &, size_t); + + //! count of segments in the first block + atomic my_first_block; + + //! Requested size of vector + atomic my_early_size; + + //! Pointer to the segments table + atomic my_segment; + + //! 
embedded storage of segment pointers + segment_t my_storage[pointers_per_short_table]; + + // Methods + + concurrent_vector_base_v3() { + //Here the semantic is intentionally relaxed. + //The reason this is next: + //Object that is in middle of construction (i.e. its constructor is not yet finished) + //cannot be used concurrently until the construction is finished. + //Thus to flag other threads that construction is finished, some synchronization with + //acquire-release semantic should be done by the (external) code that uses the vector. + //So, no need to do the synchronization inside the vector. + + my_early_size.store(0); + my_first_block.store(0); // here is not default_initial_segments + my_segment.store(my_storage); + } + + __TBB_EXPORTED_METHOD ~concurrent_vector_base_v3(); + + //these helpers methods use the fact that segments are allocated so + //that every segment size is a (increasing) power of 2. + //with one exception 0 segment has size of 2 as well segment 1; + //e.g. size of segment with index of 3 is 2^3=8; + static segment_index_t segment_index_of( size_type index ) { + return segment_index_t( __TBB_Log2( index|1 ) ); + } + + static segment_index_t segment_base( segment_index_t k ) { + return (segment_index_t(1)< + friend class vector_iterator; + + }; + + inline void swap(concurrent_vector_base_v3::segment_t & lhs, concurrent_vector_base_v3::segment_t & rhs) __TBB_NOEXCEPT(true) { + lhs.swap(rhs); + } + + typedef concurrent_vector_base_v3 concurrent_vector_base; + + //! Meets requirements of a forward iterator for STL and a Value for a blocked_range.*/ + /** Value is either the T or const T type of the container. + @ingroup containers */ + template + class vector_iterator + { + //! concurrent_vector over which we are iterating. + Container* my_vector; + + //! Index into the vector + size_t my_index; + + //! Caches my_vector->internal_subscript(my_index) + /** NULL if cached value is not available */ + mutable Value* my_item; + + template + friend vector_iterator operator+( ptrdiff_t offset, const vector_iterator& v ); + + template + friend bool operator==( const vector_iterator& i, const vector_iterator& j ); + + template + friend bool operator<( const vector_iterator& i, const vector_iterator& j ); + + template + friend ptrdiff_t operator-( const vector_iterator& i, const vector_iterator& j ); + + template + friend class internal::vector_iterator; + +#if !__TBB_TEMPLATE_FRIENDS_BROKEN + template + friend class tbb::concurrent_vector; +#else +public: +#endif + + vector_iterator( const Container& vector, size_t index, void *ptr = 0 ) : + my_vector(const_cast(&vector)), + my_index(index), + my_item(static_cast(ptr)) + {} + + public: + //! 
Default constructor + vector_iterator() : my_vector(NULL), my_index(~size_t(0)), my_item(NULL) {} + + vector_iterator( const vector_iterator& other ) : + my_vector(other.my_vector), + my_index(other.my_index), + my_item(other.my_item) + {} + + vector_iterator& operator=( const vector_iterator& other ) + { + my_vector=other.my_vector; + my_index=other.my_index; + my_item=other.my_item; + return *this; + } + + vector_iterator operator+( ptrdiff_t offset ) const { + return vector_iterator( *my_vector, my_index+offset ); + } + vector_iterator &operator+=( ptrdiff_t offset ) { + my_index+=offset; + my_item = NULL; + return *this; + } + vector_iterator operator-( ptrdiff_t offset ) const { + return vector_iterator( *my_vector, my_index-offset ); + } + vector_iterator &operator-=( ptrdiff_t offset ) { + my_index-=offset; + my_item = NULL; + return *this; + } + Value& operator*() const { + Value* item = my_item; + if( !item ) { + item = my_item = &my_vector->internal_subscript(my_index); + } + __TBB_ASSERT( item==&my_vector->internal_subscript(my_index), "corrupt cache" ); + return *item; + } + Value& operator[]( ptrdiff_t k ) const { + return my_vector->internal_subscript(my_index+k); + } + Value* operator->() const {return &operator*();} + + //! Pre increment + vector_iterator& operator++() { + size_t element_index = ++my_index; + if( my_item ) { + //TODO: consider using of knowledge about "first_block optimization" here as well? + if( concurrent_vector_base::is_first_element_in_segment(element_index)) { + //if the iterator crosses a segment boundary, the pointer become invalid + //as possibly next segment is in another memory location + my_item= NULL; + } else { + ++my_item; + } + } + return *this; + } + + //! Pre decrement + vector_iterator& operator--() { + __TBB_ASSERT( my_index>0, "operator--() applied to iterator already at beginning of concurrent_vector" ); + size_t element_index = my_index--; + if( my_item ) { + if(concurrent_vector_base::is_first_element_in_segment(element_index)) { + //if the iterator crosses a segment boundary, the pointer become invalid + //as possibly next segment is in another memory location + my_item= NULL; + } else { + --my_item; + } + } + return *this; + } + + //! Post increment + vector_iterator operator++(int) { + vector_iterator result = *this; + operator++(); + return result; + } + + //! 
Post decrement + vector_iterator operator--(int) { + vector_iterator result = *this; + operator--(); + return result; + } + + // STL support + + typedef ptrdiff_t difference_type; + typedef Value value_type; + typedef Value* pointer; + typedef Value& reference; + typedef std::random_access_iterator_tag iterator_category; + }; + + template + vector_iterator operator+( ptrdiff_t offset, const vector_iterator& v ) { + return vector_iterator( *v.my_vector, v.my_index+offset ); + } + + template + bool operator==( const vector_iterator& i, const vector_iterator& j ) { + return i.my_index==j.my_index && i.my_vector == j.my_vector; + } + + template + bool operator!=( const vector_iterator& i, const vector_iterator& j ) { + return !(i==j); + } + + template + bool operator<( const vector_iterator& i, const vector_iterator& j ) { + return i.my_index + bool operator>( const vector_iterator& i, const vector_iterator& j ) { + return j + bool operator>=( const vector_iterator& i, const vector_iterator& j ) { + return !(i + bool operator<=( const vector_iterator& i, const vector_iterator& j ) { + return !(j + ptrdiff_t operator-( const vector_iterator& i, const vector_iterator& j ) { + return ptrdiff_t(i.my_index)-ptrdiff_t(j.my_index); + } + + template + class allocator_base { + public: + typedef typename tbb::internal::allocator_rebind::type allocator_type; + allocator_type my_allocator; + allocator_base(const allocator_type &a = allocator_type() ) : my_allocator(a) {} + }; + +} // namespace internal +//! @endcond + +//! Concurrent vector container +/** concurrent_vector is a container having the following main properties: + - It provides random indexed access to its elements. The index of the first element is 0. + - It ensures safe concurrent growing its size (different threads can safely append new elements). + - Adding new elements does not invalidate existing iterators and does not change indices of existing items. + +@par Compatibility + The class meets all Container Requirements and Reversible Container Requirements from + C++ Standard (See ISO/IEC 14882:2003(E), clause 23.1). But it doesn't meet + Sequence Requirements due to absence of insert() and erase() methods. + +@par Exception Safety + Methods working with memory allocation and/or new elements construction can throw an + exception if allocator fails to allocate memory or element's default constructor throws one. + Concurrent vector's element of type T must conform to the following requirements: + - Throwing an exception is forbidden for destructor of T. + - Default constructor of T must not throw an exception OR its non-virtual destructor must safely work when its object memory is zero-initialized. + . + Otherwise, the program's behavior is undefined. +@par + If an exception happens inside growth or assignment operation, an instance of the vector becomes invalid unless it is stated otherwise in the method documentation. + Invalid state means: + - There are no guarantees that all items were initialized by a constructor. The rest of items is zero-filled, including item where exception happens. + - An invalid vector instance cannot be repaired; it is unable to grow anymore. + - Size and capacity reported by the vector are incorrect, and calculated as if the failed operation were successful. + - Attempt to access not allocated elements using operator[] or iterators results in access violation or segmentation fault exception, and in case of using at() method a C++ exception is thrown. + . 
+ If a concurrent grow operation successfully completes, all the elements it has added to the vector will remain valid and accessible even if one of subsequent grow operations fails. + +@par Fragmentation + Unlike an STL vector, a concurrent_vector does not move existing elements if it needs + to allocate more memory. The container is divided into a series of contiguous arrays of + elements. The first reservation, growth, or assignment operation determines the size of + the first array. Using small number of elements as initial size incurs fragmentation that + may increase element access time. Internal layout can be optimized by method compact() that + merges several smaller arrays into one solid. + +@par Changes since TBB 2.1 + - Fixed guarantees of concurrent_vector::size() and grow_to_at_least() methods to assure elements are allocated. + - Methods end()/rbegin()/back() are partly thread-safe since they use size() to get the end of vector + - Added resize() methods (not thread-safe) + - Added cbegin/cend/crbegin/crend methods + - Changed return type of methods grow* and push_back to iterator + +@par Changes since TBB 2.0 + - Implemented exception-safety guarantees + - Added template argument for allocator + - Added allocator argument in constructors + - Faster index calculation + - First growth call specifies a number of segments to be merged in the first allocation. + - Fixed memory blow up for swarm of vector's instances of small size + - Added grow_by(size_type n, const_reference t) growth using copying constructor to init new items. + - Added STL-like constructors. + - Added operators ==, < and derivatives + - Added at() method, approved for using after an exception was thrown inside the vector + - Added get_allocator() method. + - Added assign() methods + - Added compact() method to defragment first segments + - Added swap() method + - range() defaults on grainsize = 1 supporting auto grainsize algorithms. 
+ + @ingroup containers */ +template +class concurrent_vector: protected internal::allocator_base, + private internal::concurrent_vector_base { +private: + template + class generic_range_type: public blocked_range { + public: + typedef T value_type; + typedef T& reference; + typedef const T& const_reference; + typedef I iterator; + typedef ptrdiff_t difference_type; + generic_range_type( I begin_, I end_, size_t grainsize_ = 1) : blocked_range(begin_,end_,grainsize_) {} + template + generic_range_type( const generic_range_type& r) : blocked_range(r.begin(),r.end(),r.grainsize()) {} + generic_range_type( generic_range_type& r, split ) : blocked_range(r,split()) {} + }; + + template + friend class internal::vector_iterator; + +public: + //------------------------------------------------------------------------ + // STL compatible types + //------------------------------------------------------------------------ + typedef internal::concurrent_vector_base_v3::size_type size_type; + typedef typename internal::allocator_base::allocator_type allocator_type; + + typedef T value_type; + typedef ptrdiff_t difference_type; + typedef T& reference; + typedef const T& const_reference; + typedef T *pointer; + typedef const T *const_pointer; + + typedef internal::vector_iterator iterator; + typedef internal::vector_iterator const_iterator; + +#if !defined(_MSC_VER) || _CPPLIB_VER>=300 + // Assume ISO standard definition of std::reverse_iterator + typedef std::reverse_iterator reverse_iterator; + typedef std::reverse_iterator const_reverse_iterator; +#else + // Use non-standard std::reverse_iterator + typedef std::reverse_iterator reverse_iterator; + typedef std::reverse_iterator const_reverse_iterator; +#endif /* defined(_MSC_VER) && (_MSC_VER<1300) */ + + //------------------------------------------------------------------------ + // Parallel algorithm support + //------------------------------------------------------------------------ + typedef generic_range_type range_type; + typedef generic_range_type const_range_type; + + //------------------------------------------------------------------------ + // STL compatible constructors & destructors + //------------------------------------------------------------------------ + + //! Construct empty vector. + explicit concurrent_vector(const allocator_type &a = allocator_type()) + : internal::allocator_base(a), internal::concurrent_vector_base() + { + vector_allocator_ptr = &internal_allocator; + } + + //Constructors are not required to have synchronization + //(for more details see comment in the concurrent_vector_base constructor). +#if __TBB_INITIALIZER_LISTS_PRESENT + //! Constructor from initializer_list + concurrent_vector(std::initializer_list init_list, const allocator_type &a = allocator_type()) + : internal::allocator_base(a), internal::concurrent_vector_base() + { + vector_allocator_ptr = &internal_allocator; + __TBB_TRY { + internal_assign_iterators(init_list.begin(), init_list.end()); + } __TBB_CATCH(...) { + segment_t *table = my_segment.load();; + internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load()); + __TBB_RETHROW(); + } + + } +#endif //# __TBB_INITIALIZER_LISTS_PRESENT + + //! Copying constructor + concurrent_vector( const concurrent_vector& vector, const allocator_type& a = allocator_type() ) + : internal::allocator_base(a), internal::concurrent_vector_base() + { + vector_allocator_ptr = &internal_allocator; + __TBB_TRY { + internal_copy(vector, sizeof(T), ©_array); + } __TBB_CATCH(...) 
{ + segment_t *table = my_segment.load(); + internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load()); + __TBB_RETHROW(); + } + } + +#if __TBB_CPP11_RVALUE_REF_PRESENT + //! Move constructor + //TODO add __TBB_NOEXCEPT(true) and static_assert(std::has_nothrow_move_constructor::value) + concurrent_vector( concurrent_vector&& source) + : internal::allocator_base(std::move(source)), internal::concurrent_vector_base() + { + vector_allocator_ptr = &internal_allocator; + concurrent_vector_base_v3::internal_swap(source); + } + + concurrent_vector( concurrent_vector&& source, const allocator_type& a) + : internal::allocator_base(a), internal::concurrent_vector_base() + { + vector_allocator_ptr = &internal_allocator; + //C++ standard requires instances of an allocator being compared for equality, + //which means that memory allocated by one instance is possible to deallocate with the other one. + if (a == source.my_allocator) { + concurrent_vector_base_v3::internal_swap(source); + } else { + __TBB_TRY { + internal_copy(source, sizeof(T), &move_array); + } __TBB_CATCH(...) { + segment_t *table = my_segment.load(); + internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load()); + __TBB_RETHROW(); + } + } + } + +#endif + + //! Copying constructor for vector with different allocator type + template + __TBB_DEPRECATED concurrent_vector( const concurrent_vector& vector, const allocator_type& a = allocator_type() ) + : internal::allocator_base(a), internal::concurrent_vector_base() + { + vector_allocator_ptr = &internal_allocator; + __TBB_TRY { + internal_copy(vector.internal_vector_base(), sizeof(T), ©_array); + } __TBB_CATCH(...) { + segment_t *table = my_segment.load(); + internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load() ); + __TBB_RETHROW(); + } + } + + //! Construction with initial size specified by argument n + explicit concurrent_vector(size_type n) + { + vector_allocator_ptr = &internal_allocator; + __TBB_TRY { + internal_resize( n, sizeof(T), max_size(), NULL, &destroy_array, &initialize_array ); + } __TBB_CATCH(...) { + segment_t *table = my_segment.load(); + internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load() ); + __TBB_RETHROW(); + } + } + + //! Construction with initial size specified by argument n, initialization by copying of t, and given allocator instance + concurrent_vector(size_type n, const_reference t, const allocator_type& a = allocator_type()) + : internal::allocator_base(a) + { + vector_allocator_ptr = &internal_allocator; + __TBB_TRY { + internal_resize( n, sizeof(T), max_size(), static_cast(&t), &destroy_array, &initialize_array_by ); + } __TBB_CATCH(...) { + segment_t *table = my_segment.load(); + internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load() ); + __TBB_RETHROW(); + } + } + + //! Construction with copying iteration range and given allocator instance + template + concurrent_vector(I first, I last, const allocator_type &a = allocator_type()) + : internal::allocator_base(a) + { + vector_allocator_ptr = &internal_allocator; + __TBB_TRY { + internal_assign_range(first, last, static_cast::is_integer> *>(0) ); + } __TBB_CATCH(...) { + segment_t *table = my_segment.load(); + internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load() ); + __TBB_RETHROW(); + } + } + + //! 
Assignment + concurrent_vector& operator=( const concurrent_vector& vector ) { + if( this != &vector ) + internal_assign(vector, sizeof(T), &destroy_array, &assign_array, ©_array); + return *this; + } + +#if __TBB_CPP11_RVALUE_REF_PRESENT + //TODO: add __TBB_NOEXCEPT() + //! Move assignment + concurrent_vector& operator=( concurrent_vector&& other ) { + __TBB_ASSERT(this != &other, "Move assignment to itself is prohibited "); + typedef typename tbb::internal::allocator_traits::propagate_on_container_move_assignment pocma_t; + if(pocma_t::value || this->my_allocator == other.my_allocator) { + concurrent_vector trash (std::move(*this)); + internal_swap(other); + tbb::internal::allocator_move_assignment(this->my_allocator, other.my_allocator, pocma_t()); + } else { + internal_assign(other, sizeof(T), &destroy_array, &move_assign_array, &move_array); + } + return *this; + } +#endif + //TODO: add an template assignment operator? (i.e. with different element type) + + //! Assignment for vector with different allocator type + template + __TBB_DEPRECATED concurrent_vector& operator=( const concurrent_vector& vector ) { + if( static_cast( this ) != static_cast( &vector ) ) + internal_assign(vector.internal_vector_base(), + sizeof(T), &destroy_array, &assign_array, ©_array); + return *this; + } + +#if __TBB_INITIALIZER_LISTS_PRESENT + //! Assignment for initializer_list + concurrent_vector& operator=( std::initializer_list init_list ) { + internal_clear(&destroy_array); + internal_assign_iterators(init_list.begin(), init_list.end()); + return *this; + } +#endif //#if __TBB_INITIALIZER_LISTS_PRESENT + + //------------------------------------------------------------------------ + // Concurrent operations + //------------------------------------------------------------------------ + //! Grow by "delta" elements. + /** Returns iterator pointing to the first new element. */ + iterator grow_by( size_type delta ) { + return iterator(*this, delta ? internal_grow_by( delta, sizeof(T), &initialize_array, NULL ) : my_early_size.load()); + } + + //! Grow by "delta" elements using copying constructor. + /** Returns iterator pointing to the first new element. */ + iterator grow_by( size_type delta, const_reference t ) { + return iterator(*this, delta ? internal_grow_by( delta, sizeof(T), &initialize_array_by, static_cast(&t) ) : my_early_size.load()); + } + + /** Returns iterator pointing to the first new element. */ + template + iterator grow_by( I first, I last ) { + typename std::iterator_traits::difference_type delta = std::distance(first, last); + __TBB_ASSERT( delta >= 0, NULL); + + return iterator(*this, delta ? internal_grow_by(delta, sizeof(T), ©_range, static_cast(&first)) : my_early_size.load()); + } + +#if __TBB_INITIALIZER_LISTS_PRESENT + /** Returns iterator pointing to the first new element. */ + iterator grow_by( std::initializer_list init_list ) { + return grow_by( init_list.begin(), init_list.end() ); + } +#endif //#if __TBB_INITIALIZER_LISTS_PRESENT + + //! Append minimal sequence of elements such that size()>=n. + /** The new elements are default constructed. Blocks until all elements in range [0..n) are allocated. + May return while other elements are being constructed by other threads. + Returns iterator that points to beginning of appended sequence. + If no elements were appended, returns iterator pointing to nth element. 
*/ + iterator grow_to_at_least( size_type n ) { + size_type m=0; + if( n ) { + m = internal_grow_to_at_least_with_result( n, sizeof(T), &initialize_array, NULL ); + if( m>n ) m=n; + } + return iterator(*this, m); + }; + + /** Analogous to grow_to_at_least( size_type n ) with exception that the new + elements are initialized by copying of t instead of default construction. */ + iterator grow_to_at_least( size_type n, const_reference t ) { + size_type m=0; + if( n ) { + m = internal_grow_to_at_least_with_result( n, sizeof(T), &initialize_array_by, &t); + if( m>n ) m=n; + } + return iterator(*this, m); + }; + + //! Push item + /** Returns iterator pointing to the new element. */ + iterator push_back( const_reference item ) + { + push_back_helper prolog(*this); + new(prolog.internal_push_back_result()) T(item); + return prolog.return_iterator_and_dismiss(); + } + +#if __TBB_CPP11_RVALUE_REF_PRESENT + //! Push item, move-aware + /** Returns iterator pointing to the new element. */ + iterator push_back( T&& item ) + { + push_back_helper prolog(*this); + new(prolog.internal_push_back_result()) T(std::move(item)); + return prolog.return_iterator_and_dismiss(); + } +#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT + //! Push item, create item "in place" with provided arguments + /** Returns iterator pointing to the new element. */ + template + iterator emplace_back( Args&&... args ) + { + push_back_helper prolog(*this); + new(prolog.internal_push_back_result()) T(std::forward(args)...); + return prolog.return_iterator_and_dismiss(); + } +#endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT +#endif //__TBB_CPP11_RVALUE_REF_PRESENT + //! Get reference to element at given index. + /** This method is thread-safe for concurrent reads, and also while growing the vector, + as long as the calling thread has checked that index < size(). */ + reference operator[]( size_type index ) { + return internal_subscript(index); + } + + //! Get const reference to element at given index. + const_reference operator[]( size_type index ) const { + return internal_subscript(index); + } + + //! Get reference to element at given index. Throws exceptions on errors. + reference at( size_type index ) { + return internal_subscript_with_exceptions(index); + } + + //! Get const reference to element at given index. Throws exceptions on errors. + const_reference at( size_type index ) const { + return internal_subscript_with_exceptions(index); + } + + //! Get range for iterating with parallel algorithms + range_type range( size_t grainsize = 1 ) { + return range_type( begin(), end(), grainsize ); + } + + //! Get const range for iterating with parallel algorithms + const_range_type range( size_t grainsize = 1 ) const { + return const_range_type( begin(), end(), grainsize ); + } + + //------------------------------------------------------------------------ + // Capacity + //------------------------------------------------------------------------ + //! Return size of vector. It may include elements under construction + size_type size() const { + size_type sz = my_early_size, cp = internal_capacity(); + return cp < sz ? cp : sz; + } + + //! Return false if vector is not empty or has elements under construction at least. + bool empty() const {return !my_early_size;} + + //! Maximum size to which array can grow without allocating more memory. Concurrent allocations are not included in the value. + size_type capacity() const {return internal_capacity();} + + //! Allocate enough space to grow to size n without having to allocate more memory later. 
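Editorial note: the growth and element-access methods declared above (push_back, emplace_back, grow_by, grow_to_at_least, operator[], at, range) are the concurrency-safe part of the interface. The sketch below is not part of the upstream header; it only illustrates intended usage, the function and variable names are invented for the example, and it assumes the headers added by this patch are on the include path.

#include "tbb/concurrent_vector.h"
#include "tbb/parallel_for.h"

// Illustrative only: append elements from many threads, then fill a batch
// obtained with grow_by(). Growth never moves already-constructed elements,
// so indices and iterators to them remain valid.
inline void append_concurrently(tbb::concurrent_vector<int>& v) {
    tbb::parallel_for(0, 1000, [&](int i) {
        v.push_back(i);                 // thread-safe append of a single element
    });
    // Append 16 default-constructed elements; the returned iterator points
    // at the first of the new elements.
    tbb::concurrent_vector<int>::iterator it = v.grow_by(16);
    for (int k = 0; k < 16; ++k, ++it)
        *it = k;
}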
+ /** Like most of the methods provided for STL compatibility, this method is *not* thread safe. + The capacity afterwards may be bigger than the requested reservation. */ + void reserve( size_type n ) { + if( n ) + internal_reserve(n, sizeof(T), max_size()); + } + + //! Resize the vector. Not thread-safe. + void resize( size_type n ) { + internal_resize( n, sizeof(T), max_size(), NULL, &destroy_array, &initialize_array ); + } + + //! Resize the vector, copy t for new elements. Not thread-safe. + void resize( size_type n, const_reference t ) { + internal_resize( n, sizeof(T), max_size(), static_cast(&t), &destroy_array, &initialize_array_by ); + } + + //! Optimize memory usage and fragmentation. + void shrink_to_fit(); + + //! Upper bound on argument to reserve. + size_type max_size() const {return (~size_type(0))/sizeof(T);} + + //------------------------------------------------------------------------ + // STL support + //------------------------------------------------------------------------ + + //! start iterator + iterator begin() {return iterator(*this,0);} + //! end iterator + iterator end() {return iterator(*this,size());} + //! start const iterator + const_iterator begin() const {return const_iterator(*this,0);} + //! end const iterator + const_iterator end() const {return const_iterator(*this,size());} + //! start const iterator + const_iterator cbegin() const {return const_iterator(*this,0);} + //! end const iterator + const_iterator cend() const {return const_iterator(*this,size());} + //! reverse start iterator + reverse_iterator rbegin() {return reverse_iterator(end());} + //! reverse end iterator + reverse_iterator rend() {return reverse_iterator(begin());} + //! reverse start const iterator + const_reverse_iterator rbegin() const {return const_reverse_iterator(end());} + //! reverse end const iterator + const_reverse_iterator rend() const {return const_reverse_iterator(begin());} + //! reverse start const iterator + const_reverse_iterator crbegin() const {return const_reverse_iterator(end());} + //! reverse end const iterator + const_reverse_iterator crend() const {return const_reverse_iterator(begin());} + //! the first item + reference front() { + __TBB_ASSERT( size()>0, NULL); + const segment_value_t& segment_value = my_segment[0].template load(); + return (segment_value.template pointer())[0]; + } + //! the first item const + const_reference front() const { + __TBB_ASSERT( size()>0, NULL); + const segment_value_t& segment_value = my_segment[0].template load(); + return (segment_value.template pointer())[0]; + } + //! the last item + reference back() { + __TBB_ASSERT( size()>0, NULL); + return internal_subscript( size()-1 ); + } + //! the last item const + const_reference back() const { + __TBB_ASSERT( size()>0, NULL); + return internal_subscript( size()-1 ); + } + //! return allocator object + allocator_type get_allocator() const { return this->my_allocator; } + + //! assign n items by copying t item + void assign(size_type n, const_reference t) { + clear(); + internal_resize( n, sizeof(T), max_size(), static_cast(&t), &destroy_array, &initialize_array_by ); + } + + //! assign range [first, last) + template + void assign(I first, I last) { + clear(); internal_assign_range( first, last, static_cast::is_integer> *>(0) ); + } + +#if __TBB_INITIALIZER_LISTS_PRESENT + //! 
assigns an initializer list + void assign(std::initializer_list init_list) { + clear(); internal_assign_iterators( init_list.begin(), init_list.end()); + } +#endif //# __TBB_INITIALIZER_LISTS_PRESENT + + //! swap two instances + void swap(concurrent_vector &vector) { + typedef typename tbb::internal::allocator_traits::propagate_on_container_swap pocs_t; + if( this != &vector && (this->my_allocator == vector.my_allocator || pocs_t::value) ) { + concurrent_vector_base_v3::internal_swap(static_cast(vector)); + tbb::internal::allocator_swap(this->my_allocator, vector.my_allocator, pocs_t()); + } + } + + //! Clear container while keeping memory allocated. + /** To free up the memory, use in conjunction with method compact(). Not thread safe **/ + void clear() { + internal_clear(&destroy_array); + } + + //! Clear and destroy vector. + ~concurrent_vector() { + segment_t *table = my_segment.load(); + internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load() ); + // base class destructor call should be then + } + + const internal::concurrent_vector_base_v3 &internal_vector_base() const { return *this; } +private: + //! Allocate k items + static void *internal_allocator(internal::concurrent_vector_base_v3 &vb, size_t k) { + return static_cast&>(vb).my_allocator.allocate(k); + } + //! Free k segments from table + void internal_free_segments(segment_t table[], segment_index_t k, segment_index_t first_block); + + //! Get reference to element at given index. + T& internal_subscript( size_type index ) const; + + //! Get reference to element at given index with errors checks + T& internal_subscript_with_exceptions( size_type index ) const; + + //! assign n items by copying t + void internal_assign_n(size_type n, const_pointer p) { + internal_resize( n, sizeof(T), max_size(), static_cast(p), &destroy_array, p? &initialize_array_by : &initialize_array ); + } + + //! True/false function override helper + /* Functions declarations: + * void foo(is_integer_tag*); + * void foo(is_integer_tag*); + * Usage example: + * foo(static_cast::is_integer>*>(0)); + */ + template class is_integer_tag; + + //! assign integer items by copying when arguments are treated as iterators. See C++ Standard 2003 23.1.1p9 + template + void internal_assign_range(I first, I last, is_integer_tag *) { + internal_assign_n(static_cast(first), &static_cast(last)); + } + //! inline proxy assign by iterators + template + void internal_assign_range(I first, I last, is_integer_tag *) { + internal_assign_iterators(first, last); + } + //! assign by iterators + template + void internal_assign_iterators(I first, I last); + + //these functions are marked __TBB_EXPORTED_FUNC as they are called from within the library + + //! Construct n instances of T, starting at "begin". + static void __TBB_EXPORTED_FUNC initialize_array( void* begin, const void*, size_type n ); + + //! Copy-construct n instances of T, starting at "begin". + static void __TBB_EXPORTED_FUNC initialize_array_by( void* begin, const void* src, size_type n ); + + //! Copy-construct n instances of T by copying single element pointed to by src, starting at "dst". + static void __TBB_EXPORTED_FUNC copy_array( void* dst, const void* src, size_type n ); + +#if __TBB_MOVE_IF_NOEXCEPT_PRESENT + //! Either opy or move-construct n instances of T, starting at "dst" by copying according element of src array. 
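Editorial note: range() plugs the container into TBB's parallel algorithms, whereas the sizing and STL-support methods above (reserve, resize, assign, swap, clear, shrink_to_fit) are explicitly not thread-safe and belong in serial phases. A minimal sketch of that split, with invented names, assuming parallel_reduce from this same patch:

#include <functional>
#include "tbb/concurrent_vector.h"
#include "tbb/parallel_reduce.h"

// Illustrative only: serial capacity management around a parallel traversal.
inline long sum_in_parallel(tbb::concurrent_vector<long>& v) {
    v.reserve(1 << 20);                       // serial phase: not thread-safe

    // Parallel phase: range() yields blocked_range chunks of iterators.
    long total = tbb::parallel_reduce(
        v.range(), 0L,
        [](const tbb::concurrent_vector<long>::range_type& r, long running) {
            for (tbb::concurrent_vector<long>::iterator i = r.begin(); i != r.end(); ++i)
                running += *i;
            return running;
        },
        std::plus<long>());

    v.shrink_to_fit();                        // serial phase: compacts segments
    return total;
}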
+ static void __TBB_EXPORTED_FUNC move_array_if_noexcept( void* dst, const void* src, size_type n ); +#endif //__TBB_MOVE_IF_NO_EXCEPT_PRESENT + +#if __TBB_CPP11_RVALUE_REF_PRESENT + //! Move-construct n instances of T, starting at "dst" by copying according element of src array. + static void __TBB_EXPORTED_FUNC move_array( void* dst, const void* src, size_type n ); + + //! Move-assign (using operator=) n instances of T, starting at "dst" by assigning according element of src array. + static void __TBB_EXPORTED_FUNC move_assign_array( void* dst, const void* src, size_type n ); +#endif + //! Copy-construct n instances of T, starting at "dst" by iterator range of [p_type_erased_iterator, p_type_erased_iterator+n). + template + static void __TBB_EXPORTED_FUNC copy_range( void* dst, const void* p_type_erased_iterator, size_type n ); + + //! Assign (using operator=) n instances of T, starting at "dst" by assigning according element of src array. + static void __TBB_EXPORTED_FUNC assign_array( void* dst, const void* src, size_type n ); + + //! Destroy n instances of T, starting at "begin". + static void __TBB_EXPORTED_FUNC destroy_array( void* begin, size_type n ); + + //! Exception-aware helper class for filling a segment by exception-danger operators of user class + class internal_loop_guide : internal::no_copy { + public: + const pointer array; + const size_type n; + size_type i; + + static const T* as_const_pointer(const void *ptr) { return static_cast(ptr); } + static T* as_pointer(const void *src) { return static_cast(const_cast(src)); } + + internal_loop_guide(size_type ntrials, void *ptr) + : array(as_pointer(ptr)), n(ntrials), i(0) {} + void init() { for(; i < n; ++i) new( &array[i] ) T(); } + void init(const void *src) { for(; i < n; ++i) new( &array[i] ) T(*as_const_pointer(src)); } + void copy(const void *src) { for(; i < n; ++i) new( &array[i] ) T(as_const_pointer(src)[i]); } + void assign(const void *src) { for(; i < n; ++i) array[i] = as_const_pointer(src)[i]; } +#if __TBB_CPP11_RVALUE_REF_PRESENT + void move_assign(const void *src) { for(; i < n; ++i) array[i] = std::move(as_pointer(src)[i]); } + void move_construct(const void *src) { for(; i < n; ++i) new( &array[i] ) T( std::move(as_pointer(src)[i]) ); } +#endif +#if __TBB_MOVE_IF_NOEXCEPT_PRESENT + void move_construct_if_noexcept(const void *src) { for(; i < n; ++i) new( &array[i] ) T( std::move_if_noexcept(as_pointer(src)[i]) ); } +#endif //__TBB_MOVE_IF_NOEXCEPT_PRESENT + + //TODO: rename to construct_range + template void iterate(I &src) { for(; i < n; ++i, ++src) new( &array[i] ) T( *src ); } + ~internal_loop_guide() { + if(i < n) {// if an exception was raised, fill the rest of items with zeros + internal::handle_unconstructed_elements(array+i, n-i); + } + } + }; + + struct push_back_helper : internal::no_copy{ + struct element_construction_guard : internal::no_copy{ + pointer element; + + element_construction_guard(pointer an_element) : element (an_element){} + void dismiss(){ element = NULL; } + ~element_construction_guard(){ + if (element){ + internal::handle_unconstructed_elements(element, 1); + } + } + }; + + concurrent_vector & v; + size_type k; + element_construction_guard g; + + push_back_helper(concurrent_vector & vector) : + v(vector), + g (static_cast(v.internal_push_back(sizeof(T),k))) + {} + + pointer internal_push_back_result(){ return g.element;} + iterator return_iterator_and_dismiss(){ + pointer ptr = g.element; + g.dismiss(); + return iterator(v, k, ptr); + } + }; +}; + +#if 
__TBB_CPP17_DEDUCTION_GUIDES_PRESENT +// Deduction guide for the constructor from two iterators +template::value_type, + typename A = cache_aligned_allocator +> concurrent_vector(I, I, const A& = A()) +-> concurrent_vector; + +// Deduction guide for the constructor from a vector and allocator +template +concurrent_vector(const concurrent_vector &, const A2 &) +-> concurrent_vector; + +// Deduction guide for the constructor from an initializer_list +template +> concurrent_vector(std::initializer_list, const A& = A()) +-> concurrent_vector; +#endif /* __TBB_CPP17_DEDUCTION_GUIDES_PRESENT */ + +#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) +#pragma warning (push) +#pragma warning (disable: 4701) // potentially uninitialized local variable "old" +#endif +template +void concurrent_vector::shrink_to_fit() { + internal_segments_table old; + __TBB_TRY { + internal_array_op2 copy_or_move_array = +#if __TBB_MOVE_IF_NOEXCEPT_PRESENT + &move_array_if_noexcept +#else + ©_array +#endif + ; + if( internal_compact( sizeof(T), &old, &destroy_array, copy_or_move_array ) ) + internal_free_segments( old.table, pointers_per_long_table, old.first_block ); // free joined and unnecessary segments + } __TBB_CATCH(...) { + if( old.first_block ) // free segment allocated for compacting. Only for support of exceptions in ctor of user T[ype] + internal_free_segments( old.table, 1, old.first_block ); + __TBB_RETHROW(); + } +} +#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) +#pragma warning (pop) +#endif // warning 4701 is back + +template +void concurrent_vector::internal_free_segments(segment_t table[], segment_index_t k, segment_index_t first_block) { + // Free the arrays + while( k > first_block ) { + --k; + segment_value_t segment_value = table[k].load(); + table[k].store(segment_not_used()); + if( segment_value == segment_allocated() ) // check for correct segment pointer + this->my_allocator.deallocate( (segment_value.pointer()), segment_size(k) ); + } + segment_value_t segment_value = table[0].load(); + if( segment_value == segment_allocated() ) { + __TBB_ASSERT( first_block > 0, NULL ); + while(k > 0) table[--k].store(segment_not_used()); + this->my_allocator.deallocate( (segment_value.pointer()), segment_size(first_block) ); + } +} + +template +T& concurrent_vector::internal_subscript( size_type index ) const { + //TODO: unify both versions of internal_subscript + __TBB_ASSERT( index < my_early_size, "index out of bounds" ); + size_type j = index; + segment_index_t k = segment_base_index_of( j ); + __TBB_ASSERT( my_segment.load() != my_storage || k < pointers_per_short_table, "index is being allocated" ); + //no need in load with acquire (load) since thread works in own space or gets + //the information about added elements via some form of external synchronization + //TODO: why not make a load of my_segment relaxed as well ? + //TODO: add an assertion that my_segment[k] is properly aligned to please ITT + segment_value_t segment_value = my_segment[k].template load(); + __TBB_ASSERT( segment_value != segment_allocation_failed(), "the instance is broken by bad allocation. 
Use at() instead" ); + __TBB_ASSERT( segment_value != segment_not_used(), "index is being allocated" ); + return (( segment_value.pointer()))[j]; +} + +template +T& concurrent_vector::internal_subscript_with_exceptions( size_type index ) const { + if( index >= my_early_size ) + internal::throw_exception(internal::eid_out_of_range); // throw std::out_of_range + size_type j = index; + segment_index_t k = segment_base_index_of( j ); + //TODO: refactor this condition into separate helper function, e.g. fits_into_small_table + if( my_segment.load() == my_storage && k >= pointers_per_short_table ) + internal::throw_exception(internal::eid_segment_range_error); // throw std::range_error + // no need in load with acquire (load) since thread works in own space or gets + //the information about added elements via some form of external synchronization + //TODO: why not make a load of my_segment relaxed as well ? + //TODO: add an assertion that my_segment[k] is properly aligned to please ITT + segment_value_t segment_value = my_segment[k].template load(); + enforce_segment_allocated(segment_value, internal::eid_index_range_error); + return (segment_value.pointer())[j]; +} + +template template +void concurrent_vector::internal_assign_iterators(I first, I last) { + __TBB_ASSERT(my_early_size == 0, NULL); + size_type n = std::distance(first, last); + if( !n ) return; + internal_reserve(n, sizeof(T), max_size()); + my_early_size = n; + segment_index_t k = 0; + //TODO: unify segment iteration code with concurrent_base_v3::helper + size_type sz = segment_size( my_first_block ); + while( sz < n ) { + internal_loop_guide loop(sz, my_segment[k].template load().template pointer()); + loop.iterate(first); + n -= sz; + if( !k ) k = my_first_block; + else { ++k; sz <<= 1; } + } + internal_loop_guide loop(n, my_segment[k].template load().template pointer()); + loop.iterate(first); +} + +template +void concurrent_vector::initialize_array( void* begin, const void *, size_type n ) { + internal_loop_guide loop(n, begin); loop.init(); +} + +template +void concurrent_vector::initialize_array_by( void* begin, const void *src, size_type n ) { + internal_loop_guide loop(n, begin); loop.init(src); +} + +template +void concurrent_vector::copy_array( void* dst, const void* src, size_type n ) { + internal_loop_guide loop(n, dst); loop.copy(src); +} + +#if __TBB_CPP11_RVALUE_REF_PRESENT +template +void concurrent_vector::move_array( void* dst, const void* src, size_type n ) { + internal_loop_guide loop(n, dst); loop.move_construct(src); +} +template +void concurrent_vector::move_assign_array( void* dst, const void* src, size_type n ) { + internal_loop_guide loop(n, dst); loop.move_assign(src); +} +#endif + +#if __TBB_MOVE_IF_NOEXCEPT_PRESENT +template +void concurrent_vector::move_array_if_noexcept( void* dst, const void* src, size_type n ) { + internal_loop_guide loop(n, dst); loop.move_construct_if_noexcept(src); +} +#endif //__TBB_MOVE_IF_NOEXCEPT_PRESENT + +template +template +void concurrent_vector::copy_range( void* dst, const void* p_type_erased_iterator, size_type n ){ + internal_loop_guide loop(n, dst); + loop.iterate( *(static_cast(const_cast(p_type_erased_iterator))) ); +} + +template +void concurrent_vector::assign_array( void* dst, const void* src, size_type n ) { + internal_loop_guide loop(n, dst); loop.assign(src); +} + +#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) + // Workaround for overzealous compiler warning + #pragma warning (push) + #pragma warning (disable: 4189) +#endif +template +void 
concurrent_vector::destroy_array( void* begin, size_type n ) { + T* array = static_cast(begin); + for( size_type j=n; j>0; --j ) + array[j-1].~T(); // destructors are supposed to not throw any exceptions +} +#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) + #pragma warning (pop) +#endif // warning 4189 is back + +// concurrent_vector's template functions +template +inline bool operator==(const concurrent_vector &a, const concurrent_vector &b) { + //TODO: call size() only once per vector (in operator==) + // Simply: return a.size() == b.size() && std::equal(a.begin(), a.end(), b.begin()); + if(a.size() != b.size()) return false; + typename concurrent_vector::const_iterator i(a.begin()); + typename concurrent_vector::const_iterator j(b.begin()); + for(; i != a.end(); ++i, ++j) + if( !(*i == *j) ) return false; + return true; +} + +template +inline bool operator!=(const concurrent_vector &a, const concurrent_vector &b) +{ return !(a == b); } + +template +inline bool operator<(const concurrent_vector &a, const concurrent_vector &b) +{ return (std::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end())); } + +template +inline bool operator>(const concurrent_vector &a, const concurrent_vector &b) +{ return b < a; } + +template +inline bool operator<=(const concurrent_vector &a, const concurrent_vector &b) +{ return !(b < a); } + +template +inline bool operator>=(const concurrent_vector &a, const concurrent_vector &b) +{ return !(a < b); } + +template +inline void swap(concurrent_vector &a, concurrent_vector &b) +{ a.swap( b ); } + +} // namespace tbb + +#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) + #pragma warning (pop) +#endif // warning 4267,4127 are back + + +#undef __TBB_concurrent_vector_H_include_area +#include "internal/_warning_suppress_disable_notice.h" + +#endif /* __TBB_concurrent_vector_H */ diff --git a/ohos/arm64-v8a/include/tbb/critical_section.h b/ohos/arm64-v8a/include/tbb/critical_section.h new file mode 100644 index 00000000..fb3332b2 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/critical_section.h @@ -0,0 +1,147 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#include "internal/_deprecated_header_message_guard.h" + +#if !defined(__TBB_show_deprecation_message_critical_section_H) && defined(__TBB_show_deprecated_header_message) +#define __TBB_show_deprecation_message_critical_section_H +#pragma message("TBB Warning: tbb/critical_section.h is deprecated. 
For details, please see Deprecated Features appendix in the TBB reference manual.") +#endif + +#if defined(__TBB_show_deprecated_header_message) +#undef __TBB_show_deprecated_header_message +#endif + +#ifndef _TBB_CRITICAL_SECTION_H_ +#define _TBB_CRITICAL_SECTION_H_ + +#define __TBB_critical_section_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + +#if _WIN32||_WIN64 +#include "machine/windows_api.h" +#else +#include +#include +#endif // _WIN32||WIN64 + +#include "tbb_stddef.h" +#include "tbb_thread.h" +#include "tbb_exception.h" + +#include "tbb_profiling.h" + +namespace tbb { + + namespace internal { +class critical_section_v4 : internal::no_copy { +#if _WIN32||_WIN64 + CRITICAL_SECTION my_impl; +#else + pthread_mutex_t my_impl; +#endif + tbb_thread::id my_tid; +public: + + void __TBB_EXPORTED_METHOD internal_construct(); + + critical_section_v4() { +#if _WIN32||_WIN64 + InitializeCriticalSectionEx( &my_impl, 4000, 0 ); +#else + pthread_mutex_init(&my_impl, NULL); +#endif + internal_construct(); + } + + ~critical_section_v4() { + __TBB_ASSERT(my_tid == tbb_thread::id(), "Destroying a still-held critical section"); +#if _WIN32||_WIN64 + DeleteCriticalSection(&my_impl); +#else + pthread_mutex_destroy(&my_impl); +#endif + } + + class scoped_lock : internal::no_copy { + private: + critical_section_v4 &my_crit; + public: + scoped_lock( critical_section_v4& lock_me) :my_crit(lock_me) { + my_crit.lock(); + } + + ~scoped_lock() { + my_crit.unlock(); + } + }; + + void lock() { + tbb_thread::id local_tid = this_tbb_thread::get_id(); + if(local_tid == my_tid) throw_exception( eid_improper_lock ); +#if _WIN32||_WIN64 + EnterCriticalSection( &my_impl ); +#else + int rval = pthread_mutex_lock(&my_impl); + __TBB_ASSERT_EX(!rval, "critical_section::lock: pthread_mutex_lock failed"); +#endif + __TBB_ASSERT(my_tid == tbb_thread::id(), NULL); + my_tid = local_tid; + } + + bool try_lock() { + bool gotlock; + tbb_thread::id local_tid = this_tbb_thread::get_id(); + if(local_tid == my_tid) return false; +#if _WIN32||_WIN64 + gotlock = TryEnterCriticalSection( &my_impl ) != 0; +#else + int rval = pthread_mutex_trylock(&my_impl); + // valid returns are 0 (locked) and [EBUSY] + __TBB_ASSERT(rval == 0 || rval == EBUSY, "critical_section::trylock: pthread_mutex_trylock failed"); + gotlock = rval == 0; +#endif + if(gotlock) { + my_tid = local_tid; + } + return gotlock; + } + + void unlock() { + __TBB_ASSERT(this_tbb_thread::get_id() == my_tid, "thread unlocking critical_section is not thread that locked it"); + my_tid = tbb_thread::id(); +#if _WIN32||_WIN64 + LeaveCriticalSection( &my_impl ); +#else + int rval = pthread_mutex_unlock(&my_impl); + __TBB_ASSERT_EX(!rval, "critical_section::unlock: pthread_mutex_unlock failed"); +#endif + } + + static const bool is_rw_mutex = false; + static const bool is_recursive_mutex = false; + static const bool is_fair_mutex = true; +}; // critical_section_v4 +} // namespace internal +__TBB_DEPRECATED_IN_VERBOSE_MODE_MSG("tbb::critical_section is deprecated, use std::mutex") typedef internal::critical_section_v4 critical_section; + +__TBB_DEFINE_PROFILING_SET_NAME(critical_section) +} // namespace tbb + +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_critical_section_H_include_area + +#endif // _TBB_CRITICAL_SECTION_H_ diff --git a/ohos/arm64-v8a/include/tbb/enumerable_thread_specific.h b/ohos/arm64-v8a/include/tbb/enumerable_thread_specific.h new file mode 100644 index 00000000..248597f2 --- /dev/null +++ 
b/ohos/arm64-v8a/include/tbb/enumerable_thread_specific.h @@ -0,0 +1,1173 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_enumerable_thread_specific_H +#define __TBB_enumerable_thread_specific_H + +#define __TBB_enumerable_thread_specific_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + +#include "atomic.h" +#include "concurrent_vector.h" +#include "tbb_thread.h" +#include "tbb_allocator.h" +#include "cache_aligned_allocator.h" +#include "aligned_space.h" +#include "internal/_template_helpers.h" +#include "internal/_tbb_hash_compare_impl.h" +#include "tbb_profiling.h" +#include // for memcpy + +#if __TBB_PREVIEW_RESUMABLE_TASKS +#include "task.h" // for task::suspend_point +#endif + +#if _WIN32||_WIN64 +#include "machine/windows_api.h" +#else +#include +#endif + +#define __TBB_ETS_USE_CPP11 \ + (__TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT \ + && __TBB_CPP11_DECLTYPE_PRESENT && __TBB_CPP11_LAMBDAS_PRESENT) + +namespace tbb { + +//! enum for selecting between single key and key-per-instance versions +enum ets_key_usage_type { + ets_key_per_instance + , ets_no_key +#if __TBB_PREVIEW_RESUMABLE_TASKS + , ets_suspend_aware +#endif +}; + +namespace interface6 { + + // Forward declaration to use in internal classes + template + class enumerable_thread_specific; + + //! @cond + namespace internal { + + using namespace tbb::internal; + + template + struct ets_key_selector { + typedef tbb_thread::id key_type; + static key_type current_key() { + return tbb::internal::thread_get_id_v3(); + } + }; + +#if __TBB_PREVIEW_RESUMABLE_TASKS + template <> + struct ets_key_selector { + typedef task::suspend_point key_type; + static key_type current_key() { + return internal_current_suspend_point(); + } + }; + + inline task::suspend_point atomic_compare_and_swap(task::suspend_point& location, + const task::suspend_point& value, const task::suspend_point& comparand) { + return as_atomic(location).compare_and_swap(value, comparand); + } +#endif + + template + class ets_base: tbb::internal::no_copy { + protected: + typedef typename ets_key_selector::key_type key_type; +#if __TBB_PROTECTED_NESTED_CLASS_BROKEN + public: +#endif + struct slot; + + struct array { + array* next; + size_t lg_size; + slot& at( size_t k ) { + return ((slot*)(void*)(this+1))[k]; + } + size_t size() const {return size_t(1)<>(8*sizeof(size_t)-lg_size); + } + }; + struct slot { + key_type key; + void* ptr; + bool empty() const {return key == key_type();} + bool match( key_type k ) const {return key == k;} + bool claim( key_type k ) { + // TODO: maybe claim ptr, because key_type is not guaranteed to fit into word size + return atomic_compare_and_swap(key, k, key_type()) == key_type(); + } + }; +#if __TBB_PROTECTED_NESTED_CLASS_BROKEN + protected: +#endif + + //! Root of linked list of arrays of decreasing size. + /** NULL if and only if my_count==0. + Each array in the list is half the size of its predecessor. 
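Editorial note: the ets_key_usage_type argument selects how a thread locates its copy. The default ets_no_key goes through the thread-id-keyed hash table implemented below, while ets_key_per_instance binds one native TLS key to the container for a faster local() at the cost of a limited OS resource. A small sketch of the two instantiations (names invented for the example):

#include "tbb/enumerable_thread_specific.h"
#include "tbb/cache_aligned_allocator.h"

// Default: lookups go through the thread-id keyed hash table.
tbb::enumerable_thread_specific<int> hits_default;

// Key-per-instance: this container owns a native TLS slot, so local()
// is a direct TLS read once the copy exists.
typedef tbb::enumerable_thread_specific<
        int,
        tbb::cache_aligned_allocator<int>,
        tbb::ets_key_per_instance> fast_counter_type;
fast_counter_type hits_fast;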
*/ + atomic my_root; + atomic my_count; + virtual void* create_local() = 0; + virtual void* create_array(size_t _size) = 0; // _size in bytes + virtual void free_array(void* ptr, size_t _size) = 0; // _size in bytes + array* allocate( size_t lg_size ) { + size_t n = size_t(1)<(create_array( sizeof(array)+n*sizeof(slot) )); + a->lg_size = lg_size; + std::memset( a+1, 0, n*sizeof(slot) ); + return a; + } + void free(array* a) { + size_t n = size_t(1)<<(a->lg_size); + free_array( (void *)a, size_t(sizeof(array)+n*sizeof(slot)) ); + } + + ets_base() {my_root=NULL; my_count=0;} + virtual ~ets_base(); // g++ complains if this is not virtual + void* table_lookup( bool& exists ); + void table_clear(); + // The following functions are not used in concurrent context, + // so we don't need synchronization and ITT annotations there. + template + void table_elementwise_copy( const ets_base& other, + void*(*add_element)(ets_base&, void*) ) { + __TBB_ASSERT(!my_root,NULL); + __TBB_ASSERT(!my_count,NULL); + if( !other.my_root ) return; + array* root = my_root = allocate(other.my_root->lg_size); + root->next = NULL; + my_count = other.my_count; + size_t mask = root->mask(); + for( array* r=other.my_root; r; r=r->next ) { + for( size_t i=0; isize(); ++i ) { + slot& s1 = r->at(i); + if( !s1.empty() ) { + for( size_t j = root->start(tbb::tbb_hash()(s1.key)); ; j=(j+1)&mask ) { + slot& s2 = root->at(j); + if( s2.empty() ) { + s2.ptr = add_element(static_cast&>(*this), s1.ptr); + s2.key = s1.key; + break; + } + else if( s2.match(s1.key) ) + break; + } + } + } + } + } + void table_swap( ets_base& other ) { + __TBB_ASSERT(this!=&other, "Don't swap an instance with itself"); + tbb::internal::swap(my_root, other.my_root); + tbb::internal::swap(my_count, other.my_count); + } + }; + + template + ets_base::~ets_base() { + __TBB_ASSERT(!my_root, NULL); + } + + template + void ets_base::table_clear() { + while( array* r = my_root ) { + my_root = r->next; + free(r); + } + my_count = 0; + } + + template + void* ets_base::table_lookup( bool& exists ) { + const key_type k = ets_key_selector::current_key(); + + __TBB_ASSERT(k != key_type(),NULL); + void* found; + size_t h = tbb::tbb_hash()(k); + for( array* r=my_root; r; r=r->next ) { + call_itt_notify(acquired,r); + size_t mask=r->mask(); + for(size_t i = r->start(h); ;i=(i+1)&mask) { + slot& s = r->at(i); + if( s.empty() ) break; + if( s.match(k) ) { + if( r==my_root ) { + // Success at top level + exists = true; + return s.ptr; + } else { + // Success at some other level. Need to insert at top level. + exists = true; + found = s.ptr; + goto insert; + } + } + } + } + // Key does not yet exist. The density of slots in the table does not exceed 0.5, + // for if this will occur a new table is allocated with double the current table + // size, which is swapped in as the new root table. So an empty slot is guaranteed. + exists = false; + found = create_local(); + { + size_t c = ++my_count; + array* r = my_root; + call_itt_notify(acquired,r); + if( !r || c>r->size()/2 ) { + size_t s = r ? r->lg_size : 2; + while( c>size_t(1)<<(s-1) ) ++s; + array* a = allocate(s); + for(;;) { + a->next = r; + call_itt_notify(releasing,a); + array* new_r = my_root.compare_and_swap(a,r); + if( new_r==r ) break; + call_itt_notify(acquired, new_r); + if( new_r->lg_size>=s ) { + // Another thread inserted an equal or bigger array, so our array is superfluous. 
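// (Editorial note, not in the upstream source: this is the loser path of the lock-free
// publication loop above. compare_and_swap either installed our array (new_r == r) or
// returned the array another thread installed first; if that array is already large
// enough we release ours here, otherwise the loop adopts new_r as r and retries linking
// our larger array in front of it.)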
+ free(a); + break; + } + r = new_r; + } + } + } + insert: + // Whether a slot has been found in an older table, or if it has been inserted at this level, + // it has already been accounted for in the total. Guaranteed to be room for it, and it is + // not present, so search for empty slot and use it. + array* ir = my_root; + call_itt_notify(acquired, ir); + size_t mask = ir->mask(); + for(size_t i = ir->start(h);;i=(i+1)&mask) { + slot& s = ir->at(i); + if( s.empty() ) { + if( s.claim(k) ) { + s.ptr = found; + return found; + } + } + } + } + + //! Specialization that exploits native TLS + template <> + class ets_base: public ets_base { + typedef ets_base super; +#if _WIN32||_WIN64 +#if __TBB_WIN8UI_SUPPORT + typedef DWORD tls_key_t; + void create_key() { my_key = FlsAlloc(NULL); } + void destroy_key() { FlsFree(my_key); } + void set_tls(void * value) { FlsSetValue(my_key, (LPVOID)value); } + void* get_tls() { return (void *)FlsGetValue(my_key); } +#else + typedef DWORD tls_key_t; + void create_key() { my_key = TlsAlloc(); } + void destroy_key() { TlsFree(my_key); } + void set_tls(void * value) { TlsSetValue(my_key, (LPVOID)value); } + void* get_tls() { return (void *)TlsGetValue(my_key); } +#endif +#else + typedef pthread_key_t tls_key_t; + void create_key() { pthread_key_create(&my_key, NULL); } + void destroy_key() { pthread_key_delete(my_key); } + void set_tls( void * value ) const { pthread_setspecific(my_key, value); } + void* get_tls() const { return pthread_getspecific(my_key); } +#endif + tls_key_t my_key; + virtual void* create_local() __TBB_override = 0; + virtual void* create_array(size_t _size) __TBB_override = 0; // _size in bytes + virtual void free_array(void* ptr, size_t _size) __TBB_override = 0; // size in bytes + protected: + ets_base() {create_key();} + ~ets_base() {destroy_key();} + void* table_lookup( bool& exists ) { + void* found = get_tls(); + if( found ) { + exists=true; + } else { + found = super::table_lookup(exists); + set_tls(found); + } + return found; + } + void table_clear() { + destroy_key(); + create_key(); + super::table_clear(); + } + void table_swap( ets_base& other ) { + using std::swap; + __TBB_ASSERT(this!=&other, "Don't swap an instance with itself"); + swap(my_key, other.my_key); + super::table_swap(other); + } + }; + + //! Random access iterator for traversing the thread local copies. + template< typename Container, typename Value > + class enumerable_thread_specific_iterator +#if defined(_WIN64) && defined(_MSC_VER) + // Ensure that Microsoft's internal template function _Val_type works correctly. + : public std::iterator +#endif /* defined(_WIN64) && defined(_MSC_VER) */ + { + //! 
current position in the concurrent_vector + + Container *my_container; + typename Container::size_type my_index; + mutable Value *my_value; + + template + friend enumerable_thread_specific_iterator + operator+( ptrdiff_t offset, const enumerable_thread_specific_iterator& v ); + + template + friend bool operator==( const enumerable_thread_specific_iterator& i, + const enumerable_thread_specific_iterator& j ); + + template + friend bool operator<( const enumerable_thread_specific_iterator& i, + const enumerable_thread_specific_iterator& j ); + + template + friend ptrdiff_t operator-( const enumerable_thread_specific_iterator& i, + const enumerable_thread_specific_iterator& j ); + + template + friend class enumerable_thread_specific_iterator; + + public: + + enumerable_thread_specific_iterator( const Container &container, typename Container::size_type index ) : + my_container(&const_cast(container)), my_index(index), my_value(NULL) {} + + //! Default constructor + enumerable_thread_specific_iterator() : my_container(NULL), my_index(0), my_value(NULL) {} + + template + enumerable_thread_specific_iterator( const enumerable_thread_specific_iterator& other ) : + my_container( other.my_container ), my_index( other.my_index), my_value( const_cast(other.my_value) ) {} + + enumerable_thread_specific_iterator operator+( ptrdiff_t offset ) const { + return enumerable_thread_specific_iterator(*my_container, my_index + offset); + } + + enumerable_thread_specific_iterator &operator+=( ptrdiff_t offset ) { + my_index += offset; + my_value = NULL; + return *this; + } + + enumerable_thread_specific_iterator operator-( ptrdiff_t offset ) const { + return enumerable_thread_specific_iterator( *my_container, my_index-offset ); + } + + enumerable_thread_specific_iterator &operator-=( ptrdiff_t offset ) { + my_index -= offset; + my_value = NULL; + return *this; + } + + Value& operator*() const { + Value* value = my_value; + if( !value ) { + value = my_value = (*my_container)[my_index].value(); + } + __TBB_ASSERT( value==(*my_container)[my_index].value(), "corrupt cache" ); + return *value; + } + + Value& operator[]( ptrdiff_t k ) const { + return (*my_container)[my_index + k].value; + } + + Value* operator->() const {return &operator*();} + + enumerable_thread_specific_iterator& operator++() { + ++my_index; + my_value = NULL; + return *this; + } + + enumerable_thread_specific_iterator& operator--() { + --my_index; + my_value = NULL; + return *this; + } + + //! Post increment + enumerable_thread_specific_iterator operator++(int) { + enumerable_thread_specific_iterator result = *this; + ++my_index; + my_value = NULL; + return result; + } + + //! 
Post decrement + enumerable_thread_specific_iterator operator--(int) { + enumerable_thread_specific_iterator result = *this; + --my_index; + my_value = NULL; + return result; + } + + // STL support + typedef ptrdiff_t difference_type; + typedef Value value_type; + typedef Value* pointer; + typedef Value& reference; + typedef std::random_access_iterator_tag iterator_category; + }; + + template + enumerable_thread_specific_iterator + operator+( ptrdiff_t offset, const enumerable_thread_specific_iterator& v ) { + return enumerable_thread_specific_iterator( v.my_container, v.my_index + offset ); + } + + template + bool operator==( const enumerable_thread_specific_iterator& i, + const enumerable_thread_specific_iterator& j ) { + return i.my_index==j.my_index && i.my_container == j.my_container; + } + + template + bool operator!=( const enumerable_thread_specific_iterator& i, + const enumerable_thread_specific_iterator& j ) { + return !(i==j); + } + + template + bool operator<( const enumerable_thread_specific_iterator& i, + const enumerable_thread_specific_iterator& j ) { + return i.my_index + bool operator>( const enumerable_thread_specific_iterator& i, + const enumerable_thread_specific_iterator& j ) { + return j + bool operator>=( const enumerable_thread_specific_iterator& i, + const enumerable_thread_specific_iterator& j ) { + return !(i + bool operator<=( const enumerable_thread_specific_iterator& i, + const enumerable_thread_specific_iterator& j ) { + return !(j + ptrdiff_t operator-( const enumerable_thread_specific_iterator& i, + const enumerable_thread_specific_iterator& j ) { + return i.my_index-j.my_index; + } + + template + class segmented_iterator +#if defined(_WIN64) && defined(_MSC_VER) + : public std::iterator +#endif + { + template + friend bool operator==(const segmented_iterator& i, const segmented_iterator& j); + + template + friend bool operator!=(const segmented_iterator& i, const segmented_iterator& j); + + template + friend class segmented_iterator; + + public: + + segmented_iterator() {my_segcont = NULL;} + + segmented_iterator( const SegmentedContainer& _segmented_container ) : + my_segcont(const_cast(&_segmented_container)), + outer_iter(my_segcont->end()) { } + + ~segmented_iterator() {} + + typedef typename SegmentedContainer::iterator outer_iterator; + typedef typename SegmentedContainer::value_type InnerContainer; + typedef typename InnerContainer::iterator inner_iterator; + + // STL support + typedef ptrdiff_t difference_type; + typedef Value value_type; + typedef typename SegmentedContainer::size_type size_type; + typedef Value* pointer; + typedef Value& reference; + typedef std::input_iterator_tag iterator_category; + + // Copy Constructor + template + segmented_iterator(const segmented_iterator& other) : + my_segcont(other.my_segcont), + outer_iter(other.outer_iter), + // can we assign a default-constructed iterator to inner if we're at the end? + inner_iter(other.inner_iter) + {} + + // assignment + template + segmented_iterator& operator=( const segmented_iterator& other) { + if(this != &other) { + my_segcont = other.my_segcont; + outer_iter = other.outer_iter; + if(outer_iter != my_segcont->end()) inner_iter = other.inner_iter; + } + return *this; + } + + // allow assignment of outer iterator to segmented iterator. Once it is + // assigned, move forward until a non-empty inner container is found or + // the end of the outer container is reached. 
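Editorial note: at user level this segmented iterator is what flatten2d/flattened2d (declared near the end of this header) build on, letting a single loop visit every element of every thread-local container. A sketch with invented names, assuming std::vector as the per-thread container:

#include <vector>
#include "tbb/enumerable_thread_specific.h"

typedef tbb::enumerable_thread_specific<std::vector<int> > buckets_type;

// Illustrative only: walk everything the threads appended via
// buckets.local().push_back(...) as one flat sequence.
inline int count_all(buckets_type& buckets) {
    tbb::flattened2d<buckets_type> flat = tbb::flatten2d(buckets);
    int n = 0;
    for (tbb::flattened2d<buckets_type>::iterator i = flat.begin(); i != flat.end(); ++i)
        ++n;                       // *i would dereference the current int
    return n;
}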
+ segmented_iterator& operator=(const outer_iterator& new_outer_iter) { + __TBB_ASSERT(my_segcont != NULL, NULL); + // check that this iterator points to something inside the segmented container + for(outer_iter = new_outer_iter ;outer_iter!=my_segcont->end(); ++outer_iter) { + if( !outer_iter->empty() ) { + inner_iter = outer_iter->begin(); + break; + } + } + return *this; + } + + // pre-increment + segmented_iterator& operator++() { + advance_me(); + return *this; + } + + // post-increment + segmented_iterator operator++(int) { + segmented_iterator tmp = *this; + operator++(); + return tmp; + } + + bool operator==(const outer_iterator& other_outer) const { + __TBB_ASSERT(my_segcont != NULL, NULL); + return (outer_iter == other_outer && + (outer_iter == my_segcont->end() || inner_iter == outer_iter->begin())); + } + + bool operator!=(const outer_iterator& other_outer) const { + return !operator==(other_outer); + + } + + // (i)* RHS + reference operator*() const { + __TBB_ASSERT(my_segcont != NULL, NULL); + __TBB_ASSERT(outer_iter != my_segcont->end(), "Dereferencing a pointer at end of container"); + __TBB_ASSERT(inner_iter != outer_iter->end(), NULL); // should never happen + return *inner_iter; + } + + // i-> + pointer operator->() const { return &operator*();} + + private: + SegmentedContainer* my_segcont; + outer_iterator outer_iter; + inner_iterator inner_iter; + + void advance_me() { + __TBB_ASSERT(my_segcont != NULL, NULL); + __TBB_ASSERT(outer_iter != my_segcont->end(), NULL); // not true if there are no inner containers + __TBB_ASSERT(inner_iter != outer_iter->end(), NULL); // not true if the inner containers are all empty. + ++inner_iter; + while(inner_iter == outer_iter->end() && ++outer_iter != my_segcont->end()) { + inner_iter = outer_iter->begin(); + } + } + }; // segmented_iterator + + template + bool operator==( const segmented_iterator& i, + const segmented_iterator& j ) { + if(i.my_segcont != j.my_segcont) return false; + if(i.my_segcont == NULL) return true; + if(i.outer_iter != j.outer_iter) return false; + if(i.outer_iter == i.my_segcont->end()) return true; + return i.inner_iter == j.inner_iter; + } + + // != + template + bool operator!=( const segmented_iterator& i, + const segmented_iterator& j ) { + return !(i==j); + } + + template + struct construct_by_default: tbb::internal::no_assign { + void construct(void*where) {new(where) T();} // C++ note: the () in T() ensure zero initialization. + construct_by_default( int ) {} + }; + + template + struct construct_by_exemplar: tbb::internal::no_assign { + const T exemplar; + void construct(void*where) {new(where) T(exemplar);} + construct_by_exemplar( const T& t ) : exemplar(t) {} +#if __TBB_ETS_USE_CPP11 + construct_by_exemplar( T&& t ) : exemplar(std::move(t)) {} +#endif + }; + + template + struct construct_by_finit: tbb::internal::no_assign { + Finit f; + void construct(void* where) {new(where) T(f());} + construct_by_finit( const Finit& f_ ) : f(f_) {} +#if __TBB_ETS_USE_CPP11 + construct_by_finit( Finit&& f_ ) : f(std::move(f_)) {} +#endif + }; + +#if __TBB_ETS_USE_CPP11 + template + struct construct_by_args: tbb::internal::no_assign { + internal::stored_pack pack; + void construct(void* where) { + internal::call( [where](const typename strip
<P>
::type&... args ){ + new(where) T(args...); + }, pack ); + } + construct_by_args( P&& ... args ) : pack(std::forward
<P>
(args)...) {} + }; +#endif + + // storage for initialization function pointer + // TODO: consider removing the template parameter T here and in callback_leaf + template + class callback_base { + public: + // Clone *this + virtual callback_base* clone() const = 0; + // Destruct and free *this + virtual void destroy() = 0; + // Need virtual destructor to satisfy GCC compiler warning + virtual ~callback_base() { } + // Construct T at where + virtual void construct(void* where) = 0; + }; + + template + class callback_leaf: public callback_base, Constructor { +#if __TBB_ETS_USE_CPP11 + template callback_leaf( P&& ... params ) : Constructor(std::forward
<P>
(params)...) {} +#else + template callback_leaf( const X& x ) : Constructor(x) {} +#endif + // TODO: make the construction/destruction consistent (use allocator.construct/destroy) + typedef typename tbb::tbb_allocator my_allocator_type; + + callback_base* clone() const __TBB_override { + return make(*this); + } + + void destroy() __TBB_override { + my_allocator_type().destroy(this); + my_allocator_type().deallocate(this,1); + } + + void construct(void* where) __TBB_override { + Constructor::construct(where); + } + public: +#if __TBB_ETS_USE_CPP11 + template + static callback_base* make( P&& ... params ) { + void* where = my_allocator_type().allocate(1); + return new(where) callback_leaf( std::forward
<P>
(params)... ); + } +#else + template + static callback_base* make( const X& x ) { + void* where = my_allocator_type().allocate(1); + return new(where) callback_leaf(x); + } +#endif + }; + + //! Template for recording construction of objects in table + /** All maintenance of the space will be done explicitly on push_back, + and all thread local copies must be destroyed before the concurrent + vector is deleted. + + The flag is_built is initialized to false. When the local is + successfully-constructed, set the flag to true or call value_committed(). + If the constructor throws, the flag will be false. + */ + template + struct ets_element { + tbb::aligned_space my_space; + bool is_built; + ets_element() { is_built = false; } // not currently-built + U* value() { return my_space.begin(); } + U* value_committed() { is_built = true; return my_space.begin(); } + ~ets_element() { + if(is_built) { + my_space.begin()->~U(); + is_built = false; + } + } + }; + + // A predicate that can be used for a compile-time compatibility check of ETS instances + // Ideally, it should have been declared inside the ETS class, but unfortunately + // in that case VS2013 does not enable the variadic constructor. + template struct is_compatible_ets { static const bool value = false; }; + template + struct is_compatible_ets< T, enumerable_thread_specific > { static const bool value = internal::is_same_type::value; }; + +#if __TBB_ETS_USE_CPP11 + // A predicate that checks whether, for a variable 'foo' of type T, foo() is a valid expression + template + class is_callable_no_args { + private: + typedef char yes[1]; + typedef char no [2]; + + template static yes& decide( decltype(declval()())* ); + template static no& decide(...); + public: + static const bool value = (sizeof(decide(NULL)) == sizeof(yes)); + }; +#endif + + } // namespace internal + //! @endcond + + //! The enumerable_thread_specific container + /** enumerable_thread_specific has the following properties: + - thread-local copies are lazily created, with default, exemplar or function initialization. + - thread-local copies do not move (during lifetime, and excepting clear()) so the address of a copy is invariant. + - the contained objects need not have operator=() defined if combine is not used. + - enumerable_thread_specific containers may be copy-constructed or assigned. + - thread-local copies can be managed by hash-table, or can be accessed via TLS storage for speed. + - outside of parallel contexts, the contents of all thread-local copies are accessible by iterator or using combine or combine_each methods + + @par Segmented iterator + When the thread-local objects are containers with input_iterators defined, a segmented iterator may + be used to iterate over all the elements of all thread-local copies. + + @par combine and combine_each + - Both methods are defined for enumerable_thread_specific. + - combine() requires the type T have operator=() defined. + - neither method modifies the contents of the object (though there is no guarantee that the applied methods do not modify the object.) + - Both are evaluated in serial context (the methods are assumed to be non-benign.) + + @ingroup containers */ + template , + ets_key_usage_type ETS_key_type=ets_no_key > + class enumerable_thread_specific: internal::ets_base { + + template friend class enumerable_thread_specific; + + typedef internal::padded< internal::ets_element > padded_element; + + //! 
A generic range, used to create range objects from the iterators + template + class generic_range_type: public blocked_range { + public: + typedef T value_type; + typedef T& reference; + typedef const T& const_reference; + typedef I iterator; + typedef ptrdiff_t difference_type; + generic_range_type( I begin_, I end_, size_t grainsize_ = 1) : blocked_range(begin_,end_,grainsize_) {} + template + generic_range_type( const generic_range_type& r) : blocked_range(r.begin(),r.end(),r.grainsize()) {} + generic_range_type( generic_range_type& r, split ) : blocked_range(r,split()) {} + }; + + typedef typename Allocator::template rebind< padded_element >::other padded_allocator_type; + typedef tbb::concurrent_vector< padded_element, padded_allocator_type > internal_collection_type; + + internal::callback_base *my_construct_callback; + + internal_collection_type my_locals; + + // TODO: consider unifying the callback mechanism for all create_local* methods below + // (likely non-compatible and requires interface version increase) + void* create_local() __TBB_override { + padded_element& lref = *my_locals.grow_by(1); + my_construct_callback->construct(lref.value()); + return lref.value_committed(); + } + + static void* create_local_by_copy( internal::ets_base& base, void* p ) { + enumerable_thread_specific& ets = static_cast(base); + padded_element& lref = *ets.my_locals.grow_by(1); + new(lref.value()) T(*static_cast(p)); + return lref.value_committed(); + } + +#if __TBB_ETS_USE_CPP11 + static void* create_local_by_move( internal::ets_base& base, void* p ) { + enumerable_thread_specific& ets = static_cast(base); + padded_element& lref = *ets.my_locals.grow_by(1); + new(lref.value()) T(std::move(*static_cast(p))); + return lref.value_committed(); + } +#endif + + typedef typename Allocator::template rebind< uintptr_t >::other array_allocator_type; + + // _size is in bytes + void* create_array(size_t _size) __TBB_override { + size_t nelements = (_size + sizeof(uintptr_t) -1) / sizeof(uintptr_t); + return array_allocator_type().allocate(nelements); + } + + void free_array( void* _ptr, size_t _size) __TBB_override { + size_t nelements = (_size + sizeof(uintptr_t) -1) / sizeof(uintptr_t); + array_allocator_type().deallocate( reinterpret_cast(_ptr),nelements); + } + + public: + + //! Basic types + typedef Allocator allocator_type; + typedef T value_type; + typedef T& reference; + typedef const T& const_reference; + typedef T* pointer; + typedef const T* const_pointer; + typedef typename internal_collection_type::size_type size_type; + typedef typename internal_collection_type::difference_type difference_type; + + // Iterator types + typedef typename internal::enumerable_thread_specific_iterator< internal_collection_type, value_type > iterator; + typedef typename internal::enumerable_thread_specific_iterator< internal_collection_type, const value_type > const_iterator; + + // Parallel range types + typedef generic_range_type< iterator > range_type; + typedef generic_range_type< const_iterator > const_range_type; + + //! Default constructor. Each local instance of T is default constructed. + enumerable_thread_specific() : my_construct_callback( + internal::callback_leaf >::make(/*dummy argument*/0) + ){} + + //! Constructor with initializer functor. Each local instance of T is constructed by T(finit()). + template ::type>::value>::type +#endif + > + explicit enumerable_thread_specific( Finit finit ) : my_construct_callback( + internal::callback_leaf >::make( tbb::internal::move(finit) ) + ){} + + //! 
Constructor with exemplar. Each local instance of T is copy-constructed from the exemplar. + explicit enumerable_thread_specific( const T& exemplar ) : my_construct_callback( + internal::callback_leaf >::make( exemplar ) + ){} + +#if __TBB_ETS_USE_CPP11 + explicit enumerable_thread_specific( T&& exemplar ) : my_construct_callback( + internal::callback_leaf >::make( std::move(exemplar) ) + ){} + + //! Variadic constructor with initializer arguments. Each local instance of T is constructed by T(args...) + template ::type>::value + && !internal::is_compatible_ets::type>::value + && !internal::is_same_type::type>::value + >::type> + enumerable_thread_specific( P1&& arg1, P&& ... args ) : my_construct_callback( + internal::callback_leaf >::make( std::forward(arg1), std::forward
(args)... ) + ){} +#endif + + //! Destructor + ~enumerable_thread_specific() { + if(my_construct_callback) my_construct_callback->destroy(); + // Deallocate the hash table before overridden free_array() becomes inaccessible + this->internal::ets_base::table_clear(); + } + + //! returns reference to local, discarding exists + reference local() { + bool exists; + return local(exists); + } + + //! Returns reference to calling thread's local copy, creating one if necessary + reference local(bool& exists) { + void* ptr = this->table_lookup(exists); + return *(T*)ptr; + } + + //! Get the number of local copies + size_type size() const { return my_locals.size(); } + + //! true if there have been no local copies created + bool empty() const { return my_locals.empty(); } + + //! begin iterator + iterator begin() { return iterator( my_locals, 0 ); } + //! end iterator + iterator end() { return iterator(my_locals, my_locals.size() ); } + + //! begin const iterator + const_iterator begin() const { return const_iterator(my_locals, 0); } + + //! end const iterator + const_iterator end() const { return const_iterator(my_locals, my_locals.size()); } + + //! Get range for parallel algorithms + range_type range( size_t grainsize=1 ) { return range_type( begin(), end(), grainsize ); } + + //! Get const range for parallel algorithms + const_range_type range( size_t grainsize=1 ) const { return const_range_type( begin(), end(), grainsize ); } + + //! Destroys local copies + void clear() { + my_locals.clear(); + this->table_clear(); + // callback is not destroyed + } + + private: + + template + void internal_copy(const enumerable_thread_specific& other) { +#if __TBB_ETS_USE_CPP11 && TBB_USE_ASSERT + // this tests is_compatible_ets + __TBB_STATIC_ASSERT( (internal::is_compatible_ets::type>::value), "is_compatible_ets fails" ); +#endif + // Initialize my_construct_callback first, so that it is valid even if rest of this routine throws an exception. + my_construct_callback = other.my_construct_callback->clone(); + __TBB_ASSERT(my_locals.size()==0,NULL); + my_locals.reserve(other.size()); + this->table_elementwise_copy( other, create_local_by_copy ); + } + + void internal_swap(enumerable_thread_specific& other) { + using std::swap; + __TBB_ASSERT( this!=&other, NULL ); + swap(my_construct_callback, other.my_construct_callback); + // concurrent_vector::swap() preserves storage space, + // so addresses to the vector kept in ETS hash table remain valid. 
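// ---------------------------------------------------------------------------
// A minimal usage sketch of enumerable_thread_specific (illustrative only, not
// part of this header). It assumes the initializer-functor constructor, local()
// and combine() declared above, plus tbb/parallel_for.h; the function name is a
// placeholder.
#include "tbb/enumerable_thread_specific.h"
#include "tbb/parallel_for.h"
#include "tbb/blocked_range.h"

int ets_sum_sketch() {
    // Each thread lazily creates its own counter via the initializer functor.
    tbb::enumerable_thread_specific<int> counters( []{ return 0; } );

    tbb::parallel_for( tbb::blocked_range<int>(0, 100000),
        [&]( const tbb::blocked_range<int>& r ) {
            int& local = counters.local();      // per-thread copy, created on first use
            for( int i = r.begin(); i != r.end(); ++i )
                local += i;
        } );

    // combine() folds all thread-local copies with a binary functor (T needs operator=).
    return counters.combine( []( int a, int b ) { return a + b; } );
}
// ---------------------------------------------------------------------------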
+ swap(my_locals, other.my_locals); + this->internal::ets_base::table_swap(other); + } + +#if __TBB_ETS_USE_CPP11 + template + void internal_move(enumerable_thread_specific&& other) { +#if TBB_USE_ASSERT + // this tests is_compatible_ets + __TBB_STATIC_ASSERT( (internal::is_compatible_ets::type>::value), "is_compatible_ets fails" ); +#endif + my_construct_callback = other.my_construct_callback; + other.my_construct_callback = NULL; + __TBB_ASSERT(my_locals.size()==0,NULL); + my_locals.reserve(other.size()); + this->table_elementwise_copy( other, create_local_by_move ); + } +#endif + + public: + + enumerable_thread_specific( const enumerable_thread_specific& other ) + : internal::ets_base() /* prevents GCC warnings with -Wextra */ + { + internal_copy(other); + } + + template + enumerable_thread_specific( const enumerable_thread_specific& other ) + { + internal_copy(other); + } + +#if __TBB_ETS_USE_CPP11 + enumerable_thread_specific( enumerable_thread_specific&& other ) : my_construct_callback() + { + internal_swap(other); + } + + template + enumerable_thread_specific( enumerable_thread_specific&& other ) : my_construct_callback() + { + internal_move(std::move(other)); + } +#endif + + enumerable_thread_specific& operator=( const enumerable_thread_specific& other ) + { + if( this != &other ) { + this->clear(); + my_construct_callback->destroy(); + internal_copy( other ); + } + return *this; + } + + template + enumerable_thread_specific& operator=( const enumerable_thread_specific& other ) + { + __TBB_ASSERT( static_cast(this)!=static_cast(&other), NULL ); // Objects of different types + this->clear(); + my_construct_callback->destroy(); + internal_copy(other); + return *this; + } + +#if __TBB_ETS_USE_CPP11 + enumerable_thread_specific& operator=( enumerable_thread_specific&& other ) + { + if( this != &other ) + internal_swap(other); + return *this; + } + + template + enumerable_thread_specific& operator=( enumerable_thread_specific&& other ) + { + __TBB_ASSERT( static_cast(this)!=static_cast(&other), NULL ); // Objects of different types + this->clear(); + my_construct_callback->destroy(); + internal_move(std::move(other)); + return *this; + } +#endif + + // combine_func_t has signature T(T,T) or T(const T&, const T&) + template + T combine(combine_func_t f_combine) { + if(begin() == end()) { + internal::ets_element location; + my_construct_callback->construct(location.value()); + return *location.value_committed(); + } + const_iterator ci = begin(); + T my_result = *ci; + while(++ci != end()) + my_result = f_combine( my_result, *ci ); + return my_result; + } + + // combine_func_t takes T by value or by [const] reference, and returns nothing + template + void combine_each(combine_func_t f_combine) { + for(iterator ci = begin(); ci != end(); ++ci) { + f_combine( *ci ); + } + } + + }; // enumerable_thread_specific + + template< typename Container > + class flattened2d { + + // This intermediate typedef is to address issues with VC7.1 compilers + typedef typename Container::value_type conval_type; + + public: + + //! 
Basic types + typedef typename conval_type::size_type size_type; + typedef typename conval_type::difference_type difference_type; + typedef typename conval_type::allocator_type allocator_type; + typedef typename conval_type::value_type value_type; + typedef typename conval_type::reference reference; + typedef typename conval_type::const_reference const_reference; + typedef typename conval_type::pointer pointer; + typedef typename conval_type::const_pointer const_pointer; + + typedef typename internal::segmented_iterator iterator; + typedef typename internal::segmented_iterator const_iterator; + + flattened2d( const Container &c, typename Container::const_iterator b, typename Container::const_iterator e ) : + my_container(const_cast(&c)), my_begin(b), my_end(e) { } + + explicit flattened2d( const Container &c ) : + my_container(const_cast(&c)), my_begin(c.begin()), my_end(c.end()) { } + + iterator begin() { return iterator(*my_container) = my_begin; } + iterator end() { return iterator(*my_container) = my_end; } + const_iterator begin() const { return const_iterator(*my_container) = my_begin; } + const_iterator end() const { return const_iterator(*my_container) = my_end; } + + size_type size() const { + size_type tot_size = 0; + for(typename Container::const_iterator i = my_begin; i != my_end; ++i) { + tot_size += i->size(); + } + return tot_size; + } + + private: + + Container *my_container; + typename Container::const_iterator my_begin; + typename Container::const_iterator my_end; + + }; + + template + flattened2d flatten2d(const Container &c, const typename Container::const_iterator b, const typename Container::const_iterator e) { + return flattened2d(c, b, e); + } + + template + flattened2d flatten2d(const Container &c) { + return flattened2d(c); + } + +} // interface6 + +namespace internal { +using interface6::internal::segmented_iterator; +} + +using interface6::enumerable_thread_specific; +using interface6::flattened2d; +using interface6::flatten2d; + +} // namespace tbb + +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_enumerable_thread_specific_H_include_area + +#endif diff --git a/ohos/arm64-v8a/include/tbb/flow_graph.h b/ohos/arm64-v8a/include/tbb/flow_graph.h new file mode 100644 index 00000000..8c9702e1 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/flow_graph.h @@ -0,0 +1,4735 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +#ifndef __TBB_flow_graph_H +#define __TBB_flow_graph_H + +#define __TBB_flow_graph_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + +#include "tbb_stddef.h" +#include "atomic.h" +#include "spin_mutex.h" +#include "null_mutex.h" +#include "spin_rw_mutex.h" +#include "null_rw_mutex.h" +#include "task.h" +#include "cache_aligned_allocator.h" +#include "tbb_exception.h" +#include "internal/_template_helpers.h" +#include "internal/_aggregator_impl.h" +#include "tbb/internal/_allocator_traits.h" +#include "tbb_profiling.h" +#include "task_arena.h" + +#if TBB_USE_THREADING_TOOLS && TBB_PREVIEW_FLOW_GRAPH_TRACE && ( __linux__ || __APPLE__ ) + #if __INTEL_COMPILER + // Disabled warning "routine is both inline and noinline" + #pragma warning (push) + #pragma warning( disable: 2196 ) + #endif + #define __TBB_NOINLINE_SYM __attribute__((noinline)) +#else + #define __TBB_NOINLINE_SYM +#endif + +#if __TBB_PREVIEW_ASYNC_MSG +#include // std::vector in internal::async_storage +#include // std::shared_ptr in async_msg +#endif + +#if __TBB_PREVIEW_STREAMING_NODE +// For streaming_node +#include // std::array +#include // std::unordered_map +#include // std::decay, std::true_type, std::false_type +#endif // __TBB_PREVIEW_STREAMING_NODE + +#if TBB_DEPRECATED_FLOW_ENQUEUE +#define FLOW_SPAWN(a) tbb::task::enqueue((a)) +#else +#define FLOW_SPAWN(a) tbb::task::spawn((a)) +#endif + +#if TBB_DEPRECATED_FLOW_NODE_ALLOCATOR +#define __TBB_DEFAULT_NODE_ALLOCATOR(T) cache_aligned_allocator +#else +#define __TBB_DEFAULT_NODE_ALLOCATOR(T) null_type +#endif + +// use the VC10 or gcc version of tuple if it is available. +#if __TBB_CPP11_TUPLE_PRESENT + #include +namespace tbb { + namespace flow { + using std::tuple; + using std::tuple_size; + using std::tuple_element; + using std::get; + } +} +#else + #include "compat/tuple" +#endif + +#include +#include + +/** @file + \brief The graph related classes and functions + + There are some applications that best express dependencies as messages + passed between nodes in a graph. These messages may contain data or + simply act as signals that a predecessors has completed. The graph + class and its associated node classes can be used to express such + applications. +*/ + +namespace tbb { +namespace flow { + +//! An enumeration the provides the two most common concurrency levels: unlimited and serial +enum concurrency { unlimited = 0, serial = 1 }; + +namespace interface11 { + +//! A generic null type +struct null_type {}; + +//! An empty class used for messages that mean "I'm done" +class continue_msg {}; + +//! Forward declaration section +template< typename T > class sender; +template< typename T > class receiver; +class continue_receiver; + +template< typename T, typename U > class limiter_node; // needed for resetting decrementer + +template< typename R, typename B > class run_and_put_task; + +namespace internal { + +template class successor_cache; +template class broadcast_cache; +template class round_robin_cache; +template class predecessor_cache; +template class reservable_predecessor_cache; + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET +namespace order { +struct following; +struct preceding; +} +template struct node_set; +#endif + +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION +// Holder of edges both for caches and for those nodes which do not have predecessor caches. +// C == receiver< ... > or sender< ... >, depending. 
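// ---------------------------------------------------------------------------
// A minimal end-to-end sketch of the graph/node/edge workflow described in the
// file comment above (illustrative only, not part of this header). Node and
// function names are placeholders; the node classes used are declared later in
// this file.
#include "tbb/flow_graph.h"

void flow_graph_sketch() {
    tbb::flow::graph g;

    // unlimited: any number of invocations of the body may run concurrently.
    tbb::flow::function_node<int, int> square( g, tbb::flow::unlimited,
        []( int v ) { return v * v; } );

    // serial: at most one invocation of the body at a time.
    tbb::flow::function_node<int, int> sink( g, tbb::flow::serial,
        []( int v ) { /* consume v */ return v; } );

    tbb::flow::make_edge( square, sink );       // messages flow square -> sink

    for( int i = 0; i < 4; ++i )
        square.try_put( i );                    // inject work into the graph
    g.wait_for_all();                           // block until all graph tasks finish
}
// ---------------------------------------------------------------------------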
+template +class edge_container { + +public: + typedef std::list > edge_list_type; + + void add_edge(C &s) { + built_edges.push_back(&s); + } + + void delete_edge(C &s) { + for (typename edge_list_type::iterator i = built_edges.begin(); i != built_edges.end(); ++i) { + if (*i == &s) { + (void)built_edges.erase(i); + return; // only remove one predecessor per request + } + } + } + + void copy_edges(edge_list_type &v) { + v = built_edges; + } + + size_t edge_count() { + return (size_t)(built_edges.size()); + } + + void clear() { + built_edges.clear(); + } + + // methods remove the statement from all predecessors/successors liste in the edge + // container. + template< typename S > void sender_extract(S &s); + template< typename R > void receiver_extract(R &r); + +private: + edge_list_type built_edges; +}; // class edge_container +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ + +} // namespace internal + +} // namespace interfaceX +} // namespace flow +} // namespace tbb + +//! The graph class +#include "internal/_flow_graph_impl.h" + +namespace tbb { +namespace flow { +namespace interface11 { + +// enqueue left task if necessary. Returns the non-enqueued task if there is one. +static inline tbb::task *combine_tasks(graph& g, tbb::task * left, tbb::task * right) { + // if no RHS task, don't change left. + if (right == NULL) return left; + // right != NULL + if (left == NULL) return right; + if (left == SUCCESSFULLY_ENQUEUED) return right; + // left contains a task + if (right != SUCCESSFULLY_ENQUEUED) { + // both are valid tasks + internal::spawn_in_graph_arena(g, *left); + return right; + } + return left; +} + +#if __TBB_PREVIEW_ASYNC_MSG + +template < typename T > class __TBB_DEPRECATED async_msg; + +namespace internal { + +template < typename T > class async_storage; + +template< typename T, typename = void > +struct async_helpers { + typedef async_msg async_type; + typedef T filtered_type; + + static const bool is_async_type = false; + + static const void* to_void_ptr(const T& t) { + return static_cast(&t); + } + + static void* to_void_ptr(T& t) { + return static_cast(&t); + } + + static const T& from_void_ptr(const void* p) { + return *static_cast(p); + } + + static T& from_void_ptr(void* p) { + return *static_cast(p); + } + + static task* try_put_task_wrapper_impl(receiver* const this_recv, const void *p, bool is_async) { + if (is_async) { + // This (T) is NOT async and incoming 'A t' IS async + // Get data from async_msg + const async_msg& msg = async_helpers< async_msg >::from_void_ptr(p); + task* const new_task = msg.my_storage->subscribe(*this_recv, this_recv->graph_reference()); + // finalize() must be called after subscribe() because set() can be called in finalize() + // and 'this_recv' client must be subscribed by this moment + msg.finalize(); + return new_task; + } + else { + // Incoming 't' is NOT async + return this_recv->try_put_task(from_void_ptr(p)); + } + } +}; + +template< typename T > +struct async_helpers< T, typename std::enable_if< std::is_base_of, T>::value >::type > { + typedef T async_type; + typedef typename T::async_msg_data_type filtered_type; + + static const bool is_async_type = true; + + // Receiver-classes use const interfaces + static const void* to_void_ptr(const T& t) { + return static_cast(&static_cast&>(t)); + } + + static void* to_void_ptr(T& t) { + return static_cast(&static_cast&>(t)); + } + + // Sender-classes use non-const interfaces + static const T& from_void_ptr(const void* p) { + return *static_cast(static_cast*>(p)); + } + + static T& 
from_void_ptr(void* p) { + return *static_cast(static_cast*>(p)); + } + + // Used in receiver class + static task* try_put_task_wrapper_impl(receiver* const this_recv, const void *p, bool is_async) { + if (is_async) { + // Both are async + return this_recv->try_put_task(from_void_ptr(p)); + } + else { + // This (T) is async and incoming 'X t' is NOT async + // Create async_msg for X + const filtered_type& t = async_helpers::from_void_ptr(p); + const T msg(t); + return this_recv->try_put_task(msg); + } + } +}; + +class untyped_receiver; + +class untyped_sender { + template< typename, typename > friend class internal::predecessor_cache; + template< typename, typename > friend class internal::reservable_predecessor_cache; +public: + //! The successor type for this node + typedef untyped_receiver successor_type; + + virtual ~untyped_sender() {} + + // NOTE: Following part of PUBLIC section is copy-paste from original sender class + + // TODO: Prevent untyped successor registration + + //! Add a new successor to this node + virtual bool register_successor( successor_type &r ) = 0; + + //! Removes a successor from this node + virtual bool remove_successor( successor_type &r ) = 0; + + //! Releases the reserved item + virtual bool try_release( ) { return false; } + + //! Consumes the reserved item + virtual bool try_consume( ) { return false; } + +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + //! interface to record edges for traversal & deletion + typedef internal::edge_container built_successors_type; + typedef built_successors_type::edge_list_type successor_list_type; + virtual built_successors_type &built_successors() = 0; + virtual void internal_add_built_successor( successor_type & ) = 0; + virtual void internal_delete_built_successor( successor_type & ) = 0; + virtual void copy_successors( successor_list_type &) = 0; + virtual size_t successor_count() = 0; +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ +protected: + //! Request an item from the sender + template< typename X > + bool try_get( X &t ) { + return try_get_wrapper( internal::async_helpers::to_void_ptr(t), internal::async_helpers::is_async_type ); + } + + //! Reserves an item in the sender + template< typename X > + bool try_reserve( X &t ) { + return try_reserve_wrapper( internal::async_helpers::to_void_ptr(t), internal::async_helpers::is_async_type ); + } + + virtual bool try_get_wrapper( void* p, bool is_async ) = 0; + virtual bool try_reserve_wrapper( void* p, bool is_async ) = 0; +}; + +class untyped_receiver { + template< typename, typename > friend class run_and_put_task; + + template< typename, typename > friend class internal::broadcast_cache; + template< typename, typename > friend class internal::round_robin_cache; + template< typename, typename > friend class internal::successor_cache; + +#if __TBB_PREVIEW_OPENCL_NODE + template< typename, typename > friend class proxy_dependency_receiver; +#endif /* __TBB_PREVIEW_OPENCL_NODE */ +public: + //! The predecessor type for this node + typedef untyped_sender predecessor_type; + + //! Destructor + virtual ~untyped_receiver() {} + + //! Put an item to the receiver + template + bool try_put(const X& t) { + task *res = try_put_task(t); + if (!res) return false; + if (res != SUCCESSFULLY_ENQUEUED) internal::spawn_in_graph_arena(graph_reference(), *res); + return true; + } + + // NOTE: Following part of PUBLIC section is copy-paste from original receiver class + + // TODO: Prevent untyped predecessor registration + + //! 
Add a predecessor to the node + virtual bool register_predecessor( predecessor_type & ) { return false; } + + //! Remove a predecessor from the node + virtual bool remove_predecessor( predecessor_type & ) { return false; } + +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + typedef internal::edge_container built_predecessors_type; + typedef built_predecessors_type::edge_list_type predecessor_list_type; + virtual built_predecessors_type &built_predecessors() = 0; + virtual void internal_add_built_predecessor( predecessor_type & ) = 0; + virtual void internal_delete_built_predecessor( predecessor_type & ) = 0; + virtual void copy_predecessors( predecessor_list_type & ) = 0; + virtual size_t predecessor_count() = 0; +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ +protected: + template + task *try_put_task(const X& t) { + return try_put_task_wrapper( internal::async_helpers::to_void_ptr(t), internal::async_helpers::is_async_type ); + } + + virtual task* try_put_task_wrapper( const void* p, bool is_async ) = 0; + + virtual graph& graph_reference() const = 0; + + // NOTE: Following part of PROTECTED and PRIVATE sections is copy-paste from original receiver class + + //! put receiver back in initial state + virtual void reset_receiver(reset_flags f = rf_reset_protocol) = 0; + + virtual bool is_continue_receiver() { return false; } +}; + +} // namespace internal + +//! Pure virtual template class that defines a sender of messages of type T +template< typename T > +class sender : public internal::untyped_sender { +public: + //! The output type of this sender + __TBB_DEPRECATED typedef T output_type; + + __TBB_DEPRECATED typedef typename internal::async_helpers::filtered_type filtered_type; + + //! Request an item from the sender + virtual bool try_get( T & ) { return false; } + + //! Reserves an item in the sender + virtual bool try_reserve( T & ) { return false; } + +protected: + virtual bool try_get_wrapper( void* p, bool is_async ) __TBB_override { + // Both async OR both are NOT async + if ( internal::async_helpers::is_async_type == is_async ) { + return try_get( internal::async_helpers::from_void_ptr(p) ); + } + // Else: this (T) is async OR incoming 't' is async + __TBB_ASSERT(false, "async_msg interface does not support 'pull' protocol in try_get()"); + return false; + } + + virtual bool try_reserve_wrapper( void* p, bool is_async ) __TBB_override { + // Both async OR both are NOT async + if ( internal::async_helpers::is_async_type == is_async ) { + return try_reserve( internal::async_helpers::from_void_ptr(p) ); + } + // Else: this (T) is async OR incoming 't' is async + __TBB_ASSERT(false, "async_msg interface does not support 'pull' protocol in try_reserve()"); + return false; + } +}; // class sender + +//! Pure virtual template class that defines a receiver of messages of type T +template< typename T > +class receiver : public internal::untyped_receiver { + template< typename > friend class internal::async_storage; + template< typename, typename > friend struct internal::async_helpers; +public: + //! The input type of this receiver + __TBB_DEPRECATED typedef T input_type; + + __TBB_DEPRECATED typedef typename internal::async_helpers::filtered_type filtered_type; + + //! 
Put an item to the receiver + bool try_put( const typename internal::async_helpers::filtered_type& t ) { + return internal::untyped_receiver::try_put(t); + } + + bool try_put( const typename internal::async_helpers::async_type& t ) { + return internal::untyped_receiver::try_put(t); + } + +protected: + virtual task* try_put_task_wrapper( const void *p, bool is_async ) __TBB_override { + return internal::async_helpers::try_put_task_wrapper_impl(this, p, is_async); + } + + //! Put item to successor; return task to run the successor if possible. + virtual task *try_put_task(const T& t) = 0; + +}; // class receiver + +#else // __TBB_PREVIEW_ASYNC_MSG + +//! Pure virtual template class that defines a sender of messages of type T +template< typename T > +class sender { +public: + //! The output type of this sender + __TBB_DEPRECATED typedef T output_type; + + //! The successor type for this node + __TBB_DEPRECATED typedef receiver successor_type; + + virtual ~sender() {} + + // NOTE: Following part of PUBLIC section is partly copy-pasted in sender under #if __TBB_PREVIEW_ASYNC_MSG + + //! Add a new successor to this node + __TBB_DEPRECATED virtual bool register_successor( successor_type &r ) = 0; + + //! Removes a successor from this node + __TBB_DEPRECATED virtual bool remove_successor( successor_type &r ) = 0; + + //! Request an item from the sender + virtual bool try_get( T & ) { return false; } + + //! Reserves an item in the sender + virtual bool try_reserve( T & ) { return false; } + + //! Releases the reserved item + virtual bool try_release( ) { return false; } + + //! Consumes the reserved item + virtual bool try_consume( ) { return false; } + +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + //! interface to record edges for traversal & deletion + __TBB_DEPRECATED typedef typename internal::edge_container built_successors_type; + __TBB_DEPRECATED typedef typename built_successors_type::edge_list_type successor_list_type; + __TBB_DEPRECATED virtual built_successors_type &built_successors() = 0; + __TBB_DEPRECATED virtual void internal_add_built_successor( successor_type & ) = 0; + __TBB_DEPRECATED virtual void internal_delete_built_successor( successor_type & ) = 0; + __TBB_DEPRECATED virtual void copy_successors( successor_list_type &) = 0; + __TBB_DEPRECATED virtual size_t successor_count() = 0; +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ +}; // class sender + +//! Pure virtual template class that defines a receiver of messages of type T +template< typename T > +class receiver { +public: + //! The input type of this receiver + __TBB_DEPRECATED typedef T input_type; + + //! The predecessor type for this node + __TBB_DEPRECATED typedef sender predecessor_type; + + //! Destructor + virtual ~receiver() {} + + //! Put an item to the receiver + bool try_put( const T& t ) { + task *res = try_put_task(t); + if (!res) return false; + if (res != SUCCESSFULLY_ENQUEUED) internal::spawn_in_graph_arena(graph_reference(), *res); + return true; + } + + //! put item to successor; return task to run the successor if possible. +protected: + template< typename R, typename B > friend class run_and_put_task; + template< typename X, typename Y > friend class internal::broadcast_cache; + template< typename X, typename Y > friend class internal::round_robin_cache; + virtual task *try_put_task(const T& t) = 0; + virtual graph& graph_reference() const = 0; +public: + // NOTE: Following part of PUBLIC and PROTECTED sections is copy-pasted in receiver under #if __TBB_PREVIEW_ASYNC_MSG + + //! 
Add a predecessor to the node + __TBB_DEPRECATED virtual bool register_predecessor( predecessor_type & ) { return false; } + + //! Remove a predecessor from the node + __TBB_DEPRECATED virtual bool remove_predecessor( predecessor_type & ) { return false; } + +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + __TBB_DEPRECATED typedef typename internal::edge_container built_predecessors_type; + __TBB_DEPRECATED typedef typename built_predecessors_type::edge_list_type predecessor_list_type; + __TBB_DEPRECATED virtual built_predecessors_type &built_predecessors() = 0; + __TBB_DEPRECATED virtual void internal_add_built_predecessor( predecessor_type & ) = 0; + __TBB_DEPRECATED virtual void internal_delete_built_predecessor( predecessor_type & ) = 0; + __TBB_DEPRECATED virtual void copy_predecessors( predecessor_list_type & ) = 0; + __TBB_DEPRECATED virtual size_t predecessor_count() = 0; +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ + +protected: + //! put receiver back in initial state + virtual void reset_receiver(reset_flags f = rf_reset_protocol) = 0; + + template friend class internal::successor_cache; + virtual bool is_continue_receiver() { return false; } + +#if __TBB_PREVIEW_OPENCL_NODE + template< typename, typename > friend class proxy_dependency_receiver; +#endif /* __TBB_PREVIEW_OPENCL_NODE */ +}; // class receiver + +#endif // __TBB_PREVIEW_ASYNC_MSG + +//! Base class for receivers of completion messages +/** These receivers automatically reset, but cannot be explicitly waited on */ +class continue_receiver : public receiver< continue_msg > { +public: + + //! The input type + __TBB_DEPRECATED typedef continue_msg input_type; + + //! The predecessor type for this node + __TBB_DEPRECATED typedef receiver::predecessor_type predecessor_type; + + //! Constructor + __TBB_DEPRECATED explicit continue_receiver( + __TBB_FLOW_GRAPH_PRIORITY_ARG1(int number_of_predecessors, node_priority_t priority)) { + my_predecessor_count = my_initial_predecessor_count = number_of_predecessors; + my_current_count = 0; + __TBB_FLOW_GRAPH_PRIORITY_EXPR( my_priority = priority; ) + } + + //! Copy constructor + __TBB_DEPRECATED continue_receiver( const continue_receiver& src ) : receiver() { + my_predecessor_count = my_initial_predecessor_count = src.my_initial_predecessor_count; + my_current_count = 0; + __TBB_FLOW_GRAPH_PRIORITY_EXPR( my_priority = src.my_priority; ) + } + + //! Increments the trigger threshold + __TBB_DEPRECATED bool register_predecessor( predecessor_type & ) __TBB_override { + spin_mutex::scoped_lock l(my_mutex); + ++my_predecessor_count; + return true; + } + + //! Decrements the trigger threshold + /** Does not check to see if the removal of the predecessor now makes the current count + exceed the new threshold. So removing a predecessor while the graph is active can cause + unexpected results. 
*/ + __TBB_DEPRECATED bool remove_predecessor( predecessor_type & ) __TBB_override { + spin_mutex::scoped_lock l(my_mutex); + --my_predecessor_count; + return true; + } + +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + __TBB_DEPRECATED typedef internal::edge_container built_predecessors_type; + __TBB_DEPRECATED typedef built_predecessors_type::edge_list_type predecessor_list_type; + built_predecessors_type &built_predecessors() __TBB_override { return my_built_predecessors; } + + __TBB_DEPRECATED void internal_add_built_predecessor( predecessor_type &s) __TBB_override { + spin_mutex::scoped_lock l(my_mutex); + my_built_predecessors.add_edge( s ); + } + + __TBB_DEPRECATED void internal_delete_built_predecessor( predecessor_type &s) __TBB_override { + spin_mutex::scoped_lock l(my_mutex); + my_built_predecessors.delete_edge(s); + } + + __TBB_DEPRECATED void copy_predecessors( predecessor_list_type &v) __TBB_override { + spin_mutex::scoped_lock l(my_mutex); + my_built_predecessors.copy_edges(v); + } + + __TBB_DEPRECATED size_t predecessor_count() __TBB_override { + spin_mutex::scoped_lock l(my_mutex); + return my_built_predecessors.edge_count(); + } + +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ + +protected: + template< typename R, typename B > friend class run_and_put_task; + template friend class internal::broadcast_cache; + template friend class internal::round_robin_cache; + // execute body is supposed to be too small to create a task for. + task *try_put_task( const input_type & ) __TBB_override { + { + spin_mutex::scoped_lock l(my_mutex); + if ( ++my_current_count < my_predecessor_count ) + return SUCCESSFULLY_ENQUEUED; + else + my_current_count = 0; + } + task * res = execute(); + return res? res : SUCCESSFULLY_ENQUEUED; + } + +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + // continue_receiver must contain its own built_predecessors because it does + // not have a node_cache. + built_predecessors_type my_built_predecessors; +#endif + spin_mutex my_mutex; + int my_predecessor_count; + int my_current_count; + int my_initial_predecessor_count; + __TBB_FLOW_GRAPH_PRIORITY_EXPR( node_priority_t my_priority; ) + // the friend declaration in the base class did not eliminate the "protected class" + // error in gcc 4.1.2 + template friend class tbb::flow::interface11::limiter_node; + + void reset_receiver( reset_flags f ) __TBB_override { + my_current_count = 0; + if (f & rf_clear_edges) { +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + my_built_predecessors.clear(); +#endif + my_predecessor_count = my_initial_predecessor_count; + } + } + + //! Does whatever should happen when the threshold is reached + /** This should be very fast or else spawn a task. This is + called while the sender is blocked in the try_put(). 
*/ + virtual task * execute() = 0; + template friend class internal::successor_cache; + bool is_continue_receiver() __TBB_override { return true; } + +}; // class continue_receiver + +} // interfaceX + +#if __TBB_PREVIEW_MESSAGE_BASED_KEY_MATCHING + template + K key_from_message( const T &t ) { + return t.key(); + } +#endif /* __TBB_PREVIEW_MESSAGE_BASED_KEY_MATCHING */ + + using interface11::sender; + using interface11::receiver; + using interface11::continue_receiver; +} // flow +} // tbb + +#include "internal/_flow_graph_trace_impl.h" +#include "internal/_tbb_hash_compare_impl.h" + +namespace tbb { +namespace flow { +namespace interface11 { + +#include "internal/_flow_graph_body_impl.h" +#include "internal/_flow_graph_cache_impl.h" +#include "internal/_flow_graph_types_impl.h" +#if __TBB_PREVIEW_ASYNC_MSG +#include "internal/_flow_graph_async_msg_impl.h" +#endif +using namespace internal::graph_policy_namespace; + +template +graph_iterator::graph_iterator(C *g, bool begin) : my_graph(g), current_node(NULL) +{ + if (begin) current_node = my_graph->my_nodes; + //else it is an end iterator by default +} + +template +typename graph_iterator::reference graph_iterator::operator*() const { + __TBB_ASSERT(current_node, "graph_iterator at end"); + return *operator->(); +} + +template +typename graph_iterator::pointer graph_iterator::operator->() const { + return current_node; +} + +template +void graph_iterator::internal_forward() { + if (current_node) current_node = current_node->next; +} + +} // namespace interfaceX + +namespace interface10 { +//! Constructs a graph with isolated task_group_context +inline graph::graph() : my_nodes(NULL), my_nodes_last(NULL), my_task_arena(NULL) { + prepare_task_arena(); + own_context = true; + cancelled = false; + caught_exception = false; + my_context = new task_group_context(tbb::internal::FLOW_TASKS); + my_root_task = (new (task::allocate_root(*my_context)) empty_task); + my_root_task->set_ref_count(1); + tbb::internal::fgt_graph(this); + my_is_active = true; +} + +inline graph::graph(task_group_context& use_this_context) : + my_context(&use_this_context), my_nodes(NULL), my_nodes_last(NULL), my_task_arena(NULL) { + prepare_task_arena(); + own_context = false; + cancelled = false; + caught_exception = false; + my_root_task = (new (task::allocate_root(*my_context)) empty_task); + my_root_task->set_ref_count(1); + tbb::internal::fgt_graph(this); + my_is_active = true; +} + +inline graph::~graph() { + wait_for_all(); + my_root_task->set_ref_count(0); + tbb::task::destroy(*my_root_task); + if (own_context) delete my_context; + delete my_task_arena; +} + +inline void graph::reserve_wait() { + if (my_root_task) { + my_root_task->increment_ref_count(); + tbb::internal::fgt_reserve_wait(this); + } +} + +inline void graph::release_wait() { + if (my_root_task) { + tbb::internal::fgt_release_wait(this); + my_root_task->decrement_ref_count(); + } +} + +inline void graph::register_node(tbb::flow::interface11::graph_node *n) { + n->next = NULL; + { + spin_mutex::scoped_lock lock(nodelist_mutex); + n->prev = my_nodes_last; + if (my_nodes_last) my_nodes_last->next = n; + my_nodes_last = n; + if (!my_nodes) my_nodes = n; + } +} + +inline void graph::remove_node(tbb::flow::interface11::graph_node *n) { + { + spin_mutex::scoped_lock lock(nodelist_mutex); + __TBB_ASSERT(my_nodes && my_nodes_last, "graph::remove_node: Error: no registered nodes"); + if (n->prev) n->prev->next = n->next; + if (n->next) n->next->prev = n->prev; + if (my_nodes_last == n) my_nodes_last = n->prev; 
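// ---------------------------------------------------------------------------
// A sketch of the continue_msg counting protocol described above (illustrative
// only, not part of this header): a continue_receiver-based node fires once it
// has received one continue_msg from each registered predecessor, so the final
// node of this diamond runs exactly once per start signal. Names are placeholders.
#include "tbb/flow_graph.h"

void continue_msg_diamond_sketch() {
    using tbb::flow::continue_msg;
    tbb::flow::graph g;

    tbb::flow::broadcast_node<continue_msg> start( g );
    tbb::flow::continue_node<continue_msg> a( g,
        []( const continue_msg& ) { /* work A */ return continue_msg(); } );
    tbb::flow::continue_node<continue_msg> b( g,
        []( const continue_msg& ) { /* work B */ return continue_msg(); } );
    tbb::flow::continue_node<continue_msg> done( g,
        []( const continue_msg& ) { /* runs only after both a and b signal */ return continue_msg(); } );

    tbb::flow::make_edge( start, a );
    tbb::flow::make_edge( start, b );
    tbb::flow::make_edge( a, done );    // each make_edge raises done's predecessor count
    tbb::flow::make_edge( b, done );

    start.try_put( continue_msg() );
    g.wait_for_all();
}
// ---------------------------------------------------------------------------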
+ if (my_nodes == n) my_nodes = n->next; + } + n->prev = n->next = NULL; +} + +inline void graph::reset( tbb::flow::interface11::reset_flags f ) { + // reset context + tbb::flow::interface11::internal::deactivate_graph(*this); + + if(my_context) my_context->reset(); + cancelled = false; + caught_exception = false; + // reset all the nodes comprising the graph + for(iterator ii = begin(); ii != end(); ++ii) { + tbb::flow::interface11::graph_node *my_p = &(*ii); + my_p->reset_node(f); + } + // Reattach the arena. Might be useful to run the graph in a particular task_arena + // while not limiting graph lifetime to a single task_arena::execute() call. + prepare_task_arena( /*reinit=*/true ); + tbb::flow::interface11::internal::activate_graph(*this); + // now spawn the tasks necessary to start the graph + for(task_list_type::iterator rti = my_reset_task_list.begin(); rti != my_reset_task_list.end(); ++rti) { + tbb::flow::interface11::internal::spawn_in_graph_arena(*this, *(*rti)); + } + my_reset_task_list.clear(); +} + +inline graph::iterator graph::begin() { return iterator(this, true); } + +inline graph::iterator graph::end() { return iterator(this, false); } + +inline graph::const_iterator graph::begin() const { return const_iterator(this, true); } + +inline graph::const_iterator graph::end() const { return const_iterator(this, false); } + +inline graph::const_iterator graph::cbegin() const { return const_iterator(this, true); } + +inline graph::const_iterator graph::cend() const { return const_iterator(this, false); } + +#if TBB_PREVIEW_FLOW_GRAPH_TRACE +inline void graph::set_name(const char *name) { + tbb::internal::fgt_graph_desc(this, name); +} +#endif + +} // namespace interface10 + +namespace interface11 { + +inline graph_node::graph_node(graph& g) : my_graph(g) { + my_graph.register_node(this); +} + +inline graph_node::~graph_node() { + my_graph.remove_node(this); +} + +#include "internal/_flow_graph_node_impl.h" + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET +using internal::node_set; +#endif + +//! An executable node that acts as a source, i.e. it has no predecessors +template < typename Output > +class input_node : public graph_node, public sender< Output > { +public: + //! The type of the output message, which is complete + typedef Output output_type; + + //! The type of successors of this node + typedef typename sender::successor_type successor_type; + + //Source node has no input type + typedef null_type input_type; + +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + typedef typename sender::built_successors_type built_successors_type; + typedef typename sender::successor_list_type successor_list_type; +#endif + + //! Constructor for a node with a successor + template< typename Body > + __TBB_NOINLINE_SYM input_node( graph &g, Body body ) + : graph_node(g), my_active(false), + my_body( new internal::source_body_leaf< output_type, Body>(body) ), + my_init_body( new internal::source_body_leaf< output_type, Body>(body) ), + my_reserved(false), my_has_cached_item(false) + { + my_successors.set_owner(this); + tbb::internal::fgt_node_with_body( CODEPTR(), tbb::internal::FLOW_SOURCE_NODE, &this->my_graph, + static_cast *>(this), this->my_body ); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + input_node( const node_set& successors, Body body ) + : input_node(successors.graph_reference(), body) { + make_edges(*this, successors); + } +#endif + + //! 
Copy constructor + __TBB_NOINLINE_SYM input_node( const input_node& src ) : + graph_node(src.my_graph), sender(), + my_active(false), + my_body( src.my_init_body->clone() ), my_init_body(src.my_init_body->clone() ), + my_reserved(false), my_has_cached_item(false) + { + my_successors.set_owner(this); + tbb::internal::fgt_node_with_body(CODEPTR(), tbb::internal::FLOW_SOURCE_NODE, &this->my_graph, + static_cast *>(this), this->my_body ); + } + + //! The destructor + ~input_node() { delete my_body; delete my_init_body; } + +#if TBB_PREVIEW_FLOW_GRAPH_TRACE + void set_name( const char *name ) __TBB_override { + tbb::internal::fgt_node_desc( this, name ); + } +#endif + + //! Add a new successor to this node + bool register_successor( successor_type &r ) __TBB_override { + spin_mutex::scoped_lock lock(my_mutex); + my_successors.register_successor(r); + if ( my_active ) + spawn_put(); + return true; + } + + //! Removes a successor from this node + bool remove_successor( successor_type &r ) __TBB_override { + spin_mutex::scoped_lock lock(my_mutex); + my_successors.remove_successor(r); + return true; + } + +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + + built_successors_type &built_successors() __TBB_override { return my_successors.built_successors(); } + + void internal_add_built_successor( successor_type &r) __TBB_override { + spin_mutex::scoped_lock lock(my_mutex); + my_successors.internal_add_built_successor(r); + } + + void internal_delete_built_successor( successor_type &r) __TBB_override { + spin_mutex::scoped_lock lock(my_mutex); + my_successors.internal_delete_built_successor(r); + } + + size_t successor_count() __TBB_override { + spin_mutex::scoped_lock lock(my_mutex); + return my_successors.successor_count(); + } + + void copy_successors(successor_list_type &v) __TBB_override { + spin_mutex::scoped_lock l(my_mutex); + my_successors.copy_successors(v); + } +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ + + //! Request an item from the node + bool try_get( output_type &v ) __TBB_override { + spin_mutex::scoped_lock lock(my_mutex); + if ( my_reserved ) + return false; + + if ( my_has_cached_item ) { + v = my_cached_item; + my_has_cached_item = false; + return true; + } + // we've been asked to provide an item, but we have none. enqueue a task to + // provide one. + if ( my_active ) + spawn_put(); + return false; + } + + //! Reserves an item. + bool try_reserve( output_type &v ) __TBB_override { + spin_mutex::scoped_lock lock(my_mutex); + if ( my_reserved ) { + return false; + } + + if ( my_has_cached_item ) { + v = my_cached_item; + my_reserved = true; + return true; + } else { + return false; + } + } + + //! Release a reserved item. + /** true = item has been released and so remains in sender, dest must request or reserve future items */ + bool try_release( ) __TBB_override { + spin_mutex::scoped_lock lock(my_mutex); + __TBB_ASSERT( my_reserved && my_has_cached_item, "releasing non-existent reservation" ); + my_reserved = false; + if(!my_successors.empty()) + spawn_put(); + return true; + } + + //! Consumes a reserved item + bool try_consume( ) __TBB_override { + spin_mutex::scoped_lock lock(my_mutex); + __TBB_ASSERT( my_reserved && my_has_cached_item, "consuming non-existent reservation" ); + my_reserved = false; + my_has_cached_item = false; + if ( !my_successors.empty() ) { + spawn_put(); + } + return true; + } + + //! 
Activates a node that was created in the inactive state + void activate() { + spin_mutex::scoped_lock lock(my_mutex); + my_active = true; + if (!my_successors.empty()) + spawn_put(); + } + + template + Body copy_function_object() { + internal::source_body &body_ref = *this->my_body; + return dynamic_cast< internal::source_body_leaf & >(body_ref).get_body(); + } + +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + void extract( ) __TBB_override { + my_successors.built_successors().sender_extract(*this); // removes "my_owner" == this from each successor + my_active = false; + my_reserved = false; + if(my_has_cached_item) my_has_cached_item = false; + } +#endif + +protected: + + //! resets the source_node to its initial state + void reset_node( reset_flags f) __TBB_override { + my_active = false; + my_reserved = false; + my_has_cached_item = false; + + if(f & rf_clear_edges) my_successors.clear(); + if(f & rf_reset_bodies) { + internal::source_body *tmp = my_init_body->clone(); + delete my_body; + my_body = tmp; + } + } + +private: + spin_mutex my_mutex; + bool my_active; + internal::source_body *my_body; + internal::source_body *my_init_body; + internal::broadcast_cache< output_type > my_successors; + bool my_reserved; + bool my_has_cached_item; + output_type my_cached_item; + + // used by apply_body_bypass, can invoke body of node. + bool try_reserve_apply_body(output_type &v) { + spin_mutex::scoped_lock lock(my_mutex); + if ( my_reserved ) { + return false; + } + if ( !my_has_cached_item ) { + tbb::internal::fgt_begin_body( my_body ); + bool r = (*my_body)(my_cached_item); + tbb::internal::fgt_end_body( my_body ); + if (r) { + my_has_cached_item = true; + } + } + if ( my_has_cached_item ) { + v = my_cached_item; + my_reserved = true; + return true; + } else { + return false; + } + } + + task* create_put_task() { + return ( new ( task::allocate_additional_child_of( *(this->my_graph.root_task()) ) ) + internal:: source_task_bypass < input_node< output_type > >( *this ) ); + } + + //! Spawns a task that applies the body + void spawn_put( ) { + if(internal::is_graph_active(this->my_graph)) { + internal::spawn_in_graph_arena(this->my_graph, *create_put_task()); + } + } + + friend class internal::source_task_bypass< input_node< output_type > >; + //! Applies the body. Returning SUCCESSFULLY_ENQUEUED okay; forward_task_bypass will handle it. + task * apply_body_bypass( ) { + output_type v; + if ( !try_reserve_apply_body(v) ) + return NULL; + + task *last_task = my_successors.try_put_task(v); + if ( last_task ) + try_consume(); + else + try_release(); + return last_task; + } +}; // class input_node + +#if TBB_USE_SOURCE_NODE_AS_ALIAS +template < typename Output > +class source_node : public input_node { +public: + //! Constructor for a node with a successor + template< typename Body > + __TBB_NOINLINE_SYM source_node( graph &g, Body body ) + : input_node(g, body) + { + } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + source_node( const node_set& successors, Body body ) + : input_node(successors, body) { + } +#endif +}; +#else // TBB_USE_SOURCE_NODE_AS_ALIAS +//! An executable node that acts as a source, i.e. it has no predecessors +template < typename Output > class +__TBB_DEPRECATED_MSG("TBB Warning: tbb::flow::source_node is deprecated, use tbb::flow::input_node." ) +source_node : public graph_node, public sender< Output > { +public: + //! The type of the output message, which is complete + typedef Output output_type; + + //! 
The type of successors of this node + typedef typename sender::successor_type successor_type; + + //Source node has no input type + typedef null_type input_type; + +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + typedef typename sender::built_successors_type built_successors_type; + typedef typename sender::successor_list_type successor_list_type; +#endif + + //! Constructor for a node with a successor + template< typename Body > + __TBB_NOINLINE_SYM source_node( graph &g, Body body, bool is_active = true ) + : graph_node(g), my_active(is_active), init_my_active(is_active), + my_body( new internal::source_body_leaf< output_type, Body>(body) ), + my_init_body( new internal::source_body_leaf< output_type, Body>(body) ), + my_reserved(false), my_has_cached_item(false) + { + my_successors.set_owner(this); + tbb::internal::fgt_node_with_body( CODEPTR(), tbb::internal::FLOW_SOURCE_NODE, &this->my_graph, + static_cast *>(this), this->my_body ); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + source_node( const node_set& successors, Body body, bool is_active = true ) + : source_node(successors.graph_reference(), body, is_active) { + make_edges(*this, successors); + } +#endif + + //! Copy constructor + __TBB_NOINLINE_SYM source_node( const source_node& src ) : + graph_node(src.my_graph), sender(), + my_active(src.init_my_active), + init_my_active(src.init_my_active), my_body( src.my_init_body->clone() ), my_init_body(src.my_init_body->clone() ), + my_reserved(false), my_has_cached_item(false) + { + my_successors.set_owner(this); + tbb::internal::fgt_node_with_body(CODEPTR(), tbb::internal::FLOW_SOURCE_NODE, &this->my_graph, + static_cast *>(this), this->my_body ); + } + + //! The destructor + ~source_node() { delete my_body; delete my_init_body; } + +#if TBB_PREVIEW_FLOW_GRAPH_TRACE + void set_name( const char *name ) __TBB_override { + tbb::internal::fgt_node_desc( this, name ); + } +#endif + + //! Add a new successor to this node + bool register_successor( successor_type &r ) __TBB_override { + spin_mutex::scoped_lock lock(my_mutex); + my_successors.register_successor(r); + if ( my_active ) + spawn_put(); + return true; + } + + //! Removes a successor from this node + bool remove_successor( successor_type &r ) __TBB_override { + spin_mutex::scoped_lock lock(my_mutex); + my_successors.remove_successor(r); + return true; + } + +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + + built_successors_type &built_successors() __TBB_override { return my_successors.built_successors(); } + + void internal_add_built_successor( successor_type &r) __TBB_override { + spin_mutex::scoped_lock lock(my_mutex); + my_successors.internal_add_built_successor(r); + } + + void internal_delete_built_successor( successor_type &r) __TBB_override { + spin_mutex::scoped_lock lock(my_mutex); + my_successors.internal_delete_built_successor(r); + } + + size_t successor_count() __TBB_override { + spin_mutex::scoped_lock lock(my_mutex); + return my_successors.successor_count(); + } + + void copy_successors(successor_list_type &v) __TBB_override { + spin_mutex::scoped_lock l(my_mutex); + my_successors.copy_successors(v); + } +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ + + //! Request an item from the node + bool try_get( output_type &v ) __TBB_override { + spin_mutex::scoped_lock lock(my_mutex); + if ( my_reserved ) + return false; + + if ( my_has_cached_item ) { + v = my_cached_item; + my_has_cached_item = false; + return true; + } + // we've been asked to provide an item, but we have none. enqueue a task to + // provide one. 
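// ---------------------------------------------------------------------------
// A sketch of the producer protocol implemented above (illustrative only, not
// part of this header). In this interface version the body has the signature
// bool(Output&): it writes the next item and returns true, or returns false to
// stop. The node is constructed inactive and begins producing after activate().
// Names are placeholders.
#include "tbb/flow_graph.h"

void input_node_sketch() {
    tbb::flow::graph g;

    int next = 0;
    tbb::flow::input_node<int> numbers( g, [&]( int& out ) -> bool {
        if( next >= 10 ) return false;      // no more items
        out = next++;
        return true;                        // 'out' now holds a new item
    } );

    tbb::flow::function_node<int, int> consume( g, tbb::flow::serial,
        []( int v ) { /* use v */ return v; } );

    tbb::flow::make_edge( numbers, consume );
    numbers.activate();                     // start invoking the body and pushing items
    g.wait_for_all();
}
// ---------------------------------------------------------------------------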
+ spawn_put(); + return false; + } + + //! Reserves an item. + bool try_reserve( output_type &v ) __TBB_override { + spin_mutex::scoped_lock lock(my_mutex); + if ( my_reserved ) { + return false; + } + + if ( my_has_cached_item ) { + v = my_cached_item; + my_reserved = true; + return true; + } else { + return false; + } + } + + //! Release a reserved item. + /** true = item has been released and so remains in sender, dest must request or reserve future items */ + bool try_release( ) __TBB_override { + spin_mutex::scoped_lock lock(my_mutex); + __TBB_ASSERT( my_reserved && my_has_cached_item, "releasing non-existent reservation" ); + my_reserved = false; + if(!my_successors.empty()) + spawn_put(); + return true; + } + + //! Consumes a reserved item + bool try_consume( ) __TBB_override { + spin_mutex::scoped_lock lock(my_mutex); + __TBB_ASSERT( my_reserved && my_has_cached_item, "consuming non-existent reservation" ); + my_reserved = false; + my_has_cached_item = false; + if ( !my_successors.empty() ) { + spawn_put(); + } + return true; + } + + //! Activates a node that was created in the inactive state + void activate() { + spin_mutex::scoped_lock lock(my_mutex); + my_active = true; + if (!my_successors.empty()) + spawn_put(); + } + + template + Body copy_function_object() { + internal::source_body &body_ref = *this->my_body; + return dynamic_cast< internal::source_body_leaf & >(body_ref).get_body(); + } + +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + void extract( ) __TBB_override { + my_successors.built_successors().sender_extract(*this); // removes "my_owner" == this from each successor + my_active = init_my_active; + my_reserved = false; + if(my_has_cached_item) my_has_cached_item = false; + } +#endif + +protected: + + //! resets the source_node to its initial state + void reset_node( reset_flags f) __TBB_override { + my_active = init_my_active; + my_reserved =false; + if(my_has_cached_item) { + my_has_cached_item = false; + } + if(f & rf_clear_edges) my_successors.clear(); + if(f & rf_reset_bodies) { + internal::source_body *tmp = my_init_body->clone(); + delete my_body; + my_body = tmp; + } + if(my_active) + internal::add_task_to_graph_reset_list(this->my_graph, create_put_task()); + } + +private: + spin_mutex my_mutex; + bool my_active; + bool init_my_active; + internal::source_body *my_body; + internal::source_body *my_init_body; + internal::broadcast_cache< output_type > my_successors; + bool my_reserved; + bool my_has_cached_item; + output_type my_cached_item; + + // used by apply_body_bypass, can invoke body of node. + bool try_reserve_apply_body(output_type &v) { + spin_mutex::scoped_lock lock(my_mutex); + if ( my_reserved ) { + return false; + } + if ( !my_has_cached_item ) { + tbb::internal::fgt_begin_body( my_body ); + bool r = (*my_body)(my_cached_item); + tbb::internal::fgt_end_body( my_body ); + if (r) { + my_has_cached_item = true; + } + } + if ( my_has_cached_item ) { + v = my_cached_item; + my_reserved = true; + return true; + } else { + return false; + } + } + + // when resetting, and if the source_node was created with my_active == true, then + // when we reset the node we must store a task to run the node, and spawn it only + // after the reset is complete and is_active() is again true. This is why we don't + // test for is_active() here. + task* create_put_task() { + return ( new ( task::allocate_additional_child_of( *(this->my_graph.root_task()) ) ) + internal:: source_task_bypass < source_node< output_type > >( *this ) ); + } + + //! 
Spawns a task that applies the body + void spawn_put( ) { + if(internal::is_graph_active(this->my_graph)) { + internal::spawn_in_graph_arena(this->my_graph, *create_put_task()); + } + } + + friend class internal::source_task_bypass< source_node< output_type > >; + //! Applies the body. Returning SUCCESSFULLY_ENQUEUED okay; forward_task_bypass will handle it. + task * apply_body_bypass( ) { + output_type v; + if ( !try_reserve_apply_body(v) ) + return NULL; + + task *last_task = my_successors.try_put_task(v); + if ( last_task ) + try_consume(); + else + try_release(); + return last_task; + } +}; // class source_node +#endif // TBB_USE_SOURCE_NODE_AS_ALIAS + +//! Implements a function node that supports Input -> Output +template +class function_node + : public graph_node +#if TBB_DEPRECATED_FLOW_NODE_ALLOCATOR + , public internal::function_input< Input, Output, Policy, Allocator > +#else + , public internal::function_input< Input, Output, Policy, cache_aligned_allocator > +#endif + , public internal::function_output { + +#if TBB_DEPRECATED_FLOW_NODE_ALLOCATOR + typedef Allocator internals_allocator; +#else + typedef cache_aligned_allocator internals_allocator; + + __TBB_STATIC_ASSERT( + (tbb::internal::is_same_type::value), + "Allocator template parameter for flow graph nodes is deprecated and will be removed. " + "Specify TBB_DEPRECATED_FLOW_NODE_ALLOCATOR to temporary enable the deprecated interface." + ); +#endif + +public: + typedef Input input_type; + typedef Output output_type; + typedef internal::function_input input_impl_type; + typedef internal::function_input_queue input_queue_type; + typedef internal::function_output fOutput_type; + typedef typename input_impl_type::predecessor_type predecessor_type; + typedef typename fOutput_type::successor_type successor_type; +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + typedef typename input_impl_type::predecessor_list_type predecessor_list_type; + typedef typename fOutput_type::successor_list_type successor_list_type; +#endif + using input_impl_type::my_predecessors; + + //! Constructor + // input_queue_type is allocated here, but destroyed in the function_input_base. + // TODO: pass the graph_buffer_policy to the function_input_base so it can all + // be done in one place. This would be an interface-breaking change. 
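// ---------------------------------------------------------------------------
// A sketch of the concurrency argument accepted by the constructor below
// (illustrative only, not part of this header). The value caps how many
// invocations of the body may run at once; with the default queueing policy,
// inputs arriving while the node is at its limit are buffered internally.
// Names are placeholders.
#include "tbb/flow_graph.h"

void function_node_concurrency_sketch() {
    tbb::flow::graph g;

    // At most 4 bodies in flight at a time; further inputs wait in the node's queue.
    tbb::flow::function_node<int, int> worker( g, 4, []( int v ) { return 2 * v; } );

    // serial == 1: invocations of this body never overlap each other.
    tbb::flow::function_node<int, int> collect( g, tbb::flow::serial,
        []( int v ) { /* accumulate v */ return v; } );

    tbb::flow::make_edge( worker, collect );
    for( int i = 0; i < 100; ++i )
        worker.try_put( i );
    g.wait_for_all();
}
// ---------------------------------------------------------------------------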
+ template< typename Body > + __TBB_NOINLINE_SYM function_node( graph &g, size_t concurrency, +#if __TBB_CPP11_PRESENT + Body body, __TBB_FLOW_GRAPH_PRIORITY_ARG1( Policy = Policy(), node_priority_t priority = tbb::flow::internal::no_priority )) +#else + __TBB_FLOW_GRAPH_PRIORITY_ARG1( Body body, node_priority_t priority = tbb::flow::internal::no_priority )) +#endif + : graph_node(g), input_impl_type(g, concurrency, __TBB_FLOW_GRAPH_PRIORITY_ARG1(body, priority)), + fOutput_type(g) { + tbb::internal::fgt_node_with_body( CODEPTR(), tbb::internal::FLOW_FUNCTION_NODE, &this->my_graph, + static_cast *>(this), static_cast *>(this), this->my_body ); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES && __TBB_CPP11_PRESENT + template + function_node( graph& g, size_t concurrency, Body body, node_priority_t priority ) + : function_node(g, concurrency, body, Policy(), priority) {} +#endif // __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES && __TBB_CPP11_PRESENT + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + function_node( const node_set& nodes, size_t concurrency, Body body, + __TBB_FLOW_GRAPH_PRIORITY_ARG1( Policy p = Policy(), node_priority_t priority = tbb::flow::internal::no_priority )) + : function_node(nodes.graph_reference(), concurrency, body, __TBB_FLOW_GRAPH_PRIORITY_ARG1(p, priority)) { + make_edges_in_order(nodes, *this); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES + template + function_node( const node_set& nodes, size_t concurrency, Body body, node_priority_t priority ) + : function_node(nodes, concurrency, body, Policy(), priority) {} +#endif // __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES +#endif // __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + + //! Copy constructor + __TBB_NOINLINE_SYM function_node( const function_node& src ) : + graph_node(src.my_graph), + input_impl_type(src), + fOutput_type(src.my_graph) { + tbb::internal::fgt_node_with_body( CODEPTR(), tbb::internal::FLOW_FUNCTION_NODE, &this->my_graph, + static_cast *>(this), static_cast *>(this), this->my_body ); + } + +#if TBB_PREVIEW_FLOW_GRAPH_TRACE + void set_name( const char *name ) __TBB_override { + tbb::internal::fgt_node_desc( this, name ); + } +#endif + +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + void extract( ) __TBB_override { + my_predecessors.built_predecessors().receiver_extract(*this); + successors().built_successors().sender_extract(*this); + } +#endif + +protected: + template< typename R, typename B > friend class run_and_put_task; + template friend class internal::broadcast_cache; + template friend class internal::round_robin_cache; + using input_impl_type::try_put_task; + + internal::broadcast_cache &successors () __TBB_override { return fOutput_type::my_successors; } + + void reset_node(reset_flags f) __TBB_override { + input_impl_type::reset_function_input(f); + // TODO: use clear() instead. + if(f & rf_clear_edges) { + successors().clear(); + my_predecessors.clear(); + } + __TBB_ASSERT(!(f & rf_clear_edges) || successors().empty(), "function_node successors not empty"); + __TBB_ASSERT(this->my_predecessors.empty(), "function_node predecessors not empty"); + } + +}; // class function_node + +//! implements a function node that supports Input -> (set of outputs) +// Output is a tuple of output types. 
+template +class multifunction_node : + public graph_node, + public internal::multifunction_input + < + Input, + typename internal::wrap_tuple_elements< + tbb::flow::tuple_size::value, // #elements in tuple + internal::multifunction_output, // wrap this around each element + Output // the tuple providing the types + >::type, + Policy, +#if TBB_DEPRECATED_FLOW_NODE_ALLOCATOR + Allocator +#else + cache_aligned_allocator +#endif + > { +#if TBB_DEPRECATED_FLOW_NODE_ALLOCATOR + typedef Allocator internals_allocator; +#else + typedef cache_aligned_allocator internals_allocator; + + __TBB_STATIC_ASSERT( + (tbb::internal::is_same_type::value), + "Allocator template parameter for flow graph nodes is deprecated and will be removed. " + "Specify TBB_DEPRECATED_FLOW_NODE_ALLOCATOR to temporary enable the deprecated interface." + ); +#endif + +protected: + static const int N = tbb::flow::tuple_size::value; +public: + typedef Input input_type; + typedef null_type output_type; + typedef typename internal::wrap_tuple_elements::type output_ports_type; + typedef internal::multifunction_input< + input_type, output_ports_type, Policy, internals_allocator> input_impl_type; + typedef internal::function_input_queue input_queue_type; +private: + using input_impl_type::my_predecessors; +public: + template + __TBB_NOINLINE_SYM multifunction_node( + graph &g, size_t concurrency, +#if __TBB_CPP11_PRESENT + Body body, __TBB_FLOW_GRAPH_PRIORITY_ARG1( Policy = Policy(), node_priority_t priority = tbb::flow::internal::no_priority ) +#else + __TBB_FLOW_GRAPH_PRIORITY_ARG1(Body body, node_priority_t priority = tbb::flow::internal::no_priority) +#endif + ) : graph_node(g), input_impl_type(g, concurrency, __TBB_FLOW_GRAPH_PRIORITY_ARG1(body, priority)) { + tbb::internal::fgt_multioutput_node_with_body( + CODEPTR(), tbb::internal::FLOW_MULTIFUNCTION_NODE, + &this->my_graph, static_cast *>(this), + this->output_ports(), this->my_body + ); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES && __TBB_CPP11_PRESENT + template + __TBB_NOINLINE_SYM multifunction_node(graph& g, size_t concurrency, Body body, node_priority_t priority) + : multifunction_node(g, concurrency, body, Policy(), priority) {} +#endif // TBB_PREVIEW_FLOW_GRAPH_PRIORITIES && __TBB_CPP11_PRESENT + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + __TBB_NOINLINE_SYM multifunction_node(const node_set& nodes, size_t concurrency, Body body, + __TBB_FLOW_GRAPH_PRIORITY_ARG1(Policy p = Policy(), node_priority_t priority = tbb::flow::internal::no_priority)) + : multifunction_node(nodes.graph_reference(), concurrency, body, __TBB_FLOW_GRAPH_PRIORITY_ARG1(p, priority)) { + make_edges_in_order(nodes, *this); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES + template + __TBB_NOINLINE_SYM multifunction_node(const node_set& nodes, size_t concurrency, Body body, node_priority_t priority) + : multifunction_node(nodes, concurrency, body, Policy(), priority) {} +#endif // __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES +#endif // __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + + __TBB_NOINLINE_SYM multifunction_node( const multifunction_node &other) : + graph_node(other.my_graph), input_impl_type(other) { + tbb::internal::fgt_multioutput_node_with_body( CODEPTR(), tbb::internal::FLOW_MULTIFUNCTION_NODE, + &this->my_graph, static_cast *>(this), + this->output_ports(), this->my_body ); + } + +#if TBB_PREVIEW_FLOW_GRAPH_TRACE + void set_name( const char *name ) __TBB_override { + tbb::internal::fgt_multioutput_node_desc( this, name ); + } +#endif + +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + void extract( ) 
__TBB_override { + my_predecessors.built_predecessors().receiver_extract(*this); + input_impl_type::extract(); + } +#endif + // all the guts are in multifunction_input... +protected: + void reset_node(reset_flags f) __TBB_override { input_impl_type::reset(f); } +}; // multifunction_node + +//! split_node: accepts a tuple as input, forwards each element of the tuple to its +// successors. The node has unlimited concurrency, so it does not reject inputs. +template +class split_node : public graph_node, public receiver { + static const int N = tbb::flow::tuple_size::value; + typedef receiver base_type; +public: + typedef TupleType input_type; +#if TBB_DEPRECATED_FLOW_NODE_ALLOCATOR + typedef Allocator allocator_type; +#else + __TBB_STATIC_ASSERT( + (tbb::internal::is_same_type::value), + "Allocator template parameter for flow graph nodes is deprecated and will be removed. " + "Specify TBB_DEPRECATED_FLOW_NODE_ALLOCATOR to temporary enable the deprecated interface." + ); +#endif +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + typedef typename base_type::predecessor_type predecessor_type; + typedef typename base_type::predecessor_list_type predecessor_list_type; + typedef internal::predecessor_cache predecessor_cache_type; + typedef typename predecessor_cache_type::built_predecessors_type built_predecessors_type; +#endif + + typedef typename internal::wrap_tuple_elements< + N, // #elements in tuple + internal::multifunction_output, // wrap this around each element + TupleType // the tuple providing the types + >::type output_ports_type; + + __TBB_NOINLINE_SYM explicit split_node(graph &g) + : graph_node(g), + my_output_ports(internal::init_output_ports::call(g, my_output_ports)) + { + tbb::internal::fgt_multioutput_node(CODEPTR(), tbb::internal::FLOW_SPLIT_NODE, &this->my_graph, + static_cast *>(this), this->output_ports()); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + __TBB_NOINLINE_SYM split_node(const node_set& nodes) : split_node(nodes.graph_reference()) { + make_edges_in_order(nodes, *this); + } +#endif + + __TBB_NOINLINE_SYM split_node(const split_node& other) + : graph_node(other.my_graph), base_type(other), + my_output_ports(internal::init_output_ports::call(other.my_graph, my_output_ports)) + { + tbb::internal::fgt_multioutput_node(CODEPTR(), tbb::internal::FLOW_SPLIT_NODE, &this->my_graph, + static_cast *>(this), this->output_ports()); + } + +#if TBB_PREVIEW_FLOW_GRAPH_TRACE + void set_name( const char *name ) __TBB_override { + tbb::internal::fgt_multioutput_node_desc( this, name ); + } +#endif + + output_ports_type &output_ports() { return my_output_ports; } + +protected: + task *try_put_task(const TupleType& t) __TBB_override { + // Sending split messages in parallel is not justified, as overheads would prevail. + // Also, we do not have successors here. So we just tell the task returned here is successful. + return internal::emit_element::emit_this(this->my_graph, t, output_ports()); + } + void reset_node(reset_flags f) __TBB_override { + if (f & rf_clear_edges) + internal::clear_element::clear_this(my_output_ports); + + __TBB_ASSERT(!(f & rf_clear_edges) || internal::clear_element::this_empty(my_output_ports), "split_node reset failed"); + } + void reset_receiver(reset_flags /*f*/) __TBB_override {} + graph& graph_reference() const __TBB_override { + return my_graph; + } +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION +private: //! split_node doesn't use this "predecessors" functionality; so, we have "dummies" here; + void extract() __TBB_override {} + + //! 
Adds to list of predecessors added by make_edge + void internal_add_built_predecessor(predecessor_type&) __TBB_override {} + + //! removes from to list of predecessors (used by remove_edge) + void internal_delete_built_predecessor(predecessor_type&) __TBB_override {} + + size_t predecessor_count() __TBB_override { return 0; } + + void copy_predecessors(predecessor_list_type&) __TBB_override {} + + built_predecessors_type &built_predecessors() __TBB_override { return my_predessors; } + + //! dummy member + built_predecessors_type my_predessors; +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ + +private: + output_ports_type my_output_ports; +}; + +//! Implements an executable node that supports continue_msg -> Output +template > +class continue_node : public graph_node, public internal::continue_input, + public internal::function_output { +public: + typedef continue_msg input_type; + typedef Output output_type; + typedef internal::continue_input input_impl_type; + typedef internal::function_output fOutput_type; + typedef typename input_impl_type::predecessor_type predecessor_type; + typedef typename fOutput_type::successor_type successor_type; + + //! Constructor for executable node with continue_msg -> Output + template + __TBB_NOINLINE_SYM continue_node( + graph &g, +#if __TBB_CPP11_PRESENT + Body body, __TBB_FLOW_GRAPH_PRIORITY_ARG1( Policy = Policy(), node_priority_t priority = tbb::flow::internal::no_priority ) +#else + __TBB_FLOW_GRAPH_PRIORITY_ARG1( Body body, node_priority_t priority = tbb::flow::internal::no_priority ) +#endif + ) : graph_node(g), input_impl_type( g, __TBB_FLOW_GRAPH_PRIORITY_ARG1(body, priority) ), + fOutput_type(g) { + tbb::internal::fgt_node_with_body( CODEPTR(), tbb::internal::FLOW_CONTINUE_NODE, &this->my_graph, + + static_cast *>(this), + static_cast *>(this), this->my_body ); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES && __TBB_CPP11_PRESENT + template + continue_node( graph& g, Body body, node_priority_t priority ) + : continue_node(g, body, Policy(), priority) {} +#endif + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + continue_node( const node_set& nodes, Body body, + __TBB_FLOW_GRAPH_PRIORITY_ARG1( Policy p = Policy(), node_priority_t priority = tbb::flow::internal::no_priority)) + : continue_node(nodes.graph_reference(), body, __TBB_FLOW_GRAPH_PRIORITY_ARG1(p, priority) ) { + make_edges_in_order(nodes, *this); + } +#if __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES + template + continue_node( const node_set& nodes, Body body, node_priority_t priority) + : continue_node(nodes, body, Policy(), priority) {} +#endif // __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES +#endif // __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + + //! 
Constructor for executable node with continue_msg -> Output + template + __TBB_NOINLINE_SYM continue_node( + graph &g, int number_of_predecessors, +#if __TBB_CPP11_PRESENT + Body body, __TBB_FLOW_GRAPH_PRIORITY_ARG1( Policy = Policy(), node_priority_t priority = tbb::flow::internal::no_priority ) +#else + __TBB_FLOW_GRAPH_PRIORITY_ARG1( Body body, node_priority_t priority = tbb::flow::internal::no_priority ) +#endif + ) : graph_node(g) + , input_impl_type(g, number_of_predecessors, __TBB_FLOW_GRAPH_PRIORITY_ARG1(body, priority)), + fOutput_type(g) { + tbb::internal::fgt_node_with_body( CODEPTR(), tbb::internal::FLOW_CONTINUE_NODE, &this->my_graph, + static_cast *>(this), + static_cast *>(this), this->my_body ); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES && __TBB_CPP11_PRESENT + template + continue_node( graph& g, int number_of_predecessors, Body body, node_priority_t priority) + : continue_node(g, number_of_predecessors, body, Policy(), priority) {} +#endif + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + continue_node( const node_set& nodes, int number_of_predecessors, + Body body, __TBB_FLOW_GRAPH_PRIORITY_ARG1( Policy p = Policy(), node_priority_t priority = tbb::flow::internal::no_priority )) + : continue_node(nodes.graph_reference(), number_of_predecessors, body, __TBB_FLOW_GRAPH_PRIORITY_ARG1(p, priority)) { + make_edges_in_order(nodes, *this); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES + template + continue_node( const node_set& nodes, int number_of_predecessors, + Body body, node_priority_t priority ) + : continue_node(nodes, number_of_predecessors, body, Policy(), priority) {} +#endif +#endif + + //! Copy constructor + __TBB_NOINLINE_SYM continue_node( const continue_node& src ) : + graph_node(src.my_graph), input_impl_type(src), + internal::function_output(src.my_graph) { + tbb::internal::fgt_node_with_body( CODEPTR(), tbb::internal::FLOW_CONTINUE_NODE, &this->my_graph, + static_cast *>(this), + static_cast *>(this), this->my_body ); + } + +#if TBB_PREVIEW_FLOW_GRAPH_TRACE + void set_name( const char *name ) __TBB_override { + tbb::internal::fgt_node_desc( this, name ); + } +#endif + +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + void extract() __TBB_override { + input_impl_type::my_built_predecessors.receiver_extract(*this); + successors().built_successors().sender_extract(*this); + } +#endif + +protected: + template< typename R, typename B > friend class run_and_put_task; + template friend class internal::broadcast_cache; + template friend class internal::round_robin_cache; + using input_impl_type::try_put_task; + internal::broadcast_cache &successors () __TBB_override { return fOutput_type::my_successors; } + + void reset_node(reset_flags f) __TBB_override { + input_impl_type::reset_receiver(f); + if(f & rf_clear_edges)successors().clear(); + __TBB_ASSERT(!(f & rf_clear_edges) || successors().empty(), "continue_node not reset"); + } +}; // continue_node + +//! 
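+// Usage sketch for the continue_node defined above (assumes graph, broadcast_node,
+// make_edge and wait_for_all from this header; do_work is a placeholder for user code):
+//
+//     tbb::flow::graph g;
+//     tbb::flow::broadcast_node<tbb::flow::continue_msg> start( g );
+//     tbb::flow::continue_node<tbb::flow::continue_msg> step( g,
+//         []( const tbb::flow::continue_msg& ) {
+//             do_work();                          // hypothetical user function
+//             return tbb::flow::continue_msg();
+//         } );
+//     tbb::flow::make_edge( start, step );
+//     start.try_put( tbb::flow::continue_msg() );
+//     g.wait_for_all();
+//
+// The body runs only after a continue_msg has arrived from every known predecessor,
+// which is what makes continue_node suitable for expressing dependency graphs.
+
+//!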
Forwards messages of type T to all successors +template +class broadcast_node : public graph_node, public receiver, public sender { +public: + typedef T input_type; + typedef T output_type; + typedef typename receiver::predecessor_type predecessor_type; + typedef typename sender::successor_type successor_type; +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + typedef typename receiver::predecessor_list_type predecessor_list_type; + typedef typename sender::successor_list_type successor_list_type; +#endif +private: + internal::broadcast_cache my_successors; +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + internal::edge_container my_built_predecessors; + spin_mutex pred_mutex; // serialize accesses on edge_container +#endif +public: + + __TBB_NOINLINE_SYM explicit broadcast_node(graph& g) : graph_node(g) { + my_successors.set_owner( this ); + tbb::internal::fgt_node( CODEPTR(), tbb::internal::FLOW_BROADCAST_NODE, &this->my_graph, + static_cast *>(this), static_cast *>(this) ); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + broadcast_node(const node_set& nodes) : broadcast_node(nodes.graph_reference()) { + make_edges_in_order(nodes, *this); + } +#endif + + // Copy constructor + __TBB_NOINLINE_SYM broadcast_node( const broadcast_node& src ) : + graph_node(src.my_graph), receiver(), sender() + { + my_successors.set_owner( this ); + tbb::internal::fgt_node( CODEPTR(), tbb::internal::FLOW_BROADCAST_NODE, &this->my_graph, + static_cast *>(this), static_cast *>(this) ); + } + +#if TBB_PREVIEW_FLOW_GRAPH_TRACE + void set_name( const char *name ) __TBB_override { + tbb::internal::fgt_node_desc( this, name ); + } +#endif + + //! Adds a successor + bool register_successor( successor_type &r ) __TBB_override { + my_successors.register_successor( r ); + return true; + } + + //! 
Removes s as a successor + bool remove_successor( successor_type &r ) __TBB_override { + my_successors.remove_successor( r ); + return true; + } + +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + typedef typename sender::built_successors_type built_successors_type; + + built_successors_type &built_successors() __TBB_override { return my_successors.built_successors(); } + + void internal_add_built_successor(successor_type &r) __TBB_override { + my_successors.internal_add_built_successor(r); + } + + void internal_delete_built_successor(successor_type &r) __TBB_override { + my_successors.internal_delete_built_successor(r); + } + + size_t successor_count() __TBB_override { + return my_successors.successor_count(); + } + + void copy_successors(successor_list_type &v) __TBB_override { + my_successors.copy_successors(v); + } + + typedef typename receiver::built_predecessors_type built_predecessors_type; + + built_predecessors_type &built_predecessors() __TBB_override { return my_built_predecessors; } + + void internal_add_built_predecessor( predecessor_type &p) __TBB_override { + spin_mutex::scoped_lock l(pred_mutex); + my_built_predecessors.add_edge(p); + } + + void internal_delete_built_predecessor( predecessor_type &p) __TBB_override { + spin_mutex::scoped_lock l(pred_mutex); + my_built_predecessors.delete_edge(p); + } + + size_t predecessor_count() __TBB_override { + spin_mutex::scoped_lock l(pred_mutex); + return my_built_predecessors.edge_count(); + } + + void copy_predecessors(predecessor_list_type &v) __TBB_override { + spin_mutex::scoped_lock l(pred_mutex); + my_built_predecessors.copy_edges(v); + } + + void extract() __TBB_override { + my_built_predecessors.receiver_extract(*this); + my_successors.built_successors().sender_extract(*this); + } +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ + +protected: + template< typename R, typename B > friend class run_and_put_task; + template friend class internal::broadcast_cache; + template friend class internal::round_robin_cache; + //! build a task to run the successor if possible. Default is old behavior. + task *try_put_task(const T& t) __TBB_override { + task *new_task = my_successors.try_put_task(t); + if (!new_task) new_task = SUCCESSFULLY_ENQUEUED; + return new_task; + } + + graph& graph_reference() const __TBB_override { + return my_graph; + } + + void reset_receiver(reset_flags /*f*/) __TBB_override {} + + void reset_node(reset_flags f) __TBB_override { + if (f&rf_clear_edges) { + my_successors.clear(); +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + my_built_predecessors.clear(); +#endif + } + __TBB_ASSERT(!(f & rf_clear_edges) || my_successors.empty(), "Error resetting broadcast_node"); + } +}; // broadcast_node + +//! 
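+// Usage sketch for the broadcast_node defined above (assumes graph, function_node,
+// make_edge and wait_for_all from this header; names are illustrative):
+//
+//     tbb::flow::graph g;
+//     tbb::flow::broadcast_node<int> fanout( g );
+//     tbb::flow::function_node<int, int> left ( g, tbb::flow::unlimited, []( int v ) { return v + 1; } );
+//     tbb::flow::function_node<int, int> right( g, tbb::flow::unlimited, []( int v ) { return v - 1; } );
+//     tbb::flow::make_edge( fanout, left );
+//     tbb::flow::make_edge( fanout, right );
+//     fanout.try_put( 42 );   // every registered successor receives a copy of the value
+//     g.wait_for_all();
+
+//!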
Forwards messages in arbitrary order +template +class buffer_node + : public graph_node +#if TBB_DEPRECATED_FLOW_NODE_ALLOCATOR + , public internal::reservable_item_buffer< T, Allocator > +#else + , public internal::reservable_item_buffer< T, cache_aligned_allocator > +#endif + , public receiver, public sender { +#if TBB_DEPRECATED_FLOW_NODE_ALLOCATOR + typedef Allocator internals_allocator; +#else + typedef cache_aligned_allocator internals_allocator; +#endif +public: + typedef T input_type; + typedef T output_type; + typedef typename receiver::predecessor_type predecessor_type; + typedef typename sender::successor_type successor_type; + typedef buffer_node class_type; +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + typedef typename receiver::predecessor_list_type predecessor_list_type; + typedef typename sender::successor_list_type successor_list_type; +#endif +#if !TBB_DEPRECATED_FLOW_NODE_ALLOCATOR + __TBB_STATIC_ASSERT( + (tbb::internal::is_same_type::value), + "Allocator template parameter for flow graph nodes is deprecated and will be removed. " + "Specify TBB_DEPRECATED_FLOW_NODE_ALLOCATOR to temporary enable the deprecated interface." + ); +#endif + +protected: + typedef size_t size_type; + internal::round_robin_cache< T, null_rw_mutex > my_successors; + +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + internal::edge_container my_built_predecessors; +#endif + + friend class internal::forward_task_bypass< class_type >; + + enum op_type {reg_succ, rem_succ, req_item, res_item, rel_res, con_res, put_item, try_fwd_task +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + , add_blt_succ, del_blt_succ, + add_blt_pred, del_blt_pred, + blt_succ_cnt, blt_pred_cnt, + blt_succ_cpy, blt_pred_cpy // create vector copies of preds and succs +#endif + }; + + // implements the aggregator_operation concept + class buffer_operation : public internal::aggregated_operation< buffer_operation > { + public: + char type; +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + task * ltask; + union { + input_type *elem; + successor_type *r; + predecessor_type *p; + size_t cnt_val; + successor_list_type *svec; + predecessor_list_type *pvec; + }; +#else + T *elem; + task * ltask; + successor_type *r; +#endif + buffer_operation(const T& e, op_type t) : type(char(t)) + +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + , ltask(NULL), elem(const_cast(&e)) +#else + , elem(const_cast(&e)) , ltask(NULL) +#endif + {} + buffer_operation(op_type t) : type(char(t)), ltask(NULL) {} + }; + + bool forwarder_busy; + typedef internal::aggregating_functor handler_type; + friend class internal::aggregating_functor; + internal::aggregator< handler_type, buffer_operation> my_aggregator; + + virtual void handle_operations(buffer_operation *op_list) { + handle_operations_impl(op_list, this); + } + + template + void handle_operations_impl(buffer_operation *op_list, derived_type* derived) { + __TBB_ASSERT(static_cast(derived) == this, "'this' is not a base class for derived"); + + buffer_operation *tmp = NULL; + bool try_forwarding = false; + while (op_list) { + tmp = op_list; + op_list = op_list->next; + switch (tmp->type) { + case reg_succ: internal_reg_succ(tmp); try_forwarding = true; break; + case rem_succ: internal_rem_succ(tmp); break; + case req_item: internal_pop(tmp); break; + case res_item: internal_reserve(tmp); break; + case rel_res: internal_release(tmp); try_forwarding = true; break; + case con_res: internal_consume(tmp); try_forwarding = true; break; + case put_item: try_forwarding = internal_push(tmp); break; + case try_fwd_task: 
internal_forward_task(tmp); break; +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + // edge recording + case add_blt_succ: internal_add_built_succ(tmp); break; + case del_blt_succ: internal_del_built_succ(tmp); break; + case add_blt_pred: internal_add_built_pred(tmp); break; + case del_blt_pred: internal_del_built_pred(tmp); break; + case blt_succ_cnt: internal_succ_cnt(tmp); break; + case blt_pred_cnt: internal_pred_cnt(tmp); break; + case blt_succ_cpy: internal_copy_succs(tmp); break; + case blt_pred_cpy: internal_copy_preds(tmp); break; +#endif + } + } + + derived->order(); + + if (try_forwarding && !forwarder_busy) { + if(internal::is_graph_active(this->my_graph)) { + forwarder_busy = true; + task *new_task = new(task::allocate_additional_child_of(*(this->my_graph.root_task()))) internal:: + forward_task_bypass(*this); + // tmp should point to the last item handled by the aggregator. This is the operation + // the handling thread enqueued. So modifying that record will be okay. + // workaround for icc bug + tbb::task *z = tmp->ltask; + graph &g = this->my_graph; + tmp->ltask = combine_tasks(g, z, new_task); // in case the op generated a task + } + } + } // handle_operations + + inline task *grab_forwarding_task( buffer_operation &op_data) { + return op_data.ltask; + } + + inline bool enqueue_forwarding_task(buffer_operation &op_data) { + task *ft = grab_forwarding_task(op_data); + if(ft) { + internal::spawn_in_graph_arena(graph_reference(), *ft); + return true; + } + return false; + } + + //! This is executed by an enqueued task, the "forwarder" + virtual task *forward_task() { + buffer_operation op_data(try_fwd_task); + task *last_task = NULL; + do { + op_data.status = internal::WAIT; + op_data.ltask = NULL; + my_aggregator.execute(&op_data); + + // workaround for icc bug + tbb::task *xtask = op_data.ltask; + graph& g = this->my_graph; + last_task = combine_tasks(g, last_task, xtask); + } while (op_data.status ==internal::SUCCEEDED); + return last_task; + } + + //! Register successor + virtual void internal_reg_succ(buffer_operation *op) { + my_successors.register_successor(*(op->r)); + __TBB_store_with_release(op->status, internal::SUCCEEDED); + } + + //! 
Remove successor + virtual void internal_rem_succ(buffer_operation *op) { + my_successors.remove_successor(*(op->r)); + __TBB_store_with_release(op->status, internal::SUCCEEDED); + } + +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + typedef typename sender::built_successors_type built_successors_type; + + built_successors_type &built_successors() __TBB_override { return my_successors.built_successors(); } + + virtual void internal_add_built_succ(buffer_operation *op) { + my_successors.internal_add_built_successor(*(op->r)); + __TBB_store_with_release(op->status, internal::SUCCEEDED); + } + + virtual void internal_del_built_succ(buffer_operation *op) { + my_successors.internal_delete_built_successor(*(op->r)); + __TBB_store_with_release(op->status, internal::SUCCEEDED); + } + + typedef typename receiver::built_predecessors_type built_predecessors_type; + + built_predecessors_type &built_predecessors() __TBB_override { return my_built_predecessors; } + + virtual void internal_add_built_pred(buffer_operation *op) { + my_built_predecessors.add_edge(*(op->p)); + __TBB_store_with_release(op->status, internal::SUCCEEDED); + } + + virtual void internal_del_built_pred(buffer_operation *op) { + my_built_predecessors.delete_edge(*(op->p)); + __TBB_store_with_release(op->status, internal::SUCCEEDED); + } + + virtual void internal_succ_cnt(buffer_operation *op) { + op->cnt_val = my_successors.successor_count(); + __TBB_store_with_release(op->status, internal::SUCCEEDED); + } + + virtual void internal_pred_cnt(buffer_operation *op) { + op->cnt_val = my_built_predecessors.edge_count(); + __TBB_store_with_release(op->status, internal::SUCCEEDED); + } + + virtual void internal_copy_succs(buffer_operation *op) { + my_successors.copy_successors(*(op->svec)); + __TBB_store_with_release(op->status, internal::SUCCEEDED); + } + + virtual void internal_copy_preds(buffer_operation *op) { + my_built_predecessors.copy_edges(*(op->pvec)); + __TBB_store_with_release(op->status, internal::SUCCEEDED); + } + +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ + +private: + void order() {} + + bool is_item_valid() { + return this->my_item_valid(this->my_tail - 1); + } + + void try_put_and_add_task(task*& last_task) { + task *new_task = my_successors.try_put_task(this->back()); + if (new_task) { + // workaround for icc bug + graph& g = this->my_graph; + last_task = combine_tasks(g, last_task, new_task); + this->destroy_back(); + } + } + +protected: + //! 
Tries to forward valid items to successors + virtual void internal_forward_task(buffer_operation *op) { + internal_forward_task_impl(op, this); + } + + template + void internal_forward_task_impl(buffer_operation *op, derived_type* derived) { + __TBB_ASSERT(static_cast(derived) == this, "'this' is not a base class for derived"); + + if (this->my_reserved || !derived->is_item_valid()) { + __TBB_store_with_release(op->status, internal::FAILED); + this->forwarder_busy = false; + return; + } + // Try forwarding, giving each successor a chance + task * last_task = NULL; + size_type counter = my_successors.size(); + for (; counter > 0 && derived->is_item_valid(); --counter) + derived->try_put_and_add_task(last_task); + + op->ltask = last_task; // return task + if (last_task && !counter) { + __TBB_store_with_release(op->status, internal::SUCCEEDED); + } + else { + __TBB_store_with_release(op->status, internal::FAILED); + forwarder_busy = false; + } + } + + virtual bool internal_push(buffer_operation *op) { + this->push_back(*(op->elem)); + __TBB_store_with_release(op->status, internal::SUCCEEDED); + return true; + } + + virtual void internal_pop(buffer_operation *op) { + if(this->pop_back(*(op->elem))) { + __TBB_store_with_release(op->status, internal::SUCCEEDED); + } + else { + __TBB_store_with_release(op->status, internal::FAILED); + } + } + + virtual void internal_reserve(buffer_operation *op) { + if(this->reserve_front(*(op->elem))) { + __TBB_store_with_release(op->status, internal::SUCCEEDED); + } + else { + __TBB_store_with_release(op->status, internal::FAILED); + } + } + + virtual void internal_consume(buffer_operation *op) { + this->consume_front(); + __TBB_store_with_release(op->status, internal::SUCCEEDED); + } + + virtual void internal_release(buffer_operation *op) { + this->release_front(); + __TBB_store_with_release(op->status, internal::SUCCEEDED); + } + +public: + //! Constructor + __TBB_NOINLINE_SYM explicit buffer_node( graph &g ) + : graph_node(g), internal::reservable_item_buffer(), receiver(), + sender(), forwarder_busy(false) + { + my_successors.set_owner(this); + my_aggregator.initialize_handler(handler_type(this)); + tbb::internal::fgt_node( CODEPTR(), tbb::internal::FLOW_BUFFER_NODE, &this->my_graph, + static_cast *>(this), static_cast *>(this) ); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + buffer_node(const node_set& nodes) : buffer_node(nodes.graph_reference()) { + make_edges_in_order(nodes, *this); + } +#endif + + //! Copy constructor + __TBB_NOINLINE_SYM buffer_node( const buffer_node& src ) + : graph_node(src.my_graph), internal::reservable_item_buffer(), + receiver(), sender(), forwarder_busy(false) + { + my_successors.set_owner(this); + my_aggregator.initialize_handler(handler_type(this)); + tbb::internal::fgt_node( CODEPTR(), tbb::internal::FLOW_BUFFER_NODE, &this->my_graph, + static_cast *>(this), static_cast *>(this) ); + } + +#if TBB_PREVIEW_FLOW_GRAPH_TRACE + void set_name( const char *name ) __TBB_override { + tbb::internal::fgt_node_desc( this, name ); + } +#endif + + // + // message sender implementation + // + + //! Adds a new successor. + /** Adds successor r to the list of successors; may forward tasks. 
*/ + bool register_successor( successor_type &r ) __TBB_override { + buffer_operation op_data(reg_succ); + op_data.r = &r; + my_aggregator.execute(&op_data); + (void)enqueue_forwarding_task(op_data); + return true; + } + +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + void internal_add_built_successor( successor_type &r) __TBB_override { + buffer_operation op_data(add_blt_succ); + op_data.r = &r; + my_aggregator.execute(&op_data); + } + + void internal_delete_built_successor( successor_type &r) __TBB_override { + buffer_operation op_data(del_blt_succ); + op_data.r = &r; + my_aggregator.execute(&op_data); + } + + void internal_add_built_predecessor( predecessor_type &p) __TBB_override { + buffer_operation op_data(add_blt_pred); + op_data.p = &p; + my_aggregator.execute(&op_data); + } + + void internal_delete_built_predecessor( predecessor_type &p) __TBB_override { + buffer_operation op_data(del_blt_pred); + op_data.p = &p; + my_aggregator.execute(&op_data); + } + + size_t predecessor_count() __TBB_override { + buffer_operation op_data(blt_pred_cnt); + my_aggregator.execute(&op_data); + return op_data.cnt_val; + } + + size_t successor_count() __TBB_override { + buffer_operation op_data(blt_succ_cnt); + my_aggregator.execute(&op_data); + return op_data.cnt_val; + } + + void copy_predecessors( predecessor_list_type &v ) __TBB_override { + buffer_operation op_data(blt_pred_cpy); + op_data.pvec = &v; + my_aggregator.execute(&op_data); + } + + void copy_successors( successor_list_type &v ) __TBB_override { + buffer_operation op_data(blt_succ_cpy); + op_data.svec = &v; + my_aggregator.execute(&op_data); + } + +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ + + //! Removes a successor. + /** Removes successor r from the list of successors. + It also calls r.remove_predecessor(*this) to remove this node as a predecessor. */ + bool remove_successor( successor_type &r ) __TBB_override { + r.remove_predecessor(*this); + buffer_operation op_data(rem_succ); + op_data.r = &r; + my_aggregator.execute(&op_data); + // even though this operation does not cause a forward, if we are the handler, and + // a forward is scheduled, we may be the first to reach this point after the aggregator, + // and so should check for the task. + (void)enqueue_forwarding_task(op_data); + return true; + } + + //! Request an item from the buffer_node + /** true = v contains the returned item
+        false = no item has been returned */
+    bool try_get( T &v ) __TBB_override {
+        buffer_operation op_data(req_item);
+        op_data.elem = &v;
+        my_aggregator.execute(&op_data);
+        (void)enqueue_forwarding_task(op_data);
+        return (op_data.status==internal::SUCCEEDED);
+    }
+
+    //! Reserves an item.
+    /** false = no item can be reserved
+ true = an item is reserved */ + bool try_reserve( T &v ) __TBB_override { + buffer_operation op_data(res_item); + op_data.elem = &v; + my_aggregator.execute(&op_data); + (void)enqueue_forwarding_task(op_data); + return (op_data.status==internal::SUCCEEDED); + } + + //! Release a reserved item. + /** true = item has been released and so remains in sender */ + bool try_release() __TBB_override { + buffer_operation op_data(rel_res); + my_aggregator.execute(&op_data); + (void)enqueue_forwarding_task(op_data); + return true; + } + + //! Consumes a reserved item. + /** true = item is removed from sender and reservation removed */ + bool try_consume() __TBB_override { + buffer_operation op_data(con_res); + my_aggregator.execute(&op_data); + (void)enqueue_forwarding_task(op_data); + return true; + } + +protected: + + template< typename R, typename B > friend class run_and_put_task; + template friend class internal::broadcast_cache; + template friend class internal::round_robin_cache; + //! receive an item, return a task *if possible + task *try_put_task(const T &t) __TBB_override { + buffer_operation op_data(t, put_item); + my_aggregator.execute(&op_data); + task *ft = grab_forwarding_task(op_data); + // sequencer_nodes can return failure (if an item has been previously inserted) + // We have to spawn the returned task if our own operation fails. + + if(ft && op_data.status ==internal::FAILED) { + // we haven't succeeded queueing the item, but for some reason the + // call returned a task (if another request resulted in a successful + // forward this could happen.) Queue the task and reset the pointer. + internal::spawn_in_graph_arena(graph_reference(), *ft); ft = NULL; + } + else if(!ft && op_data.status ==internal::SUCCEEDED) { + ft = SUCCESSFULLY_ENQUEUED; + } + return ft; + } + + graph& graph_reference() const __TBB_override { + return my_graph; + } + + void reset_receiver(reset_flags /*f*/) __TBB_override { } + +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION +public: + void extract() __TBB_override { + my_built_predecessors.receiver_extract(*this); + my_successors.built_successors().sender_extract(*this); + } +#endif + +protected: + void reset_node( reset_flags f) __TBB_override { + internal::reservable_item_buffer::reset(); + // TODO: just clear structures + if (f&rf_clear_edges) { + my_successors.clear(); +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + my_built_predecessors.clear(); +#endif + } + forwarder_busy = false; + } +}; // buffer_node + +//! Forwards messages in FIFO order +template +class queue_node : public buffer_node { +#if !TBB_DEPRECATED_FLOW_NODE_ALLOCATOR + __TBB_STATIC_ASSERT( + (tbb::internal::is_same_type::value), + "Allocator template parameter for flow graph nodes is deprecated and will be removed. " + "Specify TBB_DEPRECATED_FLOW_NODE_ALLOCATOR to temporary enable the deprecated interface." 
+ ); +#endif +protected: + typedef buffer_node base_type; + typedef typename base_type::size_type size_type; + typedef typename base_type::buffer_operation queue_operation; + typedef queue_node class_type; + +private: + template friend class buffer_node; + + bool is_item_valid() { + return this->my_item_valid(this->my_head); + } + + void try_put_and_add_task(task*& last_task) { + task *new_task = this->my_successors.try_put_task(this->front()); + if (new_task) { + // workaround for icc bug + graph& graph_ref = this->graph_reference(); + last_task = combine_tasks(graph_ref, last_task, new_task); + this->destroy_front(); + } + } + +protected: + void internal_forward_task(queue_operation *op) __TBB_override { + this->internal_forward_task_impl(op, this); + } + + void internal_pop(queue_operation *op) __TBB_override { + if ( this->my_reserved || !this->my_item_valid(this->my_head)){ + __TBB_store_with_release(op->status, internal::FAILED); + } + else { + this->pop_front(*(op->elem)); + __TBB_store_with_release(op->status, internal::SUCCEEDED); + } + } + void internal_reserve(queue_operation *op) __TBB_override { + if (this->my_reserved || !this->my_item_valid(this->my_head)) { + __TBB_store_with_release(op->status, internal::FAILED); + } + else { + this->reserve_front(*(op->elem)); + __TBB_store_with_release(op->status, internal::SUCCEEDED); + } + } + void internal_consume(queue_operation *op) __TBB_override { + this->consume_front(); + __TBB_store_with_release(op->status, internal::SUCCEEDED); + } + +public: + typedef T input_type; + typedef T output_type; + typedef typename receiver::predecessor_type predecessor_type; + typedef typename sender::successor_type successor_type; + + //! Constructor + __TBB_NOINLINE_SYM explicit queue_node( graph &g ) : base_type(g) { + tbb::internal::fgt_node( CODEPTR(), tbb::internal::FLOW_QUEUE_NODE, &(this->my_graph), + static_cast *>(this), + static_cast *>(this) ); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + queue_node( const node_set& nodes) : queue_node(nodes.graph_reference()) { + make_edges_in_order(nodes, *this); + } +#endif + + //! Copy constructor + __TBB_NOINLINE_SYM queue_node( const queue_node& src) : base_type(src) { + tbb::internal::fgt_node( CODEPTR(), tbb::internal::FLOW_QUEUE_NODE, &(this->my_graph), + static_cast *>(this), + static_cast *>(this) ); + } + +#if TBB_PREVIEW_FLOW_GRAPH_TRACE + void set_name( const char *name ) __TBB_override { + tbb::internal::fgt_node_desc( this, name ); + } +#endif + +protected: + void reset_node( reset_flags f) __TBB_override { + base_type::reset_node(f); + } +}; // queue_node + +//! Forwards messages in sequence order +template< typename T, typename Allocator=__TBB_DEFAULT_NODE_ALLOCATOR(T) > +class sequencer_node : public queue_node { + internal::function_body< T, size_t > *my_sequencer; + // my_sequencer should be a benign function and must be callable + // from a parallel context. Does this mean it needn't be reset? +public: +#if !TBB_DEPRECATED_FLOW_NODE_ALLOCATOR + __TBB_STATIC_ASSERT( + (tbb::internal::is_same_type::value), + "Allocator template parameter for flow graph nodes is deprecated and will be removed. " + "Specify TBB_DEPRECATED_FLOW_NODE_ALLOCATOR to temporary enable the deprecated interface." + ); +#endif + typedef T input_type; + typedef T output_type; + typedef typename receiver::predecessor_type predecessor_type; + typedef typename sender::successor_type successor_type; + + //! 
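+    // Usage sketch (illustrative; assumes a message type that carries its own
+    // sequence index):
+    //
+    //     struct msg_t { size_t id; /* payload */ };
+    //     tbb::flow::graph g;
+    //     tbb::flow::sequencer_node<msg_t> reorder( g,
+    //         []( const msg_t& m ) -> size_t { return m.id; } );
+    //
+    // The sequencer functor must return, for each item, its 0-based position in the
+    // intended output order; items are then forwarded strictly in that order no matter
+    // in which order they arrive.
+
+    //!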
Constructor + template< typename Sequencer > + __TBB_NOINLINE_SYM sequencer_node( graph &g, const Sequencer& s ) : queue_node(g), + my_sequencer(new internal::function_body_leaf< T, size_t, Sequencer>(s) ) { + tbb::internal::fgt_node( CODEPTR(), tbb::internal::FLOW_SEQUENCER_NODE, &(this->my_graph), + static_cast *>(this), + static_cast *>(this) ); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + sequencer_node( const node_set& nodes, const Sequencer& s) + : sequencer_node(nodes.graph_reference(), s) { + make_edges_in_order(nodes, *this); + } +#endif + + //! Copy constructor + __TBB_NOINLINE_SYM sequencer_node( const sequencer_node& src ) : queue_node(src), + my_sequencer( src.my_sequencer->clone() ) { + tbb::internal::fgt_node( CODEPTR(), tbb::internal::FLOW_SEQUENCER_NODE, &(this->my_graph), + static_cast *>(this), + static_cast *>(this) ); + } + + //! Destructor + ~sequencer_node() { delete my_sequencer; } + +#if TBB_PREVIEW_FLOW_GRAPH_TRACE + void set_name( const char *name ) __TBB_override { + tbb::internal::fgt_node_desc( this, name ); + } +#endif + +protected: + typedef typename buffer_node::size_type size_type; + typedef typename buffer_node::buffer_operation sequencer_operation; + +private: + bool internal_push(sequencer_operation *op) __TBB_override { + size_type tag = (*my_sequencer)(*(op->elem)); +#if !TBB_DEPRECATED_SEQUENCER_DUPLICATES + if (tag < this->my_head) { + // have already emitted a message with this tag + __TBB_store_with_release(op->status, internal::FAILED); + return false; + } +#endif + // cannot modify this->my_tail now; the buffer would be inconsistent. + size_t new_tail = (tag+1 > this->my_tail) ? tag+1 : this->my_tail; + + if (this->size(new_tail) > this->capacity()) { + this->grow_my_array(this->size(new_tail)); + } + this->my_tail = new_tail; + + const internal::op_stat res = this->place_item(tag, *(op->elem)) ? internal::SUCCEEDED : internal::FAILED; + __TBB_store_with_release(op->status, res); + return res ==internal::SUCCEEDED; + } +}; // sequencer_node + +//! Forwards messages in priority order +template, typename Allocator=__TBB_DEFAULT_NODE_ALLOCATOR(T)> +class priority_queue_node : public buffer_node { +public: +#if !TBB_DEPRECATED_FLOW_NODE_ALLOCATOR + __TBB_STATIC_ASSERT( + (tbb::internal::is_same_type::value), + "Allocator template parameter for flow graph nodes is deprecated and will removed in the future. " + "To temporary enable the deprecated interface specify TBB_ENABLE_DEPRECATED_NODE_ALLOCATOR." + ); +#endif + typedef T input_type; + typedef T output_type; + typedef buffer_node base_type; + typedef priority_queue_node class_type; + typedef typename receiver::predecessor_type predecessor_type; + typedef typename sender::successor_type successor_type; + + //! Constructor + __TBB_NOINLINE_SYM explicit priority_queue_node( graph &g, const Compare& comp = Compare() ) + : buffer_node(g), compare(comp), mark(0) { + tbb::internal::fgt_node( CODEPTR(), tbb::internal::FLOW_PRIORITY_QUEUE_NODE, &(this->my_graph), + static_cast *>(this), + static_cast *>(this) ); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + priority_queue_node(const node_set& nodes, const Compare& comp = Compare()) + : priority_queue_node(nodes.graph_reference(), comp) { + make_edges_in_order(nodes, *this); + } +#endif + + //! 
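+    // Usage sketch (illustrative): with the default Compare the largest buffered value
+    // is forwarded first; supplying another comparator (from <functional>) changes that.
+    //
+    //     tbb::flow::graph g;
+    //     tbb::flow::priority_queue_node<int> pq( g );                          // largest first
+    //     tbb::flow::priority_queue_node<int, std::greater<int> > min_pq( g );  // smallest first
+
+    //!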
Copy constructor + __TBB_NOINLINE_SYM priority_queue_node( const priority_queue_node &src ) + : buffer_node(src), mark(0) + { + tbb::internal::fgt_node( CODEPTR(), tbb::internal::FLOW_PRIORITY_QUEUE_NODE, &(this->my_graph), + static_cast *>(this), + static_cast *>(this) ); + } + +#if TBB_PREVIEW_FLOW_GRAPH_TRACE + void set_name( const char *name ) __TBB_override { + tbb::internal::fgt_node_desc( this, name ); + } +#endif + +protected: + + void reset_node( reset_flags f) __TBB_override { + mark = 0; + base_type::reset_node(f); + } + + typedef typename buffer_node::size_type size_type; + typedef typename buffer_node::item_type item_type; + typedef typename buffer_node::buffer_operation prio_operation; + + //! Tries to forward valid items to successors + void internal_forward_task(prio_operation *op) __TBB_override { + this->internal_forward_task_impl(op, this); + } + + void handle_operations(prio_operation *op_list) __TBB_override { + this->handle_operations_impl(op_list, this); + } + + bool internal_push(prio_operation *op) __TBB_override { + prio_push(*(op->elem)); + __TBB_store_with_release(op->status, internal::SUCCEEDED); + return true; + } + + void internal_pop(prio_operation *op) __TBB_override { + // if empty or already reserved, don't pop + if ( this->my_reserved == true || this->my_tail == 0 ) { + __TBB_store_with_release(op->status, internal::FAILED); + return; + } + + *(op->elem) = prio(); + __TBB_store_with_release(op->status, internal::SUCCEEDED); + prio_pop(); + + } + + // pops the highest-priority item, saves copy + void internal_reserve(prio_operation *op) __TBB_override { + if (this->my_reserved == true || this->my_tail == 0) { + __TBB_store_with_release(op->status, internal::FAILED); + return; + } + this->my_reserved = true; + *(op->elem) = prio(); + reserved_item = *(op->elem); + __TBB_store_with_release(op->status, internal::SUCCEEDED); + prio_pop(); + } + + void internal_consume(prio_operation *op) __TBB_override { + __TBB_store_with_release(op->status, internal::SUCCEEDED); + this->my_reserved = false; + reserved_item = input_type(); + } + + void internal_release(prio_operation *op) __TBB_override { + __TBB_store_with_release(op->status, internal::SUCCEEDED); + prio_push(reserved_item); + this->my_reserved = false; + reserved_item = input_type(); + } + +private: + template friend class buffer_node; + + void order() { + if (mark < this->my_tail) heapify(); + __TBB_ASSERT(mark == this->my_tail, "mark unequal after heapify"); + } + + bool is_item_valid() { + return this->my_tail > 0; + } + + void try_put_and_add_task(task*& last_task) { + task * new_task = this->my_successors.try_put_task(this->prio()); + if (new_task) { + // workaround for icc bug + graph& graph_ref = this->graph_reference(); + last_task = combine_tasks(graph_ref, last_task, new_task); + prio_pop(); + } + } + +private: + Compare compare; + size_type mark; + + input_type reserved_item; + + // in case a reheap has not been done after a push, check if the mark item is higher than the 0'th item + bool prio_use_tail() { + __TBB_ASSERT(mark <= this->my_tail, "mark outside bounds before test"); + return mark < this->my_tail && compare(this->get_my_item(0), this->get_my_item(this->my_tail - 1)); + } + + // prio_push: checks that the item will fit, expand array if necessary, put at end + void prio_push(const T &src) { + if ( this->my_tail >= this->my_array_size ) + this->grow_my_array( this->my_tail + 1 ); + (void) this->place_item(this->my_tail, src); + ++(this->my_tail); + __TBB_ASSERT(mark < this->my_tail, 
"mark outside bounds after push"); + } + + // prio_pop: deletes highest priority item from the array, and if it is item + // 0, move last item to 0 and reheap. If end of array, just destroy and decrement tail + // and mark. Assumes the array has already been tested for emptiness; no failure. + void prio_pop() { + if (prio_use_tail()) { + // there are newly pushed elements; last one higher than top + // copy the data + this->destroy_item(this->my_tail-1); + --(this->my_tail); + __TBB_ASSERT(mark <= this->my_tail, "mark outside bounds after pop"); + return; + } + this->destroy_item(0); + if(this->my_tail > 1) { + // push the last element down heap + __TBB_ASSERT(this->my_item_valid(this->my_tail - 1), NULL); + this->move_item(0,this->my_tail - 1); + } + --(this->my_tail); + if(mark > this->my_tail) --mark; + if (this->my_tail > 1) // don't reheap for heap of size 1 + reheap(); + __TBB_ASSERT(mark <= this->my_tail, "mark outside bounds after pop"); + } + + const T& prio() { + return this->get_my_item(prio_use_tail() ? this->my_tail-1 : 0); + } + + // turn array into heap + void heapify() { + if(this->my_tail == 0) { + mark = 0; + return; + } + if (!mark) mark = 1; + for (; markmy_tail; ++mark) { // for each unheaped element + size_type cur_pos = mark; + input_type to_place; + this->fetch_item(mark,to_place); + do { // push to_place up the heap + size_type parent = (cur_pos-1)>>1; + if (!compare(this->get_my_item(parent), to_place)) + break; + this->move_item(cur_pos, parent); + cur_pos = parent; + } while( cur_pos ); + (void) this->place_item(cur_pos, to_place); + } + } + + // otherwise heapified array with new root element; rearrange to heap + void reheap() { + size_type cur_pos=0, child=1; + while (child < mark) { + size_type target = child; + if (child+1get_my_item(child), + this->get_my_item(child+1))) + ++target; + // target now has the higher priority child + if (compare(this->get_my_item(target), + this->get_my_item(cur_pos))) + break; + // swap + this->swap_items(cur_pos, target); + cur_pos = target; + child = (cur_pos<<1)+1; + } + } +}; // priority_queue_node + +} // interfaceX + +namespace interface11 { + +//! Forwards messages only if the threshold has not been reached +/** This node forwards items until its threshold is reached. + It contains no buffering. If the downstream node rejects, the + message is dropped. */ +template< typename T, typename DecrementType=continue_msg > +class limiter_node : public graph_node, public receiver< T >, public sender< T > { +public: + typedef T input_type; + typedef T output_type; + typedef typename receiver::predecessor_type predecessor_type; + typedef typename sender::successor_type successor_type; +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + typedef typename receiver::built_predecessors_type built_predecessors_type; + typedef typename sender::built_successors_type built_successors_type; + typedef typename receiver::predecessor_list_type predecessor_list_type; + typedef typename sender::successor_list_type successor_list_type; +#endif + //TODO: There is a lack of predefined types for its controlling "decrementer" port. It should be fixed later. 
+ +private: + size_t my_threshold; + size_t my_count; //number of successful puts + size_t my_tries; //number of active put attempts + internal::reservable_predecessor_cache< T, spin_mutex > my_predecessors; + spin_mutex my_mutex; + internal::broadcast_cache< T > my_successors; + __TBB_DEPRECATED_LIMITER_EXPR( int init_decrement_predecessors; ) + + friend class internal::forward_task_bypass< limiter_node >; + + // Let decrementer call decrement_counter() + friend class internal::decrementer< limiter_node, DecrementType >; + + bool check_conditions() { // always called under lock + return ( my_count + my_tries < my_threshold && !my_predecessors.empty() && !my_successors.empty() ); + } + + // only returns a valid task pointer or NULL, never SUCCESSFULLY_ENQUEUED + task *forward_task() { + input_type v; + task *rval = NULL; + bool reserved = false; + { + spin_mutex::scoped_lock lock(my_mutex); + if ( check_conditions() ) + ++my_tries; + else + return NULL; + } + + //SUCCESS + // if we can reserve and can put, we consume the reservation + // we increment the count and decrement the tries + if ( (my_predecessors.try_reserve(v)) == true ){ + reserved=true; + if ( (rval = my_successors.try_put_task(v)) != NULL ){ + { + spin_mutex::scoped_lock lock(my_mutex); + ++my_count; + --my_tries; + my_predecessors.try_consume(); + if ( check_conditions() ) { + if ( internal::is_graph_active(this->my_graph) ) { + task *rtask = new ( task::allocate_additional_child_of( *(this->my_graph.root_task()) ) ) + internal::forward_task_bypass< limiter_node >( *this ); + internal::spawn_in_graph_arena(graph_reference(), *rtask); + } + } + } + return rval; + } + } + //FAILURE + //if we can't reserve, we decrement the tries + //if we can reserve but can't put, we decrement the tries and release the reservation + { + spin_mutex::scoped_lock lock(my_mutex); + --my_tries; + if (reserved) my_predecessors.try_release(); + if ( check_conditions() ) { + if ( internal::is_graph_active(this->my_graph) ) { + task *rtask = new ( task::allocate_additional_child_of( *(this->my_graph.root_task()) ) ) + internal::forward_task_bypass< limiter_node >( *this ); + __TBB_ASSERT(!rval, "Have two tasks to handle"); + return rtask; + } + } + return rval; + } + } + + void forward() { + __TBB_ASSERT(false, "Should never be called"); + return; + } + + task* decrement_counter( long long delta ) { + { + spin_mutex::scoped_lock lock(my_mutex); + if( delta > 0 && size_t(delta) > my_count ) + my_count = 0; + else if( delta < 0 && size_t(delta) > my_threshold - my_count ) + my_count = my_threshold; + else + my_count -= size_t(delta); // absolute value of delta is sufficiently small + } + return forward_task(); + } + + void initialize() { + my_predecessors.set_owner(this); + my_successors.set_owner(this); + decrement.set_owner(this); + tbb::internal::fgt_node( + CODEPTR(), tbb::internal::FLOW_LIMITER_NODE, &this->my_graph, + static_cast *>(this), static_cast *>(&decrement), + static_cast *>(this) + ); + } +public: + //! The internal receiver< DecrementType > that decrements the count + internal::decrementer< limiter_node, DecrementType > decrement; + +#if TBB_DEPRECATED_LIMITER_NODE_CONSTRUCTOR + __TBB_STATIC_ASSERT( (tbb::internal::is_same_type::value), + "Deprecated interface of the limiter node can be used only in conjunction " + "with continue_msg as the type of DecrementType template parameter." ); +#endif // Check for incompatible interface + + //! 
Constructor + limiter_node(graph &g, + __TBB_DEPRECATED_LIMITER_ARG2(size_t threshold, int num_decrement_predecessors=0)) + : graph_node(g), my_threshold(threshold), my_count(0), + __TBB_DEPRECATED_LIMITER_ARG4( + my_tries(0), decrement(), + init_decrement_predecessors(num_decrement_predecessors), + decrement(num_decrement_predecessors)) { + initialize(); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + limiter_node(const node_set& nodes, size_t threshold) + : limiter_node(nodes.graph_reference(), threshold) { + make_edges_in_order(nodes, *this); + } +#endif + + //! Copy constructor + limiter_node( const limiter_node& src ) : + graph_node(src.my_graph), receiver(), sender(), + my_threshold(src.my_threshold), my_count(0), + __TBB_DEPRECATED_LIMITER_ARG4( + my_tries(0), decrement(), + init_decrement_predecessors(src.init_decrement_predecessors), + decrement(src.init_decrement_predecessors)) { + initialize(); + } + +#if TBB_PREVIEW_FLOW_GRAPH_TRACE + void set_name( const char *name ) __TBB_override { + tbb::internal::fgt_node_desc( this, name ); + } +#endif + + //! Replace the current successor with this new successor + bool register_successor( successor_type &r ) __TBB_override { + spin_mutex::scoped_lock lock(my_mutex); + bool was_empty = my_successors.empty(); + my_successors.register_successor(r); + //spawn a forward task if this is the only successor + if ( was_empty && !my_predecessors.empty() && my_count + my_tries < my_threshold ) { + if ( internal::is_graph_active(this->my_graph) ) { + task* task = new ( task::allocate_additional_child_of( *(this->my_graph.root_task()) ) ) + internal::forward_task_bypass < limiter_node >( *this ); + internal::spawn_in_graph_arena(graph_reference(), *task); + } + } + return true; + } + + //! Removes a successor from this node + /** r.remove_predecessor(*this) is also called. */ + bool remove_successor( successor_type &r ) __TBB_override { + r.remove_predecessor(*this); + my_successors.remove_successor(r); + return true; + } + +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + built_successors_type &built_successors() __TBB_override { return my_successors.built_successors(); } + built_predecessors_type &built_predecessors() __TBB_override { return my_predecessors.built_predecessors(); } + + void internal_add_built_successor(successor_type &src) __TBB_override { + my_successors.internal_add_built_successor(src); + } + + void internal_delete_built_successor(successor_type &src) __TBB_override { + my_successors.internal_delete_built_successor(src); + } + + size_t successor_count() __TBB_override { return my_successors.successor_count(); } + + void copy_successors(successor_list_type &v) __TBB_override { + my_successors.copy_successors(v); + } + + void internal_add_built_predecessor(predecessor_type &src) __TBB_override { + my_predecessors.internal_add_built_predecessor(src); + } + + void internal_delete_built_predecessor(predecessor_type &src) __TBB_override { + my_predecessors.internal_delete_built_predecessor(src); + } + + size_t predecessor_count() __TBB_override { return my_predecessors.predecessor_count(); } + + void copy_predecessors(predecessor_list_type &v) __TBB_override { + my_predecessors.copy_predecessors(v); + } + + void extract() __TBB_override { + my_count = 0; + my_successors.built_successors().sender_extract(*this); + my_predecessors.built_predecessors().receiver_extract(*this); + decrement.built_predecessors().receiver_extract(decrement); + } +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ + + //! 
Adds src to the list of cached predecessors. + bool register_predecessor( predecessor_type &src ) __TBB_override { + spin_mutex::scoped_lock lock(my_mutex); + my_predecessors.add( src ); + if ( my_count + my_tries < my_threshold && !my_successors.empty() && internal::is_graph_active(this->my_graph) ) { + task* task = new ( task::allocate_additional_child_of( *(this->my_graph.root_task()) ) ) + internal::forward_task_bypass < limiter_node >( *this ); + internal::spawn_in_graph_arena(graph_reference(), *task); + } + return true; + } + + //! Removes src from the list of cached predecessors. + bool remove_predecessor( predecessor_type &src ) __TBB_override { + my_predecessors.remove( src ); + return true; + } + +protected: + + template< typename R, typename B > friend class run_and_put_task; + template friend class internal::broadcast_cache; + template friend class internal::round_robin_cache; + //! Puts an item to this receiver + task *try_put_task( const T &t ) __TBB_override { + { + spin_mutex::scoped_lock lock(my_mutex); + if ( my_count + my_tries >= my_threshold ) + return NULL; + else + ++my_tries; + } + + task * rtask = my_successors.try_put_task(t); + + if ( !rtask ) { // try_put_task failed. + spin_mutex::scoped_lock lock(my_mutex); + --my_tries; + if (check_conditions() && internal::is_graph_active(this->my_graph)) { + rtask = new ( task::allocate_additional_child_of( *(this->my_graph.root_task()) ) ) + internal::forward_task_bypass< limiter_node >( *this ); + } + } + else { + spin_mutex::scoped_lock lock(my_mutex); + ++my_count; + --my_tries; + } + return rtask; + } + + graph& graph_reference() const __TBB_override { return my_graph; } + + void reset_receiver(reset_flags /*f*/) __TBB_override { + __TBB_ASSERT(false,NULL); // should never be called + } + + void reset_node( reset_flags f) __TBB_override { + my_count = 0; + if(f & rf_clear_edges) { + my_predecessors.clear(); + my_successors.clear(); + } + else + { + my_predecessors.reset( ); + } + decrement.reset_receiver(f); + } +}; // limiter_node + +#include "internal/_flow_graph_join_impl.h" + +using internal::reserving_port; +using internal::queueing_port; +using internal::key_matching_port; +using internal::input_port; +using internal::tag_value; + +template class join_node; + +template +class join_node: public internal::unfolded_join_node::value, reserving_port, OutputTuple, reserving> { +private: + static const int N = tbb::flow::tuple_size::value; + typedef typename internal::unfolded_join_node unfolded_type; +public: + typedef OutputTuple output_type; + typedef typename unfolded_type::input_ports_type input_ports_type; + __TBB_NOINLINE_SYM explicit join_node(graph &g) : unfolded_type(g) { + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_JOIN_NODE_RESERVING, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + __TBB_NOINLINE_SYM join_node(const node_set& nodes, reserving = reserving()) : join_node(nodes.graph_reference()) { + make_edges_in_order(nodes, *this); + } +#endif + + __TBB_NOINLINE_SYM join_node(const join_node &other) : unfolded_type(other) { + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_JOIN_NODE_RESERVING, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } + +#if TBB_PREVIEW_FLOW_GRAPH_TRACE + void set_name( const char *name ) __TBB_override { + tbb::internal::fgt_node_desc( this, name ); + } +#endif + +}; + +template +class join_node: public 
internal::unfolded_join_node::value, queueing_port, OutputTuple, queueing> { +private: + static const int N = tbb::flow::tuple_size::value; + typedef typename internal::unfolded_join_node unfolded_type; +public: + typedef OutputTuple output_type; + typedef typename unfolded_type::input_ports_type input_ports_type; + __TBB_NOINLINE_SYM explicit join_node(graph &g) : unfolded_type(g) { + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_JOIN_NODE_QUEUEING, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + __TBB_NOINLINE_SYM join_node(const node_set& nodes, queueing = queueing()) : join_node(nodes.graph_reference()) { + make_edges_in_order(nodes, *this); + } +#endif + + __TBB_NOINLINE_SYM join_node(const join_node &other) : unfolded_type(other) { + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_JOIN_NODE_QUEUEING, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } + +#if TBB_PREVIEW_FLOW_GRAPH_TRACE + void set_name( const char *name ) __TBB_override { + tbb::internal::fgt_node_desc( this, name ); + } +#endif + +}; + +// template for key_matching join_node +// tag_matching join_node is a specialization of key_matching, and is source-compatible. +template +class join_node > : public internal::unfolded_join_node::value, + key_matching_port, OutputTuple, key_matching > { +private: + static const int N = tbb::flow::tuple_size::value; + typedef typename internal::unfolded_join_node > unfolded_type; +public: + typedef OutputTuple output_type; + typedef typename unfolded_type::input_ports_type input_ports_type; + +#if __TBB_PREVIEW_MESSAGE_BASED_KEY_MATCHING + join_node(graph &g) : unfolded_type(g) {} + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + join_node(const node_set& nodes, key_matching = key_matching()) + : join_node(nodes.graph_reference()) { + make_edges_in_order(nodes, *this); + } +#endif + +#endif /* __TBB_PREVIEW_MESSAGE_BASED_KEY_MATCHING */ + + template + __TBB_NOINLINE_SYM join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1) : unfolded_type(g, b0, b1) { + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } + template + __TBB_NOINLINE_SYM join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2) : unfolded_type(g, b0, b1, b2) { + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } + template + __TBB_NOINLINE_SYM join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2, __TBB_B3 b3) : unfolded_type(g, b0, b1, b2, b3) { + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } + template + __TBB_NOINLINE_SYM join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2, __TBB_B3 b3, __TBB_B4 b4) : + unfolded_type(g, b0, b1, b2, b3, b4) { + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } +#if __TBB_VARIADIC_MAX >= 6 + template + __TBB_NOINLINE_SYM join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2, __TBB_B3 b3, __TBB_B4 b4, __TBB_B5 b5) : + unfolded_type(g, b0, b1, b2, b3, b4, b5) { + 
tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } +#endif +#if __TBB_VARIADIC_MAX >= 7 + template + __TBB_NOINLINE_SYM join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2, __TBB_B3 b3, __TBB_B4 b4, __TBB_B5 b5, __TBB_B6 b6) : + unfolded_type(g, b0, b1, b2, b3, b4, b5, b6) { + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } +#endif +#if __TBB_VARIADIC_MAX >= 8 + template + __TBB_NOINLINE_SYM join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2, __TBB_B3 b3, __TBB_B4 b4, __TBB_B5 b5, __TBB_B6 b6, + __TBB_B7 b7) : unfolded_type(g, b0, b1, b2, b3, b4, b5, b6, b7) { + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } +#endif +#if __TBB_VARIADIC_MAX >= 9 + template + __TBB_NOINLINE_SYM join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2, __TBB_B3 b3, __TBB_B4 b4, __TBB_B5 b5, __TBB_B6 b6, + __TBB_B7 b7, __TBB_B8 b8) : unfolded_type(g, b0, b1, b2, b3, b4, b5, b6, b7, b8) { + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } +#endif +#if __TBB_VARIADIC_MAX >= 10 + template + __TBB_NOINLINE_SYM join_node(graph &g, __TBB_B0 b0, __TBB_B1 b1, __TBB_B2 b2, __TBB_B3 b3, __TBB_B4 b4, __TBB_B5 b5, __TBB_B6 b6, + __TBB_B7 b7, __TBB_B8 b8, __TBB_B9 b9) : unfolded_type(g, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9) { + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } +#endif + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + __TBB_NOINLINE_SYM join_node(const node_set& nodes, Bodies... bodies) + : join_node(nodes.graph_reference(), bodies...) 
{ + make_edges_in_order(nodes, *this); + } +#endif + + __TBB_NOINLINE_SYM join_node(const join_node &other) : unfolded_type(other) { + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_JOIN_NODE_TAG_MATCHING, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } + +#if TBB_PREVIEW_FLOW_GRAPH_TRACE + void set_name( const char *name ) __TBB_override { + tbb::internal::fgt_node_desc( this, name ); + } +#endif + +}; + +// indexer node +#include "internal/_flow_graph_indexer_impl.h" + +// TODO: Implement interface with variadic template or tuple +template class indexer_node; + +//indexer node specializations +template +class indexer_node : public internal::unfolded_indexer_node > { +private: + static const int N = 1; +public: + typedef tuple InputTuple; + typedef typename internal::tagged_msg output_type; + typedef typename internal::unfolded_indexer_node unfolded_type; + __TBB_NOINLINE_SYM indexer_node(graph& g) : unfolded_type(g) { + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + indexer_node(const node_set& nodes) : indexer_node(nodes.graph_reference()) { + make_edges_in_order(nodes, *this); + } +#endif + + // Copy constructor + __TBB_NOINLINE_SYM indexer_node( const indexer_node& other ) : unfolded_type(other) { + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } + +#if TBB_PREVIEW_FLOW_GRAPH_TRACE + void set_name( const char *name ) __TBB_override { + tbb::internal::fgt_node_desc( this, name ); + } +#endif +}; + +template +class indexer_node : public internal::unfolded_indexer_node > { +private: + static const int N = 2; +public: + typedef tuple InputTuple; + typedef typename internal::tagged_msg output_type; + typedef typename internal::unfolded_indexer_node unfolded_type; + __TBB_NOINLINE_SYM indexer_node(graph& g) : unfolded_type(g) { + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + indexer_node(const node_set& nodes) : indexer_node(nodes.graph_reference()) { + make_edges_in_order(nodes, *this); + } +#endif + + // Copy constructor + __TBB_NOINLINE_SYM indexer_node( const indexer_node& other ) : unfolded_type(other) { + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } + +#if TBB_PREVIEW_FLOW_GRAPH_TRACE + void set_name( const char *name ) __TBB_override { + tbb::internal::fgt_node_desc( this, name ); + } +#endif +}; + +template +class indexer_node : public internal::unfolded_indexer_node > { +private: + static const int N = 3; +public: + typedef tuple InputTuple; + typedef typename internal::tagged_msg output_type; + typedef typename internal::unfolded_indexer_node unfolded_type; + __TBB_NOINLINE_SYM indexer_node(graph& g) : unfolded_type(g) { + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + indexer_node(const node_set& nodes) : indexer_node(nodes.graph_reference()) { 
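+ // Create an edge between this indexer_node and every node in the supplied set, in the order the nodes were listed.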
+ make_edges_in_order(nodes, *this); + } +#endif + + // Copy constructor + __TBB_NOINLINE_SYM indexer_node( const indexer_node& other ) : unfolded_type(other) { + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } + +#if TBB_PREVIEW_FLOW_GRAPH_TRACE + void set_name( const char *name ) __TBB_override { + tbb::internal::fgt_node_desc( this, name ); + } +#endif +}; + +template +class indexer_node : public internal::unfolded_indexer_node > { +private: + static const int N = 4; +public: + typedef tuple InputTuple; + typedef typename internal::tagged_msg output_type; + typedef typename internal::unfolded_indexer_node unfolded_type; + __TBB_NOINLINE_SYM indexer_node(graph& g) : unfolded_type(g) { + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + indexer_node(const node_set& nodes) : indexer_node(nodes.graph_reference()) { + make_edges_in_order(nodes, *this); + } +#endif + + // Copy constructor + __TBB_NOINLINE_SYM indexer_node( const indexer_node& other ) : unfolded_type(other) { + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } + +#if TBB_PREVIEW_FLOW_GRAPH_TRACE + void set_name( const char *name ) __TBB_override { + tbb::internal::fgt_node_desc( this, name ); + } +#endif +}; + +template +class indexer_node : public internal::unfolded_indexer_node > { +private: + static const int N = 5; +public: + typedef tuple InputTuple; + typedef typename internal::tagged_msg output_type; + typedef typename internal::unfolded_indexer_node unfolded_type; + __TBB_NOINLINE_SYM indexer_node(graph& g) : unfolded_type(g) { + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + indexer_node(const node_set& nodes) : indexer_node(nodes.graph_reference()) { + make_edges_in_order(nodes, *this); + } +#endif + + // Copy constructor + __TBB_NOINLINE_SYM indexer_node( const indexer_node& other ) : unfolded_type(other) { + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } + +#if TBB_PREVIEW_FLOW_GRAPH_TRACE + void set_name( const char *name ) __TBB_override { + tbb::internal::fgt_node_desc( this, name ); + } +#endif +}; + +#if __TBB_VARIADIC_MAX >= 6 +template +class indexer_node : public internal::unfolded_indexer_node > { +private: + static const int N = 6; +public: + typedef tuple InputTuple; + typedef typename internal::tagged_msg output_type; + typedef typename internal::unfolded_indexer_node unfolded_type; + __TBB_NOINLINE_SYM indexer_node(graph& g) : unfolded_type(g) { + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + indexer_node(const node_set& nodes) : indexer_node(nodes.graph_reference()) { + make_edges_in_order(nodes, *this); + } +#endif + + // Copy constructor + __TBB_NOINLINE_SYM indexer_node( const indexer_node& other ) : 
unfolded_type(other) { + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } + +#if TBB_PREVIEW_FLOW_GRAPH_TRACE + void set_name( const char *name ) __TBB_override { + tbb::internal::fgt_node_desc( this, name ); + } +#endif +}; +#endif //variadic max 6 + +#if __TBB_VARIADIC_MAX >= 7 +template +class indexer_node : public internal::unfolded_indexer_node > { +private: + static const int N = 7; +public: + typedef tuple InputTuple; + typedef typename internal::tagged_msg output_type; + typedef typename internal::unfolded_indexer_node unfolded_type; + __TBB_NOINLINE_SYM indexer_node(graph& g) : unfolded_type(g) { + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + indexer_node(const node_set& nodes) : indexer_node(nodes.graph_reference()) { + make_edges_in_order(nodes, *this); + } +#endif + + // Copy constructor + __TBB_NOINLINE_SYM indexer_node( const indexer_node& other ) : unfolded_type(other) { + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } + +#if TBB_PREVIEW_FLOW_GRAPH_TRACE + void set_name( const char *name ) __TBB_override { + tbb::internal::fgt_node_desc( this, name ); + } +#endif +}; +#endif //variadic max 7 + +#if __TBB_VARIADIC_MAX >= 8 +template +class indexer_node : public internal::unfolded_indexer_node > { +private: + static const int N = 8; +public: + typedef tuple InputTuple; + typedef typename internal::tagged_msg output_type; + typedef typename internal::unfolded_indexer_node unfolded_type; + indexer_node(graph& g) : unfolded_type(g) { + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + indexer_node(const node_set& nodes) : indexer_node(nodes.graph_reference()) { + make_edges_in_order(nodes, *this); + } +#endif + + // Copy constructor + indexer_node( const indexer_node& other ) : unfolded_type(other) { + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } + +#if TBB_PREVIEW_FLOW_GRAPH_TRACE + void set_name( const char *name ) __TBB_override { + tbb::internal::fgt_node_desc( this, name ); + } +#endif +}; +#endif //variadic max 8 + +#if __TBB_VARIADIC_MAX >= 9 +template +class indexer_node : public internal::unfolded_indexer_node > { +private: + static const int N = 9; +public: + typedef tuple InputTuple; + typedef typename internal::tagged_msg output_type; + typedef typename internal::unfolded_indexer_node unfolded_type; + __TBB_NOINLINE_SYM indexer_node(graph& g) : unfolded_type(g) { + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + indexer_node(const node_set& nodes) : indexer_node(nodes.graph_reference()) { + make_edges_in_order(nodes, *this); + } +#endif + + // Copy constructor + __TBB_NOINLINE_SYM indexer_node( const indexer_node& other ) : unfolded_type(other) { + 
tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } + +#if TBB_PREVIEW_FLOW_GRAPH_TRACE + void set_name( const char *name ) __TBB_override { + tbb::internal::fgt_node_desc( this, name ); + } +#endif +}; +#endif //variadic max 9 + +#if __TBB_VARIADIC_MAX >= 10 +template +class indexer_node/*default*/ : public internal::unfolded_indexer_node > { +private: + static const int N = 10; +public: + typedef tuple InputTuple; + typedef typename internal::tagged_msg output_type; + typedef typename internal::unfolded_indexer_node unfolded_type; + __TBB_NOINLINE_SYM indexer_node(graph& g) : unfolded_type(g) { + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + indexer_node(const node_set& nodes) : indexer_node(nodes.graph_reference()) { + make_edges_in_order(nodes, *this); + } +#endif + + // Copy constructor + __TBB_NOINLINE_SYM indexer_node( const indexer_node& other ) : unfolded_type(other) { + tbb::internal::fgt_multiinput_node( CODEPTR(), tbb::internal::FLOW_INDEXER_NODE, &this->my_graph, + this->input_ports(), static_cast< sender< output_type > *>(this) ); + } + +#if TBB_PREVIEW_FLOW_GRAPH_TRACE + void set_name( const char *name ) __TBB_override { + tbb::internal::fgt_node_desc( this, name ); + } +#endif +}; +#endif //variadic max 10 + +#if __TBB_PREVIEW_ASYNC_MSG +inline void internal_make_edge( internal::untyped_sender &p, internal::untyped_receiver &s ) { +#else +template< typename T > +inline void internal_make_edge( sender &p, receiver &s ) { +#endif +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + s.internal_add_built_predecessor(p); + p.internal_add_built_successor(s); +#endif + p.register_successor( s ); + tbb::internal::fgt_make_edge( &p, &s ); +} + +//! Makes an edge between a single predecessor and a single successor +template< typename T > +inline void make_edge( sender &p, receiver &s ) { + internal_make_edge( p, s ); +} + +#if __TBB_PREVIEW_ASYNC_MSG +template< typename TS, typename TR, + typename = typename tbb::internal::enable_if::value + || tbb::internal::is_same_type::value>::type> +inline void make_edge( TS &p, TR &s ) { + internal_make_edge( p, s ); +} + +template< typename T > +inline void make_edge( sender &p, receiver &s ) { + internal_make_edge( p, s ); +} + +template< typename T > +inline void make_edge( sender &p, receiver &s ) { + internal_make_edge( p, s ); +} + +#endif // __TBB_PREVIEW_ASYNC_MSG + +#if __TBB_FLOW_GRAPH_CPP11_FEATURES +//Makes an edge from port 0 of a multi-output predecessor to port 0 of a multi-input successor. +template< typename T, typename V, + typename = typename T::output_ports_type, typename = typename V::input_ports_type > +inline void make_edge( T& output, V& input) { + make_edge(get<0>(output.output_ports()), get<0>(input.input_ports())); +} + +//Makes an edge from port 0 of a multi-output predecessor to a receiver. +template< typename T, typename R, + typename = typename T::output_ports_type > +inline void make_edge( T& output, receiver& input) { + make_edge(get<0>(output.output_ports()), input); +} + +//Makes an edge from a sender to port 0 of a multi-input successor. 
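+// For example, with buffer_node<int> b(g) and join_node< tuple<int,int> > j(g) (illustrative names), make_edge(b, j) is equivalent to make_edge(b, input_port<0>(j)).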
+template< typename S, typename V, + typename = typename V::input_ports_type > +inline void make_edge( sender& output, V& input) { + make_edge(output, get<0>(input.input_ports())); +} +#endif + +#if __TBB_PREVIEW_ASYNC_MSG +inline void internal_remove_edge( internal::untyped_sender &p, internal::untyped_receiver &s ) { +#else +template< typename T > +inline void internal_remove_edge( sender &p, receiver &s ) { +#endif + p.remove_successor( s ); +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + // TODO: should we try to remove p from the predecessor list of s, in case the edge is reversed? + p.internal_delete_built_successor(s); + s.internal_delete_built_predecessor(p); +#endif + tbb::internal::fgt_remove_edge( &p, &s ); +} + +//! Removes an edge between a single predecessor and a single successor +template< typename T > +inline void remove_edge( sender &p, receiver &s ) { + internal_remove_edge( p, s ); +} + +#if __TBB_PREVIEW_ASYNC_MSG +template< typename TS, typename TR, + typename = typename tbb::internal::enable_if::value + || tbb::internal::is_same_type::value>::type> +inline void remove_edge( TS &p, TR &s ) { + internal_remove_edge( p, s ); +} + +template< typename T > +inline void remove_edge( sender &p, receiver &s ) { + internal_remove_edge( p, s ); +} + +template< typename T > +inline void remove_edge( sender &p, receiver &s ) { + internal_remove_edge( p, s ); +} +#endif // __TBB_PREVIEW_ASYNC_MSG + +#if __TBB_FLOW_GRAPH_CPP11_FEATURES +//Removes an edge between port 0 of a multi-output predecessor and port 0 of a multi-input successor. +template< typename T, typename V, + typename = typename T::output_ports_type, typename = typename V::input_ports_type > +inline void remove_edge( T& output, V& input) { + remove_edge(get<0>(output.output_ports()), get<0>(input.input_ports())); +} + +//Removes an edge between port 0 of a multi-output predecessor and a receiver. +template< typename T, typename R, + typename = typename T::output_ports_type > +inline void remove_edge( T& output, receiver& input) { + remove_edge(get<0>(output.output_ports()), input); +} +//Removes an edge between a sender and port 0 of a multi-input successor. +template< typename S, typename V, + typename = typename V::input_ports_type > +inline void remove_edge( sender& output, V& input) { + remove_edge(output, get<0>(input.input_ports())); +} +#endif + +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION +template +template< typename S > +void internal::edge_container::sender_extract( S &s ) { + edge_list_type e = built_edges; + for ( typename edge_list_type::iterator i = e.begin(); i != e.end(); ++i ) { + remove_edge(s, **i); + } +} + +template +template< typename R > +void internal::edge_container::receiver_extract( R &r ) { + edge_list_type e = built_edges; + for ( typename edge_list_type::iterator i = e.begin(); i != e.end(); ++i ) { + remove_edge(**i, r); + } +} +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ + +//! Returns a copy of the body from a function or continue node +template< typename Body, typename Node > +Body copy_body( Node &n ) { + return n.template copy_function_object(); +} + +#if __TBB_FLOW_GRAPH_CPP11_FEATURES + +//composite_node +template< typename InputTuple, typename OutputTuple > class composite_node; + +template< typename... InputTypes, typename... OutputTypes> +class composite_node , tbb::flow::tuple > : public graph_node{ + +public: + typedef tbb::flow::tuple< receiver&... > input_ports_type; + typedef tbb::flow::tuple< sender&... 
> output_ports_type; + +private: + std::unique_ptr my_input_ports; + std::unique_ptr my_output_ports; + + static const size_t NUM_INPUTS = sizeof...(InputTypes); + static const size_t NUM_OUTPUTS = sizeof...(OutputTypes); + +protected: + void reset_node(reset_flags) __TBB_override {} + +public: +#if TBB_PREVIEW_FLOW_GRAPH_TRACE + composite_node( graph &g, const char *type_name = "composite_node" ) : graph_node(g) { + tbb::internal::fgt_multiinput_multioutput_node( CODEPTR(), tbb::internal::FLOW_COMPOSITE_NODE, this, &this->my_graph ); + tbb::internal::fgt_multiinput_multioutput_node_desc( this, type_name ); + } +#else + composite_node( graph &g ) : graph_node(g) { + tbb::internal::fgt_multiinput_multioutput_node( CODEPTR(), tbb::internal::FLOW_COMPOSITE_NODE, this, &this->my_graph ); + } +#endif + + template + void set_external_ports(T1&& input_ports_tuple, T2&& output_ports_tuple) { + __TBB_STATIC_ASSERT(NUM_INPUTS == tbb::flow::tuple_size::value, "number of arguments does not match number of input ports"); + __TBB_STATIC_ASSERT(NUM_OUTPUTS == tbb::flow::tuple_size::value, "number of arguments does not match number of output ports"); + my_input_ports = tbb::internal::make_unique(std::forward(input_ports_tuple)); + my_output_ports = tbb::internal::make_unique(std::forward(output_ports_tuple)); + + tbb::internal::fgt_internal_input_alias_helper::alias_port( this, input_ports_tuple); + tbb::internal::fgt_internal_output_alias_helper::alias_port( this, output_ports_tuple); + } + + template< typename... NodeTypes > + void add_visible_nodes(const NodeTypes&... n) { internal::add_nodes_impl(this, true, n...); } + + template< typename... NodeTypes > + void add_nodes(const NodeTypes&... n) { internal::add_nodes_impl(this, false, n...); } + +#if TBB_PREVIEW_FLOW_GRAPH_TRACE + void set_name( const char *name ) __TBB_override { + tbb::internal::fgt_multiinput_multioutput_node_desc( this, name ); + } +#endif + + input_ports_type& input_ports() { + __TBB_ASSERT(my_input_ports, "input ports not set, call set_external_ports to set input ports"); + return *my_input_ports; + } + + output_ports_type& output_ports() { + __TBB_ASSERT(my_output_ports, "output ports not set, call set_external_ports to set output ports"); + return *my_output_ports; + } + +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + void extract() __TBB_override { + __TBB_ASSERT(false, "Current composite_node implementation does not support extract"); + } +#endif +}; // class composite_node + +//composite_node with only input ports +template< typename... InputTypes> +class composite_node , tbb::flow::tuple<> > : public graph_node { +public: + typedef tbb::flow::tuple< receiver&... 
> input_ports_type; + +private: + std::unique_ptr my_input_ports; + static const size_t NUM_INPUTS = sizeof...(InputTypes); + +protected: + void reset_node(reset_flags) __TBB_override {} + +public: +#if TBB_PREVIEW_FLOW_GRAPH_TRACE + composite_node( graph &g, const char *type_name = "composite_node") : graph_node(g) { + tbb::internal::fgt_composite( CODEPTR(), this, &g ); + tbb::internal::fgt_multiinput_multioutput_node_desc( this, type_name ); + } +#else + composite_node( graph &g ) : graph_node(g) { + tbb::internal::fgt_composite( CODEPTR(), this, &g ); + } +#endif + + template + void set_external_ports(T&& input_ports_tuple) { + __TBB_STATIC_ASSERT(NUM_INPUTS == tbb::flow::tuple_size::value, "number of arguments does not match number of input ports"); + + my_input_ports = tbb::internal::make_unique(std::forward(input_ports_tuple)); + + tbb::internal::fgt_internal_input_alias_helper::alias_port( this, std::forward(input_ports_tuple)); + } + + template< typename... NodeTypes > + void add_visible_nodes(const NodeTypes&... n) { internal::add_nodes_impl(this, true, n...); } + + template< typename... NodeTypes > + void add_nodes( const NodeTypes&... n) { internal::add_nodes_impl(this, false, n...); } + +#if TBB_PREVIEW_FLOW_GRAPH_TRACE + void set_name( const char *name ) __TBB_override { + tbb::internal::fgt_multiinput_multioutput_node_desc( this, name ); + } +#endif + + input_ports_type& input_ports() { + __TBB_ASSERT(my_input_ports, "input ports not set, call set_external_ports to set input ports"); + return *my_input_ports; + } + +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + void extract() __TBB_override { + __TBB_ASSERT(false, "Current composite_node implementation does not support extract"); + } +#endif + +}; // class composite_node + +//composite_nodes with only output_ports +template +class composite_node , tbb::flow::tuple > : public graph_node { +public: + typedef tbb::flow::tuple< sender&... > output_ports_type; + +private: + std::unique_ptr my_output_ports; + static const size_t NUM_OUTPUTS = sizeof...(OutputTypes); + +protected: + void reset_node(reset_flags) __TBB_override {} + +public: +#if TBB_PREVIEW_FLOW_GRAPH_TRACE + __TBB_NOINLINE_SYM composite_node( graph &g, const char *type_name = "composite_node") : graph_node(g) { + tbb::internal::fgt_composite( CODEPTR(), this, &g ); + tbb::internal::fgt_multiinput_multioutput_node_desc( this, type_name ); + } +#else + __TBB_NOINLINE_SYM composite_node( graph &g ) : graph_node(g) { + tbb::internal::fgt_composite( CODEPTR(), this, &g ); + } +#endif + + template + void set_external_ports(T&& output_ports_tuple) { + __TBB_STATIC_ASSERT(NUM_OUTPUTS == tbb::flow::tuple_size::value, "number of arguments does not match number of output ports"); + + my_output_ports = tbb::internal::make_unique(std::forward(output_ports_tuple)); + + tbb::internal::fgt_internal_output_alias_helper::alias_port( this, std::forward(output_ports_tuple)); + } + + template + void add_visible_nodes(const NodeTypes&... n) { internal::add_nodes_impl(this, true, n...); } + + template + void add_nodes(const NodeTypes&... 
n) { internal::add_nodes_impl(this, false, n...); } + +#if TBB_PREVIEW_FLOW_GRAPH_TRACE + void set_name( const char *name ) __TBB_override { + tbb::internal::fgt_multiinput_multioutput_node_desc( this, name ); + } +#endif + + output_ports_type& output_ports() { + __TBB_ASSERT(my_output_ports, "output ports not set, call set_external_ports to set output ports"); + return *my_output_ports; + } + +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + void extract() __TBB_override { + __TBB_ASSERT(false, "Current composite_node implementation does not support extract"); + } +#endif + +}; // class composite_node + +#endif // __TBB_FLOW_GRAPH_CPP11_FEATURES + +namespace internal { + +template +class async_body_base: tbb::internal::no_assign { +public: + typedef Gateway gateway_type; + + async_body_base(gateway_type *gateway): my_gateway(gateway) { } + void set_gateway(gateway_type *gateway) { + my_gateway = gateway; + } + +protected: + gateway_type *my_gateway; +}; + +template +class async_body: public async_body_base { +public: + typedef async_body_base base_type; + typedef Gateway gateway_type; + + async_body(const Body &body, gateway_type *gateway) + : base_type(gateway), my_body(body) { } + + void operator()( const Input &v, Ports & ) { + my_body(v, *this->my_gateway); + } + + Body get_body() { return my_body; } + +private: + Body my_body; +}; + +} // namespace internal + +} // namespace interfaceX +namespace interface11 { + +//! Implements async node +template < typename Input, typename Output, + typename Policy = queueing_lightweight, + typename Allocator=__TBB_DEFAULT_NODE_ALLOCATOR(Input) > +class async_node + : public multifunction_node< Input, tuple< Output >, Policy, Allocator >, public sender< Output > +{ +#if !TBB_DEPRECATED_FLOW_NODE_ALLOCATOR + __TBB_STATIC_ASSERT( + (tbb::internal::is_same_type::value), + "Allocator template parameter for flow graph nodes is deprecated and will removed in the future. " + "To temporary enable the deprecated interface specify TBB_ENABLE_DEPRECATED_NODE_ALLOCATOR." + ); +#endif + typedef multifunction_node< Input, tuple< Output >, Policy, Allocator > base_type; + typedef typename internal::multifunction_input mfn_input_type; + +public: + typedef Input input_type; + typedef Output output_type; + typedef receiver receiver_type; + typedef typename receiver_type::predecessor_type predecessor_type; + typedef typename sender::successor_type successor_type; + typedef receiver_gateway gateway_type; + typedef internal::async_body_base async_body_base_type; + typedef typename base_type::output_ports_type output_ports_type; + +private: + struct try_put_functor { + typedef internal::multifunction_output output_port_type; + output_port_type *port; + // TODO: pass value by copy since we do not want to block asynchronous thread. + const Output *value; + bool result; + try_put_functor(output_port_type &p, const Output &v) : port(&p), value(&v), result(false) { } + void operator()() { + result = port->try_put(*value); + } + }; + + class receiver_gateway_impl: public receiver_gateway { + public: + receiver_gateway_impl(async_node* node): my_node(node) {} + void reserve_wait() __TBB_override { + tbb::internal::fgt_async_reserve(static_cast(my_node), &my_node->my_graph); + my_node->my_graph.reserve_wait(); + } + + void release_wait() __TBB_override { + my_node->my_graph.release_wait(); + tbb::internal::fgt_async_commit(static_cast(my_node), &my_node->my_graph); + } + + //! 
Implements gateway_type::try_put for an external activity to submit a message to FG + bool try_put(const Output &i) __TBB_override { + return my_node->try_put_impl(i); + } + + private: + async_node* my_node; + } my_gateway; + + //The substitute of 'this' for member construction, to prevent compiler warnings + async_node* self() { return this; } + + //! Implements gateway_type::try_put for an external activity to submit a message to FG + bool try_put_impl(const Output &i) { + internal::multifunction_output &port_0 = internal::output_port<0>(*this); + internal::broadcast_cache& port_successors = port_0.successors(); + tbb::internal::fgt_async_try_put_begin(this, &port_0); + task_list tasks; + bool is_at_least_one_put_successful = port_successors.gather_successful_try_puts(i, tasks); + __TBB_ASSERT( is_at_least_one_put_successful || tasks.empty(), + "Return status is inconsistent with the method operation." ); + + while( !tasks.empty() ) { + internal::enqueue_in_graph_arena(this->my_graph, tasks.pop_front()); + } + tbb::internal::fgt_async_try_put_end(this, &port_0); + return is_at_least_one_put_successful; + } + +public: + template + __TBB_NOINLINE_SYM async_node( + graph &g, size_t concurrency, +#if __TBB_CPP11_PRESENT + Body body, __TBB_FLOW_GRAPH_PRIORITY_ARG1(Policy = Policy(), node_priority_t priority = tbb::flow::internal::no_priority) +#else + __TBB_FLOW_GRAPH_PRIORITY_ARG1(Body body, node_priority_t priority = tbb::flow::internal::no_priority) +#endif + ) : base_type( + g, concurrency, + internal::async_body + (body, &my_gateway) __TBB_FLOW_GRAPH_PRIORITY_ARG0(priority) ), my_gateway(self()) { + tbb::internal::fgt_multioutput_node_with_body<1>( + CODEPTR(), tbb::internal::FLOW_ASYNC_NODE, + &this->my_graph, static_cast *>(this), + this->output_ports(), this->my_body + ); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES && __TBB_CPP11_PRESENT + template + __TBB_NOINLINE_SYM async_node(graph& g, size_t concurrency, Body body, node_priority_t priority) + : async_node(g, concurrency, body, Policy(), priority) {} +#endif // __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + __TBB_NOINLINE_SYM async_node( + const node_set& nodes, size_t concurrency, Body body, + __TBB_FLOW_GRAPH_PRIORITY_ARG1(Policy = Policy(), node_priority_t priority = tbb::flow::internal::no_priority) + ) : async_node(nodes.graph_reference(), concurrency, __TBB_FLOW_GRAPH_PRIORITY_ARG1(body, priority)) { + make_edges_in_order(nodes, *this); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES + template + __TBB_NOINLINE_SYM async_node(const node_set& nodes, size_t concurrency, Body body, node_priority_t priority) + : async_node(nodes, concurrency, body, Policy(), priority) {} +#endif // __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES +#endif // __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + + __TBB_NOINLINE_SYM async_node( const async_node &other ) : base_type(other), sender(), my_gateway(self()) { + static_cast(this->my_body->get_body_ptr())->set_gateway(&my_gateway); + static_cast(this->my_init_body->get_body_ptr())->set_gateway(&my_gateway); + + tbb::internal::fgt_multioutput_node_with_body<1>( CODEPTR(), tbb::internal::FLOW_ASYNC_NODE, + &this->my_graph, static_cast *>(this), + this->output_ports(), this->my_body ); + } + + gateway_type& gateway() { + return my_gateway; + } + +#if TBB_PREVIEW_FLOW_GRAPH_TRACE + void set_name( const char *name ) __TBB_override { + tbb::internal::fgt_multioutput_node_desc( this, name ); + } +#endif + + // Define sender< Output > + + //! 
Add a new successor to this node + bool register_successor( successor_type &r ) __TBB_override { + return internal::output_port<0>(*this).register_successor(r); + } + + //! Removes a successor from this node + bool remove_successor( successor_type &r ) __TBB_override { + return internal::output_port<0>(*this).remove_successor(r); + } + + template + Body copy_function_object() { + typedef internal::multifunction_body mfn_body_type; + typedef internal::async_body async_body_type; + mfn_body_type &body_ref = *this->my_body; + async_body_type ab = *static_cast(dynamic_cast< internal::multifunction_body_leaf & >(body_ref).get_body_ptr()); + return ab.get_body(); + } + +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + //! interface to record edges for traversal & deletion + typedef typename internal::edge_container built_successors_type; + typedef typename built_successors_type::edge_list_type successor_list_type; + built_successors_type &built_successors() __TBB_override { + return internal::output_port<0>(*this).built_successors(); + } + + void internal_add_built_successor( successor_type &r ) __TBB_override { + internal::output_port<0>(*this).internal_add_built_successor(r); + } + + void internal_delete_built_successor( successor_type &r ) __TBB_override { + internal::output_port<0>(*this).internal_delete_built_successor(r); + } + + void copy_successors( successor_list_type &l ) __TBB_override { + internal::output_port<0>(*this).copy_successors(l); + } + + size_t successor_count() __TBB_override { + return internal::output_port<0>(*this).successor_count(); + } +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ + +protected: + + void reset_node( reset_flags f) __TBB_override { + base_type::reset_node(f); + } +}; + +#if __TBB_PREVIEW_STREAMING_NODE +#include "internal/_flow_graph_streaming_node.h" +#endif // __TBB_PREVIEW_STREAMING_NODE + +#include "internal/_flow_graph_node_set_impl.h" + +template< typename T > +class overwrite_node : public graph_node, public receiver, public sender { +public: + typedef T input_type; + typedef T output_type; + typedef typename receiver::predecessor_type predecessor_type; + typedef typename sender::successor_type successor_type; +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + typedef typename receiver::built_predecessors_type built_predecessors_type; + typedef typename sender::built_successors_type built_successors_type; + typedef typename receiver::predecessor_list_type predecessor_list_type; + typedef typename sender::successor_list_type successor_list_type; +#endif + + __TBB_NOINLINE_SYM explicit overwrite_node(graph &g) : graph_node(g), my_buffer_is_valid(false) { + my_successors.set_owner( this ); + tbb::internal::fgt_node( CODEPTR(), tbb::internal::FLOW_OVERWRITE_NODE, &this->my_graph, + static_cast *>(this), static_cast *>(this) ); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + overwrite_node(const node_set& nodes) : overwrite_node(nodes.graph_reference()) { + make_edges_in_order(nodes, *this); + } +#endif + + //! 
Copy constructor; doesn't take anything from src; default won't work + __TBB_NOINLINE_SYM overwrite_node( const overwrite_node& src ) : + graph_node(src.my_graph), receiver(), sender(), my_buffer_is_valid(false) + { + my_successors.set_owner( this ); + tbb::internal::fgt_node( CODEPTR(), tbb::internal::FLOW_OVERWRITE_NODE, &this->my_graph, + static_cast *>(this), static_cast *>(this) ); + } + + ~overwrite_node() {} + +#if TBB_PREVIEW_FLOW_GRAPH_TRACE + void set_name( const char *name ) __TBB_override { + tbb::internal::fgt_node_desc( this, name ); + } +#endif + + bool register_successor( successor_type &s ) __TBB_override { + spin_mutex::scoped_lock l( my_mutex ); + if (my_buffer_is_valid && internal::is_graph_active( my_graph )) { + // We have a valid value that must be forwarded immediately. + bool ret = s.try_put( my_buffer ); + if ( ret ) { + // We add the successor that accepted our put + my_successors.register_successor( s ); + } else { + // In case of reservation a race between the moment of reservation and register_successor can appear, + // because failed reserve does not mean that register_successor is not ready to put a message immediately. + // We have some sort of infinite loop: reserving node tries to set pull state for the edge, + // but overwrite_node tries to return push state back. That is why we have to break this loop with task creation. + task *rtask = new ( task::allocate_additional_child_of( *( my_graph.root_task() ) ) ) + register_predecessor_task( *this, s ); + internal::spawn_in_graph_arena( my_graph, *rtask ); + } + } else { + // No valid value yet, just add as successor + my_successors.register_successor( s ); + } + return true; + } + + bool remove_successor( successor_type &s ) __TBB_override { + spin_mutex::scoped_lock l( my_mutex ); + my_successors.remove_successor(s); + return true; + } + +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + built_predecessors_type &built_predecessors() __TBB_override { return my_built_predecessors; } + built_successors_type &built_successors() __TBB_override { return my_successors.built_successors(); } + + void internal_add_built_successor( successor_type &s) __TBB_override { + spin_mutex::scoped_lock l( my_mutex ); + my_successors.internal_add_built_successor(s); + } + + void internal_delete_built_successor( successor_type &s) __TBB_override { + spin_mutex::scoped_lock l( my_mutex ); + my_successors.internal_delete_built_successor(s); + } + + size_t successor_count() __TBB_override { + spin_mutex::scoped_lock l( my_mutex ); + return my_successors.successor_count(); + } + + void copy_successors(successor_list_type &v) __TBB_override { + spin_mutex::scoped_lock l( my_mutex ); + my_successors.copy_successors(v); + } + + void internal_add_built_predecessor( predecessor_type &p) __TBB_override { + spin_mutex::scoped_lock l( my_mutex ); + my_built_predecessors.add_edge(p); + } + + void internal_delete_built_predecessor( predecessor_type &p) __TBB_override { + spin_mutex::scoped_lock l( my_mutex ); + my_built_predecessors.delete_edge(p); + } + + size_t predecessor_count() __TBB_override { + spin_mutex::scoped_lock l( my_mutex ); + return my_built_predecessors.edge_count(); + } + + void copy_predecessors( predecessor_list_type &v ) __TBB_override { + spin_mutex::scoped_lock l( my_mutex ); + my_built_predecessors.copy_edges(v); + } + + void extract() __TBB_override { + my_buffer_is_valid = false; + built_successors().sender_extract(*this); + built_predecessors().receiver_extract(*this); + } + +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ 
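+
+ // Usage sketch (illustrative only; the node and variable names below are hypothetical).
+ // An overwrite_node keeps the most recently received value, broadcasts it to its
+ // successors, and hands a copy to every later try_get() call until clear() is invoked:
+ //
+ //     tbb::flow::graph g;
+ //     tbb::flow::overwrite_node<int> last(g);
+ //     last.try_put(42);            // buffer becomes valid and holds 42
+ //     int x = 0;
+ //     bool ok = last.try_get(x);   // ok == true, x == 42
+ //     last.clear();                // buffer is invalid again; try_get() now returns false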
+ + bool try_get( input_type &v ) __TBB_override { + spin_mutex::scoped_lock l( my_mutex ); + if ( my_buffer_is_valid ) { + v = my_buffer; + return true; + } + return false; + } + + //! Reserves an item + bool try_reserve( T &v ) __TBB_override { + return try_get(v); + } + + //! Releases the reserved item + bool try_release() __TBB_override { return true; } + + //! Consumes the reserved item + bool try_consume() __TBB_override { return true; } + + bool is_valid() { + spin_mutex::scoped_lock l( my_mutex ); + return my_buffer_is_valid; + } + + void clear() { + spin_mutex::scoped_lock l( my_mutex ); + my_buffer_is_valid = false; + } + +protected: + + template< typename R, typename B > friend class run_and_put_task; + template friend class internal::broadcast_cache; + template friend class internal::round_robin_cache; + task * try_put_task( const input_type &v ) __TBB_override { + spin_mutex::scoped_lock l( my_mutex ); + return try_put_task_impl(v); + } + + task * try_put_task_impl(const input_type &v) { + my_buffer = v; + my_buffer_is_valid = true; + task * rtask = my_successors.try_put_task(v); + if (!rtask) rtask = SUCCESSFULLY_ENQUEUED; + return rtask; + } + + graph& graph_reference() const __TBB_override { + return my_graph; + } + + //! Breaks an infinite loop between the node reservation and register_successor call + struct register_predecessor_task : public graph_task { + + register_predecessor_task(predecessor_type& owner, successor_type& succ) : + o(owner), s(succ) {}; + + tbb::task* execute() __TBB_override { + if (!s.register_predecessor(o)) { + o.register_successor(s); + } + return NULL; + } + + predecessor_type& o; + successor_type& s; + }; + + spin_mutex my_mutex; + internal::broadcast_cache< input_type, null_rw_mutex > my_successors; +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + internal::edge_container my_built_predecessors; +#endif + input_type my_buffer; + bool my_buffer_is_valid; + void reset_receiver(reset_flags /*f*/) __TBB_override {} + + void reset_node( reset_flags f) __TBB_override { + my_buffer_is_valid = false; + if (f&rf_clear_edges) { + my_successors.clear(); + } + } +}; // overwrite_node + +template< typename T > +class write_once_node : public overwrite_node { +public: + typedef T input_type; + typedef T output_type; + typedef overwrite_node base_type; + typedef typename receiver::predecessor_type predecessor_type; + typedef typename sender::successor_type successor_type; + + //! Constructor + __TBB_NOINLINE_SYM explicit write_once_node(graph& g) : base_type(g) { + tbb::internal::fgt_node( CODEPTR(), tbb::internal::FLOW_WRITE_ONCE_NODE, &(this->my_graph), + static_cast *>(this), + static_cast *>(this) ); + } + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + template + write_once_node(const node_set& nodes) : write_once_node(nodes.graph_reference()) { + make_edges_in_order(nodes, *this); + } +#endif + + //! 
Copy constructor: call base class copy constructor + __TBB_NOINLINE_SYM write_once_node( const write_once_node& src ) : base_type(src) { + tbb::internal::fgt_node( CODEPTR(), tbb::internal::FLOW_WRITE_ONCE_NODE, &(this->my_graph), + static_cast *>(this), + static_cast *>(this) ); + } + +#if TBB_PREVIEW_FLOW_GRAPH_TRACE + void set_name( const char *name ) __TBB_override { + tbb::internal::fgt_node_desc( this, name ); + } +#endif + +protected: + template< typename R, typename B > friend class run_and_put_task; + template friend class internal::broadcast_cache; + template friend class internal::round_robin_cache; + task *try_put_task( const T &v ) __TBB_override { + spin_mutex::scoped_lock l( this->my_mutex ); + return this->my_buffer_is_valid ? NULL : this->try_put_task_impl(v); + } +}; + +} // interfaceX + + using interface11::reset_flags; + using interface11::rf_reset_protocol; + using interface11::rf_reset_bodies; + using interface11::rf_clear_edges; + + using interface11::graph; + using interface11::graph_node; + using interface11::continue_msg; + using interface11::source_node; + using interface11::input_node; + using interface11::function_node; + using interface11::multifunction_node; + using interface11::split_node; + using interface11::internal::output_port; + using interface11::indexer_node; + using interface11::internal::tagged_msg; + using interface11::internal::cast_to; + using interface11::internal::is_a; + using interface11::continue_node; + using interface11::overwrite_node; + using interface11::write_once_node; + using interface11::broadcast_node; + using interface11::buffer_node; + using interface11::queue_node; + using interface11::sequencer_node; + using interface11::priority_queue_node; + using interface11::limiter_node; + using namespace interface11::internal::graph_policy_namespace; + using interface11::join_node; + using interface11::input_port; + using interface11::copy_body; + using interface11::make_edge; + using interface11::remove_edge; + using interface11::internal::tag_value; +#if __TBB_FLOW_GRAPH_CPP11_FEATURES + using interface11::composite_node; +#endif + using interface11::async_node; +#if __TBB_PREVIEW_ASYNC_MSG + using interface11::async_msg; +#endif +#if __TBB_PREVIEW_STREAMING_NODE + using interface11::port_ref; + using interface11::streaming_node; +#endif // __TBB_PREVIEW_STREAMING_NODE +#if __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES + using internal::node_priority_t; + using internal::no_priority; +#endif + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + using interface11::internal::follows; + using interface11::internal::precedes; + using interface11::internal::make_node_set; + using interface11::internal::make_edges; +#endif + +} // flow +} // tbb + +// Include deduction guides for node classes +#include "internal/_flow_graph_nodes_deduction.h" + +#undef __TBB_PFG_RESET_ARG +#undef __TBB_COMMA +#undef __TBB_DEFAULT_NODE_ALLOCATOR + +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_flow_graph_H_include_area + +#if TBB_USE_THREADING_TOOLS && TBB_PREVIEW_FLOW_GRAPH_TRACE && ( __linux__ || __APPLE__ ) + #undef __TBB_NOINLINE_SYM +#endif + +#endif // __TBB_flow_graph_H diff --git a/ohos/arm64-v8a/include/tbb/flow_graph_abstractions.h b/ohos/arm64-v8a/include/tbb/flow_graph_abstractions.h new file mode 100644 index 00000000..f8ac239c --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/flow_graph_abstractions.h @@ -0,0 +1,53 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not 
use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+*/
+
+#ifndef __TBB_flow_graph_abstractions_H
+#define __TBB_flow_graph_abstractions_H
+
+namespace tbb {
+namespace flow {
+namespace interface11 {
+
+//! Pure virtual template classes that define interfaces for async communication
+class graph_proxy {
+public:
+    //! Inform a graph that messages may come from outside, to prevent premature graph completion
+    virtual void reserve_wait() = 0;
+
+    //! Inform a graph that a previous call to reserve_wait is no longer in effect
+    virtual void release_wait() = 0;
+
+    virtual ~graph_proxy() {}
+};
+
+template <typename Input>
+class receiver_gateway : public graph_proxy {
+public:
+    //! Type of inputting data into FG.
+    typedef Input input_type;
+
+    //! Submit signal from an asynchronous activity to FG.
+    virtual bool try_put(const input_type&) = 0;
+};
+
+} //interfaceX
+
+using interface11::graph_proxy;
+using interface11::receiver_gateway;
+
+} //flow
+} //tbb
+#endif
diff --git a/ohos/arm64-v8a/include/tbb/flow_graph_opencl_node.h b/ohos/arm64-v8a/include/tbb/flow_graph_opencl_node.h
new file mode 100644
index 00000000..e6670db6
--- /dev/null
+++ b/ohos/arm64-v8a/include/tbb/flow_graph_opencl_node.h
@@ -0,0 +1,1504 @@
+/*
+    Copyright (c) 2005-2020 Intel Corporation
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+*/
+
+#include "internal/_deprecated_header_message_guard.h"
+
+#if !defined(__TBB_show_deprecation_message_flow_graph_opencl_node_H) && defined(__TBB_show_deprecated_header_message)
+#define __TBB_show_deprecation_message_flow_graph_opencl_node_H
+#pragma message("TBB Warning: tbb/flow_graph_opencl_node.h is deprecated. 
For details, please see Deprecated Features appendix in the TBB reference manual.") +#endif + +#if defined(__TBB_show_deprecated_header_message) +#undef __TBB_show_deprecated_header_message +#endif + +#ifndef __TBB_flow_graph_opencl_node_H +#define __TBB_flow_graph_opencl_node_H + +#define __TBB_flow_graph_opencl_node_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + +#include "tbb/tbb_config.h" +#if __TBB_PREVIEW_OPENCL_NODE + +#include "flow_graph.h" + +#include +#include +#include +#include +#include +#include +#include + +#ifdef __APPLE__ +#include +#else +#include +#endif + +namespace tbb { +namespace flow { + +namespace interface11 { + +template +class opencl_factory; + +namespace opencl_info { +class default_opencl_factory; +} + +template +class opencl_program; + +inline void enforce_cl_retcode(cl_int err, std::string msg) { + if (err != CL_SUCCESS) { + std::cerr << msg << "; error code: " << err << std::endl; + throw msg; + } +} + +template +T event_info(cl_event e, cl_event_info i) { + T res; + enforce_cl_retcode(clGetEventInfo(e, i, sizeof(res), &res, NULL), "Failed to get OpenCL event information"); + return res; +} + +template +T device_info(cl_device_id d, cl_device_info i) { + T res; + enforce_cl_retcode(clGetDeviceInfo(d, i, sizeof(res), &res, NULL), "Failed to get OpenCL device information"); + return res; +} + +template <> +inline std::string device_info(cl_device_id d, cl_device_info i) { + size_t required; + enforce_cl_retcode(clGetDeviceInfo(d, i, 0, NULL, &required), "Failed to get OpenCL device information"); + + char *buff = (char*)alloca(required); + enforce_cl_retcode(clGetDeviceInfo(d, i, required, buff, NULL), "Failed to get OpenCL device information"); + + return buff; +} + +template +T platform_info(cl_platform_id p, cl_platform_info i) { + T res; + enforce_cl_retcode(clGetPlatformInfo(p, i, sizeof(res), &res, NULL), "Failed to get OpenCL platform information"); + return res; +} + +template <> +inline std::string platform_info(cl_platform_id p, cl_platform_info i) { + size_t required; + enforce_cl_retcode(clGetPlatformInfo(p, i, 0, NULL, &required), "Failed to get OpenCL platform information"); + + char *buff = (char*)alloca(required); + enforce_cl_retcode(clGetPlatformInfo(p, i, required, buff, NULL), "Failed to get OpenCL platform information"); + + return buff; +} + + +class __TBB_DEPRECATED_IN_VERBOSE_MODE opencl_device { +public: + typedef size_t device_id_type; + enum : device_id_type { + unknown = device_id_type( -2 ), + host = device_id_type( -1 ) + }; + + opencl_device() : my_device_id( unknown ), my_cl_device_id( NULL ), my_cl_command_queue( NULL ) {} + + opencl_device( cl_device_id d_id ) : my_device_id( unknown ), my_cl_device_id( d_id ), my_cl_command_queue( NULL ) {} + + opencl_device( cl_device_id cl_d_id, device_id_type device_id ) : my_device_id( device_id ), my_cl_device_id( cl_d_id ), my_cl_command_queue( NULL ) {} + + std::string platform_profile() const { + return platform_info( platform_id(), CL_PLATFORM_PROFILE ); + } + std::string platform_version() const { + return platform_info( platform_id(), CL_PLATFORM_VERSION ); + } + std::string platform_name() const { + return platform_info( platform_id(), CL_PLATFORM_NAME ); + } + std::string platform_vendor() const { + return platform_info( platform_id(), CL_PLATFORM_VENDOR ); + } + std::string platform_extensions() const { + return platform_info( platform_id(), CL_PLATFORM_EXTENSIONS ); + } + + template + void info( cl_device_info i, T &t ) const { + t = device_info( 
my_cl_device_id, i ); + } + std::string version() const { + // The version string format: OpenCL + return device_info( my_cl_device_id, CL_DEVICE_VERSION ); + } + int major_version() const { + int major; + std::sscanf( version().c_str(), "OpenCL %d", &major ); + return major; + } + int minor_version() const { + int major, minor; + std::sscanf( version().c_str(), "OpenCL %d.%d", &major, &minor ); + return minor; + } + bool out_of_order_exec_mode_on_host_present() const { +#if CL_VERSION_2_0 + if ( major_version() >= 2 ) + return (device_info( my_cl_device_id, CL_DEVICE_QUEUE_ON_HOST_PROPERTIES ) & CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE) != 0; + else +#endif /* CL_VERSION_2_0 */ + return (device_info( my_cl_device_id, CL_DEVICE_QUEUE_PROPERTIES ) & CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE) != 0; + } + bool out_of_order_exec_mode_on_device_present() const { +#if CL_VERSION_2_0 + if ( major_version() >= 2 ) + return (device_info( my_cl_device_id, CL_DEVICE_QUEUE_ON_DEVICE_PROPERTIES ) & CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE) != 0; + else +#endif /* CL_VERSION_2_0 */ + return false; + } + std::array max_work_item_sizes() const { + return device_info>( my_cl_device_id, CL_DEVICE_MAX_WORK_ITEM_SIZES ); + } + size_t max_work_group_size() const { + return device_info( my_cl_device_id, CL_DEVICE_MAX_WORK_GROUP_SIZE ); + } + bool built_in_kernel_available( const std::string& k ) const { + const std::string semi = ";"; + // Added semicolumns to force an exact match (to avoid a partial match, e.g. "add" is partly matched with "madd"). + return (semi + built_in_kernels() + semi).find( semi + k + semi ) != std::string::npos; + } + std::string built_in_kernels() const { + return device_info( my_cl_device_id, CL_DEVICE_BUILT_IN_KERNELS ); + } + std::string name() const { + return device_info( my_cl_device_id, CL_DEVICE_NAME ); + } + cl_bool available() const { + return device_info( my_cl_device_id, CL_DEVICE_AVAILABLE ); + } + cl_bool compiler_available() const { + return device_info( my_cl_device_id, CL_DEVICE_COMPILER_AVAILABLE ); + } + cl_bool linker_available() const { + return device_info( my_cl_device_id, CL_DEVICE_LINKER_AVAILABLE ); + } + bool extension_available( const std::string &ext ) const { + const std::string space = " "; + // Added space to force an exact match (to avoid a partial match, e.g. "ext" is partly matched with "ext2"). 
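+ // For example, extension_available("cl_khr_fp64") (an illustrative argument) reports whether the device advertises double-precision floating point support.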
+ return (space + extensions() + space).find( space + ext + space ) != std::string::npos; + } + std::string extensions() const { + return device_info( my_cl_device_id, CL_DEVICE_EXTENSIONS ); + } + + cl_device_type type() const { + return device_info( my_cl_device_id, CL_DEVICE_TYPE ); + } + + std::string vendor() const { + return device_info( my_cl_device_id, CL_DEVICE_VENDOR ); + } + + cl_uint address_bits() const { + return device_info( my_cl_device_id, CL_DEVICE_ADDRESS_BITS ); + } + + cl_device_id device_id() const { + return my_cl_device_id; + } + + cl_command_queue command_queue() const { + return my_cl_command_queue; + } + + void set_command_queue( cl_command_queue cmd_queue ) { + my_cl_command_queue = cmd_queue; + } + + cl_platform_id platform_id() const { + return device_info( my_cl_device_id, CL_DEVICE_PLATFORM ); + } + +private: + + device_id_type my_device_id; + cl_device_id my_cl_device_id; + cl_command_queue my_cl_command_queue; + + friend bool operator==(opencl_device d1, opencl_device d2) { return d1.my_cl_device_id == d2.my_cl_device_id; } + + template + friend class opencl_factory; + template + friend class opencl_memory; + template + friend class opencl_program; + +#if TBB_USE_ASSERT + template + friend class opencl_buffer; +#endif +}; + +class __TBB_DEPRECATED_IN_VERBOSE_MODE opencl_device_list { + typedef std::vector container_type; +public: + typedef container_type::iterator iterator; + typedef container_type::const_iterator const_iterator; + typedef container_type::size_type size_type; + + opencl_device_list() {} + opencl_device_list( std::initializer_list il ) : my_container( il ) {} + + void add( opencl_device d ) { my_container.push_back( d ); } + size_type size() const { return my_container.size(); } + bool empty() const { return my_container.empty(); } + iterator begin() { return my_container.begin(); } + iterator end() { return my_container.end(); } + const_iterator begin() const { return my_container.begin(); } + const_iterator end() const { return my_container.end(); } + const_iterator cbegin() const { return my_container.cbegin(); } + const_iterator cend() const { return my_container.cend(); } + +private: + container_type my_container; +}; + +namespace internal { + +// Retrieve all OpenCL devices from machine +inline opencl_device_list find_available_devices() { + opencl_device_list opencl_devices; + + cl_uint num_platforms; + enforce_cl_retcode(clGetPlatformIDs(0, NULL, &num_platforms), "clGetPlatformIDs failed"); + + std::vector platforms(num_platforms); + enforce_cl_retcode(clGetPlatformIDs(num_platforms, platforms.data(), NULL), "clGetPlatformIDs failed"); + + cl_uint num_devices; + std::vector::iterator platforms_it = platforms.begin(); + cl_uint num_all_devices = 0; + while (platforms_it != platforms.end()) { + cl_int err = clGetDeviceIDs(*platforms_it, CL_DEVICE_TYPE_ALL, 0, NULL, &num_devices); + if (err == CL_DEVICE_NOT_FOUND) { + platforms_it = platforms.erase(platforms_it); + } + else { + enforce_cl_retcode(err, "clGetDeviceIDs failed"); + num_all_devices += num_devices; + ++platforms_it; + } + } + + std::vector devices(num_all_devices); + std::vector::iterator devices_it = devices.begin(); + for (auto p = platforms.begin(); p != platforms.end(); ++p) { + enforce_cl_retcode(clGetDeviceIDs((*p), CL_DEVICE_TYPE_ALL, (cl_uint)std::distance(devices_it, devices.end()), &*devices_it, &num_devices), "clGetDeviceIDs failed"); + devices_it += num_devices; + } + + for (auto d = devices.begin(); d != devices.end(); ++d) { + 
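+        // Wrap each collected cl_device_id in an opencl_device and add it to the list that is returned to the caller.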
opencl_devices.add(opencl_device((*d))); + } + + return opencl_devices; +} + +} // namespace internal + +// TODO: consider this namespace as public API +namespace opencl_info { + + inline const opencl_device_list& available_devices() { + // Static storage for all available OpenCL devices on machine + static const opencl_device_list my_devices = internal::find_available_devices(); + return my_devices; + } + +} // namespace opencl_info + + +class callback_base : tbb::internal::no_copy { +public: + virtual void call() = 0; + virtual ~callback_base() {} +}; + +template +class callback : public callback_base { + Callback my_callback; + T my_data; +public: + callback( Callback c, const T& t ) : my_callback( c ), my_data( t ) {} + + void call() __TBB_override { + my_callback( my_data ); + } +}; + +template +class __TBB_DEPRECATED_IN_VERBOSE_MODE opencl_async_msg : public async_msg { +public: + typedef T value_type; + + opencl_async_msg() : my_callback_flag_ptr( std::make_shared< tbb::atomic>() ) { + my_callback_flag_ptr->store(false); + } + + explicit opencl_async_msg( const T& data ) : my_data(data), my_callback_flag_ptr( std::make_shared>() ) { + my_callback_flag_ptr->store(false); + } + + opencl_async_msg( const T& data, cl_event event ) : my_data(data), my_event(event), my_is_event(true), my_callback_flag_ptr( std::make_shared>() ) { + my_callback_flag_ptr->store(false); + enforce_cl_retcode( clRetainEvent( my_event ), "Failed to retain an event" ); + } + + T& data( bool wait = true ) { + if ( my_is_event && wait ) { + enforce_cl_retcode( clWaitForEvents( 1, &my_event ), "Failed to wait for an event" ); + enforce_cl_retcode( clReleaseEvent( my_event ), "Failed to release an event" ); + my_is_event = false; + } + return my_data; + } + + const T& data( bool wait = true ) const { + if ( my_is_event && wait ) { + enforce_cl_retcode( clWaitForEvents( 1, &my_event ), "Failed to wait for an event" ); + enforce_cl_retcode( clReleaseEvent( my_event ), "Failed to release an event" ); + my_is_event = false; + } + return my_data; + } + + opencl_async_msg( const opencl_async_msg &dmsg ) : async_msg(dmsg), + my_data(dmsg.my_data), my_event(dmsg.my_event), my_is_event( dmsg.my_is_event ), + my_callback_flag_ptr(dmsg.my_callback_flag_ptr) + { + if ( my_is_event ) + enforce_cl_retcode( clRetainEvent( my_event ), "Failed to retain an event" ); + } + + opencl_async_msg( opencl_async_msg &&dmsg ) : async_msg(std::move(dmsg)), + my_data(std::move(dmsg.my_data)), my_event(dmsg.my_event), my_is_event(dmsg.my_is_event), + my_callback_flag_ptr( std::move(dmsg.my_callback_flag_ptr) ) + { + dmsg.my_is_event = false; + } + + opencl_async_msg& operator=(const opencl_async_msg &dmsg) { + async_msg::operator =(dmsg); + + // Release original event + if ( my_is_event ) + enforce_cl_retcode( clReleaseEvent( my_event ), "Failed to retain an event" ); + + my_data = dmsg.my_data; + my_event = dmsg.my_event; + my_is_event = dmsg.my_is_event; + + // Retain copied event + if ( my_is_event ) + enforce_cl_retcode( clRetainEvent( my_event ), "Failed to retain an event" ); + + my_callback_flag_ptr = dmsg.my_callback_flag_ptr; + return *this; + } + + ~opencl_async_msg() { + if ( my_is_event ) + enforce_cl_retcode( clReleaseEvent( my_event ), "Failed to release an event" ); + } + + cl_event const * get_event() const { return my_is_event ? 
&my_event : NULL; } + void set_event( cl_event e ) const { + if ( my_is_event ) { + cl_command_queue cq = event_info( my_event, CL_EVENT_COMMAND_QUEUE ); + if ( cq != event_info( e, CL_EVENT_COMMAND_QUEUE ) ) + enforce_cl_retcode( clFlush( cq ), "Failed to flush an OpenCL command queue" ); + enforce_cl_retcode( clReleaseEvent( my_event ), "Failed to release an event" ); + } + my_is_event = true; + my_event = e; + clRetainEvent( my_event ); + } + + void clear_event() const { + if ( my_is_event ) { + enforce_cl_retcode( clFlush( event_info( my_event, CL_EVENT_COMMAND_QUEUE ) ), "Failed to flush an OpenCL command queue" ); + enforce_cl_retcode( clReleaseEvent( my_event ), "Failed to release an event" ); + } + my_is_event = false; + } + + template + void register_callback( Callback c ) const { + __TBB_ASSERT( my_is_event, "The OpenCL event is not set" ); + enforce_cl_retcode( clSetEventCallback( my_event, CL_COMPLETE, register_callback_func, new callback( c, my_data ) ), "Failed to set an OpenCL callback" ); + } + + operator T&() { return data(); } + operator const T&() const { return data(); } + +protected: + // Overridden in this derived class to inform that + // async calculation chain is over + void finalize() const __TBB_override { + receive_if_memory_object(*this); + if (! my_callback_flag_ptr->fetch_and_store(true)) { + opencl_async_msg a(*this); + if (my_is_event) { + register_callback([a](const T& t) mutable { + a.set(t); + }); + } + else { + a.set(my_data); + } + } + clear_event(); + } + +private: + static void CL_CALLBACK register_callback_func( cl_event, cl_int event_command_exec_status, void *data ) { + tbb::internal::suppress_unused_warning( event_command_exec_status ); + __TBB_ASSERT( event_command_exec_status == CL_COMPLETE, NULL ); + __TBB_ASSERT( data, NULL ); + callback_base *c = static_cast(data); + c->call(); + delete c; + } + + T my_data; + mutable cl_event my_event; + mutable bool my_is_event = false; + + std::shared_ptr< tbb::atomic > my_callback_flag_ptr; +}; + +template +K key_from_message( const opencl_async_msg &dmsg ) { + using tbb::flow::key_from_message; + const T &t = dmsg.data( false ); + __TBB_STATIC_ASSERT( true, "" ); + return key_from_message( t ); +} + +template +class opencl_memory { +public: + opencl_memory() {} + opencl_memory( Factory &f ) : my_host_ptr( NULL ), my_factory( &f ), my_sending_event_present( false ) { + my_curr_device_id = my_factory->devices().begin()->my_device_id; + } + + virtual ~opencl_memory() { + if ( my_sending_event_present ) enforce_cl_retcode( clReleaseEvent( my_sending_event ), "Failed to release an event for the OpenCL buffer" ); + enforce_cl_retcode( clReleaseMemObject( my_cl_mem ), "Failed to release an memory object" ); + } + + cl_mem get_cl_mem() const { + return my_cl_mem; + } + + void* get_host_ptr() { + if ( !my_host_ptr ) { + opencl_async_msg d = receive( NULL ); + d.data(); + __TBB_ASSERT( d.data() == my_host_ptr, NULL ); + } + return my_host_ptr; + } + + Factory *factory() const { return my_factory; } + + opencl_async_msg receive(const cl_event *e) { + opencl_async_msg d; + if (e) { + d = opencl_async_msg(my_host_ptr, *e); + } else { + d = opencl_async_msg(my_host_ptr); + } + + // Concurrent receives are prohibited so we do not worry about synchronization. 
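+        // If the buffer currently lives on a device, map it back into host memory and mark the host as the current owner before handing out the host pointer.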
+ if (my_curr_device_id.load() != opencl_device::host) { + map_memory(*my_factory->devices().begin(), d); + my_curr_device_id.store(opencl_device::host); + my_host_ptr = d.data(false); + } + // Release the sending event + if (my_sending_event_present) { + enforce_cl_retcode(clReleaseEvent(my_sending_event), "Failed to release an event"); + my_sending_event_present = false; + } + return d; + } + + opencl_async_msg send(opencl_device device, const cl_event *e) { + opencl_device::device_id_type device_id = device.my_device_id; + if (!my_factory->is_same_context(my_curr_device_id.load(), device_id)) { + { + tbb::spin_mutex::scoped_lock lock(my_sending_lock); + if (!my_factory->is_same_context(my_curr_device_id.load(), device_id)) { + __TBB_ASSERT(my_host_ptr, "The buffer has not been mapped"); + opencl_async_msg d(my_host_ptr); + my_factory->enqueue_unmap_buffer(device, *this, d); + my_sending_event = *d.get_event(); + my_sending_event_present = true; + enforce_cl_retcode(clRetainEvent(my_sending_event), "Failed to retain an event"); + my_host_ptr = NULL; + my_curr_device_id.store(device_id); + } + } + __TBB_ASSERT(my_sending_event_present, NULL); + } + + // !e means that buffer has come from the host + if (!e && my_sending_event_present) e = &my_sending_event; + + __TBB_ASSERT(!my_host_ptr, "The buffer has not been unmapped"); + return e ? opencl_async_msg(NULL, *e) : opencl_async_msg(NULL); + } + + virtual void map_memory( opencl_device, opencl_async_msg & ) = 0; +protected: + cl_mem my_cl_mem; + tbb::atomic my_curr_device_id; + void* my_host_ptr; + Factory *my_factory; + + tbb::spin_mutex my_sending_lock; + bool my_sending_event_present; + cl_event my_sending_event; +}; + +template +class opencl_buffer_impl : public opencl_memory { + size_t my_size; +public: + opencl_buffer_impl( size_t size, Factory& f ) : opencl_memory( f ), my_size( size ) { + cl_int err; + this->my_cl_mem = clCreateBuffer( this->my_factory->context(), CL_MEM_ALLOC_HOST_PTR, size, NULL, &err ); + enforce_cl_retcode( err, "Failed to create an OpenCL buffer" ); + } + + // The constructor for subbuffers. + opencl_buffer_impl( cl_mem m, size_t index, size_t size, Factory& f ) : opencl_memory( f ), my_size( size ) { + cl_int err; + cl_buffer_region region = { index, size }; + this->my_cl_mem = clCreateSubBuffer( m, 0, CL_BUFFER_CREATE_TYPE_REGION, ®ion, &err ); + enforce_cl_retcode( err, "Failed to create an OpenCL subbuffer" ); + } + + size_t size() const { + return my_size; + } + + void map_memory( opencl_device device, opencl_async_msg &dmsg ) __TBB_override { + this->my_factory->enqueue_map_buffer( device, *this, dmsg ); + } + +#if TBB_USE_ASSERT + template + friend class opencl_buffer; +#endif +}; + +enum access_type { + read_write, + write_only, + read_only +}; + +template +class __TBB_DEPRECATED_IN_VERBOSE_MODE +opencl_subbuffer; + +template +class __TBB_DEPRECATED_IN_VERBOSE_MODE +opencl_buffer { +public: + typedef cl_mem native_object_type; + typedef opencl_buffer memory_object_type; + typedef Factory opencl_factory_type; + + template using iterator = T*; + + template + iterator
access() const { + T* ptr = (T*)my_impl->get_host_ptr(); + __TBB_ASSERT( ptr, NULL ); + return iterator( ptr ); + } + + T* data() const { return &access()[0]; } + + template + iterator begin() const { return access(); } + + template + iterator end() const { return access()+my_impl->size()/sizeof(T); } + + size_t size() const { return my_impl->size()/sizeof(T); } + + T& operator[] ( ptrdiff_t k ) { return begin()[k]; } + + opencl_buffer() {} + opencl_buffer( size_t size ); + opencl_buffer( Factory &f, size_t size ) : my_impl( std::make_shared( size*sizeof(T), f ) ) {} + + cl_mem native_object() const { + return my_impl->get_cl_mem(); + } + + const opencl_buffer& memory_object() const { + return *this; + } + + void send( opencl_device device, opencl_async_msg &dependency ) const { + __TBB_ASSERT( dependency.data( /*wait = */false ) == *this, NULL ); + opencl_async_msg d = my_impl->send( device, dependency.get_event() ); + const cl_event *e = d.get_event(); + if ( e ) dependency.set_event( *e ); + else dependency.clear_event(); + } + void receive( const opencl_async_msg &dependency ) const { + __TBB_ASSERT( dependency.data( /*wait = */false ) == *this, NULL ); + opencl_async_msg d = my_impl->receive( dependency.get_event() ); + const cl_event *e = d.get_event(); + if ( e ) dependency.set_event( *e ); + else dependency.clear_event(); + } + + opencl_subbuffer subbuffer( size_t index, size_t size ) const; +private: + // The constructor for subbuffers. + opencl_buffer( Factory &f, cl_mem m, size_t index, size_t size ) : my_impl( std::make_shared( m, index*sizeof(T), size*sizeof(T), f ) ) {} + + typedef opencl_buffer_impl impl_type; + + std::shared_ptr my_impl; + + friend bool operator==(const opencl_buffer &lhs, const opencl_buffer &rhs) { + return lhs.my_impl == rhs.my_impl; + } + + template + friend class opencl_factory; + template + friend class opencl_subbuffer; +}; + +template +class __TBB_DEPRECATED_IN_VERBOSE_MODE +opencl_subbuffer : public opencl_buffer { + opencl_buffer my_owner; +public: + opencl_subbuffer() {} + opencl_subbuffer( const opencl_buffer &owner, size_t index, size_t size ) : + opencl_buffer( *owner.my_impl->factory(), owner.native_object(), index, size ), my_owner( owner ) {} +}; + +template +opencl_subbuffer opencl_buffer::subbuffer( size_t index, size_t size ) const { + return opencl_subbuffer( *this, index, size ); +} + + +#define is_typedef(type) \ + template \ + struct is_##type { \ + template \ + static std::true_type check( typename C::type* ); \ + template \ + static std::false_type check( ... ); \ + \ + static const bool value = decltype(check(0))::value; \ + } + +is_typedef( native_object_type ); +is_typedef( memory_object_type ); + +template +typename std::enable_if::value, typename T::native_object_type>::type get_native_object( const T &t ) { + return t.native_object(); +} + +template +typename std::enable_if::value, T>::type get_native_object( T t ) { + return t; +} + +// send_if_memory_object checks if the T type has memory_object_type and call the send method for the object. 
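+// (Compile-time dispatch via std::enable_if: the overloads below run only for types that define a memory_object_type typedef and forward the underlying buffer to the device; the final overload is a no-op for every other type.)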
+template +typename std::enable_if::value>::type send_if_memory_object( opencl_device device, opencl_async_msg &dmsg ) { + const T &t = dmsg.data( false ); + typedef typename T::memory_object_type mem_obj_t; + mem_obj_t mem_obj = t.memory_object(); + opencl_async_msg d( mem_obj ); + if ( dmsg.get_event() ) d.set_event( *dmsg.get_event() ); + mem_obj.send( device, d ); + if ( d.get_event() ) dmsg.set_event( *d.get_event() ); +} + +template +typename std::enable_if::value>::type send_if_memory_object( opencl_device device, T &t ) { + typedef typename T::memory_object_type mem_obj_t; + mem_obj_t mem_obj = t.memory_object(); + opencl_async_msg dmsg( mem_obj ); + mem_obj.send( device, dmsg ); +} + +template +typename std::enable_if::value>::type send_if_memory_object( opencl_device, T& ) {}; + +// receive_if_memory_object checks if the T type has memory_object_type and call the receive method for the object. +template +typename std::enable_if::value>::type receive_if_memory_object( const opencl_async_msg &dmsg ) { + const T &t = dmsg.data( false ); + typedef typename T::memory_object_type mem_obj_t; + mem_obj_t mem_obj = t.memory_object(); + opencl_async_msg d( mem_obj ); + if ( dmsg.get_event() ) d.set_event( *dmsg.get_event() ); + mem_obj.receive( d ); + if ( d.get_event() ) dmsg.set_event( *d.get_event() ); +} + +template +typename std::enable_if::value>::type receive_if_memory_object( const T& ) {} + +class __TBB_DEPRECATED_IN_VERBOSE_MODE opencl_range { +public: + typedef size_t range_index_type; + typedef std::array nd_range_type; + + template , typename L = std::initializer_list, + typename = typename std::enable_if::type, opencl_range>::value>::type> + opencl_range(G&& global_work = std::initializer_list({ 0 }), L&& local_work = std::initializer_list({ 0, 0, 0 })) { + auto g_it = global_work.begin(); + auto l_it = local_work.begin(); + my_global_work_size = { {size_t(-1), size_t(-1), size_t(-1)} }; + // my_local_work_size is still uninitialized + for (int s = 0; s < 3 && g_it != global_work.end(); ++g_it, ++l_it, ++s) { + __TBB_ASSERT(l_it != local_work.end(), "global_work & local_work must have same size"); + my_global_work_size[s] = *g_it; + my_local_work_size[s] = *l_it; + } + } + + const nd_range_type& global_range() const { return my_global_work_size; } + const nd_range_type& local_range() const { return my_local_work_size; } + +private: + nd_range_type my_global_work_size; + nd_range_type my_local_work_size; +}; + +template +class __TBB_DEPRECATED_IN_VERBOSE_MODE opencl_factory { +public: + template using async_msg_type = opencl_async_msg>; + typedef opencl_device device_type; + + class kernel : tbb::internal::no_assign { + public: + kernel( const kernel& k ) : my_factory( k.my_factory ) { + // Clone my_cl_kernel via opencl_program + size_t ret_size = 0; + + std::vector kernel_name; + for ( size_t curr_size = 32;; curr_size <<= 1 ) { + kernel_name.resize( curr_size <<= 1 ); + enforce_cl_retcode( clGetKernelInfo( k.my_cl_kernel, CL_KERNEL_FUNCTION_NAME, curr_size, kernel_name.data(), &ret_size ), "Failed to get kernel info" ); + if ( ret_size < curr_size ) break; + } + + cl_program program; + enforce_cl_retcode( clGetKernelInfo( k.my_cl_kernel, CL_KERNEL_PROGRAM, sizeof(program), &program, &ret_size ), "Failed to get kernel info" ); + __TBB_ASSERT( ret_size == sizeof(program), NULL ); + + my_cl_kernel = opencl_program< factory_type >( my_factory, program ).get_cl_kernel( kernel_name.data() ); + } + + ~kernel() { + enforce_cl_retcode( clReleaseKernel( my_cl_kernel ), "Failed to 
release a kernel" ); + } + + private: + typedef opencl_factory factory_type; + + kernel( const cl_kernel& k, factory_type& f ) : my_cl_kernel( k ), my_factory( f ) {} + + // Data + cl_kernel my_cl_kernel; + factory_type& my_factory; + + template + friend class opencl_factory; + + template + friend class opencl_program; + }; + + typedef kernel kernel_type; + + // 'range_type' enables kernel_executor with range support + // it affects expectations for enqueue_kernel(.....) interface method + typedef opencl_range range_type; + + opencl_factory() {} + ~opencl_factory() { + if ( my_devices.size() ) { + for ( auto d = my_devices.begin(); d != my_devices.end(); ++d ) { + enforce_cl_retcode( clReleaseCommandQueue( (*d).my_cl_command_queue ), "Failed to release a command queue" ); + } + enforce_cl_retcode( clReleaseContext( my_cl_context ), "Failed to release a context" ); + } + } + + bool init( const opencl_device_list &device_list ) { + tbb::spin_mutex::scoped_lock lock( my_devices_mutex ); + if ( !my_devices.size() ) { + my_devices = device_list; + return true; + } + return false; + } + + +private: + template + void enqueue_map_buffer( opencl_device device, opencl_buffer_impl &buffer, opencl_async_msg& dmsg ) { + cl_event const* e1 = dmsg.get_event(); + cl_event e2; + cl_int err; + void *ptr = clEnqueueMapBuffer( device.my_cl_command_queue, buffer.get_cl_mem(), false, CL_MAP_READ | CL_MAP_WRITE, 0, buffer.size(), + e1 == NULL ? 0 : 1, e1, &e2, &err ); + enforce_cl_retcode( err, "Failed to map a buffer" ); + dmsg.data( false ) = ptr; + dmsg.set_event( e2 ); + enforce_cl_retcode( clReleaseEvent( e2 ), "Failed to release an event" ); + } + + + template + void enqueue_unmap_buffer( opencl_device device, opencl_memory &memory, opencl_async_msg& dmsg ) { + cl_event const* e1 = dmsg.get_event(); + cl_event e2; + enforce_cl_retcode( + clEnqueueUnmapMemObject( device.my_cl_command_queue, memory.get_cl_mem(), memory.get_host_ptr(), e1 == NULL ? 0 : 1, e1, &e2 ), + "Failed to unmap a buffer" ); + dmsg.set_event( e2 ); + enforce_cl_retcode( clReleaseEvent( e2 ), "Failed to release an event" ); + } + + // --------- Kernel argument & event list helpers --------- // + template + void process_one_arg( const kernel_type& kernel, std::array&, int&, int& place, const T& t ) { + auto p = get_native_object(t); + enforce_cl_retcode( clSetKernelArg(kernel.my_cl_kernel, place++, sizeof(p), &p), "Failed to set a kernel argument" ); + } + + template + void process_one_arg( const kernel_type& kernel, std::array& events, int& num_events, int& place, const opencl_async_msg& msg ) { + __TBB_ASSERT((static_cast::size_type>(num_events) < events.size()), NULL); + + const cl_event * const e = msg.get_event(); + if (e != NULL) { + events[num_events++] = *e; + } + + process_one_arg( kernel, events, num_events, place, msg.data(false) ); + } + + template + void process_arg_list( const kernel_type& kernel, std::array& events, int& num_events, int& place, const T& t, const Rest&... args ) { + process_one_arg( kernel, events, num_events, place, t ); + process_arg_list( kernel, events, num_events, place, args... ); + } + + template + void process_arg_list( const kernel_type&, std::array&, int&, int& ) {} + // ------------------------------------------- // + template + void update_one_arg( cl_event, T& ) {} + + template + void update_one_arg( cl_event e, opencl_async_msg& msg ) { + msg.set_event( e ); + } + + template + void update_arg_list( cl_event e, T& t, Rest&... args ) { + update_one_arg( e, t ); + update_arg_list( e, args... 
); + } + + void update_arg_list( cl_event ) {} + // ------------------------------------------- // +public: + template + void send_kernel( opencl_device device, const kernel_type& kernel, const range_type& work_size, Args&... args ) { + std::array events; + int num_events = 0; + int place = 0; + process_arg_list( kernel, events, num_events, place, args... ); + + const cl_event e = send_kernel_impl( device, kernel.my_cl_kernel, work_size, num_events, events.data() ); + + update_arg_list(e, args...); + + // Release our own reference to cl_event + enforce_cl_retcode( clReleaseEvent(e), "Failed to release an event" ); + } + + // ------------------------------------------- // + template + void send_data(opencl_device device, T& t, Rest&... args) { + send_if_memory_object( device, t ); + send_data( device, args... ); + } + + void send_data(opencl_device) {} + // ------------------------------------------- // + +private: + cl_event send_kernel_impl( opencl_device device, const cl_kernel& kernel, + const range_type& work_size, cl_uint num_events, cl_event* event_list ) { + const typename range_type::nd_range_type g_offset = { { 0, 0, 0 } }; + const typename range_type::nd_range_type& g_size = work_size.global_range(); + const typename range_type::nd_range_type& l_size = work_size.local_range(); + cl_uint s; + for ( s = 1; s < 3 && g_size[s] != size_t(-1); ++s) {} + cl_event event; + enforce_cl_retcode( + clEnqueueNDRangeKernel( device.my_cl_command_queue, kernel, s, + g_offset.data(), g_size.data(), l_size[0] ? l_size.data() : NULL, num_events, num_events ? event_list : NULL, &event ), + "Failed to enqueue a kernel" ); + return event; + } + + // ------------------------------------------- // + template + bool get_event_from_one_arg( cl_event&, const T& ) { + return false; + } + + template + bool get_event_from_one_arg( cl_event& e, const opencl_async_msg& msg) { + cl_event const *e_ptr = msg.get_event(); + + if ( e_ptr != NULL ) { + e = *e_ptr; + return true; + } + + return false; + } + + template + bool get_event_from_args( cl_event& e, const T& t, const Rest&... args ) { + if ( get_event_from_one_arg( e, t ) ) { + return true; + } + + return get_event_from_args( e, args... ); + } + + bool get_event_from_args( cl_event& ) { + return false; + } + // ------------------------------------------- // + + struct finalize_fn : tbb::internal::no_assign { + virtual ~finalize_fn() {} + virtual void operator() () {} + }; + + template + struct finalize_fn_leaf : public finalize_fn { + Fn my_fn; + finalize_fn_leaf(Fn fn) : my_fn(fn) {} + void operator() () __TBB_override { my_fn(); } + }; + + static void CL_CALLBACK finalize_callback(cl_event, cl_int event_command_exec_status, void *data) { + tbb::internal::suppress_unused_warning(event_command_exec_status); + __TBB_ASSERT(event_command_exec_status == CL_COMPLETE, NULL); + + finalize_fn * const fn_ptr = static_cast(data); + __TBB_ASSERT(fn_ptr != NULL, "Invalid finalize function pointer"); + (*fn_ptr)(); + + // Function pointer was created by 'new' & this callback must be called once only + delete fn_ptr; + } +public: + template + void finalize( opencl_device device, FinalizeFn fn, Args&... args ) { + cl_event e; + + if ( get_event_from_args( e, args... 
) ) { + enforce_cl_retcode( clSetEventCallback( e, CL_COMPLETE, finalize_callback, + new finalize_fn_leaf(fn) ), "Failed to set a callback" ); + } + + enforce_cl_retcode( clFlush( device.my_cl_command_queue ), "Failed to flush an OpenCL command queue" ); + } + + const opencl_device_list& devices() { + std::call_once( my_once_flag, &opencl_factory::init_once, this ); + return my_devices; + } + +private: + bool is_same_context( opencl_device::device_id_type d1, opencl_device::device_id_type d2 ) { + __TBB_ASSERT( d1 != opencl_device::unknown && d2 != opencl_device::unknown, NULL ); + // Currently, factory supports only one context so if the both devices are not host it means the are in the same context. + if ( d1 != opencl_device::host && d2 != opencl_device::host ) + return true; + return d1 == d2; + } +private: + opencl_factory( const opencl_factory& ); + opencl_factory& operator=(const opencl_factory&); + + cl_context context() { + std::call_once( my_once_flag, &opencl_factory::init_once, this ); + return my_cl_context; + } + + void init_once() { + { + tbb::spin_mutex::scoped_lock lock(my_devices_mutex); + if (!my_devices.size()) + my_devices = DeviceFilter()( opencl_info::available_devices() ); + } + + enforce_cl_retcode(my_devices.size() ? CL_SUCCESS : CL_INVALID_DEVICE, "No devices in the device list"); + cl_platform_id platform_id = my_devices.begin()->platform_id(); + for (opencl_device_list::iterator it = ++my_devices.begin(); it != my_devices.end(); ++it) + enforce_cl_retcode(it->platform_id() == platform_id ? CL_SUCCESS : CL_INVALID_PLATFORM, "All devices should be in the same platform"); + + std::vector cl_device_ids; + for (auto d = my_devices.begin(); d != my_devices.end(); ++d) { + cl_device_ids.push_back((*d).my_cl_device_id); + } + + cl_context_properties context_properties[3] = { CL_CONTEXT_PLATFORM, (cl_context_properties)platform_id, (cl_context_properties)NULL }; + cl_int err; + cl_context ctx = clCreateContext(context_properties, + (cl_uint)cl_device_ids.size(), + cl_device_ids.data(), + NULL, NULL, &err); + enforce_cl_retcode(err, "Failed to create context"); + my_cl_context = ctx; + + size_t device_counter = 0; + for (auto d = my_devices.begin(); d != my_devices.end(); d++) { + (*d).my_device_id = device_counter++; + cl_int err2; + cl_command_queue cq; +#if CL_VERSION_2_0 + if ((*d).major_version() >= 2) { + if ((*d).out_of_order_exec_mode_on_host_present()) { + cl_queue_properties props[] = { CL_QUEUE_PROPERTIES, CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE, 0 }; + cq = clCreateCommandQueueWithProperties(ctx, (*d).my_cl_device_id, props, &err2); + } else { + cl_queue_properties props[] = { 0 }; + cq = clCreateCommandQueueWithProperties(ctx, (*d).my_cl_device_id, props, &err2); + } + } else +#endif + { + cl_command_queue_properties props = (*d).out_of_order_exec_mode_on_host_present() ? CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE : 0; + // Suppress "declared deprecated" warning for the next line. 
+#if __TBB_GCC_WARNING_SUPPRESSION_PRESENT +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" +#endif +#if _MSC_VER || __INTEL_COMPILER +#pragma warning( push ) +#if __INTEL_COMPILER +#pragma warning (disable: 1478) +#else +#pragma warning (disable: 4996) +#endif +#endif + cq = clCreateCommandQueue(ctx, (*d).my_cl_device_id, props, &err2); +#if _MSC_VER || __INTEL_COMPILER +#pragma warning( pop ) +#endif +#if __TBB_GCC_WARNING_SUPPRESSION_PRESENT +#pragma GCC diagnostic pop +#endif + } + enforce_cl_retcode(err2, "Failed to create command queue"); + (*d).my_cl_command_queue = cq; + } + } + + std::once_flag my_once_flag; + opencl_device_list my_devices; + cl_context my_cl_context; + + tbb::spin_mutex my_devices_mutex; + + template + friend class opencl_program; + template + friend class opencl_buffer_impl; + template + friend class opencl_memory; +}; // class opencl_factory + +// TODO: consider this namespace as public API +namespace opencl_info { + +// Default types + +template +struct default_device_selector { + opencl_device operator()(Factory& f) { + __TBB_ASSERT(!f.devices().empty(), "No available devices"); + return *(f.devices().begin()); + } +}; + +struct default_device_filter { + opencl_device_list operator()(const opencl_device_list &devices) { + opencl_device_list dl; + cl_platform_id platform_id = devices.begin()->platform_id(); + for (opencl_device_list::const_iterator it = devices.cbegin(); it != devices.cend(); ++it) { + if (it->platform_id() == platform_id) { + dl.add(*it); + } + } + return dl; + } +}; + +class default_opencl_factory : public opencl_factory < default_device_filter >, tbb::internal::no_copy { +public: + template using async_msg_type = opencl_async_msg; + + friend default_opencl_factory& default_factory(); + +private: + default_opencl_factory() = default; +}; + +inline default_opencl_factory& default_factory() { + static default_opencl_factory default_factory; + return default_factory; +} + +} // namespace opencl_info + +template +opencl_buffer::opencl_buffer( size_t size ) : my_impl( std::make_shared( size*sizeof(T), opencl_info::default_factory() ) ) {} + + +enum class opencl_program_type { + SOURCE, + PRECOMPILED, + SPIR +}; + +template +class __TBB_DEPRECATED_IN_VERBOSE_MODE opencl_program : tbb::internal::no_assign { +public: + typedef typename Factory::kernel_type kernel_type; + + opencl_program( Factory& factory, opencl_program_type type, const std::string& program_name ) : my_factory( factory ), my_type(type) , my_arg_str( program_name) {} + opencl_program( Factory& factory, const char* program_name ) : opencl_program( factory, std::string( program_name ) ) {} + opencl_program( Factory& factory, const std::string& program_name ) : opencl_program( factory, opencl_program_type::SOURCE, program_name ) {} + + opencl_program( opencl_program_type type, const std::string& program_name ) : opencl_program( opencl_info::default_factory(), type, program_name ) {} + opencl_program( const char* program_name ) : opencl_program( opencl_info::default_factory(), program_name ) {} + opencl_program( const std::string& program_name ) : opencl_program( opencl_info::default_factory(), program_name ) {} + opencl_program( opencl_program_type type ) : opencl_program( opencl_info::default_factory(), type ) {} + + opencl_program( const opencl_program &src ) : my_factory( src.my_factory ), my_type( src.type ), my_arg_str( src.my_arg_str ), my_cl_program( src.my_cl_program ) { + // Set my_do_once_flag to the called state. 
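+        // (Invoking std::call_once with an empty lambda marks the flag as already executed, so get_cl_kernel() will not attempt to rebuild the program that was copied from src.)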
+ std::call_once( my_do_once_flag, [](){} ); + } + + kernel_type get_kernel( const std::string& k ) const { + return kernel_type( get_cl_kernel(k), my_factory ); + } + +private: + opencl_program( Factory& factory, cl_program program ) : my_factory( factory ), my_cl_program( program ) { + // Set my_do_once_flag to the called state. + std::call_once( my_do_once_flag, [](){} ); + } + + cl_kernel get_cl_kernel( const std::string& k ) const { + std::call_once( my_do_once_flag, [this, &k](){ this->init( k ); } ); + cl_int err; + cl_kernel kernel = clCreateKernel( my_cl_program, k.c_str(), &err ); + enforce_cl_retcode( err, std::string( "Failed to create kernel: " ) + k ); + return kernel; + } + + class file_reader { + public: + file_reader( const std::string& filepath ) { + std::ifstream file_descriptor( filepath, std::ifstream::binary ); + if ( !file_descriptor.is_open() ) { + std::string str = std::string( "Could not open file: " ) + filepath; + std::cerr << str << std::endl; + throw str; + } + file_descriptor.seekg( 0, file_descriptor.end ); + size_t length = size_t( file_descriptor.tellg() ); + file_descriptor.seekg( 0, file_descriptor.beg ); + my_content.resize( length ); + char* begin = &*my_content.begin(); + file_descriptor.read( begin, length ); + file_descriptor.close(); + } + const char* content() { return &*my_content.cbegin(); } + size_t length() { return my_content.length(); } + private: + std::string my_content; + }; + + class opencl_program_builder { + public: + typedef void (CL_CALLBACK *cl_callback_type)(cl_program, void*); + opencl_program_builder( Factory& f, const std::string& name, cl_program program, + cl_uint num_devices, cl_device_id* device_list, + const char* options, cl_callback_type callback, + void* user_data ) { + cl_int err = clBuildProgram( program, num_devices, device_list, options, + callback, user_data ); + if( err == CL_SUCCESS ) + return; + std::string str = std::string( "Failed to build program: " ) + name; + if ( err == CL_BUILD_PROGRAM_FAILURE ) { + const opencl_device_list &devices = f.devices(); + for ( auto d = devices.begin(); d != devices.end(); ++d ) { + std::cerr << "Build log for device: " << (*d).name() << std::endl; + size_t log_size; + cl_int query_err = clGetProgramBuildInfo( + program, (*d).my_cl_device_id, CL_PROGRAM_BUILD_LOG, 0, NULL, + &log_size ); + enforce_cl_retcode( query_err, "Failed to get build log size" ); + if( log_size ) { + std::vector output; + output.resize( log_size ); + query_err = clGetProgramBuildInfo( + program, (*d).my_cl_device_id, CL_PROGRAM_BUILD_LOG, + output.size(), output.data(), NULL ); + enforce_cl_retcode( query_err, "Failed to get build output" ); + std::cerr << output.data() << std::endl; + } else { + std::cerr << "No build log available" << std::endl; + } + } + } + enforce_cl_retcode( err, str ); + } + }; + + class opencl_device_filter { + public: + template + opencl_device_filter( cl_uint& num_devices, cl_device_id* device_list, + Filter filter, const char* message ) { + for ( cl_uint i = 0; i < num_devices; ++i ) + if ( filter(device_list[i]) ) { + device_list[i--] = device_list[--num_devices]; + } + if ( !num_devices ) + enforce_cl_retcode( CL_DEVICE_NOT_AVAILABLE, message ); + } + }; + + void init( const std::string& ) const { + cl_uint num_devices; + enforce_cl_retcode( clGetContextInfo( my_factory.context(), CL_CONTEXT_NUM_DEVICES, sizeof( num_devices ), &num_devices, NULL ), + "Failed to get OpenCL context info" ); + if ( !num_devices ) + enforce_cl_retcode( CL_DEVICE_NOT_FOUND, "No supported devices 
found" ); + cl_device_id *device_list = (cl_device_id *)alloca( num_devices*sizeof( cl_device_id ) ); + enforce_cl_retcode( clGetContextInfo( my_factory.context(), CL_CONTEXT_DEVICES, num_devices*sizeof( cl_device_id ), device_list, NULL ), + "Failed to get OpenCL context info" ); + const char *options = NULL; + switch ( my_type ) { + case opencl_program_type::SOURCE: { + file_reader fr( my_arg_str ); + const char *s[] = { fr.content() }; + const size_t l[] = { fr.length() }; + cl_int err; + my_cl_program = clCreateProgramWithSource( my_factory.context(), 1, s, l, &err ); + enforce_cl_retcode( err, std::string( "Failed to create program: " ) + my_arg_str ); + opencl_device_filter( + num_devices, device_list, + []( const opencl_device& d ) -> bool { + return !d.compiler_available() || !d.linker_available(); + }, "No one device supports building program from sources" ); + opencl_program_builder( + my_factory, my_arg_str, my_cl_program, num_devices, device_list, + options, /*callback*/ NULL, /*user data*/NULL ); + break; + } + case opencl_program_type::SPIR: + options = "-x spir"; + case opencl_program_type::PRECOMPILED: { + file_reader fr( my_arg_str ); + std::vector s( + num_devices, reinterpret_cast(fr.content()) ); + std::vector l( num_devices, fr.length() ); + std::vector bin_statuses( num_devices, -1 ); + cl_int err; + my_cl_program = clCreateProgramWithBinary( my_factory.context(), num_devices, + device_list, l.data(), s.data(), + bin_statuses.data(), &err ); + if( err != CL_SUCCESS ) { + std::string statuses_str; + for (auto st = bin_statuses.begin(); st != bin_statuses.end(); ++st) { + statuses_str += std::to_string((*st)); + } + + enforce_cl_retcode( err, std::string( "Failed to create program, error " + std::to_string( err ) + " : " ) + my_arg_str + + std::string( ", binary_statuses = " ) + statuses_str ); + } + opencl_program_builder( + my_factory, my_arg_str, my_cl_program, num_devices, device_list, + options, /*callback*/ NULL, /*user data*/NULL ); + break; + } + default: + __TBB_ASSERT( false, "Unsupported program type" ); + } + } + + Factory& my_factory; + opencl_program_type my_type; + std::string my_arg_str; + mutable cl_program my_cl_program; + mutable std::once_flag my_do_once_flag; + + template + friend class opencl_factory; + + friend class Factory::kernel; +}; + +template +class __TBB_DEPRECATED_IN_VERBOSE_MODE opencl_node; + +template +class __TBB_DEPRECATED_IN_VERBOSE_MODE +opencl_node< tuple, JP, Factory > : public streaming_node< tuple, JP, Factory > { + typedef streaming_node < tuple, JP, Factory > base_type; +public: + typedef typename base_type::kernel_type kernel_type; + + opencl_node( graph &g, const kernel_type& kernel ) + : base_type( g, kernel, opencl_info::default_device_selector< opencl_info::default_opencl_factory >(), opencl_info::default_factory() ) + { + tbb::internal::fgt_multiinput_multioutput_node( CODEPTR(), tbb::internal::FLOW_OPENCL_NODE, this, &this->my_graph ); + } + + opencl_node( graph &g, const kernel_type& kernel, Factory &f ) + : base_type( g, kernel, opencl_info::default_device_selector (), f ) + { + tbb::internal::fgt_multiinput_multioutput_node( CODEPTR(), tbb::internal::FLOW_OPENCL_NODE, this, &this->my_graph ); + } + + template + opencl_node( graph &g, const kernel_type& kernel, DeviceSelector d, Factory &f) + : base_type( g, kernel, d, f) + { + tbb::internal::fgt_multiinput_multioutput_node( CODEPTR(), tbb::internal::FLOW_OPENCL_NODE, this, &this->my_graph ); + } +}; + +template +class __TBB_DEPRECATED_IN_VERBOSE_MODE +opencl_node< 
tuple, JP > : public opencl_node < tuple, JP, opencl_info::default_opencl_factory > { + typedef opencl_node < tuple, JP, opencl_info::default_opencl_factory > base_type; +public: + typedef typename base_type::kernel_type kernel_type; + + opencl_node( graph &g, const kernel_type& kernel ) + : base_type( g, kernel, opencl_info::default_device_selector< opencl_info::default_opencl_factory >(), opencl_info::default_factory() ) + {} + + template + opencl_node( graph &g, const kernel_type& kernel, DeviceSelector d ) + : base_type( g, kernel, d, opencl_info::default_factory() ) + {} +}; + +template +class __TBB_DEPRECATED_IN_VERBOSE_MODE +opencl_node< tuple > : public opencl_node < tuple, queueing, opencl_info::default_opencl_factory > { + typedef opencl_node < tuple, queueing, opencl_info::default_opencl_factory > base_type; +public: + typedef typename base_type::kernel_type kernel_type; + + opencl_node( graph &g, const kernel_type& kernel ) + : base_type( g, kernel, opencl_info::default_device_selector< opencl_info::default_opencl_factory >(), opencl_info::default_factory() ) + {} + + template + opencl_node( graph &g, const kernel_type& kernel, DeviceSelector d ) + : base_type( g, kernel, d, opencl_info::default_factory() ) + {} +}; + +} // namespace interfaceX + +using interface11::opencl_node; +using interface11::read_only; +using interface11::read_write; +using interface11::write_only; +using interface11::opencl_buffer; +using interface11::opencl_subbuffer; +using interface11::opencl_device; +using interface11::opencl_device_list; +using interface11::opencl_program; +using interface11::opencl_program_type; +using interface11::opencl_async_msg; +using interface11::opencl_factory; +using interface11::opencl_range; + +} // namespace flow +} // namespace tbb +#endif /* __TBB_PREVIEW_OPENCL_NODE */ + +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_flow_graph_opencl_node_H_include_area + +#endif // __TBB_flow_graph_opencl_node_H diff --git a/ohos/arm64-v8a/include/tbb/global_control.h b/ohos/arm64-v8a/include/tbb/global_control.h new file mode 100644 index 00000000..bdcd59a5 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/global_control.h @@ -0,0 +1,78 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +#ifndef __TBB_global_control_H +#define __TBB_global_control_H + +#include "tbb_stddef.h" + +namespace tbb { +namespace interface9 { + +class global_control { +public: + enum parameter { + max_allowed_parallelism, + thread_stack_size, + parameter_max // insert new parameters above this point + }; + + global_control(parameter p, size_t value) : + my_value(value), my_next(NULL), my_param(p) { + __TBB_ASSERT(my_param < parameter_max, "Invalid parameter"); +#if __TBB_WIN8UI_SUPPORT && (_WIN32_WINNT < 0x0A00) + // For Windows 8 Store* apps it's impossible to set stack size + if (p==thread_stack_size) + return; +#elif __TBB_x86_64 && (_WIN32 || _WIN64) + if (p==thread_stack_size) + __TBB_ASSERT_RELEASE((unsigned)value == value, "Stack size is limited to unsigned int range"); +#endif + if (my_param==max_allowed_parallelism) + __TBB_ASSERT_RELEASE(my_value>0, "max_allowed_parallelism cannot be 0."); + internal_create(); + } + + ~global_control() { + __TBB_ASSERT(my_param < parameter_max, "Invalid parameter. Probably the object was corrupted."); +#if __TBB_WIN8UI_SUPPORT && (_WIN32_WINNT < 0x0A00) + // For Windows 8 Store* apps it's impossible to set stack size + if (my_param==thread_stack_size) + return; +#endif + internal_destroy(); + } + + static size_t active_value(parameter p) { + __TBB_ASSERT(p < parameter_max, "Invalid parameter"); + return active_value((int)p); + } +private: + size_t my_value; + global_control *my_next; + parameter my_param; + + void __TBB_EXPORTED_METHOD internal_create(); + void __TBB_EXPORTED_METHOD internal_destroy(); + static size_t __TBB_EXPORTED_FUNC active_value(int param); +}; +} // namespace interface9 + +using interface9::global_control; + +} // tbb + +#endif // __TBB_global_control_H diff --git a/ohos/arm64-v8a/include/tbb/index.html b/ohos/arm64-v8a/include/tbb/index.html new file mode 100644 index 00000000..9ead0e24 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/index.html @@ -0,0 +1,29 @@ + + + +

+Overview
+
+Include files for Intel® Threading Building Blocks classes and functions.
+
+Click here to see all files in the directory.
+
+Directories
+
+compat
+    Include files for source level compatibility with other frameworks.
+internal
+    Include files with implementation details; not for direct use.
+machine
+    Include files for low-level architecture specific functionality; not for direct use.
+
+Up to parent directory
+
+Copyright © 2005-2020 Intel Corporation. All Rights Reserved.
+
+Intel is a registered trademark or trademark of Intel Corporation
+or its subsidiaries in the United States and other countries.

+* Other names and brands may be claimed as the property of others. + + diff --git a/ohos/arm64-v8a/include/tbb/info.h b/ohos/arm64-v8a/include/tbb/info.h new file mode 100644 index 00000000..84b32092 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/info.h @@ -0,0 +1,52 @@ +/* + Copyright (c) 2019-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_info_H +#define __TBB_info_H + +#include "tbb_config.h" + +#if __TBB_NUMA_SUPPORT + +#include + +namespace tbb { + namespace internal { + namespace numa_topology { + unsigned nodes_count(); + void fill(int* indexes_array); + int default_concurrency(int node_id); + } //namespace numa_topology + } // namespace internal + + typedef int numa_node_id; + + namespace info { + inline std::vector numa_nodes() { + std::vector nodes_indexes(tbb::internal::numa_topology::nodes_count()); + internal::numa_topology::fill(&nodes_indexes.front()); + return nodes_indexes; + } + + inline int default_concurrency(numa_node_id id = -1) { + return internal::numa_topology::default_concurrency(id); + } + } // namespace info +} // namespace tbb + +#endif /*__TBB_NUMA_SUPPORT*/ + +#endif /*__TBB_info_H*/ diff --git a/ohos/arm64-v8a/include/tbb/internal/_aggregator_impl.h b/ohos/arm64-v8a/include/tbb/internal/_aggregator_impl.h new file mode 100644 index 00000000..6d89c055 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/internal/_aggregator_impl.h @@ -0,0 +1,180 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB__aggregator_impl_H +#define __TBB__aggregator_impl_H + +#include "../atomic.h" +#if !__TBBMALLOC_BUILD +#include "../tbb_profiling.h" +#endif + +namespace tbb { +namespace interface6 { +namespace internal { + +using namespace tbb::internal; + +//! aggregated_operation base class +template +class aggregated_operation { + public: + //! Zero value means "wait" status, all other values are "user" specified values and are defined into the scope of a class which uses "status". + uintptr_t status; + + Derived *next; + aggregated_operation() : status(0), next(NULL) {} +}; + +//! Aggregator base class +/** An aggregator for collecting operations coming from multiple sources and executing + them serially on a single thread. operation_type must be derived from + aggregated_operation. 
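+    For illustration only, a minimal hypothetical use (the names below are not part
+    of this header) might look roughly like this:
+
+        struct my_op : aggregated_operation<my_op> { int arg; int result; };
+        struct my_handler {
+            void operator()(my_op* list) {
+                for (my_op* op = list; op; ) {
+                    my_op* next = op->next;   // read next before publishing the status
+                    op->result = op->arg * 2; // the work that must run serially
+                    itt_store_word_with_release(op->status, uintptr_t(1)); // non-zero = done
+                    op = next;
+                }
+            }
+        };
+        aggregator<my_handler, my_op> agg;
+        my_op op; op.arg = 21;
+        agg.execute(&op); // returns after some thread has handled the operation
+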
The parameter handler_type is a functor that will be passed the + list of operations and is expected to handle each operation appropriately, setting the + status of each operation to non-zero.*/ +template < typename operation_type > +class aggregator_generic { +public: + aggregator_generic() : handler_busy(false) { pending_operations = NULL; } + + //! Execute an operation + /** Places an operation into the waitlist (pending_operations), and either handles the list, + or waits for the operation to complete, or returns. + The long_life_time parameter specifies the life time of the given operation object. + Operations with long_life_time == true may be accessed after execution. + A "short" life time operation (long_life_time == false) can be destroyed + during execution, and so any access to it after it was put into the waitlist, + including status check, is invalid. As a consequence, waiting for completion + of such operation causes undefined behavior. + */ + template < typename handler_type > + void execute(operation_type *op, handler_type &handle_operations, bool long_life_time = true) { + operation_type *res; + // op->status should be read before inserting the operation into the + // aggregator waitlist since it can become invalid after executing a + // handler (if the operation has 'short' life time.) + const uintptr_t status = op->status; + + // ITT note: &(op->status) tag is used to cover accesses to this op node. This + // thread has created the operation, and now releases it so that the handler + // thread may handle the associated operation w/o triggering a race condition; + // thus this tag will be acquired just before the operation is handled in the + // handle_operations functor. + call_itt_notify(releasing, &(op->status)); + // insert the operation in the queue. + do { + // Tools may flag the following line as a race; it is a false positive: + // This is an atomic read; we don't provide itt_hide_load_word for atomics + op->next = res = pending_operations; // NOT A RACE + } while (pending_operations.compare_and_swap(op, res) != res); + if (!res) { // first in the list; handle the operations. + // ITT note: &pending_operations tag covers access to the handler_busy flag, + // which this waiting handler thread will try to set before entering + // handle_operations. + call_itt_notify(acquired, &pending_operations); + start_handle_operations(handle_operations); + // The operation with 'short' life time can already be destroyed. + if (long_life_time) + __TBB_ASSERT(op->status, NULL); + } + // not first; wait for op to be ready. + else if (!status) { // operation is blocking here. + __TBB_ASSERT(long_life_time, "Waiting for an operation object that might be destroyed during processing."); + call_itt_notify(prepare, &(op->status)); + spin_wait_while_eq(op->status, uintptr_t(0)); + itt_load_word_with_acquire(op->status); + } + } + + private: + //! An atomically updated list (aka mailbox) of pending operations + atomic pending_operations; + //! Controls thread access to handle_operations + uintptr_t handler_busy; + + //! Trigger the handling of operations when the handler is free + template < typename handler_type > + void start_handle_operations( handler_type &handle_operations ) { + operation_type *op_list; + + // ITT note: &handler_busy tag covers access to pending_operations as it is passed + // between active and waiting handlers. 
Below, the waiting handler waits until + // the active handler releases, and the waiting handler acquires &handler_busy as + // it becomes the active_handler. The release point is at the end of this + // function, when all operations in pending_operations have been handled by the + // owner of this aggregator. + call_itt_notify(prepare, &handler_busy); + // get the handler_busy: + // only one thread can possibly spin here at a time + spin_wait_until_eq(handler_busy, uintptr_t(0)); + call_itt_notify(acquired, &handler_busy); + // acquire fence not necessary here due to causality rule and surrounding atomics + __TBB_store_with_release(handler_busy, uintptr_t(1)); + + // ITT note: &pending_operations tag covers access to the handler_busy flag + // itself. Capturing the state of the pending_operations signifies that + // handler_busy has been set and a new active handler will now process that list's + // operations. + call_itt_notify(releasing, &pending_operations); + // grab pending_operations + op_list = pending_operations.fetch_and_store(NULL); + + // handle all the operations + handle_operations(op_list); + + // release the handler + itt_store_word_with_release(handler_busy, uintptr_t(0)); + } +}; + +template < typename handler_type, typename operation_type > +class aggregator : public aggregator_generic { + handler_type handle_operations; +public: + aggregator() {} + explicit aggregator(handler_type h) : handle_operations(h) {} + + void initialize_handler(handler_type h) { handle_operations = h; } + + void execute(operation_type *op) { + aggregator_generic::execute(op, handle_operations); + } +}; + +// the most-compatible friend declaration (vs, gcc, icc) is +// template friend class aggregating_functor; +template +class aggregating_functor { + aggregating_class *fi; +public: + aggregating_functor() : fi() {} + aggregating_functor(aggregating_class *fi_) : fi(fi_) {} + void operator()(operation_list* op_list) { fi->handle_operations(op_list); } +}; + +} // namespace internal +} // namespace interface6 + +namespace internal { + using interface6::internal::aggregated_operation; + using interface6::internal::aggregator_generic; + using interface6::internal::aggregator; + using interface6::internal::aggregating_functor; +} // namespace internal + +} // namespace tbb + +#endif // __TBB__aggregator_impl_H diff --git a/ohos/arm64-v8a/include/tbb/internal/_allocator_traits.h b/ohos/arm64-v8a/include/tbb/internal/_allocator_traits.h new file mode 100644 index 00000000..eba986b7 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/internal/_allocator_traits.h @@ -0,0 +1,156 @@ +/* + Copyright (c) 2019-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +#ifndef __TBB_allocator_traits_H +#define __TBB_allocator_traits_H + +#include "../tbb_stddef.h" // true/false_type + +#if __TBB_ALLOCATOR_TRAITS_PRESENT +#include // for allocator_traits +#endif + +#if __TBB_CPP11_RVALUE_REF_PRESENT +#include // for std::move +#endif + +// For allocator_swap helper +#include __TBB_STD_SWAP_HEADER + +namespace tbb { +namespace internal { + +//! Internal implementation of allocator traits, propagate_on_* use internal boolean_constant. +//! In order to avoid code duplication, check what implementation of boolean constant will likely be passed. +#if __TBB_ALLOCATOR_TRAITS_PRESENT +typedef std::true_type traits_true_type; +typedef std::false_type traits_false_type; +#else +typedef tbb::internal::true_type traits_true_type; +typedef tbb::internal::false_type traits_false_type; +#endif + +//! Copy assignment implementation for allocator if propagate_on_container_copy_assignment == true_type +//! Noop if pocca == false_type +template +inline void allocator_copy_assignment(MyAlloc& my_allocator, OtherAlloc& other_allocator, traits_true_type) { + my_allocator = other_allocator; +} +template +inline void allocator_copy_assignment(MyAlloc&, OtherAlloc&, traits_false_type) { /* NO COPY */} + +#if __TBB_CPP11_RVALUE_REF_PRESENT +//! Move assignment implementation for allocator if propagate_on_container_move_assignment == true_type. +//! Noop if pocma == false_type. +template +inline void allocator_move_assignment(MyAlloc& my_allocator, OtherAlloc& other_allocator, traits_true_type) { + my_allocator = std::move(other_allocator); +} +template +inline void allocator_move_assignment(MyAlloc&, OtherAlloc&, traits_false_type) { /* NO MOVE */ } +#endif + +//! Swap implementation for allocators if propagate_on_container_swap == true_type. +//! Noop if pocs == false_type. +template +inline void allocator_swap(MyAlloc& my_allocator, OtherAlloc& other_allocator, traits_true_type) { + using std::swap; + swap(my_allocator, other_allocator); +} +template +inline void allocator_swap(MyAlloc&, OtherAlloc&, traits_false_type) { /* NO SWAP */ } + +#if __TBB_ALLOCATOR_TRAITS_PRESENT +using std::allocator_traits; +#else +//! Internal allocator_traits implementation, which relies on C++03 standard +//! [20.1.5] allocator requirements +template +struct allocator_traits { + // C++03 allocator doesn't have to be assignable or swappable, therefore + // define these traits as false_type to do not require additional operations + // that are not supposed to be in. 
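+    // (In other words, a C++03 allocator is never propagated on container copy assignment, move assignment, or swap.)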
+ typedef tbb::internal::false_type propagate_on_container_move_assignment; + typedef tbb::internal::false_type propagate_on_container_copy_assignment; + typedef tbb::internal::false_type propagate_on_container_swap; + + typedef Alloc allocator_type; + typedef typename allocator_type::value_type value_type; + + typedef typename allocator_type::pointer pointer; + typedef typename allocator_type::const_pointer const_pointer; + typedef typename allocator_type::difference_type difference_type; + typedef typename allocator_type::size_type size_type; + + template struct rebind_alloc { + typedef typename Alloc::template rebind::other other; + }; + + static pointer allocate(Alloc& a, size_type n) { + return a.allocate(n); + } + + static void deallocate(Alloc& a, pointer p, size_type n) { + a.deallocate(p, n); + } + + template + static void construct(Alloc&, PT* p) { + ::new (static_cast(p)) PT(); + } + + template + static void construct(Alloc&, PT* p, __TBB_FORWARDING_REF(T1) t1) { + ::new (static_cast(p)) PT(tbb::internal::forward(t1)); + } + + template + static void construct(Alloc&, PT* p, __TBB_FORWARDING_REF(T1) t1, __TBB_FORWARDING_REF(T2) t2) { + ::new (static_cast(p)) PT(tbb::internal::forward(t1), tbb::internal::forward(t2)); + } + + template + static void construct(Alloc&, PT* p, __TBB_FORWARDING_REF(T1) t1, + __TBB_FORWARDING_REF(T2) t2, __TBB_FORWARDING_REF(T3) t3) { + ::new (static_cast(p)) PT(tbb::internal::forward(t1), tbb::internal::forward(t2), + tbb::internal::forward(t3)); + } + + template + static void destroy(Alloc&, T* p) { + p->~T(); + tbb::internal::suppress_unused_warning(p); + } + + static Alloc select_on_container_copy_construction(const Alloc& a) { return a; } +}; +#endif // __TBB_ALLOCATOR_TRAITS_PRESENT + +//! C++03/C++11 compliant rebind helper, even if no std::allocator_traits available +//! or rebind is not defined for allocator type +template +struct allocator_rebind { +#if __TBB_ALLOCATOR_TRAITS_PRESENT + typedef typename allocator_traits::template rebind_alloc type; +#else + typedef typename allocator_traits::template rebind_alloc::other type; +#endif +}; + +}} // namespace tbb::internal + +#endif // __TBB_allocator_traits_H + diff --git a/ohos/arm64-v8a/include/tbb/internal/_concurrent_queue_impl.h b/ohos/arm64-v8a/include/tbb/internal/_concurrent_queue_impl.h new file mode 100644 index 00000000..e3bef772 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/internal/_concurrent_queue_impl.h @@ -0,0 +1,1081 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB__concurrent_queue_impl_H +#define __TBB__concurrent_queue_impl_H + +#ifndef __TBB_concurrent_queue_H +#error Do not #include this internal file directly; use public TBB headers instead. 
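// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the headers above): the tag-dispatch idea
// behind allocator_copy_assignment/allocator_move_assignment/allocator_swap
// and allocator_rebind, restated against std::allocator_traits. The names
// copy_assign_alloc and node are hypothetical.
// ---------------------------------------------------------------------------
#include <cassert>
#include <memory>
#include <type_traits>

// Assign the allocator only when propagate_on_container_copy_assignment says so.
template <typename A>
void copy_assign_alloc(A& dst, const A& src, std::true_type)  { dst = src; }
template <typename A>
void copy_assign_alloc(A&,     const A&,     std::false_type) { /* keep own allocator */ }

template <typename A>
void copy_assign_alloc(A& dst, const A& src) {
    copy_assign_alloc(dst, src,
        typename std::allocator_traits<A>::propagate_on_container_copy_assignment());
}

struct node { int v; node* next; };   // a container's internal node type

int main() {
    std::allocator<int> a, b;
    copy_assign_alloc(a, b);          // no-op: std::allocator's POCCA is false_type

    // Rebinding, the job of allocator_rebind above: re-target an allocator of
    // int so that it hands out nodes instead of ints.
    using node_alloc_t = std::allocator_traits<std::allocator<int>>::rebind_alloc<node>;
    node_alloc_t na;
    node* n = std::allocator_traits<node_alloc_t>::allocate(na, 1);
    std::allocator_traits<node_alloc_t>::construct(na, n, node{42, nullptr});
    assert(n->v == 42);
    std::allocator_traits<node_alloc_t>::destroy(na, n);
    std::allocator_traits<node_alloc_t>::deallocate(na, n, 1);
    return 0;
}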
+#endif + +#include "../tbb_stddef.h" +#include "../tbb_machine.h" +#include "../atomic.h" +#include "../spin_mutex.h" +#include "../cache_aligned_allocator.h" +#include "../tbb_exception.h" +#include "../tbb_profiling.h" +#include +#include __TBB_STD_SWAP_HEADER +#include + +namespace tbb { + +#if !__TBB_TEMPLATE_FRIENDS_BROKEN + +// forward declaration +namespace strict_ppl { +template class concurrent_queue; +} + +template class concurrent_bounded_queue; + +#endif + +//! For internal use only. +namespace strict_ppl { + +//! @cond INTERNAL +namespace internal { + +using namespace tbb::internal; + +typedef size_t ticket; + +template class micro_queue ; +template class micro_queue_pop_finalizer ; +template class concurrent_queue_base_v3; +template struct concurrent_queue_rep; + +//! parts of concurrent_queue_rep that do not have references to micro_queue +/** + * For internal use only. + */ +struct concurrent_queue_rep_base : no_copy { + template friend class micro_queue; + template friend class concurrent_queue_base_v3; + +protected: + //! Approximately n_queue/golden ratio + static const size_t phi = 3; + +public: + // must be power of 2 + static const size_t n_queue = 8; + + //! Prefix on a page + struct page { + page* next; + uintptr_t mask; + }; + + atomic head_counter; + char pad1[NFS_MaxLineSize-sizeof(atomic)]; + atomic tail_counter; + char pad2[NFS_MaxLineSize-sizeof(atomic)]; + + //! Always a power of 2 + size_t items_per_page; + + //! Size of an item + size_t item_size; + + //! number of invalid entries in the queue + atomic n_invalid_entries; + + char pad3[NFS_MaxLineSize-sizeof(size_t)-sizeof(size_t)-sizeof(atomic)]; +} ; + +inline bool is_valid_page(const concurrent_queue_rep_base::page* p) { + return uintptr_t(p)>1; +} + +//! Abstract class to define interface for page allocation/deallocation +/** + * For internal use only. + */ +class concurrent_queue_page_allocator +{ + template friend class micro_queue ; + template friend class micro_queue_pop_finalizer ; +protected: + virtual ~concurrent_queue_page_allocator() {} +private: + virtual concurrent_queue_rep_base::page* allocate_page() = 0; + virtual void deallocate_page( concurrent_queue_rep_base::page* p ) = 0; +} ; + +#if _MSC_VER && !defined(__INTEL_COMPILER) +// unary minus operator applied to unsigned type, result still unsigned +#pragma warning( push ) +#pragma warning( disable: 4146 ) +#endif + +//! A queue using simple locking. +/** For efficiency, this class has no constructor. + The caller is expected to zero-initialize it. */ +template +class micro_queue : no_copy { +public: + typedef void (*item_constructor_t)(T* location, const void* src); +private: + typedef concurrent_queue_rep_base::page page; + + //! 
Class used to ensure exception-safety of method "pop" + class destroyer: no_copy { + T& my_value; + public: + destroyer( T& value ) : my_value(value) {} + ~destroyer() {my_value.~T();} + }; + + void copy_item( page& dst, size_t dindex, const void* src, item_constructor_t construct_item ) { + construct_item( &get_ref(dst, dindex), src ); + } + + void copy_item( page& dst, size_t dindex, const page& src, size_t sindex, + item_constructor_t construct_item ) + { + T& src_item = get_ref( const_cast(src), sindex ); + construct_item( &get_ref(dst, dindex), static_cast(&src_item) ); + } + + void assign_and_destroy_item( void* dst, page& src, size_t index ) { + T& from = get_ref(src,index); + destroyer d(from); + *static_cast(dst) = tbb::internal::move( from ); + } + + void spin_wait_until_my_turn( atomic& counter, ticket k, concurrent_queue_rep_base& rb ) const ; + +public: + friend class micro_queue_pop_finalizer; + + struct padded_page: page { + //! Not defined anywhere - exists to quiet warnings. + padded_page(); + //! Not defined anywhere - exists to quiet warnings. + void operator=( const padded_page& ); + //! Must be last field. + T last; + }; + + static T& get_ref( page& p, size_t index ) { + return (&static_cast(static_cast(&p))->last)[index]; + } + + atomic head_page; + atomic head_counter; + + atomic tail_page; + atomic tail_counter; + + spin_mutex page_mutex; + + void push( const void* item, ticket k, concurrent_queue_base_v3& base, + item_constructor_t construct_item ) ; + + bool pop( void* dst, ticket k, concurrent_queue_base_v3& base ) ; + + micro_queue& assign( const micro_queue& src, concurrent_queue_base_v3& base, + item_constructor_t construct_item ) ; + + page* make_copy( concurrent_queue_base_v3& base, const page* src_page, size_t begin_in_page, + size_t end_in_page, ticket& g_index, item_constructor_t construct_item ) ; + + void invalidate_page_and_rethrow( ticket k ) ; +}; + +template +void micro_queue::spin_wait_until_my_turn( atomic& counter, ticket k, concurrent_queue_rep_base& rb ) const { + for( atomic_backoff b(true);;b.pause() ) { + ticket c = counter; + if( c==k ) return; + else if( c&1 ) { + ++rb.n_invalid_entries; + throw_exception( eid_bad_last_alloc ); + } + } +} + +template +void micro_queue::push( const void* item, ticket k, concurrent_queue_base_v3& base, + item_constructor_t construct_item ) +{ + k &= -concurrent_queue_rep_base::n_queue; + page* p = NULL; + size_t index = modulo_power_of_two( k/concurrent_queue_rep_base::n_queue, base.my_rep->items_per_page); + if( !index ) { + __TBB_TRY { + concurrent_queue_page_allocator& pa = base; + p = pa.allocate_page(); + } __TBB_CATCH (...) { + ++base.my_rep->n_invalid_entries; + invalidate_page_and_rethrow( k ); + } + p->mask = 0; + p->next = NULL; + } + + if( tail_counter != k ) spin_wait_until_my_turn( tail_counter, k, *base.my_rep ); + call_itt_notify(acquired, &tail_counter); + + if( p ) { + spin_mutex::scoped_lock lock( page_mutex ); + page* q = tail_page; + if( is_valid_page(q) ) + q->next = p; + else + head_page = p; + tail_page = p; + } else { + p = tail_page; + } + + __TBB_TRY { + copy_item( *p, index, item, construct_item ); + // If no exception was thrown, mark item as present. 
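// The page's mask word keeps one "present" bit per slot: setting the bit for
// this index under our tail ticket is what makes the freshly constructed
// element visible to pop() and to the iterators, which test exactly this bit.
// The bump of tail_counter by n_queue that follows (on both the success and
// the exception paths) publishes the slot and hands the turn to the next
// pusher; on the exception path the slot's bit stays clear and
// n_invalid_entries is incremented so that size()/empty() stay consistent.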
+ itt_hide_store_word(p->mask, p->mask | uintptr_t(1)<n_invalid_entries; + call_itt_notify(releasing, &tail_counter); + tail_counter += concurrent_queue_rep_base::n_queue; + __TBB_RETHROW(); + } +} + +template +bool micro_queue::pop( void* dst, ticket k, concurrent_queue_base_v3& base ) { + k &= -concurrent_queue_rep_base::n_queue; + if( head_counter!=k ) spin_wait_until_eq( head_counter, k ); + call_itt_notify(acquired, &head_counter); + if( tail_counter==k ) spin_wait_while_eq( tail_counter, k ); + call_itt_notify(acquired, &tail_counter); + page *p = head_page; + __TBB_ASSERT( p, NULL ); + size_t index = modulo_power_of_two( k/concurrent_queue_rep_base::n_queue, base.my_rep->items_per_page ); + bool success = false; + { + micro_queue_pop_finalizer finalizer( *this, base, k+concurrent_queue_rep_base::n_queue, index==base.my_rep->items_per_page-1 ? p : NULL ); + if( p->mask & uintptr_t(1)<n_invalid_entries; + } + } + return success; +} + +template +micro_queue& micro_queue::assign( const micro_queue& src, concurrent_queue_base_v3& base, + item_constructor_t construct_item ) +{ + head_counter = src.head_counter; + tail_counter = src.tail_counter; + + const page* srcp = src.head_page; + if( is_valid_page(srcp) ) { + ticket g_index = head_counter; + __TBB_TRY { + size_t n_items = (tail_counter-head_counter)/concurrent_queue_rep_base::n_queue; + size_t index = modulo_power_of_two( head_counter/concurrent_queue_rep_base::n_queue, base.my_rep->items_per_page ); + size_t end_in_first_page = (index+n_itemsitems_per_page)?(index+n_items):base.my_rep->items_per_page; + + head_page = make_copy( base, srcp, index, end_in_first_page, g_index, construct_item ); + page* cur_page = head_page; + + if( srcp != src.tail_page ) { + for( srcp = srcp->next; srcp!=src.tail_page; srcp=srcp->next ) { + cur_page->next = make_copy( base, srcp, 0, base.my_rep->items_per_page, g_index, construct_item ); + cur_page = cur_page->next; + } + + __TBB_ASSERT( srcp==src.tail_page, NULL ); + size_t last_index = modulo_power_of_two( tail_counter/concurrent_queue_rep_base::n_queue, base.my_rep->items_per_page ); + if( last_index==0 ) last_index = base.my_rep->items_per_page; + + cur_page->next = make_copy( base, srcp, 0, last_index, g_index, construct_item ); + cur_page = cur_page->next; + } + tail_page = cur_page; + } __TBB_CATCH (...) { + invalidate_page_and_rethrow( g_index ); + } + } else { + head_page = tail_page = NULL; + } + return *this; +} + +template +void micro_queue::invalidate_page_and_rethrow( ticket k ) { + // Append an invalid page at address 1 so that no more pushes are allowed. 
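// Address 1 is a deliberate sentinel: is_valid_page() only accepts pointers
// greater than 1, so both NULL and this marker read as "no usable page".
// Bumping tail_counter by n_queue+1 below leaves its low bit set, which
// spin_wait_until_my_turn() interprets as "queue broken" and turns into a
// bad_last_alloc exception for every pusher still waiting for its turn.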
+ page* invalid_page = (page*)uintptr_t(1); + { + spin_mutex::scoped_lock lock( page_mutex ); + itt_store_word_with_release(tail_counter, k+concurrent_queue_rep_base::n_queue+1); + page* q = tail_page; + if( is_valid_page(q) ) + q->next = invalid_page; + else + head_page = invalid_page; + tail_page = invalid_page; + } + __TBB_RETHROW(); +} + +template +concurrent_queue_rep_base::page* micro_queue::make_copy( concurrent_queue_base_v3& base, + const concurrent_queue_rep_base::page* src_page, size_t begin_in_page, size_t end_in_page, + ticket& g_index, item_constructor_t construct_item ) +{ + concurrent_queue_page_allocator& pa = base; + page* new_page = pa.allocate_page(); + new_page->next = NULL; + new_page->mask = src_page->mask; + for( ; begin_in_page!=end_in_page; ++begin_in_page, ++g_index ) + if( new_page->mask & uintptr_t(1)< +class micro_queue_pop_finalizer: no_copy { + typedef concurrent_queue_rep_base::page page; + ticket my_ticket; + micro_queue& my_queue; + page* my_page; + concurrent_queue_page_allocator& allocator; +public: + micro_queue_pop_finalizer( micro_queue& queue, concurrent_queue_base_v3& b, ticket k, page* p ) : + my_ticket(k), my_queue(queue), my_page(p), allocator(b) + {} + ~micro_queue_pop_finalizer() ; +}; + +template +micro_queue_pop_finalizer::~micro_queue_pop_finalizer() { + page* p = my_page; + if( is_valid_page(p) ) { + spin_mutex::scoped_lock lock( my_queue.page_mutex ); + page* q = p->next; + my_queue.head_page = q; + if( !is_valid_page(q) ) { + my_queue.tail_page = NULL; + } + } + itt_store_word_with_release(my_queue.head_counter, my_ticket); + if( is_valid_page(p) ) { + allocator.deallocate_page( p ); + } +} + +#if _MSC_VER && !defined(__INTEL_COMPILER) +#pragma warning( pop ) +#endif // warning 4146 is back + +template class concurrent_queue_iterator_rep ; +template class concurrent_queue_iterator_base_v3; + +//! representation of concurrent_queue_base +/** + * the class inherits from concurrent_queue_rep_base and defines an array of micro_queue's + */ +template +struct concurrent_queue_rep : public concurrent_queue_rep_base { + micro_queue array[n_queue]; + + //! Map ticket to an array index + static size_t index( ticket k ) { + return k*phi%n_queue; + } + + micro_queue& choose( ticket k ) { + // The formula here approximates LRU in a cache-oblivious way. + return array[index(k)]; + } +}; + +//! base class of concurrent_queue +/** + * The class implements the interface defined by concurrent_queue_page_allocator + * and has a pointer to an instance of concurrent_queue_rep. + */ +template +class concurrent_queue_base_v3: public concurrent_queue_page_allocator { +private: + //! Internal representation + concurrent_queue_rep* my_rep; + + friend struct concurrent_queue_rep; + friend class micro_queue; + friend class concurrent_queue_iterator_rep; + friend class concurrent_queue_iterator_base_v3; + +protected: + typedef typename concurrent_queue_rep::page page; + +private: + typedef typename micro_queue::padded_page padded_page; + typedef typename micro_queue::item_constructor_t item_constructor_t; + + virtual page *allocate_page() __TBB_override { + concurrent_queue_rep& r = *my_rep; + size_t n = sizeof(padded_page) + (r.items_per_page-1)*sizeof(T); + return reinterpret_cast(allocate_block ( n )); + } + + virtual void deallocate_page( concurrent_queue_rep_base::page *p ) __TBB_override { + concurrent_queue_rep& r = *my_rep; + size_t n = sizeof(padded_page) + (r.items_per_page-1)*sizeof(T); + deallocate_block( reinterpret_cast(p), n ); + } + + //! 
custom allocator + virtual void *allocate_block( size_t n ) = 0; + + //! custom de-allocator + virtual void deallocate_block( void *p, size_t n ) = 0; + +protected: + concurrent_queue_base_v3(); + + virtual ~concurrent_queue_base_v3() { +#if TBB_USE_ASSERT + size_t nq = my_rep->n_queue; + for( size_t i=0; iarray[i].tail_page==NULL, "pages were not freed properly" ); +#endif /* TBB_USE_ASSERT */ + cache_aligned_allocator >().deallocate(my_rep,1); + } + + //! Enqueue item at tail of queue + void internal_push( const void* src, item_constructor_t construct_item ) { + concurrent_queue_rep& r = *my_rep; + ticket k = r.tail_counter++; + r.choose(k).push( src, k, *this, construct_item ); + } + + //! Attempt to dequeue item from queue. + /** NULL if there was no item to dequeue. */ + bool internal_try_pop( void* dst ) ; + + //! Get size of queue; result may be invalid if queue is modified concurrently + size_t internal_size() const ; + + //! check if the queue is empty; thread safe + bool internal_empty() const ; + + //! free any remaining pages + /* note that the name may be misleading, but it remains so due to a historical accident. */ + void internal_finish_clear() ; + + //! Obsolete + void internal_throw_exception() const { + throw_exception( eid_bad_alloc ); + } + + //! copy or move internal representation + void assign( const concurrent_queue_base_v3& src, item_constructor_t construct_item ) ; + +#if __TBB_CPP11_RVALUE_REF_PRESENT + //! swap internal representation + void internal_swap( concurrent_queue_base_v3& src ) { + std::swap( my_rep, src.my_rep ); + } +#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */ +}; + +template +concurrent_queue_base_v3::concurrent_queue_base_v3() { + const size_t item_size = sizeof(T); + my_rep = cache_aligned_allocator >().allocate(1); + __TBB_ASSERT( (size_t)my_rep % NFS_GetLineSize()==0, "alignment error" ); + __TBB_ASSERT( (size_t)&my_rep->head_counter % NFS_GetLineSize()==0, "alignment error" ); + __TBB_ASSERT( (size_t)&my_rep->tail_counter % NFS_GetLineSize()==0, "alignment error" ); + __TBB_ASSERT( (size_t)&my_rep->array % NFS_GetLineSize()==0, "alignment error" ); + memset(static_cast(my_rep),0,sizeof(concurrent_queue_rep)); + my_rep->item_size = item_size; + my_rep->items_per_page = item_size<= 8 ? 32 : + item_size<= 16 ? 16 : + item_size<= 32 ? 8 : + item_size<= 64 ? 4 : + item_size<=128 ? 2 : + 1; +} + +template +bool concurrent_queue_base_v3::internal_try_pop( void* dst ) { + concurrent_queue_rep& r = *my_rep; + ticket k; + do { + k = r.head_counter; + for(;;) { + if( (ptrdiff_t)(r.tail_counter-k)<=0 ) { + // Queue is empty + return false; + } + // Queue had item with ticket k when we looked. Attempt to get that item. + ticket tk=k; +#if defined(_MSC_VER) && defined(_Wp64) + #pragma warning (push) + #pragma warning (disable: 4267) +#endif + k = r.head_counter.compare_and_swap( tk+1, tk ); +#if defined(_MSC_VER) && defined(_Wp64) + #pragma warning (pop) +#endif + if( k==tk ) + break; + // Another thread snatched the item, retry. + } + } while( !r.choose( k ).pop( dst, k, *this ) ); + return true; +} + +template +size_t concurrent_queue_base_v3::internal_size() const { + concurrent_queue_rep& r = *my_rep; + __TBB_ASSERT( sizeof(ptrdiff_t)<=sizeof(size_t), NULL ); + ticket hc = r.head_counter; + size_t nie = r.n_invalid_entries; + ticket tc = r.tail_counter; + __TBB_ASSERT( hc!=tc || !nie, NULL ); + ptrdiff_t sz = tc-hc-nie; + return sz<0 ? 
0 : size_t(sz); +} + +template +bool concurrent_queue_base_v3::internal_empty() const { + concurrent_queue_rep& r = *my_rep; + ticket tc = r.tail_counter; + ticket hc = r.head_counter; + // if tc!=r.tail_counter, the queue was not empty at some point between the two reads. + return tc==r.tail_counter && tc==hc+r.n_invalid_entries ; +} + +template +void concurrent_queue_base_v3::internal_finish_clear() { + concurrent_queue_rep& r = *my_rep; + size_t nq = r.n_queue; + for( size_t i=0; i +void concurrent_queue_base_v3::assign( const concurrent_queue_base_v3& src, + item_constructor_t construct_item ) +{ + concurrent_queue_rep& r = *my_rep; + r.items_per_page = src.my_rep->items_per_page; + + // copy concurrent_queue_rep data + r.head_counter = src.my_rep->head_counter; + r.tail_counter = src.my_rep->tail_counter; + r.n_invalid_entries = src.my_rep->n_invalid_entries; + + // copy or move micro_queues + for( size_t i = 0; i < r.n_queue; ++i ) + r.array[i].assign( src.my_rep->array[i], *this, construct_item); + + __TBB_ASSERT( r.head_counter==src.my_rep->head_counter && r.tail_counter==src.my_rep->tail_counter, + "the source concurrent queue should not be concurrently modified." ); +} + +template class concurrent_queue_iterator; + +template +class concurrent_queue_iterator_rep: no_assign { + typedef typename micro_queue::padded_page padded_page; +public: + ticket head_counter; + const concurrent_queue_base_v3& my_queue; + typename concurrent_queue_base_v3::page* array[concurrent_queue_rep::n_queue]; + concurrent_queue_iterator_rep( const concurrent_queue_base_v3& queue ) : + head_counter(queue.my_rep->head_counter), + my_queue(queue) + { + for( size_t k=0; k::n_queue; ++k ) + array[k] = queue.my_rep->array[k].head_page; + } + + //! Set item to point to kth element. Return true if at end of queue or item is marked valid; false otherwise. + bool get_item( T*& item, size_t k ) ; +}; + +template +bool concurrent_queue_iterator_rep::get_item( T*& item, size_t k ) { + if( k==my_queue.my_rep->tail_counter ) { + item = NULL; + return true; + } else { + typename concurrent_queue_base_v3::page* p = array[concurrent_queue_rep::index(k)]; + __TBB_ASSERT(p,NULL); + size_t i = modulo_power_of_two( k/concurrent_queue_rep::n_queue, my_queue.my_rep->items_per_page ); + item = µ_queue::get_ref(*p,i); + return (p->mask & uintptr_t(1)< +class concurrent_queue_iterator_base_v3 { + //! Represents concurrent_queue over which we are iterating. + /** NULL if one past last element in queue. */ + concurrent_queue_iterator_rep* my_rep; + + template + friend bool operator==( const concurrent_queue_iterator& i, const concurrent_queue_iterator& j ); + + template + friend bool operator!=( const concurrent_queue_iterator& i, const concurrent_queue_iterator& j ); +protected: + //! Pointer to current item + Value* my_item; + + //! Default constructor + concurrent_queue_iterator_base_v3() : my_rep(NULL), my_item(NULL) { +#if __TBB_GCC_OPTIMIZER_ORDERING_BROKEN + __TBB_compiler_fence(); +#endif + } + + //! Copy constructor + concurrent_queue_iterator_base_v3( const concurrent_queue_iterator_base_v3& i ) + : my_rep(NULL), my_item(NULL) { + assign(i); + } + + concurrent_queue_iterator_base_v3& operator=( const concurrent_queue_iterator_base_v3& i ) { + assign(i); + return *this; + } + + //! Construct iterator pointing to head of queue. + concurrent_queue_iterator_base_v3( const concurrent_queue_base_v3& queue ) ; + + //! Assignment + void assign( const concurrent_queue_iterator_base_v3& other ) ; + + //! 
Advance iterator one step towards tail of queue. + void advance() ; + + //! Destructor + ~concurrent_queue_iterator_base_v3() { + cache_aligned_allocator >().deallocate(my_rep, 1); + my_rep = NULL; + } +}; + +template +concurrent_queue_iterator_base_v3::concurrent_queue_iterator_base_v3( const concurrent_queue_base_v3& queue ) { + my_rep = cache_aligned_allocator >().allocate(1); + new( my_rep ) concurrent_queue_iterator_rep(queue); + size_t k = my_rep->head_counter; + if( !my_rep->get_item(my_item, k) ) advance(); +} + +template +void concurrent_queue_iterator_base_v3::assign( const concurrent_queue_iterator_base_v3& other ) { + if( my_rep!=other.my_rep ) { + if( my_rep ) { + cache_aligned_allocator >().deallocate(my_rep, 1); + my_rep = NULL; + } + if( other.my_rep ) { + my_rep = cache_aligned_allocator >().allocate(1); + new( my_rep ) concurrent_queue_iterator_rep( *other.my_rep ); + } + } + my_item = other.my_item; +} + +template +void concurrent_queue_iterator_base_v3::advance() { + __TBB_ASSERT( my_item, "attempt to increment iterator past end of queue" ); + size_t k = my_rep->head_counter; + const concurrent_queue_base_v3& queue = my_rep->my_queue; +#if TBB_USE_ASSERT + Value* tmp; + my_rep->get_item(tmp,k); + __TBB_ASSERT( my_item==tmp, NULL ); +#endif /* TBB_USE_ASSERT */ + size_t i = modulo_power_of_two( k/concurrent_queue_rep::n_queue, queue.my_rep->items_per_page ); + if( i==queue.my_rep->items_per_page-1 ) { + typename concurrent_queue_base_v3::page*& root = my_rep->array[concurrent_queue_rep::index(k)]; + root = root->next; + } + // advance k + my_rep->head_counter = ++k; + if( !my_rep->get_item(my_item, k) ) advance(); +} + +//! Similar to C++0x std::remove_cv +/** "tbb_" prefix added to avoid overload confusion with C++0x implementations. */ +template struct tbb_remove_cv {typedef T type;}; +template struct tbb_remove_cv {typedef T type;}; +template struct tbb_remove_cv {typedef T type;}; +template struct tbb_remove_cv {typedef T type;}; + +//! Meets requirements of a forward iterator for STL. +/** Value is either the T or const T type of the container. + @ingroup containers */ +template +class concurrent_queue_iterator: public concurrent_queue_iterator_base_v3::type>, + public std::iterator { +#if !__TBB_TEMPLATE_FRIENDS_BROKEN + template + friend class ::tbb::strict_ppl::concurrent_queue; +#else +public: +#endif + //! Construct iterator pointing to head of queue. + explicit concurrent_queue_iterator( const concurrent_queue_base_v3::type>& queue ) : + concurrent_queue_iterator_base_v3::type>(queue) + { + } + +public: + concurrent_queue_iterator() {} + + /** If Value==Container::value_type, then this routine is the copy constructor. + If Value==const Container::value_type, then this routine is a conversion constructor. */ + concurrent_queue_iterator( const concurrent_queue_iterator& other ) : + concurrent_queue_iterator_base_v3::type>(other) + {} + + //! Iterator assignment + concurrent_queue_iterator& operator=( const concurrent_queue_iterator& other ) { + concurrent_queue_iterator_base_v3::type>::operator=(other); + return *this; + } + + //! Reference to current item + Value& operator*() const { + return *static_cast(this->my_item); + } + + Value* operator->() const {return &operator*();} + + //! Advance to next item in queue + concurrent_queue_iterator& operator++() { + this->advance(); + return *this; + } + + //! 
Post increment + Value* operator++(int) { + Value* result = &operator*(); + operator++(); + return result; + } +}; // concurrent_queue_iterator + + +template +bool operator==( const concurrent_queue_iterator& i, const concurrent_queue_iterator& j ) { + return i.my_item==j.my_item; +} + +template +bool operator!=( const concurrent_queue_iterator& i, const concurrent_queue_iterator& j ) { + return i.my_item!=j.my_item; +} + +} // namespace internal + +//! @endcond + +} // namespace strict_ppl + +//! @cond INTERNAL +namespace internal { + +class concurrent_queue_rep; +class concurrent_queue_iterator_rep; +class concurrent_queue_iterator_base_v3; +template class concurrent_queue_iterator; + +//! For internal use only. +/** Type-independent portion of concurrent_queue. + @ingroup containers */ +class concurrent_queue_base_v3: no_copy { +private: + //! Internal representation + concurrent_queue_rep* my_rep; + + friend class concurrent_queue_rep; + friend struct micro_queue; + friend class micro_queue_pop_finalizer; + friend class concurrent_queue_iterator_rep; + friend class concurrent_queue_iterator_base_v3; +protected: + //! Prefix on a page + struct page { + page* next; + uintptr_t mask; + }; + + //! Capacity of the queue + ptrdiff_t my_capacity; + + //! Always a power of 2 + size_t items_per_page; + + //! Size of an item + size_t item_size; + + enum copy_specifics { copy, move }; + +#if __TBB_PROTECTED_NESTED_CLASS_BROKEN +public: +#endif + template + struct padded_page: page { + //! Not defined anywhere - exists to quiet warnings. + padded_page(); + //! Not defined anywhere - exists to quiet warnings. + void operator=( const padded_page& ); + //! Must be last field. + T last; + }; + +private: + virtual void copy_item( page& dst, size_t index, const void* src ) = 0; + virtual void assign_and_destroy_item( void* dst, page& src, size_t index ) = 0; +protected: + __TBB_EXPORTED_METHOD concurrent_queue_base_v3( size_t item_size ); + virtual __TBB_EXPORTED_METHOD ~concurrent_queue_base_v3(); + + //! Enqueue item at tail of queue using copy operation + void __TBB_EXPORTED_METHOD internal_push( const void* src ); + + //! Dequeue item from head of queue + void __TBB_EXPORTED_METHOD internal_pop( void* dst ); + + //! Abort all pending queue operations + void __TBB_EXPORTED_METHOD internal_abort(); + + //! Attempt to enqueue item onto queue using copy operation + bool __TBB_EXPORTED_METHOD internal_push_if_not_full( const void* src ); + + //! Attempt to dequeue item from queue. + /** NULL if there was no item to dequeue. */ + bool __TBB_EXPORTED_METHOD internal_pop_if_present( void* dst ); + + //! Get size of queue + ptrdiff_t __TBB_EXPORTED_METHOD internal_size() const; + + //! Check if the queue is empty + bool __TBB_EXPORTED_METHOD internal_empty() const; + + //! Set the queue capacity + void __TBB_EXPORTED_METHOD internal_set_capacity( ptrdiff_t capacity, size_t element_size ); + + //! custom allocator + virtual page *allocate_page() = 0; + + //! custom de-allocator + virtual void deallocate_page( page *p ) = 0; + + //! free any remaining pages + /* note that the name may be misleading, but it remains so due to a historical accident. */ + void __TBB_EXPORTED_METHOD internal_finish_clear() ; + + //! throw an exception + void __TBB_EXPORTED_METHOD internal_throw_exception() const; + + //! copy internal representation + void __TBB_EXPORTED_METHOD assign( const concurrent_queue_base_v3& src ) ; + +#if __TBB_CPP11_RVALUE_REF_PRESENT + //! 
swap queues + void internal_swap( concurrent_queue_base_v3& src ) { + std::swap( my_capacity, src.my_capacity ); + std::swap( items_per_page, src.items_per_page ); + std::swap( item_size, src.item_size ); + std::swap( my_rep, src.my_rep ); + } +#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */ + + //! Enqueues item at tail of queue using specified operation (copy or move) + void internal_insert_item( const void* src, copy_specifics op_type ); + + //! Attempts to enqueue at tail of queue using specified operation (copy or move) + bool internal_insert_if_not_full( const void* src, copy_specifics op_type ); + + //! Assigns one queue to another using specified operation (copy or move) + void internal_assign( const concurrent_queue_base_v3& src, copy_specifics op_type ); +private: + virtual void copy_page_item( page& dst, size_t dindex, const page& src, size_t sindex ) = 0; +}; + +//! For internal use only. +/** Backward compatible modification of concurrent_queue_base_v3 + @ingroup containers */ +class concurrent_queue_base_v8: public concurrent_queue_base_v3 { +protected: + concurrent_queue_base_v8( size_t item_sz ) : concurrent_queue_base_v3( item_sz ) {} + + //! move items + void __TBB_EXPORTED_METHOD move_content( concurrent_queue_base_v8& src ) ; + + //! Attempt to enqueue item onto queue using move operation + bool __TBB_EXPORTED_METHOD internal_push_move_if_not_full( const void* src ); + + //! Enqueue item at tail of queue using move operation + void __TBB_EXPORTED_METHOD internal_push_move( const void* src ); +private: + friend struct micro_queue; + virtual void move_page_item( page& dst, size_t dindex, const page& src, size_t sindex ) = 0; + virtual void move_item( page& dst, size_t index, const void* src ) = 0; +}; + +//! Type-independent portion of concurrent_queue_iterator. +/** @ingroup containers */ +class concurrent_queue_iterator_base_v3 { + //! concurrent_queue over which we are iterating. + /** NULL if one past last element in queue. */ + concurrent_queue_iterator_rep* my_rep; + + template + friend bool operator==( const concurrent_queue_iterator& i, const concurrent_queue_iterator& j ); + + template + friend bool operator!=( const concurrent_queue_iterator& i, const concurrent_queue_iterator& j ); + + void initialize( const concurrent_queue_base_v3& queue, size_t offset_of_data ); +protected: + //! Pointer to current item + void* my_item; + + //! Default constructor + concurrent_queue_iterator_base_v3() : my_rep(NULL), my_item(NULL) {} + + //! Copy constructor + concurrent_queue_iterator_base_v3( const concurrent_queue_iterator_base_v3& i ) : my_rep(NULL), my_item(NULL) { + assign(i); + } + + concurrent_queue_iterator_base_v3& operator=( const concurrent_queue_iterator_base_v3& i ) { + assign(i); + return *this; + } + + //! Obsolete entry point for constructing iterator pointing to head of queue. + /** Does not work correctly for SSE types. */ + __TBB_EXPORTED_METHOD concurrent_queue_iterator_base_v3( const concurrent_queue_base_v3& queue ); + + //! Construct iterator pointing to head of queue. + __TBB_EXPORTED_METHOD concurrent_queue_iterator_base_v3( const concurrent_queue_base_v3& queue, size_t offset_of_data ); + + //! Assignment + void __TBB_EXPORTED_METHOD assign( const concurrent_queue_iterator_base_v3& i ); + + //! Advance iterator one step towards tail of queue. + void __TBB_EXPORTED_METHOD advance(); + + //! Destructor + __TBB_EXPORTED_METHOD ~concurrent_queue_iterator_base_v3(); +}; + +typedef concurrent_queue_iterator_base_v3 concurrent_queue_iterator_base; + +//! 
Meets requirements of a forward iterator for STL. +/** Value is either the T or const T type of the container. + @ingroup containers */ +template +class concurrent_queue_iterator: public concurrent_queue_iterator_base, + public std::iterator { + +#if !__TBB_TEMPLATE_FRIENDS_BROKEN + template + friend class ::tbb::concurrent_bounded_queue; +#else +public: +#endif + + //! Construct iterator pointing to head of queue. + explicit concurrent_queue_iterator( const concurrent_queue_base_v3& queue ) : + concurrent_queue_iterator_base_v3(queue,__TBB_offsetof(concurrent_queue_base_v3::padded_page,last)) + { + } + +public: + concurrent_queue_iterator() {} + + /** If Value==Container::value_type, then this routine is the copy constructor. + If Value==const Container::value_type, then this routine is a conversion constructor. */ + concurrent_queue_iterator( const concurrent_queue_iterator& other ) : + concurrent_queue_iterator_base_v3(other) + {} + + //! Iterator assignment + concurrent_queue_iterator& operator=( const concurrent_queue_iterator& other ) { + concurrent_queue_iterator_base_v3::operator=(other); + return *this; + } + + //! Reference to current item + Value& operator*() const { + return *static_cast(my_item); + } + + Value* operator->() const {return &operator*();} + + //! Advance to next item in queue + concurrent_queue_iterator& operator++() { + advance(); + return *this; + } + + //! Post increment + Value* operator++(int) { + Value* result = &operator*(); + operator++(); + return result; + } +}; // concurrent_queue_iterator + + +template +bool operator==( const concurrent_queue_iterator& i, const concurrent_queue_iterator& j ) { + return i.my_item==j.my_item; +} + +template +bool operator!=( const concurrent_queue_iterator& i, const concurrent_queue_iterator& j ) { + return i.my_item!=j.my_item; +} + +} // namespace internal; + +//! @endcond + +} // namespace tbb + +#endif /* __TBB__concurrent_queue_impl_H */ diff --git a/ohos/arm64-v8a/include/tbb/internal/_concurrent_skip_list_impl.h b/ohos/arm64-v8a/include/tbb/internal/_concurrent_skip_list_impl.h new file mode 100644 index 00000000..8cc1a11d --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/internal/_concurrent_skip_list_impl.h @@ -0,0 +1,1085 @@ +/* + Copyright (c) 2019-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_concurrent_skip_list_H +#define __TBB_concurrent_skip_list_H + +#if !defined(__TBB_concurrent_map_H) && !defined(__TBB_concurrent_set_H) +#error Do not #include this internal file directly; use public TBB headers instead. 
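// ---------------------------------------------------------------------------
// Usage sketch (illustrative only, not part of the header above): the classes
// in _concurrent_queue_impl.h back the public tbb::concurrent_queue declared
// in tbb/concurrent_queue.h. A push claims a ticket, the ticket selects one of
// the n_queue micro-queues, and try_pop races on head_counter, so a producer
// and a consumer may run concurrently:
// ---------------------------------------------------------------------------
#include <thread>
#include <cstdio>
#include "tbb/concurrent_queue.h"

int main() {
    tbb::concurrent_queue<int> q;

    std::thread producer([&q] {
        for (int i = 0; i < 1000; ++i)
            q.push(i);                       // internal_push: ticket -> micro_queue
    });

    long long sum = 0;
    int popped = 0, value = 0;
    std::thread consumer([&] {
        while (popped < 1000) {
            if (q.try_pop(value)) {          // internal_try_pop: CAS on head_counter
                sum += value;
                ++popped;
            } else {
                std::this_thread::yield();   // queue momentarily empty
            }
        }
    });

    producer.join();
    consumer.join();
    std::printf("popped %d items, sum=%lld\n", popped, sum);
    return 0;
}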
+#endif + +#include "../tbb_config.h" +#include "../tbb_stddef.h" +#include "../tbb_allocator.h" +#include "../spin_mutex.h" +#include "../tbb_exception.h" +#include "../enumerable_thread_specific.h" +#include "_allocator_traits.h" +#include "_template_helpers.h" +#include "_node_handle_impl.h" +#include // Need std::pair +#include +#include +#include // Need std::allocator_traits +#include +#include +#include +#include +#include +#include +#include +#include + +#if _MSC_VER +#pragma warning(disable: 4189) // warning 4189 -- local variable is initialized but not referenced +#pragma warning(disable: 4127) // warning 4127 -- while (true) has a constant expression in it +#endif + +namespace tbb { +namespace interface10 { +namespace internal { + +template +class skip_list_node { + +public: + using value_type = Value; + using size_type = std::size_t; + using reference = value_type & ; + using const_reference = const value_type & ; + using pointer = value_type * ; + using const_pointer = const value_type *; + using node_pointer = skip_list_node * ; + using atomic_node_pointer = std::atomic; + + using mutex_type = Mutex; + using lock_type = std::unique_lock; + + skip_list_node(size_type levels) : my_height(levels), my_fullyLinked(false) { + for (size_type lev = 0; lev < my_height; ++lev) + new(&my_next(lev)) atomic_node_pointer(nullptr); + __TBB_ASSERT(height() == levels, "Wrong node height"); + } + + ~skip_list_node() { + for(size_type lev = 0; lev < my_height; ++lev) + my_next(lev).~atomic(); + } + + skip_list_node(const skip_list_node&) = delete; + + skip_list_node(skip_list_node&&) = delete; + + skip_list_node& operator=(const skip_list_node&) = delete; + + pointer storage() { + return reinterpret_cast(&my_val); + } + + reference value() { + return *storage(); + } + + node_pointer next(size_type level) const { + __TBB_ASSERT(level < height(), "Cannot get next on the level greater than height"); + return my_next(level).load(std::memory_order_acquire); + } + + void set_next(size_type level, node_pointer next) { + __TBB_ASSERT(level < height(), "Cannot set next on the level greater than height"); + + my_next(level).store(next, std::memory_order_release); + } + + /** @return number of layers */ + size_type height() const { + return my_height; + } + + bool fully_linked() const { + return my_fullyLinked.load(std::memory_order_acquire); + } + + void mark_linked() { + my_fullyLinked.store(true, std::memory_order_release); + } + + lock_type acquire() { + return lock_type(my_mutex); + } + +private: + using aligned_storage_type = typename std::aligned_storage::type; + + atomic_node_pointer& my_next(size_type level) { + atomic_node_pointer* arr = reinterpret_cast(this + 1); + return arr[level]; + } + + const atomic_node_pointer& my_next(size_type level) const { + const atomic_node_pointer* arr = reinterpret_cast(this + 1); + return arr[level]; + } + + mutex_type my_mutex; + aligned_storage_type my_val; + size_type my_height; + std::atomic_bool my_fullyLinked; +}; + +template +class skip_list_iterator { + using node_type = NodeType; + using node_ptr = node_type*; +public: + using iterator_category = std::forward_iterator_tag; + using value_type = typename node_type::value_type; + using difference_type = std::ptrdiff_t; + using pointer = typename std::conditional::type; + using reference = typename std::conditional::type; + + skip_list_iterator() : my_node_ptr(nullptr) {} + + // TODO: the code above does not compile in VS2015 (seems like a bug) - consider enabling it for all other platforms + // template 
::type> + // skip_list_iterator(const skip_list_iterator& other) : my_node_ptr(other.my_node_ptr) {} + + // skip_list_iterator(const skip_list_iterator& other) : my_node_ptr(other.my_node_ptr) {} + + skip_list_iterator(const skip_list_iterator& other) : my_node_ptr(other.my_node_ptr) {} + + skip_list_iterator& operator=(const skip_list_iterator& other) { + my_node_ptr = other.my_node_ptr; + return *this; + } + + template ::type> + skip_list_iterator(const skip_list_iterator& other) : my_node_ptr(other.my_node_ptr) {} + + reference operator*() const { return *(my_node_ptr->storage()); } + pointer operator->() const { return &**this; } + + skip_list_iterator& operator++() { + __TBB_ASSERT(my_node_ptr != nullptr, NULL); + my_node_ptr = my_node_ptr->next(0); + return *this; + } + + skip_list_iterator operator++(int) { + skip_list_iterator tmp = *this; + ++*this; + return tmp; + } + +private: + skip_list_iterator(node_type* n) : my_node_ptr(n) {} + + node_ptr my_node_ptr; + + template + friend class concurrent_skip_list; + + friend class skip_list_iterator; + + friend class const_range; + friend class range; + + template + friend bool operator==(const skip_list_iterator&, const skip_list_iterator&); + + template + friend bool operator!=(const skip_list_iterator&, const skip_list_iterator&); +}; + +template +bool operator==(const skip_list_iterator& lhs, const skip_list_iterator& rhs) { + return lhs.my_node_ptr == rhs.my_node_ptr; +} + +template +bool operator!=(const skip_list_iterator& lhs, const skip_list_iterator& rhs) { + return lhs.my_node_ptr != rhs.my_node_ptr; +} + +template +class concurrent_skip_list { +protected: + using traits_type = Traits; + using allocator_type = typename traits_type::allocator_type; + using allocator_traits_type = std::allocator_traits; + using key_compare = typename traits_type::compare_type; + using value_compare = typename traits_type::value_compare; + using key_type = typename traits_type::key_type; + using value_type = typename traits_type::value_type; + using node_type = typename traits_type::node_type; + using list_node_type = skip_list_node; + + using iterator = skip_list_iterator; + using const_iterator = skip_list_iterator; + using reverse_iterator = std::reverse_iterator; + using const_reverse_iterator = std::reverse_iterator; + + using reference = value_type&; + using const_reference = const value_type&; + using pointer = typename allocator_traits_type::pointer; + using const_pointer = typename allocator_traits_type::const_pointer; + using size_type = std::size_t; + using difference_type = std::ptrdiff_t; + + using random_level_generator_type = typename traits_type::random_level_generator_type; + using node_allocator_type = typename std::allocator_traits::template rebind_alloc; + using node_allocator_traits = typename std::allocator_traits::template rebind_traits; + using node_ptr = list_node_type*; + + static constexpr size_type MAX_LEVEL = traits_type::MAX_LEVEL; + + using array_type = std::array; + using lock_array = std::array; + +public: + static bool const allow_multimapping = traits_type::allow_multimapping; + + /** + * Default constructor. Construct empty skip list. 
+ */ + concurrent_skip_list() : my_size(0) { + create_dummy_head(); + } + + explicit concurrent_skip_list(const key_compare& comp, const allocator_type& alloc = allocator_type()) + : my_node_allocator(alloc), my_compare(comp), my_size(0) + { + create_dummy_head(); + } + + template + concurrent_skip_list(InputIt first, InputIt last, const key_compare& comp = key_compare(), + const allocator_type& alloc = allocator_type()) + : my_node_allocator(alloc), my_compare(comp), my_size(0) + { + create_dummy_head(); + internal_copy(first, last); + } + + /** Copy constructor */ + concurrent_skip_list(const concurrent_skip_list& other) + : my_node_allocator(node_allocator_traits::select_on_container_copy_construction(other.get_allocator())), + my_compare(other.my_compare), my_rnd_generator(other.my_rnd_generator), my_size(0) + { + create_dummy_head(); + internal_copy(other); + __TBB_ASSERT(my_size == other.my_size, "Wrong size of copy-constructed container"); + } + + concurrent_skip_list(const concurrent_skip_list& other, const allocator_type& alloc) + : my_node_allocator(alloc), my_compare(other.my_compare), + my_rnd_generator(other.my_rnd_generator), my_size(0) + { + create_dummy_head(); + internal_copy(other); + __TBB_ASSERT(my_size == other.my_size, "Wrong size of copy-constructed container"); + } + + concurrent_skip_list(concurrent_skip_list&& other) + : my_node_allocator(std::move(other.my_node_allocator)), my_compare(other.my_compare), + my_rnd_generator(other.my_rnd_generator) + { + internal_move(std::move(other)); + } + + concurrent_skip_list(concurrent_skip_list&& other, const allocator_type& alloc) + : my_node_allocator(alloc), my_compare(other.my_compare), + my_rnd_generator(other.my_rnd_generator) + { + if (alloc == other.get_allocator()) { + internal_move(std::move(other)); + } else { + my_size = 0; + create_dummy_head(); + internal_copy(std::make_move_iterator(other.begin()), std::make_move_iterator(other.end())); + } + } + + ~concurrent_skip_list() { + clear(); + delete_dummy_head(); + } + + concurrent_skip_list& operator=(const concurrent_skip_list& other) { + if (this != &other) { + using pocca_type = typename node_allocator_traits::propagate_on_container_copy_assignment; + clear(); + tbb::internal::allocator_copy_assignment(my_node_allocator, other.my_node_allocator, pocca_type()); + my_compare = other.my_compare; + my_rnd_generator = other.my_rnd_generator; + internal_copy(other); + } + return *this; + } + + concurrent_skip_list& operator=(concurrent_skip_list&& other) { + if (this != &other) { + using pocma_type = typename node_allocator_traits::propagate_on_container_move_assignment; + clear(); + my_compare = other.my_compare; + my_rnd_generator = other.my_rnd_generator; + internal_move_assign(std::move(other), pocma_type()); + } + return *this; + } + + concurrent_skip_list& operator=(std::initializer_list il) + { + clear(); + insert(il.begin(),il.end()); + return *this; + } + + std::pair insert(const value_type& value) { + return internal_insert(value); + } + + std::pair insert(value_type&& value) { + return internal_insert(std::move(value)); + } + + iterator insert(const_iterator, const_reference value) { + // Ignore hint + return insert(value).first; + } + + iterator insert(const_iterator, value_type&& value) { + // Ignore hint + return insert(std::move(value)).first; + } + + template + void insert(InputIterator first, InputIterator last) { + for (InputIterator it = first; it != last; ++it) + insert(*it); + } + + void insert(std::initializer_list init) { + insert(init.begin(), 
init.end()); + } + + std::pair insert(node_type&& nh) { + if(!nh.empty()) { + std::pair insert_result = internal_insert_node(nh.my_node); + if(insert_result.second) { + nh.deactivate(); + } + return insert_result; + } + return std::pair(end(), false); + } + + iterator insert(const_iterator, node_type&& nh) { + // Ignore hint + return insert(std::move(nh)).first; + } + + template + std::pair emplace(Args&&... args) { + return internal_insert(std::forward(args)...); + } + + template + iterator emplace_hint(const_iterator, Args&&... args) { + // Ignore hint + return emplace(std::forward(args)...).first; + } + + iterator unsafe_erase(iterator pos) { + std::pair extract_result = internal_extract(pos); + if(extract_result.first) { // node was extracted + delete_node(extract_result.first); + return iterator(extract_result.second); + } + return end(); + } + + iterator unsafe_erase(const_iterator pos) { + return unsafe_erase(get_iterator(pos)); + } + + template , + typename = typename std::enable_if::value && + !std::is_convertible::value>::type> + size_type unsafe_erase(const K& key) { + std::pair range = equal_range(key); + size_type sz = std::distance(range.first, range.second); + unsafe_erase(range.first, range.second); + return sz; + } + + iterator unsafe_erase(const_iterator first, const_iterator last) { + while(first != last) { + first = unsafe_erase(get_iterator(first)); + } + return get_iterator(first); + } + + size_type unsafe_erase(const key_type& key) { + std::pair range = equal_range(key); + size_type sz = std::distance(range.first, range.second); + unsafe_erase(range.first, range.second); + return sz; + } + + node_type unsafe_extract(const_iterator pos) { + std::pair extract_result = internal_extract(pos); + return extract_result.first ? node_type(extract_result.first) : node_type(); + } + + node_type unsafe_extract(const key_type& key) { + return unsafe_extract(find(key)); + } + + iterator lower_bound(const key_type& key) { + return internal_get_bound(key, my_compare); + } + + const_iterator lower_bound(const key_type& key) const { + return internal_get_bound(key, my_compare); + } + + template > + iterator lower_bound(const K& key) { + return internal_get_bound(key, my_compare); + } + + template > + const_iterator lower_bound(const K& key) const { + return internal_get_bound(key, my_compare); + } + + iterator upper_bound(const key_type& key) { + return internal_get_bound(key, not_greater_compare(my_compare)); + } + + const_iterator upper_bound(const key_type& key) const { + return internal_get_bound(key, not_greater_compare(my_compare)); + } + + template > + iterator upper_bound(const K& key) { + return internal_get_bound(key, not_greater_compare(my_compare)); + } + + template > + const_iterator upper_bound(const K& key) const { + return internal_get_bound(key, not_greater_compare(my_compare)); + } + + iterator find(const key_type& key) { + return internal_find(key); + } + + const_iterator find(const key_type& key) const { + return internal_find(key); + } + + template > + iterator find(const K& key) { + return internal_find(key); + } + + template > + const_iterator find(const K& key) const { + return internal_find(key); + } + + size_type count( const key_type& key ) const { + return internal_count(key); + } + + template > + size_type count(const K& key) const { + return internal_count(key); + } + + bool contains(const key_type& key) const { + return find(key) != end(); + } + + template > + bool contains(const K& key) const { + return find(key) != end(); + } + + void clear() noexcept 
{ + __TBB_ASSERT(dummy_head->height() > 0, NULL); + + node_ptr current = dummy_head->next(0); + while (current) { + __TBB_ASSERT(current->height() > 0, NULL); + node_ptr next = current->next(0); + delete_node(current); + current = next; + } + + my_size = 0; + for (size_type i = 0; i < dummy_head->height(); ++i) { + dummy_head->set_next(i, nullptr); + } + } + + iterator begin() { + return iterator(dummy_head->next(0)); + } + + const_iterator begin() const { + return const_iterator(dummy_head->next(0)); + } + + const_iterator cbegin() const { + return const_iterator(dummy_head->next(0)); + } + + iterator end() { + return iterator(nullptr); + } + + const_iterator end() const { + return const_iterator(nullptr); + } + + const_iterator cend() const { + return const_iterator(nullptr); + } + + size_type size() const { + return my_size.load(std::memory_order_relaxed); + } + + size_type max_size() const { + return my_node_allocator.max_size(); + } + + bool empty() const { + return 0 == size(); + } + + allocator_type get_allocator() const { + return my_node_allocator; + } + + void swap(concurrent_skip_list& other) { + using std::swap; + using pocs_type = typename node_allocator_traits::propagate_on_container_swap; + tbb::internal::allocator_swap(my_node_allocator, other.my_node_allocator, pocs_type()); + swap(my_compare, other.my_compare); + swap(my_rnd_generator, other.my_rnd_generator); + swap(dummy_head, other.dummy_head); + + size_type tmp = my_size; + my_size.store(other.my_size); + other.my_size.store(tmp); + } + + std::pair equal_range(const key_type& key) { + return std::pair(lower_bound(key), upper_bound(key)); + } + + std::pair equal_range(const key_type& key) const { + return std::pair(lower_bound(key), upper_bound(key)); + } + + template > + std::pair equal_range(const K& key) { + return std::pair(lower_bound(key), upper_bound(key)); + } + + template > + std::pair equal_range(const K& key) const { + return std::pair(lower_bound(key), upper_bound(key)); + } + + key_compare key_comp() const { return my_compare; } + + value_compare value_comp() const { return traits_type::value_comp(my_compare); } + + class const_range_type : tbb::internal::no_assign { + public: + using size_type = typename concurrent_skip_list::size_type; + using value_type = typename concurrent_skip_list::value_type; + using iterator = typename concurrent_skip_list::const_iterator; + private: + const_iterator my_end; + const_iterator my_begin; + size_type my_level; + + public: + + bool empty() const { + return my_begin.my_node_ptr->next(0) == my_end.my_node_ptr; + } + + bool is_divisible() const { + return my_level != 0 ? 
my_begin.my_node_ptr->next(my_level - 1) != my_end.my_node_ptr : false; + } + + size_type size() const { return std::distance(my_begin, my_end);} + + const_range_type( const_range_type& r, split) + : my_end(r.my_end) { + my_begin = iterator(r.my_begin.my_node_ptr->next(r.my_level - 1)); + my_level = my_begin.my_node_ptr->height(); + r.my_end = my_begin; + } + + const_range_type( const concurrent_skip_list& l) + : my_end(l.end()), my_begin(l.begin()), my_level(my_begin.my_node_ptr->height() ) {} + + iterator begin() const { return my_begin; } + iterator end() const { return my_end; } + size_t grainsize() const { return 1; } + + }; // class const_range_type + + class range_type : public const_range_type { + public: + using iterator = typename concurrent_skip_list::iterator; + + range_type(range_type& r, split) : const_range_type(r, split()) {} + range_type(const concurrent_skip_list& l) : const_range_type(l) {} + + iterator begin() const { + node_ptr node = const_range_type::begin().my_node_ptr; + return iterator(node); + } + + iterator end() const { + node_ptr node = const_range_type::end().my_node_ptr; + return iterator(node); } + }; // class range_type + + range_type range() { return range_type(*this); } + const_range_type range() const { return const_range_type(*this); } + +private: + void internal_move(concurrent_skip_list&& other) { + dummy_head = other.dummy_head; + other.dummy_head = nullptr; + other.create_dummy_head(); + + my_size = other.my_size.load(); + other.my_size = 0; + } + + static const key_type& get_key(node_ptr n) { + __TBB_ASSERT(n, NULL); + return traits_type::get_key(n->value()); + } + + template + iterator internal_find(const K& key) { + iterator it = lower_bound(key); + return (it == end() || my_compare(key, traits_type::get_key(*it))) ? end() : it; + } + + template + const_iterator internal_find(const K& key) const { + const_iterator it = lower_bound(key); + return (it == end() || my_compare(key, traits_type::get_key(*it))) ? end() : it; + } + + template + size_type internal_count( const K& key ) const { + if (allow_multimapping) { + std::pair range = equal_range(key); + return std::distance(range.first, range.second); + } + return (find(key) == end()) ? 
size_type(0) : size_type(1); + } + + /** + * Finds position on the @param level using @param cmp + * @param level - on which level search prev node + * @param prev - pointer to the start node to search + * @param key - key to search + * @param cmp - callable object to compare two objects + * (my_compare member is default comparator) + * @returns pointer to the node which is not satisfy the comparison with @param key + */ + template + pointer_type internal_find_position( size_type level, pointer_type& prev, const K& key, + const comparator& cmp) const { + __TBB_ASSERT(level < prev->height(), "Wrong level to find position"); + pointer_type curr = prev->next(level); + + while (curr && cmp(get_key(curr), key)) { + prev = curr; + __TBB_ASSERT(level < prev->height(), NULL); + curr = prev->next(level); + } + + return curr; + } + + template + void fill_prev_next_arrays(array_type& prev_nodes, array_type& next_nodes, node_ptr prev, const key_type& key, + const comparator& cmp) { + prev_nodes.fill(dummy_head); + next_nodes.fill(nullptr); + + for (size_type h = prev->height(); h > 0; --h) { + node_ptr next = internal_find_position(h - 1, prev, key, cmp); + prev_nodes[h - 1] = prev; + next_nodes[h - 1] = next; + } + } + + template + void fill_prev_next_by_ptr(array_type& prev_nodes, array_type& next_nodes, const_iterator it, const key_type& key, + const comparator& cmp) { + node_ptr prev = dummy_head; + node_ptr erase_node = it.my_node_ptr; + size_type node_height = erase_node->height(); + + for (size_type h = prev->height(); h >= node_height; --h) { + internal_find_position(h - 1, prev, key, cmp); + } + + for (size_type h = node_height; h > 0; --h) { + node_ptr curr = prev->next(h - 1); + while (const_iterator(curr) != it) { + prev = curr; + curr = prev->next(h - 1); + } + prev_nodes[h - 1] = prev; + } + + std::fill(next_nodes.begin(), next_nodes.begin() + node_height, erase_node); + } + + template + std::pair internal_insert(Args&&... args) { + node_ptr new_node = create_node(std::forward(args)...); + std::pair insert_result = internal_insert_node(new_node); + if(!insert_result.second) { + delete_node(new_node); + } + return insert_result; + } + + std::pair internal_insert_node(node_ptr new_node) { + array_type prev_nodes; + array_type next_nodes; + __TBB_ASSERT(dummy_head->height() >= new_node->height(), "Wrong height for new node"); + + do { + if (allow_multimapping) { + fill_prev_next_arrays(prev_nodes, next_nodes, dummy_head, get_key(new_node), + not_greater_compare(my_compare)); + } else { + fill_prev_next_arrays(prev_nodes, next_nodes, dummy_head, get_key(new_node), my_compare); + } + + node_ptr next = next_nodes[0]; + if (next && !allow_multimapping && !my_compare(get_key(new_node), get_key(next))) { + // TODO: do we really need to wait? 
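// A new node becomes reachable at level 0 before all of its levels are wired
// up; try_insert_node() calls mark_linked() only after every level's next
// pointer has been set. Waiting for fully_linked() here guarantees that the
// iterator returned for an already-present key refers to a completely linked
// node rather than one that is still being inserted.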
+ while (!next->fully_linked()) { + // TODO: atomic backoff + } + + return std::pair(iterator(next), false); + } + __TBB_ASSERT(allow_multimapping || !next || my_compare(get_key(new_node), get_key(next)), + "Wrong elements order"); + + } while (!try_insert_node(new_node, prev_nodes, next_nodes)); + + __TBB_ASSERT(new_node, NULL); + return std::pair(iterator(new_node), true); + } + + bool try_insert_node(node_ptr new_node, array_type& prev_nodes, array_type& next_nodes) { + __TBB_ASSERT(dummy_head->height() >= new_node->height(), NULL); + + lock_array locks; + + if (!try_lock_nodes(new_node->height(), prev_nodes, next_nodes, locks)) { + return false; + } + + __TBB_ASSERT(allow_multimapping || + ((prev_nodes[0] == dummy_head || + my_compare(get_key(prev_nodes[0]), get_key(new_node))) && + (next_nodes[0] == nullptr || my_compare(get_key(new_node), get_key(next_nodes[0])))), + "Wrong elements order"); + + for (size_type level = 0; level < new_node->height(); ++level) { + __TBB_ASSERT(prev_nodes[level]->height() > level, NULL); + __TBB_ASSERT(prev_nodes[level]->next(level) == next_nodes[level], NULL); + new_node->set_next(level, next_nodes[level]); + prev_nodes[level]->set_next(level, new_node); + } + new_node->mark_linked(); + + ++my_size; + + return true; + } + + bool try_lock_nodes(size_type height, array_type& prevs, array_type& next_nodes, lock_array& locks) { + for (size_type l = 0; l < height; ++l) { + if (l == 0 || prevs[l] != prevs[l - 1]) + locks[l] = prevs[l]->acquire(); + + node_ptr next = prevs[l]->next(l); + if ( next != next_nodes[l]) return false; + } + + return true; + } + + template + const_iterator internal_get_bound(const K& key, const comparator& cmp) const { + node_ptr prev = dummy_head; + __TBB_ASSERT(dummy_head->height() > 0, NULL); + node_ptr next = nullptr; + + for (size_type h = prev->height(); h > 0; --h) { + next = internal_find_position(h - 1, prev, key, cmp); + } + + return const_iterator(next); + } + + template + iterator internal_get_bound(const K& key, const comparator& cmp){ + node_ptr prev = dummy_head; + __TBB_ASSERT(dummy_head->height() > 0, NULL); + node_ptr next = nullptr; + + for (size_type h = prev->height(); h > 0; --h) { + next = internal_find_position(h - 1, prev, key, cmp); + } + + return iterator(next); + } + + // Returns node_ptr to the extracted node and node_ptr to the next node after the extracted + std::pair internal_extract(const_iterator it) { + if ( it != end() ) { + key_type key = traits_type::get_key(*it); + __TBB_ASSERT(dummy_head->height() > 0, NULL); + + array_type prev_nodes; + array_type next_nodes; + + fill_prev_next_by_ptr(prev_nodes, next_nodes, it, key, my_compare); + + node_ptr erase_node = next_nodes[0]; + __TBB_ASSERT(erase_node != nullptr, NULL); + node_ptr next_node = erase_node->next(0); + + if (!my_compare(key, get_key(erase_node))) { + for(size_type level = 0; level < erase_node->height(); ++level) { + __TBB_ASSERT(prev_nodes[level]->height() > level, NULL); + __TBB_ASSERT(next_nodes[level] == erase_node, NULL); + prev_nodes[level]->set_next(level, erase_node->next(level)); + } + --my_size; + return std::pair(erase_node, next_node); + } + } + return std::pair(nullptr, nullptr); + } + +protected: + template + void internal_merge(SourceType&& source) { + using source_type = typename std::decay::type; + using source_iterator = typename source_type::iterator; + __TBB_STATIC_ASSERT((std::is_same::value), "Incompatible containers cannot be merged"); + + for(source_iterator it = source.begin(); it != source.end();) { + 
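+            // Advance the loop iterator before the node is (possibly) extracted below,
+            // so that 'it' remains valid even after 'where' is unlinked from 'source'.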
source_iterator where = it++; + if (allow_multimapping || !contains(traits_type::get_key(*where))) { + std::pair extract_result = source.internal_extract(where); + + //If the insertion fails - return the node into source + node_type handle(extract_result.first); + __TBB_ASSERT(!handle.empty(), "Extracted handle in merge is empty"); + + if (!insert(std::move(handle)).second) { + source.insert(std::move(handle)); + } + handle.deactivate(); + } + } + } + +private: + void internal_copy(const concurrent_skip_list& other) { + internal_copy(other.begin(), other.end()); + } + + template + void internal_copy(Iterator first, Iterator last) { + clear(); + try { + for (auto it = first; it != last; ++it) + insert(*it); + } + catch (...) { + clear(); + delete_dummy_head(); + throw; + } + } + + /** Generate random level */ + size_type random_level() { + return my_rnd_generator(); + } + + static size_type calc_node_size(size_type height) { + return sizeof(list_node_type) + height*sizeof(typename list_node_type::atomic_node_pointer); + } + + /** Creates new node */ + template + node_ptr create_node(Args&&... args) { + size_type levels = random_level(); + + size_type sz = calc_node_size(levels); + + node_ptr node = reinterpret_cast(node_allocator_traits::allocate(my_node_allocator, sz)); + + try { + node_allocator_traits::construct(my_node_allocator, node, levels); + + } + catch(...) { + deallocate_node(node, sz); + throw; + } + + try { + node_allocator_traits::construct(my_node_allocator, node->storage(), std::forward(args)...); + } + catch (...) { + node_allocator_traits::destroy(my_node_allocator, node); + deallocate_node(node, sz); + throw; + } + + return node; + } + + void create_dummy_head() { + size_type sz = calc_node_size(MAX_LEVEL); + + dummy_head = reinterpret_cast(node_allocator_traits::allocate(my_node_allocator, sz)); + // TODO: investigate linkage fail in debug without this workaround + auto max_level = MAX_LEVEL; + + try { + node_allocator_traits::construct(my_node_allocator, dummy_head, max_level); + } + catch(...) 
{ + deallocate_node(dummy_head, sz); + throw; + } + } + + template + void delete_node(node_ptr node) { + size_type sz = calc_node_size(node->height()); + // Destroy value + if (!is_dummy) node_allocator_traits::destroy(my_node_allocator, node->storage()); + // Destroy node + node_allocator_traits::destroy(my_node_allocator, node); + // Deallocate memory + deallocate_node(node, sz); + } + + void deallocate_node(node_ptr node, size_type sz) { + node_allocator_traits::deallocate(my_node_allocator, reinterpret_cast(node), sz); + } + + void delete_dummy_head() { + delete_node(dummy_head); + } + + static iterator get_iterator(const_iterator it) { + return iterator(it.my_node_ptr); + } + + void internal_move_assign(concurrent_skip_list&& other, /*POCMA=*/std::true_type) { + delete_dummy_head(); + tbb::internal::allocator_move_assignment(my_node_allocator, other.my_node_allocator, std::true_type()); + internal_move(std::move(other)); + } + + void internal_move_assign(concurrent_skip_list&& other, /*POCMA=*/std::false_type) { + if (my_node_allocator == other.my_node_allocator) { + delete_dummy_head(); + internal_move(std::move(other)); + } else { + internal_copy(std::make_move_iterator(other.begin()), std::make_move_iterator(other.end())); + } + } + + struct not_greater_compare { + const key_compare& my_less_compare; + + not_greater_compare(const key_compare& less_compare) : my_less_compare(less_compare) {} + + template + bool operator()(const K1& first, const K2& second) const { + return !my_less_compare(second, first); + } + }; + + node_allocator_type my_node_allocator; + key_compare my_compare; + random_level_generator_type my_rnd_generator; + node_ptr dummy_head; + + template + friend class concurrent_skip_list; + + std::atomic my_size; +}; // class concurrent_skip_list + +template +class concurrent_geometric_level_generator { +public: + static constexpr size_t max_level = MAX_LEVEL; + + concurrent_geometric_level_generator() : engines(time(NULL)) {} + + size_t operator()() { + return (distribution(engines.local()) % MAX_LEVEL) + 1; + } + +private: + tbb::enumerable_thread_specific engines; + std::geometric_distribution distribution; +}; + +} // namespace internal +} // namespace interface10 +} // namespace tbb + +#endif // __TBB_concurrent_skip_list_H diff --git a/ohos/arm64-v8a/include/tbb/internal/_concurrent_unordered_impl.h b/ohos/arm64-v8a/include/tbb/internal/_concurrent_unordered_impl.h new file mode 100644 index 00000000..b4bbefa5 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/internal/_concurrent_unordered_impl.h @@ -0,0 +1,1684 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* Container implementations in this header are based on PPL implementations + provided by Microsoft. 
*/ + +#ifndef __TBB__concurrent_unordered_impl_H +#define __TBB__concurrent_unordered_impl_H +#if !defined(__TBB_concurrent_unordered_map_H) && !defined(__TBB_concurrent_unordered_set_H) && !defined(__TBB_concurrent_hash_map_H) +#error Do not #include this internal file directly; use public TBB headers instead. +#endif + +#include "../tbb_stddef.h" + +#include +#include // Need std::pair +#include // Need std::equal_to (in ../concurrent_unordered_*.h) +#include // For tbb_hasher +#include // Need std::memset +#include __TBB_STD_SWAP_HEADER + +#include "../atomic.h" +#include "../tbb_exception.h" +#include "../tbb_allocator.h" + +#if __TBB_INITIALIZER_LISTS_PRESENT + #include +#endif + +#if __TBB_CPP11_RVALUE_REF_PRESENT && !__TBB_IMPLICIT_COPY_DELETION_BROKEN + #define __TBB_UNORDERED_NODE_HANDLE_PRESENT 1 +#endif + +#include "_allocator_traits.h" +#include "_tbb_hash_compare_impl.h" +#include "_template_helpers.h" + +#if __TBB_UNORDERED_NODE_HANDLE_PRESENT +#include "_node_handle_impl.h" +#endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT + +namespace tbb { +namespace interface5 { +//! @cond INTERNAL +namespace internal { + +template +class split_ordered_list; +template +class concurrent_unordered_base; + +// Forward list iterators (without skipping dummy elements) +template +class flist_iterator : public std::iterator +{ + template + friend class split_ordered_list; + template + friend class concurrent_unordered_base; + template + friend class flist_iterator; + + typedef typename Solist::nodeptr_t nodeptr_t; +public: + typedef typename Solist::value_type value_type; + typedef typename Solist::difference_type difference_type; + typedef typename Solist::pointer pointer; + typedef typename Solist::reference reference; + + flist_iterator() : my_node_ptr(0) {} + flist_iterator( const flist_iterator &other ) + : my_node_ptr(other.my_node_ptr) {} + + flist_iterator& operator=( const flist_iterator &other ) { + my_node_ptr = other.my_node_ptr; + return *this; + } + + reference operator*() const { return my_node_ptr->my_element; } + pointer operator->() const { return &**this; } + + flist_iterator& operator++() { + my_node_ptr = my_node_ptr->my_next; + return *this; + } + + flist_iterator operator++(int) { + flist_iterator tmp = *this; + ++*this; + return tmp; + } + +protected: + flist_iterator(nodeptr_t pnode) : my_node_ptr(pnode) {} + nodeptr_t get_node_ptr() const { return my_node_ptr; } + + nodeptr_t my_node_ptr; + + template + friend bool operator==( const flist_iterator &i, const flist_iterator &j ); + template + friend bool operator!=( const flist_iterator& i, const flist_iterator& j ); +}; + +template +bool operator==( const flist_iterator &i, const flist_iterator &j ) { + return i.my_node_ptr == j.my_node_ptr; +} +template +bool operator!=( const flist_iterator& i, const flist_iterator& j ) { + return i.my_node_ptr != j.my_node_ptr; +} + +// Split-order list iterators, needed to skip dummy elements +template +class solist_iterator : public flist_iterator +{ + typedef flist_iterator base_type; + typedef typename Solist::nodeptr_t nodeptr_t; + using base_type::get_node_ptr; + template + friend class split_ordered_list; + template + friend class solist_iterator; + template + friend class concurrent_unordered_base; + template + friend bool operator==( const solist_iterator &i, const solist_iterator &j ); + template + friend bool operator!=( const solist_iterator& i, const solist_iterator& j ); + + const Solist *my_list_ptr; + solist_iterator(nodeptr_t pnode, const Solist *plist) : 
base_type(pnode), my_list_ptr(plist) {} + +public: + typedef typename Solist::value_type value_type; + typedef typename Solist::difference_type difference_type; + typedef typename Solist::pointer pointer; + typedef typename Solist::reference reference; + + solist_iterator() {} + solist_iterator( const solist_iterator &other ) + : base_type(other), my_list_ptr(other.my_list_ptr) {} + + solist_iterator& operator=( const solist_iterator &other ) { + base_type::my_node_ptr = other.get_node_ptr(); + my_list_ptr = other.my_list_ptr; + return *this; + } + + reference operator*() const { + return this->base_type::operator*(); + } + + pointer operator->() const { + return (&**this); + } + + solist_iterator& operator++() { + do ++(*(base_type *)this); + while (get_node_ptr() != NULL && get_node_ptr()->is_dummy()); + + return (*this); + } + + solist_iterator operator++(int) { + solist_iterator tmp = *this; + do ++*this; + while (get_node_ptr() != NULL && get_node_ptr()->is_dummy()); + + return (tmp); + } +}; + +template +bool operator==( const solist_iterator &i, const solist_iterator &j ) { + return i.my_node_ptr == j.my_node_ptr && i.my_list_ptr == j.my_list_ptr; +} +template +bool operator!=( const solist_iterator& i, const solist_iterator& j ) { + return i.my_node_ptr != j.my_node_ptr || i.my_list_ptr != j.my_list_ptr; +} + +// Forward type and class definitions +typedef size_t sokey_t; + + +// Forward list in which elements are sorted in a split-order +template +class split_ordered_list +{ +public: + typedef split_ordered_list self_type; + + typedef typename tbb::internal::allocator_rebind::type allocator_type; + + struct node; + typedef node *nodeptr_t; + + typedef typename tbb::internal::allocator_traits::value_type value_type; + typedef typename tbb::internal::allocator_traits::size_type size_type; + typedef typename tbb::internal::allocator_traits::difference_type difference_type; + typedef typename tbb::internal::allocator_traits::pointer pointer; + typedef typename tbb::internal::allocator_traits::const_pointer const_pointer; + // No support for reference/const_reference in allocator traits + typedef value_type& reference; + typedef const value_type& const_reference; + + typedef solist_iterator const_iterator; + typedef solist_iterator iterator; + typedef flist_iterator raw_const_iterator; + typedef flist_iterator raw_iterator; + + // Node that holds the element in a split-ordered list + struct node : tbb::internal::no_assign + { + private: + // for compilers that try to generate default constructors though they are not needed. + node(); // VS 2008, 2010, 2012 + public: + // Initialize the node with the given order key + void init(sokey_t order_key) { + my_order_key = order_key; + my_next = NULL; + } + + // Return the order key (needed for hashing) + sokey_t get_order_key() const { // TODO: remove + return my_order_key; + } + + // get() and value() is a common interface for getting access to node`s element (required by node_handle) + value_type* storage() { + return reinterpret_cast(&my_element); + } + + value_type& value() { + return *storage(); + } + + // Inserts the new element in the list in an atomic fashion + nodeptr_t atomic_set_next(nodeptr_t new_node, nodeptr_t current_node) + { + // Try to change the next pointer on the current element to a new element, only if it still points to the cached next + nodeptr_t exchange_node = tbb::internal::as_atomic(my_next).compare_and_swap(new_node, current_node); + + if (exchange_node == current_node) // TODO: why this branch? 
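+            // compare_and_swap returns the value my_next held before the operation: if that
+            // equals the cached 'current_node', this thread published the link to 'new_node';
+            // otherwise another thread raced ahead and the interfering node is handed back
+            // so the caller can re-examine the list and retry.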
+ { + // Operation succeeded, return the new node + return new_node; + } + else + { + // Operation failed, return the "interfering" node + return exchange_node; + } + } + + // Checks if this element in the list is a dummy, order enforcing node. Dummy nodes are used by buckets + // in the hash table to quickly index into the right subsection of the split-ordered list. + bool is_dummy() const { + return (my_order_key & 0x1) == 0; + } + + + nodeptr_t my_next; // Next element in the list + value_type my_element; // Element storage + sokey_t my_order_key; // Order key for this element + }; + + // Allocate a new node with the given order key; used to allocate dummy nodes + nodeptr_t create_node(sokey_t order_key) { + nodeptr_t pnode = my_node_allocator.allocate(1); + pnode->init(order_key); + return (pnode); + } + + // Allocate a new node with the given order key and value + template + nodeptr_t create_node(sokey_t order_key, __TBB_FORWARDING_REF(Arg) t, + /*AllowCreate=*/tbb::internal::true_type=tbb::internal::true_type()){ + nodeptr_t pnode = my_node_allocator.allocate(1); + + //TODO: use RAII scoped guard instead of explicit catch + __TBB_TRY { + new(static_cast(&pnode->my_element)) T(tbb::internal::forward(t)); + pnode->init(order_key); + } __TBB_CATCH(...) { + my_node_allocator.deallocate(pnode, 1); + __TBB_RETHROW(); + } + + return (pnode); + } + + // A helper to avoid excessive requiremens in internal_insert + template + nodeptr_t create_node(sokey_t, __TBB_FORWARDING_REF(Arg), + /*AllowCreate=*/tbb::internal::false_type){ + __TBB_ASSERT(false, "This compile-time helper should never get called"); + return nodeptr_t(); + } + + // Allocate a new node with the given parameters for constructing value + template + nodeptr_t create_node_v( __TBB_FORWARDING_REF(Args) __TBB_PARAMETER_PACK args){ + nodeptr_t pnode = my_node_allocator.allocate(1); + + //TODO: use RAII scoped guard instead of explicit catch + __TBB_TRY { + new(static_cast(&pnode->my_element)) T(__TBB_PACK_EXPANSION(tbb::internal::forward(args))); + } __TBB_CATCH(...) { + my_node_allocator.deallocate(pnode, 1); + __TBB_RETHROW(); + } + + return (pnode); + } + + split_ordered_list(allocator_type a = allocator_type()) + : my_node_allocator(a), my_element_count(0) + { + // Immediately allocate a dummy node with order key of 0. This node + // will always be the head of the list. 
+ my_head = create_node(sokey_t(0)); + } + + ~split_ordered_list() + { + // Clear the list + clear(); + + // Remove the head element which is not cleared by clear() + nodeptr_t pnode = my_head; + my_head = NULL; + + __TBB_ASSERT(pnode != NULL && pnode->my_next == NULL, "Invalid head list node"); + + destroy_node(pnode); + } + + // Common forward list functions + + allocator_type get_allocator() const { + return (my_node_allocator); + } + + void clear() { + nodeptr_t pnext; + nodeptr_t pnode = my_head; + + __TBB_ASSERT(my_head != NULL, "Invalid head list node"); + pnext = pnode->my_next; + pnode->my_next = NULL; + pnode = pnext; + + while (pnode != NULL) + { + pnext = pnode->my_next; + destroy_node(pnode); + pnode = pnext; + } + + my_element_count = 0; + } + + // Returns a first non-dummy element in the SOL + iterator begin() { + return first_real_iterator(raw_begin()); + } + + // Returns a first non-dummy element in the SOL + const_iterator begin() const { + return first_real_iterator(raw_begin()); + } + + iterator end() { + return (iterator(0, this)); + } + + const_iterator end() const { + return (const_iterator(0, this)); + } + + const_iterator cbegin() const { + return (((const self_type *)this)->begin()); + } + + const_iterator cend() const { + return (((const self_type *)this)->end()); + } + + // Checks if the number of elements (non-dummy) is 0 + bool empty() const { + return (my_element_count == 0); + } + + // Returns the number of non-dummy elements in the list + size_type size() const { + return my_element_count; + } + + // Returns the maximum size of the list, determined by the allocator + size_type max_size() const { + return my_node_allocator.max_size(); + } + + // Swaps 'this' list with the passed in one + void swap(self_type& other) + { + if (this == &other) + { + // Nothing to do + return; + } + + std::swap(my_element_count, other.my_element_count); + std::swap(my_head, other.my_head); + } + + // Split-order list functions + + // Returns a first element in the SOL, which is always a dummy + raw_iterator raw_begin() { + return raw_iterator(my_head); + } + + // Returns a first element in the SOL, which is always a dummy + raw_const_iterator raw_begin() const { + return raw_const_iterator(my_head); + } + + raw_iterator raw_end() { + return raw_iterator(0); + } + + raw_const_iterator raw_end() const { + return raw_const_iterator(0); + } + + static sokey_t get_order_key(const raw_const_iterator& it) { + return it.get_node_ptr()->get_order_key(); + } + + static sokey_t get_safe_order_key(const raw_const_iterator& it) { + if( !it.get_node_ptr() ) return ~sokey_t(0); + return it.get_node_ptr()->get_order_key(); + } + + // Returns a public iterator version of the internal iterator. Public iterator must not + // be a dummy private iterator. + iterator get_iterator(raw_iterator it) { + __TBB_ASSERT(it.get_node_ptr() == NULL || !it.get_node_ptr()->is_dummy(), "Invalid user node (dummy)"); + return iterator(it.get_node_ptr(), this); + } + + // Returns a public iterator version of the internal iterator. Public iterator must not + // be a dummy private iterator. 
+ const_iterator get_iterator(raw_const_iterator it) const { + __TBB_ASSERT(it.get_node_ptr() == NULL || !it.get_node_ptr()->is_dummy(), "Invalid user node (dummy)"); + return const_iterator(it.get_node_ptr(), this); + } + + // Returns a non-const version of the raw_iterator + raw_iterator get_iterator(raw_const_iterator it) { + return raw_iterator(it.get_node_ptr()); + } + + // Returns a non-const version of the iterator + static iterator get_iterator(const_iterator it) { + return iterator(it.my_node_ptr, it.my_list_ptr); + } + + // Returns a public iterator version of a first non-dummy internal iterator at or after + // the passed in internal iterator. + iterator first_real_iterator(raw_iterator it) + { + // Skip all dummy, internal only iterators + while (it != raw_end() && it.get_node_ptr()->is_dummy()) + ++it; + + return iterator(it.get_node_ptr(), this); + } + + // Returns a public iterator version of a first non-dummy internal iterator at or after + // the passed in internal iterator. + const_iterator first_real_iterator(raw_const_iterator it) const + { + // Skip all dummy, internal only iterators + while (it != raw_end() && it.get_node_ptr()->is_dummy()) + ++it; + + return const_iterator(it.get_node_ptr(), this); + } + + // Erase an element using the allocator + void destroy_node(nodeptr_t pnode) { + if (!pnode->is_dummy()) my_node_allocator.destroy(pnode); + my_node_allocator.deallocate(pnode, 1); + } + + // Try to insert a new element in the list. + // If insert fails, return the node that was inserted instead. + static nodeptr_t try_insert_atomic(nodeptr_t previous, nodeptr_t new_node, nodeptr_t current_node) { + new_node->my_next = current_node; + return previous->atomic_set_next(new_node, current_node); + } + + // Insert a new element between passed in iterators + std::pair try_insert(raw_iterator it, raw_iterator next, nodeptr_t pnode, size_type *new_count) + { + nodeptr_t inserted_node = try_insert_atomic(it.get_node_ptr(), pnode, next.get_node_ptr()); + + if (inserted_node == pnode) + { + // If the insert succeeded, check that the order is correct and increment the element count + check_range(it, next); + *new_count = tbb::internal::as_atomic(my_element_count).fetch_and_increment(); + return std::pair(iterator(pnode, this), true); + } + else + { + return std::pair(end(), false); + } + } + + // Insert a new dummy element, starting search at a parent dummy element + raw_iterator insert_dummy(raw_iterator it, sokey_t order_key) + { + raw_iterator last = raw_end(); + raw_iterator where = it; + + __TBB_ASSERT(where != last, "Invalid head node"); + + ++where; + + // Create a dummy element up front, even though it may be discarded (due to concurrent insertion) + nodeptr_t dummy_node = create_node(order_key); + + for (;;) + { + __TBB_ASSERT(it != last, "Invalid head list node"); + + // If the head iterator is at the end of the list, or past the point where this dummy + // node needs to be inserted, then try to insert it. 
+ if (where == last || get_order_key(where) > order_key) + { + __TBB_ASSERT(get_order_key(it) < order_key, "Invalid node order in the list"); + + // Try to insert it in the right place + nodeptr_t inserted_node = try_insert_atomic(it.get_node_ptr(), dummy_node, where.get_node_ptr()); + + if (inserted_node == dummy_node) + { + // Insertion succeeded, check the list for order violations + check_range(it, where); + return raw_iterator(dummy_node); + } + else + { + // Insertion failed: either dummy node was inserted by another thread, or + // a real element was inserted at exactly the same place as dummy node. + // Proceed with the search from the previous location where order key was + // known to be larger (note: this is legal only because there is no safe + // concurrent erase operation supported). + where = it; + ++where; + continue; + } + } + else if (get_order_key(where) == order_key) + { + // Another dummy node with the same value found, discard the new one. + destroy_node(dummy_node); + return where; + } + + // Move the iterator forward + it = where; + ++where; + } + + } + + nodeptr_t erase_node_impl(raw_iterator previous, raw_const_iterator& where) { + nodeptr_t pnode = (where++).get_node_ptr(); + nodeptr_t prevnode = previous.get_node_ptr(); + __TBB_ASSERT(prevnode->my_next == pnode, "Erase must take consecutive iterators"); + prevnode->my_next = pnode->my_next; + return pnode; + } + + // This erase function can handle both real and dummy nodes + void erase_node(raw_iterator previous, raw_const_iterator& where, + /*allow_destroy*/tbb::internal::true_type) + { + nodeptr_t pnode = erase_node_impl(previous, where); + destroy_node(pnode); + } + + void erase_node(raw_iterator previous, raw_const_iterator& where, + /*allow_destroy*/tbb::internal::false_type) + { + erase_node_impl(previous, where); + } + + void erase_node(raw_iterator previous, raw_const_iterator& where) { + erase_node(previous, where, /*allow_destroy*/tbb::internal::true_type()); + } + + // Erase the element (previous node needs to be passed because this is a forward only list) + template + iterator erase_node(raw_iterator previous, const_iterator where, AllowDestroy) + { + raw_const_iterator it = where; + erase_node(previous, it, AllowDestroy()); + my_element_count--; + + return get_iterator(first_real_iterator(it)); + } + + iterator erase_node(raw_iterator previous, const_iterator& where) { + return erase_node(previous, where, /*allow_destroy*/tbb::internal::true_type()); + } + + + + // Move all elements from the passed in split-ordered list to this one + void move_all(self_type& source) + { + raw_const_iterator first = source.raw_begin(); + raw_const_iterator last = source.raw_end(); + + if (first == last) + return; + + nodeptr_t previous_node = my_head; + raw_const_iterator begin_iterator = first++; + + // Move all elements one by one, including dummy ones + for (raw_const_iterator it = first; it != last;) + { + nodeptr_t pnode = it.get_node_ptr(); + + nodeptr_t dummy_node = pnode->is_dummy() ? 
create_node(pnode->get_order_key()) : create_node(pnode->get_order_key(), pnode->my_element); + previous_node = try_insert_atomic(previous_node, dummy_node, NULL); + __TBB_ASSERT(previous_node != NULL, "Insertion must succeed"); + raw_const_iterator where = it++; + source.erase_node(get_iterator(begin_iterator), where); + } + check_range(); + } + + +private: + //Need to setup private fields of split_ordered_list in move constructor and assignment of concurrent_unordered_base + template + friend class concurrent_unordered_base; + + // Check the list for order violations + void check_range( raw_iterator first, raw_iterator last ) + { +#if TBB_USE_ASSERT + for (raw_iterator it = first; it != last; ++it) + { + raw_iterator next = it; + ++next; + + __TBB_ASSERT(next == raw_end() || get_order_key(next) >= get_order_key(it), "!!! List order inconsistency !!!"); + } +#else + tbb::internal::suppress_unused_warning(first, last); +#endif + } + void check_range() + { +#if TBB_USE_ASSERT + check_range( raw_begin(), raw_end() ); +#endif + } + + typename tbb::internal::allocator_rebind::type my_node_allocator; // allocator object for nodes + size_type my_element_count; // Total item count, not counting dummy nodes + nodeptr_t my_head; // pointer to head node +}; + +#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) +#pragma warning(push) +#pragma warning(disable: 4127) // warning C4127: conditional expression is constant +#endif + +template +class concurrent_unordered_base : public Traits +{ +protected: + // Type definitions + typedef concurrent_unordered_base self_type; + typedef typename Traits::value_type value_type; + typedef typename Traits::key_type key_type; + typedef typename Traits::hash_compare hash_compare; + typedef typename Traits::allocator_type allocator_type; + typedef typename hash_compare::hasher hasher; + typedef typename hash_compare::key_equal key_equal; + + typedef typename tbb::internal::allocator_traits::size_type size_type; + typedef typename tbb::internal::allocator_traits::difference_type difference_type; + typedef typename tbb::internal::allocator_traits::pointer pointer; + typedef typename tbb::internal::allocator_traits::const_pointer const_pointer; + // No support for reference/const_reference in allocator + typedef typename allocator_type::value_type& reference; + typedef const typename allocator_type::value_type& const_reference; + + typedef split_ordered_list solist_t; + typedef typename solist_t::nodeptr_t nodeptr_t; + // Iterators that walk the entire split-order list, including dummy nodes + typedef typename solist_t::raw_iterator raw_iterator; + typedef typename solist_t::raw_const_iterator raw_const_iterator; + typedef typename solist_t::iterator iterator; // TODO: restore const iterator for unordered_sets + typedef typename solist_t::const_iterator const_iterator; + typedef iterator local_iterator; + typedef const_iterator const_local_iterator; +#if __TBB_UNORDERED_NODE_HANDLE_PRESENT + typedef typename Traits::node_type node_type; +#endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT + using Traits::my_hash_compare; + using Traits::get_key; + using Traits::allow_multimapping; + + static const size_type initial_bucket_number = 8; // Initial number of buckets + +private: + template + friend class concurrent_unordered_base; + + typedef std::pair pairii_t; + typedef std::pair paircc_t; + + static size_type const pointers_per_table = sizeof(size_type) * 8; // One bucket segment per bit + static const size_type initial_bucket_load = 4; // Initial maximum number of elements 
per bucket + + struct call_internal_clear_on_exit{ + concurrent_unordered_base* my_instance; + call_internal_clear_on_exit(concurrent_unordered_base* instance) : my_instance(instance) {} + void dismiss(){ my_instance = NULL;} + ~call_internal_clear_on_exit(){ + if (my_instance){ + my_instance->internal_clear(); + } + } + }; +protected: + // Constructors/Destructors + concurrent_unordered_base(size_type n_of_buckets = initial_bucket_number, + const hash_compare& hc = hash_compare(), const allocator_type& a = allocator_type()) + : Traits(hc), my_solist(a), + my_allocator(a), my_maximum_bucket_size((float) initial_bucket_load) + { + if( n_of_buckets == 0) ++n_of_buckets; + my_number_of_buckets = size_type(1)<<__TBB_Log2((uintptr_t)n_of_buckets*2-1); // round up to power of 2 + internal_init(); + } + + concurrent_unordered_base(const concurrent_unordered_base& right, const allocator_type& a) + : Traits(right.my_hash_compare), my_solist(a), my_allocator(a) + { + internal_init(); + internal_copy(right); + } + + concurrent_unordered_base(const concurrent_unordered_base& right) + : Traits(right.my_hash_compare), my_solist(right.get_allocator()), my_allocator(right.get_allocator()) + { + //FIXME:exception safety seems to be broken here + internal_init(); + internal_copy(right); + } + +#if __TBB_CPP11_RVALUE_REF_PRESENT + concurrent_unordered_base(concurrent_unordered_base&& right) + : Traits(right.my_hash_compare), my_solist(right.get_allocator()), my_allocator(right.get_allocator()), + my_maximum_bucket_size(float(initial_bucket_load)) + { + my_number_of_buckets = initial_bucket_number; + internal_init(); + swap(right); + } + + concurrent_unordered_base(concurrent_unordered_base&& right, const allocator_type& a) + : Traits(right.my_hash_compare), my_solist(a), my_allocator(a) + { + call_internal_clear_on_exit clear_buckets_on_exception(this); + + internal_init(); + if (a == right.get_allocator()){ + my_number_of_buckets = initial_bucket_number; + my_maximum_bucket_size = float(initial_bucket_load); + this->swap(right); + }else{ + my_maximum_bucket_size = right.my_maximum_bucket_size; + my_number_of_buckets = right.my_number_of_buckets; + my_solist.my_element_count = right.my_solist.my_element_count; + + if (! right.my_solist.empty()){ + nodeptr_t previous_node = my_solist.my_head; + + // Move all elements one by one, including dummy ones + for (raw_const_iterator it = ++(right.my_solist.raw_begin()), last = right.my_solist.raw_end(); it != last; ++it) + { + const nodeptr_t pnode = it.get_node_ptr(); + nodeptr_t node; + if (pnode->is_dummy()) { + node = my_solist.create_node(pnode->get_order_key()); + size_type bucket = __TBB_ReverseBits(pnode->get_order_key()) % my_number_of_buckets; + set_bucket(bucket, node); + }else{ + node = my_solist.create_node(pnode->get_order_key(), std::move(pnode->my_element)); + } + + previous_node = my_solist.try_insert_atomic(previous_node, node, NULL); + __TBB_ASSERT(previous_node != NULL, "Insertion of node failed. 
Concurrent inserts in constructor ?"); + } + my_solist.check_range(); + } + } + + clear_buckets_on_exception.dismiss(); + } + +#endif // __TBB_CPP11_RVALUE_REF_PRESENT + + concurrent_unordered_base& operator=(const concurrent_unordered_base& right) { + if (this != &right) + internal_copy(right); + return (*this); + } + +#if __TBB_CPP11_RVALUE_REF_PRESENT + concurrent_unordered_base& operator=(concurrent_unordered_base&& other) + { + if(this != &other){ + typedef typename tbb::internal::allocator_traits::propagate_on_container_move_assignment pocma_t; + if(pocma_t::value || this->my_allocator == other.my_allocator) { + concurrent_unordered_base trash (std::move(*this)); + swap(other); + if (pocma_t::value) { + using std::swap; + //TODO: swapping allocators here may be a problem, replace with single direction moving + swap(this->my_solist.my_node_allocator, other.my_solist.my_node_allocator); + swap(this->my_allocator, other.my_allocator); + } + } else { + concurrent_unordered_base moved_copy(std::move(other),this->my_allocator); + this->swap(moved_copy); + } + } + return *this; + } + +#endif // __TBB_CPP11_RVALUE_REF_PRESENT + +#if __TBB_INITIALIZER_LISTS_PRESENT + //! assignment operator from initializer_list + concurrent_unordered_base& operator=(std::initializer_list il) + { + this->clear(); + this->insert(il.begin(),il.end()); + return (*this); + } +#endif // __TBB_INITIALIZER_LISTS_PRESENT + + + ~concurrent_unordered_base() { + // Delete all node segments + internal_clear(); + } + +#if __TBB_UNORDERED_NODE_HANDLE_PRESENT + template + void internal_merge(SourceType& source) { + typedef typename SourceType::iterator source_iterator; + __TBB_STATIC_ASSERT((tbb::internal::is_same_type::value), + "Incompatible containers cannot be merged"); + + for(source_iterator it = source.begin(); it != source.end();) { + source_iterator where = it++; + if (allow_multimapping || find(get_key(*where)) == end()) { + std::pair extract_result = source.internal_extract(where); + + // Remember the old order key + sokey_t old_order_key = extract_result.first.my_node->get_order_key(); + + // If the insertion fails, it returns ownership of the node to extract_result.first + // extract_result.first remains valid node handle + if (!insert(std::move(extract_result.first)).second) { + raw_iterator next = extract_result.second; + raw_iterator current = next++; + + // Revert order key to old value + extract_result.first.my_node->init(old_order_key); + + __TBB_ASSERT(extract_result.first.my_node->get_order_key() >= current.get_node_ptr()->get_order_key(), + "Wrong nodes order in source container"); + __TBB_ASSERT(next==source.my_solist.raw_end() || + extract_result.first.my_node->get_order_key() <= next.get_node_ptr()->get_order_key(), + "Wrong nodes order in source container"); + + size_t new_count = 0;// To use try_insert() + bool insert_result = + source.my_solist.try_insert(current, next, extract_result.first.my_node, &new_count).second; + __TBB_ASSERT_EX(insert_result, "Return to source must be successful. 
" + "Changing source container while merging is unsafe."); + } + extract_result.first.deactivate(); + } + } + } +#endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT + +public: + allocator_type get_allocator() const { + return my_solist.get_allocator(); + } + + // Size and capacity function + bool empty() const { + return my_solist.empty(); + } + + size_type size() const { + return my_solist.size(); + } + + size_type max_size() const { + return my_solist.max_size(); + } + + // Iterators + iterator begin() { + return my_solist.begin(); + } + + const_iterator begin() const { + return my_solist.begin(); + } + + iterator end() { + return my_solist.end(); + } + + const_iterator end() const { + return my_solist.end(); + } + + const_iterator cbegin() const { + return my_solist.cbegin(); + } + + const_iterator cend() const { + return my_solist.cend(); + } + + // Parallel traversal support + class const_range_type : tbb::internal::no_assign { + const concurrent_unordered_base &my_table; + raw_const_iterator my_begin_node; + raw_const_iterator my_end_node; + mutable raw_const_iterator my_midpoint_node; + public: + //! Type for size of a range + typedef typename concurrent_unordered_base::size_type size_type; + typedef typename concurrent_unordered_base::value_type value_type; + typedef typename concurrent_unordered_base::reference reference; + typedef typename concurrent_unordered_base::difference_type difference_type; + typedef typename concurrent_unordered_base::const_iterator iterator; + + //! True if range is empty. + bool empty() const {return my_begin_node == my_end_node;} + + //! True if range can be partitioned into two subranges. + bool is_divisible() const { + return my_midpoint_node != my_end_node; + } + //! Split range. + const_range_type( const_range_type &r, split ) : + my_table(r.my_table), my_end_node(r.my_end_node) + { + r.my_end_node = my_begin_node = r.my_midpoint_node; + __TBB_ASSERT( !empty(), "Splitting despite the range is not divisible" ); + __TBB_ASSERT( !r.empty(), "Splitting despite the range is not divisible" ); + set_midpoint(); + r.set_midpoint(); + } + //! Init range with container and grainsize specified + const_range_type( const concurrent_unordered_base &a_table ) : + my_table(a_table), my_begin_node(a_table.my_solist.begin()), + my_end_node(a_table.my_solist.end()) + { + set_midpoint(); + } + iterator begin() const { return my_table.my_solist.get_iterator(my_begin_node); } + iterator end() const { return my_table.my_solist.get_iterator(my_end_node); } + //! The grain size for this range. + size_type grainsize() const { return 1; } + + //! Set my_midpoint_node to point approximately half way between my_begin_node and my_end_node. + void set_midpoint() const { + if( my_begin_node == my_end_node ) // not divisible + my_midpoint_node = my_end_node; + else { + sokey_t begin_key = solist_t::get_safe_order_key(my_begin_node); + sokey_t end_key = solist_t::get_safe_order_key(my_end_node); + size_t mid_bucket = __TBB_ReverseBits( begin_key + (end_key-begin_key)/2 ) % my_table.my_number_of_buckets; + while ( !my_table.is_initialized(mid_bucket) ) mid_bucket = my_table.get_parent(mid_bucket); + if(__TBB_ReverseBits(mid_bucket) > begin_key) { + // found a dummy_node between begin and end + my_midpoint_node = my_table.my_solist.first_real_iterator(my_table.get_bucket( mid_bucket )); + } + else { + // didn't find a dummy node between begin and end. 
+ my_midpoint_node = my_end_node; + } +#if TBB_USE_ASSERT + { + sokey_t mid_key = solist_t::get_safe_order_key(my_midpoint_node); + __TBB_ASSERT( begin_key < mid_key, "my_begin_node is after my_midpoint_node" ); + __TBB_ASSERT( mid_key <= end_key, "my_midpoint_node is after my_end_node" ); + } +#endif // TBB_USE_ASSERT + } + } + }; + + class range_type : public const_range_type { + public: + typedef typename concurrent_unordered_base::iterator iterator; + //! Split range. + range_type( range_type &r, split ) : const_range_type( r, split() ) {} + //! Init range with container and grainsize specified + range_type( const concurrent_unordered_base &a_table ) : const_range_type(a_table) {} + + iterator begin() const { return solist_t::get_iterator( const_range_type::begin() ); } + iterator end() const { return solist_t::get_iterator( const_range_type::end() ); } + }; + + range_type range() { + return range_type( *this ); + } + + const_range_type range() const { + return const_range_type( *this ); + } + + // Modifiers + std::pair insert(const value_type& value) { + return internal_insert(value); + } + + iterator insert(const_iterator, const value_type& value) { + // Ignore hint + return insert(value).first; + } + +#if __TBB_CPP11_RVALUE_REF_PRESENT + std::pair insert(value_type&& value) { + return internal_insert(std::move(value)); + } + + iterator insert(const_iterator, value_type&& value) { + // Ignore hint + return insert(std::move(value)).first; + } +#endif /*__TBB_CPP11_RVALUE_REF_PRESENT*/ + +#if __TBB_UNORDERED_NODE_HANDLE_PRESENT + std::pair insert(node_type&& nh) { + if (!nh.empty()) { + nodeptr_t handled_node = nh.my_node; + std::pair insert_result = + internal_insert + (handled_node->my_element, handled_node); + if (insert_result.second) + nh.deactivate(); + return insert_result; + } + return std::pair(end(), false); + } + + iterator insert(const_iterator, node_type&& nh) { + return insert(std::move(nh)).first; + } +#endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT + +#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_RVALUE_REF_PRESENT + template + std::pair emplace(Args&&... args) { + nodeptr_t pnode = my_solist.create_node_v(tbb::internal::forward(args)...); + + return internal_insert(pnode->my_element, pnode); + } + + template + iterator emplace_hint(const_iterator, Args&&... args) { + // Ignore hint + return emplace(tbb::internal::forward(args)...).first; + } +#endif // __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_RVALUE_REF_PRESENT + + + template + void insert(Iterator first, Iterator last) { + for (Iterator it = first; it != last; ++it) + insert(*it); + } + +#if __TBB_INITIALIZER_LISTS_PRESENT + //! 
Insert initializer list + void insert(std::initializer_list il) { + insert(il.begin(), il.end()); + } +#endif + + iterator unsafe_erase(const_iterator where) { + return internal_erase(where); + } + + iterator unsafe_erase(const_iterator first, const_iterator last) { + while (first != last) + unsafe_erase(first++); + return my_solist.get_iterator(first); + } + + size_type unsafe_erase(const key_type& key) { + pairii_t where = equal_range(key); + size_type item_count = internal_distance(where.first, where.second); + unsafe_erase(where.first, where.second); + return item_count; + } + +#if __TBB_UNORDERED_NODE_HANDLE_PRESENT + node_type unsafe_extract(const_iterator where) { + return internal_extract(where).first; + } + + node_type unsafe_extract(const key_type& key) { + pairii_t where = equal_range(key); + if (where.first == end()) return node_type(); // element was not found + return internal_extract(where.first).first; + } +#endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT + + void swap(concurrent_unordered_base& right) { + if (this != &right) { + std::swap(my_hash_compare, right.my_hash_compare); + my_solist.swap(right.my_solist); + internal_swap_buckets(right); + std::swap(my_number_of_buckets, right.my_number_of_buckets); + std::swap(my_maximum_bucket_size, right.my_maximum_bucket_size); + } + } + + // Observers + hasher hash_function() const { + return my_hash_compare.my_hash_object; + } + + key_equal key_eq() const { + return my_hash_compare.my_key_compare_object; + } + + void clear() { + // Clear list + my_solist.clear(); + + // Clear buckets + internal_clear(); + + // Initialize bucket 0 + __TBB_ASSERT(my_buckets[0] == NULL, NULL); + raw_iterator dummy_node = my_solist.raw_begin(); + set_bucket(0, dummy_node); + } + + // Lookup + iterator find(const key_type& key) { + return internal_find(key); + } + + const_iterator find(const key_type& key) const { + return const_cast(this)->internal_find(key); + } + + size_type count(const key_type& key) const { + if(allow_multimapping) { + paircc_t answer = equal_range(key); + size_type item_count = internal_distance(answer.first, answer.second); + return item_count; + } else { + return const_cast(this)->internal_find(key) == end()?0:1; + } + } + + std::pair equal_range(const key_type& key) { + return internal_equal_range(key); + } + + std::pair equal_range(const key_type& key) const { + return const_cast(this)->internal_equal_range(key); + } + + // Bucket interface - for debugging + size_type unsafe_bucket_count() const { + return my_number_of_buckets; + } + + size_type unsafe_max_bucket_count() const { + return segment_size(pointers_per_table-1); + } + + size_type unsafe_bucket_size(size_type bucket) { + size_type item_count = 0; + if (is_initialized(bucket)) { + raw_iterator it = get_bucket(bucket); + ++it; + for (; it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy(); ++it) + ++item_count; + } + return item_count; + } + + size_type unsafe_bucket(const key_type& key) const { + sokey_t order_key = (sokey_t) my_hash_compare(key); + size_type bucket = order_key % my_number_of_buckets; + return bucket; + } + + // If the bucket is initialized, return a first non-dummy element in it + local_iterator unsafe_begin(size_type bucket) { + if (!is_initialized(bucket)) + return end(); + + raw_iterator it = get_bucket(bucket); + return my_solist.first_real_iterator(it); + } + + // If the bucket is initialized, return a first non-dummy element in it + const_local_iterator unsafe_begin(size_type bucket) const + { + if (!is_initialized(bucket)) + return 
end(); + + raw_const_iterator it = get_bucket(bucket); + return my_solist.first_real_iterator(it); + } + + // @REVIEW: Takes O(n) + // Returns the iterator after the last non-dummy element in the bucket + local_iterator unsafe_end(size_type bucket) + { + if (!is_initialized(bucket)) + return end(); + + raw_iterator it = get_bucket(bucket); + + // Find the end of the bucket, denoted by the dummy element + do ++it; + while(it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy()); + + // Return the first real element past the end of the bucket + return my_solist.first_real_iterator(it); + } + + // @REVIEW: Takes O(n) + // Returns the iterator after the last non-dummy element in the bucket + const_local_iterator unsafe_end(size_type bucket) const + { + if (!is_initialized(bucket)) + return end(); + + raw_const_iterator it = get_bucket(bucket); + + // Find the end of the bucket, denoted by the dummy element + do ++it; + while(it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy()); + + // Return the first real element past the end of the bucket + return my_solist.first_real_iterator(it); + } + + const_local_iterator unsafe_cbegin(size_type bucket) const { + return ((const self_type *) this)->unsafe_begin(bucket); + } + + const_local_iterator unsafe_cend(size_type bucket) const { + return ((const self_type *) this)->unsafe_end(bucket); + } + + // Hash policy + float load_factor() const { + return (float) size() / (float) unsafe_bucket_count(); + } + + float max_load_factor() const { + return my_maximum_bucket_size; + } + + void max_load_factor(float newmax) { + if (newmax != newmax || newmax < 0) + tbb::internal::throw_exception(tbb::internal::eid_invalid_load_factor); + my_maximum_bucket_size = newmax; + } + + // This function is a noop, because the underlying split-ordered list + // is already sorted, so an increase in the bucket number will be + // reflected next time this bucket is touched. + void rehash(size_type buckets) { + size_type current_buckets = my_number_of_buckets; + if (current_buckets >= buckets) + return; + my_number_of_buckets = size_type(1)<<__TBB_Log2((uintptr_t)buckets*2-1); // round up to power of 2 + } + +private: + + // Initialize the hash and keep the first bucket open + void internal_init() { + // Initialize the array of segment pointers + memset(my_buckets, 0, sizeof(my_buckets)); + + // Initialize bucket 0 + raw_iterator dummy_node = my_solist.raw_begin(); + set_bucket(0, dummy_node); + } + + void internal_clear() { + for (size_type index = 0; index < pointers_per_table; ++index) { + if (my_buckets[index] != NULL) { + size_type sz = segment_size(index); + for (size_type index2 = 0; index2 < sz; ++index2) + my_allocator.destroy(&my_buckets[index][index2]); + my_allocator.deallocate(my_buckets[index], sz); + my_buckets[index] = 0; + } + } + } + + void internal_copy(const self_type& right) { + clear(); + + my_maximum_bucket_size = right.my_maximum_bucket_size; + my_number_of_buckets = right.my_number_of_buckets; + + __TBB_TRY { + insert(right.begin(), right.end()); + my_hash_compare = right.my_hash_compare; + } __TBB_CATCH(...) { + my_solist.clear(); + __TBB_RETHROW(); + } + } + + void internal_swap_buckets(concurrent_unordered_base& right) + { + // Swap all node segments + for (size_type index = 0; index < pointers_per_table; ++index) + { + raw_iterator * iterator_pointer = my_buckets[index]; + my_buckets[index] = right.my_buckets[index]; + right.my_buckets[index] = iterator_pointer; + } + } + + //TODO: why not use std::distance? 
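+    // (const_iterator is a forward iterator over the split-ordered list, so std::distance
+    //  would also be linear; the explicit counting loop below is equivalent.)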
+ // Hash APIs + static size_type internal_distance(const_iterator first, const_iterator last) + { + size_type num = 0; + + for (const_iterator it = first; it != last; ++it) + ++num; + + return num; + } + + // Insert an element in the hash given its value + template + std::pair internal_insert(__TBB_FORWARDING_REF(ValueType) value, nodeptr_t pnode = NULL) + { + const key_type *pkey = &get_key(value); + sokey_t hash_key = (sokey_t) my_hash_compare(*pkey); + size_type new_count = 0; + sokey_t order_key = split_order_key_regular(hash_key); + raw_iterator previous = prepare_bucket(hash_key); + raw_iterator last = my_solist.raw_end(); + __TBB_ASSERT(previous != last, "Invalid head node"); + + if (pnode) { + // Set new order_key to node + pnode->init(order_key); + } + + // First node is a dummy node + for (raw_iterator where = previous;;) + { + ++where; + if (where == last || solist_t::get_order_key(where) > order_key || + // if multimapped, stop at the first item equal to us. + (allow_multimapping && solist_t::get_order_key(where) == order_key && + !my_hash_compare(get_key(*where), *pkey))) // TODO: fix negation + { + if (!pnode) { + pnode = my_solist.create_node(order_key, tbb::internal::forward(value), AllowCreate()); + // If the value was moved, the known reference to key might be invalid + pkey = &get_key(pnode->my_element); + } + + // Try to insert 'pnode' between 'previous' and 'where' + std::pair result = my_solist.try_insert(previous, where, pnode, &new_count); + + if (result.second) + { + // Insertion succeeded, adjust the table size, if needed + adjust_table_size(new_count, my_number_of_buckets); + return result; + } + else + { + // Insertion failed: either the same node was inserted by another thread, or + // another element was inserted at exactly the same place as this node. + // Proceed with the search from the previous location where order key was + // known to be larger (note: this is legal only because there is no safe + // concurrent erase operation supported). + where = previous; + continue; + } + } + else if (!allow_multimapping && solist_t::get_order_key(where) == order_key && + !my_hash_compare(get_key(*where), *pkey)) // TODO: fix negation + { // Element already in the list, return it + if (pnode && AllowDestroy::value) + my_solist.destroy_node(pnode); + return std::pair(my_solist.get_iterator(where), false); + } + // Move the iterator forward + previous = where; + } + } + + // Find the element in the split-ordered list + iterator internal_find(const key_type& key) + { + sokey_t hash_key = (sokey_t) my_hash_compare(key); + sokey_t order_key = split_order_key_regular(hash_key); + raw_iterator last = my_solist.raw_end(); + + for (raw_iterator it = prepare_bucket(hash_key); it != last; ++it) + { + if (solist_t::get_order_key(it) > order_key) + { + // If the order key is smaller than the current order key, the element + // is not in the hash. + return end(); + } + else if (solist_t::get_order_key(it) == order_key) + { + // The fact that order keys match does not mean that the element is found. + // Key function comparison has to be performed to check whether this is the + // right element. If not, keep searching while order key is the same. + if (!my_hash_compare(get_key(*it), key)) // TODO: fix negation + return my_solist.get_iterator(it); + } + } + + return end(); + } + + // Erase an element from the list. This is not a concurrency safe function. 
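+    // The predecessor of 'it' is located by walking forward from the bucket's dummy head,
+    // because the split-ordered list is singly linked and keeps no back pointers.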
+ iterator internal_erase(const_iterator it) + { + sokey_t hash_key = (sokey_t) my_hash_compare(get_key(*it)); + raw_iterator previous = prepare_bucket(hash_key); + raw_iterator last = my_solist.raw_end(); + __TBB_ASSERT(previous != last, "Invalid head node"); + + // First node is a dummy node + for (raw_iterator where = previous; where != last; previous = where) { + ++where; + if (my_solist.get_iterator(where) == it) + return my_solist.erase_node(previous, it); + } + return end(); + } + +#if __TBB_UNORDERED_NODE_HANDLE_PRESENT + std::pair internal_extract(const_iterator it) { + sokey_t hash_key = sokey_t(my_hash_compare(get_key(*it))); + raw_iterator previous = prepare_bucket(hash_key); + raw_iterator last = my_solist.raw_end(); + __TBB_ASSERT(previous != last, "Invalid head node"); + + for(raw_iterator where = previous; where != last; previous = where) { + ++where; + if (my_solist.get_iterator(where) == it) { + const_iterator result = it; + my_solist.erase_node(previous, it, /*allow_destroy*/tbb::internal::false_type()); + return std::pair( node_type(result.get_node_ptr()), + previous); + } + } + return std::pair(node_type(), end()); + } +#endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT + + // Return the [begin, end) pair of iterators with the same key values. + // This operation makes sense only if mapping is many-to-one. + pairii_t internal_equal_range(const key_type& key) + { + sokey_t hash_key = (sokey_t) my_hash_compare(key); + sokey_t order_key = split_order_key_regular(hash_key); + raw_iterator end_it = my_solist.raw_end(); + + for (raw_iterator it = prepare_bucket(hash_key); it != end_it; ++it) + { + if (solist_t::get_order_key(it) > order_key) + { + // There is no element with the given key + return pairii_t(end(), end()); + } + else if (solist_t::get_order_key(it) == order_key && + !my_hash_compare(get_key(*it), key)) // TODO: fix negation; also below + { + iterator first = my_solist.get_iterator(it); + iterator last = first; + do ++last; while( allow_multimapping && last != end() && !my_hash_compare(get_key(*last), key) ); + return pairii_t(first, last); + } + } + + return pairii_t(end(), end()); + } + + // Bucket APIs + void init_bucket(size_type bucket) + { + // Bucket 0 has no parent. + __TBB_ASSERT( bucket != 0, "The first bucket must always be initialized"); + + size_type parent_bucket = get_parent(bucket); + + // All parent_bucket buckets have to be initialized before this bucket is + if (!is_initialized(parent_bucket)) + init_bucket(parent_bucket); + + raw_iterator parent = get_bucket(parent_bucket); + + // Create a dummy first node in this bucket + raw_iterator dummy_node = my_solist.insert_dummy(parent, split_order_key_dummy(bucket)); + set_bucket(bucket, dummy_node); + } + + void adjust_table_size(size_type total_elements, size_type current_size) + { + // Grow the table by a factor of 2 if possible and needed + if ( ((float) total_elements / (float) current_size) > my_maximum_bucket_size ) + { + // Double the size of the hash only if size has not changed in between loads + my_number_of_buckets.compare_and_swap(2u*current_size, current_size); + //Simple "my_number_of_buckets.compare_and_swap( current_size<<1, current_size );" does not work for VC8 + //due to overzealous compiler warnings in /Wp64 mode + } + } + + size_type get_parent(size_type bucket) const + { + // Unsets bucket's most significant turned-on bit + size_type msb = __TBB_Log2((uintptr_t)bucket); + return bucket & ~(size_type(1) << msb); + } + + + // Dynamic sized array (segments) + //! 
@return segment index of given index in the array + static size_type segment_index_of( size_type index ) { + return size_type( __TBB_Log2( uintptr_t(index|1) ) ); + } + + //! @return the first array index of given segment + static size_type segment_base( size_type k ) { + return (size_type(1)<(new_segment), 0, sz*sizeof(raw_iterator)); + + if (my_buckets[segment].compare_and_swap( new_segment, NULL) != NULL) + my_allocator.deallocate(new_segment, sz); + } + + my_buckets[segment][bucket] = dummy_head; + } + + bool is_initialized(size_type bucket) const { + size_type segment = segment_index_of(bucket); + bucket -= segment_base(segment); + + if (my_buckets[segment] == NULL) + return false; + + raw_iterator it = my_buckets[segment][bucket]; + return (it.get_node_ptr() != NULL); + } + + // Utilities for keys + + // A regular order key has its original hash value reversed and the last bit set + sokey_t split_order_key_regular(sokey_t order_key) const { + return __TBB_ReverseBits(order_key) | 0x1; + } + + // A dummy order key has its original hash value reversed and the last bit unset + sokey_t split_order_key_dummy(sokey_t order_key) const { + return __TBB_ReverseBits(order_key) & ~sokey_t(0x1); + } + + // Shared variables + atomic my_number_of_buckets; // Current table size + solist_t my_solist; // List where all the elements are kept + typename tbb::internal::allocator_rebind::type my_allocator; // Allocator object for segments + float my_maximum_bucket_size; // Maximum size of the bucket + atomic my_buckets[pointers_per_table]; // The segment table +}; +#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) +#pragma warning(pop) // warning 4127 is back +#endif + +} // namespace internal +//! @endcond +} // namespace interface5 +} // namespace tbb +#endif // __TBB__concurrent_unordered_impl_H diff --git a/ohos/arm64-v8a/include/tbb/internal/_deprecated_header_message_guard.h b/ohos/arm64-v8a/include/tbb/internal/_deprecated_header_message_guard.h new file mode 100644 index 00000000..fa993572 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/internal/_deprecated_header_message_guard.h @@ -0,0 +1,69 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +#include "tbb/tbb_config.h" + +#if (!defined(TBB_SUPPRESS_DEPRECATED_MESSAGES) || (TBB_SUPPRESS_DEPRECATED_MESSAGES == 0)) && !defined(__TBB_INTERNAL_INCLUDES_DEPRECATION_MESSAGE) && \ +!defined(__TBB_condition_variable_H_include_area) && \ +!defined(__TBB_ppl_H_include_area) && \ +!defined(__TBB_thread_H_include_area) && \ +!defined(__TBB_tuple_H_include_area) && \ +!defined(__TBB_aggregator_H_include_area) && \ +!defined(__TBB_aligned_space_H_include_area) && \ +!defined(__TBB_atomic_H_include_area) && \ +!defined(__TBB_combinable_H_include_area) && \ +!defined(__TBB_concurrent_hash_map_H_include_area) && \ +!defined(__TBB_concurrent_lru_cache_H_include_area) && \ +!defined(__TBB_concurrent_map_H_include_area) && \ +!defined(__TBB_concurrent_priority_queue_H_include_area) && \ +!defined(__TBB_concurrent_queue_H_include_area) && \ +!defined(__TBB_concurrent_set_H_include_area) && \ +!defined(__TBB_concurrent_unordered_map_H_include_area) && \ +!defined(__TBB_concurrent_unordered_set_H_include_area) && \ +!defined(__TBB_concurrent_vector_H_include_area) && \ +!defined(__TBB_critical_section_H_include_area) && \ +!defined(__TBB_enumerable_thread_specific_H_include_area) && \ +!defined(__TBB_flow_graph_opencl_node_H_include_area) && \ +!defined(__TBB_flow_graph_H_include_area) && \ +!defined(__TBB_mutex_H_include_area) && \ +!defined(__TBB_parallel_do_H_include_area) && \ +!defined(__TBB_parallel_for_H_include_area) && \ +!defined(__TBB_parallel_invoke_H_include_area) && \ +!defined(__TBB_parallel_reduce_H_include_area) && \ +!defined(__TBB_parallel_scan_H_include_area) && \ +!defined(__TBB_parallel_sort_H_include_area) && \ +!defined(__TBB_parallel_while_H_include_area) && \ +!defined(__TBB_partitioner_H_include_area) && \ +!defined(__TBB_pipeline_H_include_area) && \ +!defined(__TBB_queuing_mutex_H_include_area) && \ +!defined(__TBB_queuing_rw_mutex_H_include_area) && \ +!defined(__TBB_reader_writer_lock_H_include_area) && \ +!defined(__TBB_recursive_mutex_H_include_area) && \ +!defined(__TBB_runtime_loader_H_include_area) && \ +!defined(__TBB_task_scheduler_init_H_include_area) && \ +!defined(__TBB_spin_mutex_H_include_area) && \ +!defined(__TBB_task_arena_H_include_area) && \ +!defined(__TBB_task_group_H_include_area) && \ +!defined(__TBB_task_scheduler_observer_H_include_area) && \ +!defined(__TBB_task_H_include_area) && \ +!defined(__TBB_tbb_exception_H_include_area) && \ +!defined(__TBB_tbb_profiling_H_include_area) && \ +!defined(__TBB_tbb_thread_H_include_area) && \ +!defined(__TBB_tbb_H_include_area) + +#define __TBB_show_deprecated_header_message + +#endif diff --git a/ohos/arm64-v8a/include/tbb/internal/_flow_graph_async_msg_impl.h b/ohos/arm64-v8a/include/tbb/internal/_flow_graph_async_msg_impl.h new file mode 100644 index 00000000..9f269ffd --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/internal/_flow_graph_async_msg_impl.h @@ -0,0 +1,153 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +#ifndef __TBB__flow_graph_async_msg_impl_H +#define __TBB__flow_graph_async_msg_impl_H + +#ifndef __TBB_flow_graph_H +#error Do not #include this internal file directly; use public TBB headers instead. +#endif + +namespace internal { + +template +class async_storage { +public: + typedef receiver async_storage_client; + + async_storage() : my_graph(nullptr) { + my_data_ready.store(false); + } + + ~async_storage() { + // Release reference to the graph if async_storage + // was destructed before set() call + if (my_graph) { + my_graph->release_wait(); + my_graph = nullptr; + } + } + + template + async_storage(C&& data) : my_graph(nullptr), my_data( std::forward(data) ) { + using namespace tbb::internal; + __TBB_STATIC_ASSERT( (is_same_type::type, typename strip::type>::value), "incoming type must be T" ); + + my_data_ready.store(true); + } + + template + bool set(C&& data) { + using namespace tbb::internal; + __TBB_STATIC_ASSERT( (is_same_type::type, typename strip::type>::value), "incoming type must be T" ); + + { + tbb::spin_mutex::scoped_lock locker(my_mutex); + + if (my_data_ready.load()) { + __TBB_ASSERT(false, "double set() call"); + return false; + } + + my_data = std::forward(data); + my_data_ready.store(true); + } + + // Thread sync is on my_data_ready flag + for (typename subscriber_list_type::iterator it = my_clients.begin(); it != my_clients.end(); ++it) { + (*it)->try_put(my_data); + } + + // Data was sent, release reference to the graph + if (my_graph) { + my_graph->release_wait(); + my_graph = nullptr; + } + + return true; + } + + task* subscribe(async_storage_client& client, graph& g) { + if (! my_data_ready.load()) + { + tbb::spin_mutex::scoped_lock locker(my_mutex); + + if (! my_data_ready.load()) { +#if TBB_USE_ASSERT + for (typename subscriber_list_type::iterator it = my_clients.begin(); it != my_clients.end(); ++it) { + __TBB_ASSERT(*it != &client, "unexpected double subscription"); + } +#endif // TBB_USE_ASSERT + + // Increase graph lifetime + my_graph = &g; + my_graph->reserve_wait(); + + // Subscribe + my_clients.push_back(&client); + return SUCCESSFULLY_ENQUEUED; + } + } + + __TBB_ASSERT(my_data_ready.load(), "data is NOT ready"); + return client.try_put_task(my_data); + } + +private: + graph* my_graph; + tbb::spin_mutex my_mutex; + tbb::atomic my_data_ready; + T my_data; + typedef std::vector subscriber_list_type; + subscriber_list_type my_clients; +}; + +} // namespace internal + +template +class __TBB_DEPRECATED async_msg { + template< typename > friend class receiver; + template< typename, typename > friend struct internal::async_helpers; +public: + typedef T async_msg_data_type; + + async_msg() : my_storage(std::make_shared< internal::async_storage >()) {} + + async_msg(const T& t) : my_storage(std::make_shared< internal::async_storage >(t)) {} + + async_msg(T&& t) : my_storage(std::make_shared< internal::async_storage >( std::move(t) )) {} + + virtual ~async_msg() {} + + void set(const T& t) { + my_storage->set(t); + } + + void set(T&& t) { + my_storage->set( std::move(t) ); + } + +protected: + // Can be overridden in derived class to inform that + // async calculation chain is over + virtual void finalize() const {} + +private: + typedef std::shared_ptr< internal::async_storage > async_storage_ptr; + async_storage_ptr my_storage; +}; + +#endif // __TBB__flow_graph_async_msg_impl_H diff --git a/ohos/arm64-v8a/include/tbb/internal/_flow_graph_body_impl.h b/ohos/arm64-v8a/include/tbb/internal/_flow_graph_body_impl.h new file mode 100644 index 
00000000..5d3ad6a0 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/internal/_flow_graph_body_impl.h @@ -0,0 +1,449 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB__flow_graph_body_impl_H +#define __TBB__flow_graph_body_impl_H + +#ifndef __TBB_flow_graph_H +#error Do not #include this internal file directly; use public TBB headers instead. +#endif + +// included in namespace tbb::flow::interfaceX (in flow_graph.h) + +namespace internal { + +typedef tbb::internal::uint64_t tag_value; + +using tbb::internal::strip; + +#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT + +template struct Policy {}; + +template struct has_policy; + +template +struct has_policy : + tbb::internal::bool_constant::value || + has_policy::value> {}; + +template +struct has_policy : + tbb::internal::bool_constant::value> {}; + +template +struct has_policy > : has_policy {}; + +#else + +template struct Policy {}; + +template +struct has_policy : tbb::internal::bool_constant::value> {}; + +template +struct has_policy > : has_policy {}; + +template +struct has_policy > : + tbb::internal::bool_constant::value || has_policy::value> {}; + +#endif + +namespace graph_policy_namespace { + + struct rejecting { }; + struct reserving { }; + struct queueing { }; + struct lightweight { }; + + // K == type of field used for key-matching. Each tag-matching port will be provided + // functor that, given an object accepted by the port, will return the + /// field of type K being used for matching. + template::type > > + struct key_matching { + typedef K key_type; + typedef typename strip::type base_key_type; + typedef KHash hash_compare_type; + }; + + // old tag_matching join's new specifier + typedef key_matching tag_matching; + + // Aliases for Policy combinations + typedef interface11::internal::Policy queueing_lightweight; + typedef interface11::internal::Policy rejecting_lightweight; + +} // namespace graph_policy_namespace + +// -------------- function_body containers ---------------------- + +//! A functor that takes no input and generates a value of type Output +template< typename Output > +class source_body : tbb::internal::no_assign { +public: + virtual ~source_body() {} + virtual bool operator()(Output &output) = 0; + virtual source_body* clone() = 0; +}; + +//! The leaf for source_body +template< typename Output, typename Body> +class source_body_leaf : public source_body { +public: + source_body_leaf( const Body &_body ) : body(_body) { } + bool operator()(Output &output) __TBB_override { return body( output ); } + source_body_leaf* clone() __TBB_override { + return new source_body_leaf< Output, Body >(body); + } + Body get_body() { return body; } +private: + Body body; +}; + +//! A functor that takes an Input and generates an Output +template< typename Input, typename Output > +class function_body : tbb::internal::no_assign { +public: + virtual ~function_body() {} + virtual Output operator()(const Input &input) = 0; + virtual function_body* clone() = 0; +}; + +//! 
the leaf for function_body +template +class function_body_leaf : public function_body< Input, Output > { +public: + function_body_leaf( const B &_body ) : body(_body) { } + Output operator()(const Input &i) __TBB_override { return body(i); } + B get_body() { return body; } + function_body_leaf* clone() __TBB_override { + return new function_body_leaf< Input, Output, B >(body); + } +private: + B body; +}; + +//! the leaf for function_body specialized for Input and output of continue_msg +template +class function_body_leaf< continue_msg, continue_msg, B> : public function_body< continue_msg, continue_msg > { +public: + function_body_leaf( const B &_body ) : body(_body) { } + continue_msg operator()( const continue_msg &i ) __TBB_override { + body(i); + return i; + } + B get_body() { return body; } + function_body_leaf* clone() __TBB_override { + return new function_body_leaf< continue_msg, continue_msg, B >(body); + } +private: + B body; +}; + +//! the leaf for function_body specialized for Output of continue_msg +template +class function_body_leaf< Input, continue_msg, B> : public function_body< Input, continue_msg > { +public: + function_body_leaf( const B &_body ) : body(_body) { } + continue_msg operator()(const Input &i) __TBB_override { + body(i); + return continue_msg(); + } + B get_body() { return body; } + function_body_leaf* clone() __TBB_override { + return new function_body_leaf< Input, continue_msg, B >(body); + } +private: + B body; +}; + +//! the leaf for function_body specialized for Input of continue_msg +template +class function_body_leaf< continue_msg, Output, B > : public function_body< continue_msg, Output > { +public: + function_body_leaf( const B &_body ) : body(_body) { } + Output operator()(const continue_msg &i) __TBB_override { + return body(i); + } + B get_body() { return body; } + function_body_leaf* clone() __TBB_override { + return new function_body_leaf< continue_msg, Output, B >(body); + } +private: + B body; +}; + +//! function_body that takes an Input and a set of output ports +template +class multifunction_body : tbb::internal::no_assign { +public: + virtual ~multifunction_body () {} + virtual void operator()(const Input &/* input*/, OutputSet &/*oset*/) = 0; + virtual multifunction_body* clone() = 0; + virtual void* get_body_ptr() = 0; +}; + +//! leaf for multifunction. OutputSet can be a std::tuple or a vector. +template +class multifunction_body_leaf : public multifunction_body { +public: + multifunction_body_leaf(const B &_body) : body(_body) { } + void operator()(const Input &input, OutputSet &oset) __TBB_override { + body(input, oset); // body may explicitly put() to one or more of oset. + } + void* get_body_ptr() __TBB_override { return &body; } + multifunction_body_leaf* clone() __TBB_override { + return new multifunction_body_leaf(body); + } + +private: + B body; +}; + +// ------ function bodies for hash_buffers and key-matching joins. 
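Both the function-body containers above and the key-extraction bodies that follow rely on the same type-erasure idiom: an abstract base with a virtual call operator plus a clone() method, and a leaf template that captures the user's callable. A standalone sketch of that idiom (class names are illustrative, not TBB's):

#include <iostream>
#include <memory>

// Minimal sketch of the erased-body pattern: virtual operator() plus clone(),
// so a node can copy its stored body without knowing the concrete callable type.
template <typename Input, typename Output>
class erased_body {
public:
    virtual ~erased_body() {}
    virtual Output operator()(const Input& input) = 0;
    virtual erased_body* clone() const = 0;
};

template <typename Input, typename Output, typename Body>
class erased_body_leaf : public erased_body<Input, Output> {
public:
    explicit erased_body_leaf(const Body& body) : my_body(body) {}
    Output operator()(const Input& input) override { return my_body(input); }
    erased_body_leaf* clone() const override { return new erased_body_leaf(my_body); }
private:
    Body my_body;
};

int main() {
    std::unique_ptr<erased_body<int, int>> body(
        new erased_body_leaf<int, int, int(*)(int)>(+[](int x) { return x * 2; }));
    std::unique_ptr<erased_body<int, int>> copy(body->clone());  // copies the stored callable
    std::cout << (*copy)(21) << "\n";                            // prints 42
    return 0;
}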
+ +template +class type_to_key_function_body : tbb::internal::no_assign { + public: + virtual ~type_to_key_function_body() {} + virtual Output operator()(const Input &input) = 0; // returns an Output + virtual type_to_key_function_body* clone() = 0; +}; + +// specialization for ref output +template +class type_to_key_function_body : tbb::internal::no_assign { + public: + virtual ~type_to_key_function_body() {} + virtual const Output & operator()(const Input &input) = 0; // returns a const Output& + virtual type_to_key_function_body* clone() = 0; +}; + +template +class type_to_key_function_body_leaf : public type_to_key_function_body { +public: + type_to_key_function_body_leaf( const B &_body ) : body(_body) { } + Output operator()(const Input &i) __TBB_override { return body(i); } + B get_body() { return body; } + type_to_key_function_body_leaf* clone() __TBB_override { + return new type_to_key_function_body_leaf< Input, Output, B>(body); + } +private: + B body; +}; + +template +class type_to_key_function_body_leaf : public type_to_key_function_body< Input, Output&> { +public: + type_to_key_function_body_leaf( const B &_body ) : body(_body) { } + const Output& operator()(const Input &i) __TBB_override { + return body(i); + } + B get_body() { return body; } + type_to_key_function_body_leaf* clone() __TBB_override { + return new type_to_key_function_body_leaf< Input, Output&, B>(body); + } +private: + B body; +}; + +// --------------------------- end of function_body containers ------------------------ + +// --------------------------- node task bodies --------------------------------------- + +//! A task that calls a node's forward_task function +template< typename NodeType > +class forward_task_bypass : public graph_task { + + NodeType &my_node; + +public: + + forward_task_bypass( NodeType &n +#if __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES + , node_priority_t node_priority = no_priority + ) : graph_task(node_priority), +#else + ) : +#endif + my_node(n) {} + + task *execute() __TBB_override { + task * new_task = my_node.forward_task(); + if (new_task == SUCCESSFULLY_ENQUEUED) new_task = NULL; + return new_task; + } +}; + +//! A task that calls a node's apply_body_bypass function, passing in an input of type Input +// return the task* unless it is SUCCESSFULLY_ENQUEUED, in which case return NULL +template< typename NodeType, typename Input > +class apply_body_task_bypass : public graph_task { + + NodeType &my_node; + Input my_input; + +public: + + apply_body_task_bypass( NodeType &n, const Input &i +#if __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES + , node_priority_t node_priority = no_priority + ) : graph_task(node_priority), +#else + ) : +#endif + my_node(n), my_input(i) {} + + task *execute() __TBB_override { + task * next_task = my_node.apply_body_bypass( my_input ); + if(next_task == SUCCESSFULLY_ENQUEUED) next_task = NULL; + return next_task; + } +}; + +//! A task that calls a node's apply_body_bypass function with no input +template< typename NodeType > +class source_task_bypass : public graph_task { + + NodeType &my_node; + +public: + + source_task_bypass( NodeType &n ) : my_node(n) {} + + task *execute() __TBB_override { + task *new_task = my_node.apply_body_bypass( ); + if(new_task == SUCCESSFULLY_ENQUEUED) return NULL; + return new_task; + } +}; + +// ------------------------ end of node task bodies ----------------------------------- + +//! 
An empty functor that takes an Input and returns a default constructed Output +template< typename Input, typename Output > +struct empty_body { + Output operator()( const Input & ) const { return Output(); } +}; + +template +class decrementer; + +template +class decrementer::value, void>::type + > : public receiver, tbb::internal::no_copy { + T* my_node; +protected: + + task* try_put_task( const DecrementType& value ) __TBB_override { + task* result = my_node->decrement_counter( value ); + if( !result ) + result = SUCCESSFULLY_ENQUEUED; + return result; + } + + graph& graph_reference() const __TBB_override { + return my_node->my_graph; + } + + template friend class tbb::flow::interface11::limiter_node; + void reset_receiver( reset_flags f ) __TBB_override { +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + if (f & rf_clear_edges) + my_built_predecessors.clear(); +#else + tbb::internal::suppress_unused_warning( f ); +#endif + } + +public: + // Since decrementer does not make use of possibly unconstructed owner inside its + // constructor, my_node can be directly initialized with 'this' pointer passed from the + // owner, hence making method 'set_owner' needless. + decrementer() : my_node(NULL) {} + void set_owner( T *node ) { my_node = node; } + +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + spin_mutex my_mutex; + //! The predecessor type for this node + typedef typename receiver::predecessor_type predecessor_type; + + typedef internal::edge_container built_predecessors_type; + typedef typename built_predecessors_type::edge_list_type predecessor_list_type; + built_predecessors_type &built_predecessors() __TBB_override { return my_built_predecessors; } + + void internal_add_built_predecessor( predecessor_type &s) __TBB_override { + spin_mutex::scoped_lock l(my_mutex); + my_built_predecessors.add_edge( s ); + } + + void internal_delete_built_predecessor( predecessor_type &s) __TBB_override { + spin_mutex::scoped_lock l(my_mutex); + my_built_predecessors.delete_edge(s); + } + + void copy_predecessors( predecessor_list_type &v) __TBB_override { + spin_mutex::scoped_lock l(my_mutex); + my_built_predecessors.copy_edges(v); + } + + size_t predecessor_count() __TBB_override { + spin_mutex::scoped_lock l(my_mutex); + return my_built_predecessors.edge_count(); + } +protected: + built_predecessors_type my_built_predecessors; +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ +}; + +template +class decrementer : public continue_receiver, tbb::internal::no_copy { + + T *my_node; + + task *execute() __TBB_override { + return my_node->decrement_counter( 1 ); + } + +protected: + + graph& graph_reference() const __TBB_override { + return my_node->my_graph; + } + +public: + + typedef continue_msg input_type; + typedef continue_msg output_type; + decrementer( int number_of_predecessors = 0 ) + : continue_receiver( + __TBB_FLOW_GRAPH_PRIORITY_ARG1(number_of_predecessors, tbb::flow::internal::no_priority) + ) + // Since decrementer does not make use of possibly unconstructed owner inside its + // constructor, my_node can be directly initialized with 'this' pointer passed from the + // owner, hence making method 'set_owner' needless. 
+ , my_node(NULL) + {} + void set_owner( T *node ) { my_node = node; } +}; + +} // namespace internal + +#endif // __TBB__flow_graph_body_impl_H + diff --git a/ohos/arm64-v8a/include/tbb/internal/_flow_graph_cache_impl.h b/ohos/arm64-v8a/include/tbb/internal/_flow_graph_cache_impl.h new file mode 100644 index 00000000..b670ae65 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/internal/_flow_graph_cache_impl.h @@ -0,0 +1,592 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB__flow_graph_cache_impl_H +#define __TBB__flow_graph_cache_impl_H + +#ifndef __TBB_flow_graph_H +#error Do not #include this internal file directly; use public TBB headers instead. +#endif + +// included in namespace tbb::flow::interfaceX (in flow_graph.h) + +namespace internal { + +//! A node_cache maintains a std::queue of elements of type T. Each operation is protected by a lock. +template< typename T, typename M=spin_mutex > +class node_cache { + public: + + typedef size_t size_type; + + bool empty() { + typename mutex_type::scoped_lock lock( my_mutex ); + return internal_empty(); + } + + void add( T &n ) { + typename mutex_type::scoped_lock lock( my_mutex ); + internal_push(n); + } + + void remove( T &n ) { + typename mutex_type::scoped_lock lock( my_mutex ); + for ( size_t i = internal_size(); i != 0; --i ) { + T &s = internal_pop(); + if ( &s == &n ) return; // only remove one predecessor per request + internal_push(s); + } + } + + void clear() { + while( !my_q.empty()) (void)my_q.pop(); +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + my_built_predecessors.clear(); +#endif + } + +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + typedef edge_container built_predecessors_type; + built_predecessors_type &built_predecessors() { return my_built_predecessors; } + + typedef typename edge_container::edge_list_type predecessor_list_type; + void internal_add_built_predecessor( T &n ) { + typename mutex_type::scoped_lock lock( my_mutex ); + my_built_predecessors.add_edge(n); + } + + void internal_delete_built_predecessor( T &n ) { + typename mutex_type::scoped_lock lock( my_mutex ); + my_built_predecessors.delete_edge(n); + } + + void copy_predecessors( predecessor_list_type &v) { + typename mutex_type::scoped_lock lock( my_mutex ); + my_built_predecessors.copy_edges(v); + } + + size_t predecessor_count() { + typename mutex_type::scoped_lock lock(my_mutex); + return (size_t)(my_built_predecessors.edge_count()); + } +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ + +protected: + + typedef M mutex_type; + mutex_type my_mutex; + std::queue< T * > my_q; +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + built_predecessors_type my_built_predecessors; +#endif + + // Assumes lock is held + inline bool internal_empty( ) { + return my_q.empty(); + } + + // Assumes lock is held + inline size_type internal_size( ) { + return my_q.size(); + } + + // Assumes lock is held + inline void internal_push( T &n ) { + my_q.push(&n); + } + + // Assumes lock is held + inline T &internal_pop() { + T *v = 
my_q.front(); + my_q.pop(); + return *v; + } + +}; + +//! A cache of predecessors that only supports try_get +template< typename T, typename M=spin_mutex > +#if __TBB_PREVIEW_ASYNC_MSG +// TODO: make predecessor_cache type T-independent when async_msg becomes regular feature +class predecessor_cache : public node_cache< untyped_sender, M > { +#else +class predecessor_cache : public node_cache< sender, M > { +#endif // __TBB_PREVIEW_ASYNC_MSG +public: + typedef M mutex_type; + typedef T output_type; +#if __TBB_PREVIEW_ASYNC_MSG + typedef untyped_sender predecessor_type; + typedef untyped_receiver successor_type; +#else + typedef sender predecessor_type; + typedef receiver successor_type; +#endif // __TBB_PREVIEW_ASYNC_MSG + + predecessor_cache( ) : my_owner( NULL ) { } + + void set_owner( successor_type *owner ) { my_owner = owner; } + + bool get_item( output_type &v ) { + + bool msg = false; + + do { + predecessor_type *src; + { + typename mutex_type::scoped_lock lock(this->my_mutex); + if ( this->internal_empty() ) { + break; + } + src = &this->internal_pop(); + } + + // Try to get from this sender + msg = src->try_get( v ); + + if (msg == false) { + // Relinquish ownership of the edge + if (my_owner) + src->register_successor( *my_owner ); + } else { + // Retain ownership of the edge + this->add(*src); + } + } while ( msg == false ); + return msg; + } + + // If we are removing arcs (rf_clear_edges), call clear() rather than reset(). + void reset() { + if (my_owner) { + for(;;) { + predecessor_type *src; + { + if (this->internal_empty()) break; + src = &this->internal_pop(); + } + src->register_successor( *my_owner ); + } + } + } + +protected: + +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + using node_cache< predecessor_type, M >::my_built_predecessors; +#endif + successor_type *my_owner; +}; + +//! An cache of predecessors that supports requests and reservations +// TODO: make reservable_predecessor_cache type T-independent when async_msg becomes regular feature +template< typename T, typename M=spin_mutex > +class reservable_predecessor_cache : public predecessor_cache< T, M > { +public: + typedef M mutex_type; + typedef T output_type; +#if __TBB_PREVIEW_ASYNC_MSG + typedef untyped_sender predecessor_type; + typedef untyped_receiver successor_type; +#else + typedef sender predecessor_type; + typedef receiver successor_type; +#endif // __TBB_PREVIEW_ASYNC_MSG + + reservable_predecessor_cache( ) : reserved_src(NULL) { } + + bool + try_reserve( output_type &v ) { + bool msg = false; + + do { + { + typename mutex_type::scoped_lock lock(this->my_mutex); + if ( reserved_src || this->internal_empty() ) + return false; + + reserved_src = &this->internal_pop(); + } + + // Try to get from this sender + msg = reserved_src->try_reserve( v ); + + if (msg == false) { + typename mutex_type::scoped_lock lock(this->my_mutex); + // Relinquish ownership of the edge + reserved_src->register_successor( *this->my_owner ); + reserved_src = NULL; + } else { + // Retain ownership of the edge + this->add( *reserved_src ); + } + } while ( msg == false ); + + return msg; + } + + bool + try_release( ) { + reserved_src->try_release( ); + reserved_src = NULL; + return true; + } + + bool + try_consume( ) { + reserved_src->try_consume( ); + reserved_src = NULL; + return true; + } + + void reset( ) { + reserved_src = NULL; + predecessor_cache::reset( ); + } + + void clear() { + reserved_src = NULL; + predecessor_cache::clear(); + } + +private: + predecessor_type *reserved_src; +}; + + +//! 
An abstract cache of successors +// TODO: make successor_cache type T-independent when async_msg becomes regular feature +template +class successor_cache : tbb::internal::no_copy { +protected: + + typedef M mutex_type; + mutex_type my_mutex; + +#if __TBB_PREVIEW_ASYNC_MSG + typedef untyped_receiver successor_type; + typedef untyped_receiver *pointer_type; + typedef untyped_sender owner_type; +#else + typedef receiver successor_type; + typedef receiver *pointer_type; + typedef sender owner_type; +#endif // __TBB_PREVIEW_ASYNC_MSG + typedef std::list< pointer_type > successors_type; +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + edge_container my_built_successors; +#endif + successors_type my_successors; + + owner_type *my_owner; + +public: +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + typedef typename edge_container::edge_list_type successor_list_type; + + edge_container &built_successors() { return my_built_successors; } + + void internal_add_built_successor( successor_type &r) { + typename mutex_type::scoped_lock l(my_mutex, true); + my_built_successors.add_edge( r ); + } + + void internal_delete_built_successor( successor_type &r) { + typename mutex_type::scoped_lock l(my_mutex, true); + my_built_successors.delete_edge(r); + } + + void copy_successors( successor_list_type &v) { + typename mutex_type::scoped_lock l(my_mutex, false); + my_built_successors.copy_edges(v); + } + + size_t successor_count() { + typename mutex_type::scoped_lock l(my_mutex,false); + return my_built_successors.edge_count(); + } + +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ + + successor_cache( ) : my_owner(NULL) {} + + void set_owner( owner_type *owner ) { my_owner = owner; } + + virtual ~successor_cache() {} + + void register_successor( successor_type &r ) { + typename mutex_type::scoped_lock l(my_mutex, true); + my_successors.push_back( &r ); + } + + void remove_successor( successor_type &r ) { + typename mutex_type::scoped_lock l(my_mutex, true); + for ( typename successors_type::iterator i = my_successors.begin(); + i != my_successors.end(); ++i ) { + if ( *i == & r ) { + my_successors.erase(i); + break; + } + } + } + + bool empty() { + typename mutex_type::scoped_lock l(my_mutex, false); + return my_successors.empty(); + } + + void clear() { + my_successors.clear(); +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + my_built_successors.clear(); +#endif + } + +#if !__TBB_PREVIEW_ASYNC_MSG + virtual task * try_put_task( const T &t ) = 0; +#endif // __TBB_PREVIEW_ASYNC_MSG + }; // successor_cache + +//! 
An abstract cache of successors, specialized to continue_msg +template +class successor_cache< continue_msg, M > : tbb::internal::no_copy { +protected: + + typedef M mutex_type; + mutex_type my_mutex; + +#if __TBB_PREVIEW_ASYNC_MSG + typedef untyped_receiver successor_type; + typedef untyped_receiver *pointer_type; +#else + typedef receiver successor_type; + typedef receiver *pointer_type; +#endif // __TBB_PREVIEW_ASYNC_MSG + typedef std::list< pointer_type > successors_type; + successors_type my_successors; +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + edge_container my_built_successors; + typedef edge_container::edge_list_type successor_list_type; +#endif + + sender *my_owner; + +public: + +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + + edge_container &built_successors() { return my_built_successors; } + + void internal_add_built_successor( successor_type &r) { + typename mutex_type::scoped_lock l(my_mutex, true); + my_built_successors.add_edge( r ); + } + + void internal_delete_built_successor( successor_type &r) { + typename mutex_type::scoped_lock l(my_mutex, true); + my_built_successors.delete_edge(r); + } + + void copy_successors( successor_list_type &v) { + typename mutex_type::scoped_lock l(my_mutex, false); + my_built_successors.copy_edges(v); + } + + size_t successor_count() { + typename mutex_type::scoped_lock l(my_mutex,false); + return my_built_successors.edge_count(); + } + +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ + + successor_cache( ) : my_owner(NULL) {} + + void set_owner( sender *owner ) { my_owner = owner; } + + virtual ~successor_cache() {} + + void register_successor( successor_type &r ) { + typename mutex_type::scoped_lock l(my_mutex, true); + my_successors.push_back( &r ); + if ( my_owner && r.is_continue_receiver() ) { + r.register_predecessor( *my_owner ); + } + } + + void remove_successor( successor_type &r ) { + typename mutex_type::scoped_lock l(my_mutex, true); + for ( successors_type::iterator i = my_successors.begin(); + i != my_successors.end(); ++i ) { + if ( *i == & r ) { + // TODO: Check if we need to test for continue_receiver before + // removing from r. + if ( my_owner ) + r.remove_predecessor( *my_owner ); + my_successors.erase(i); + break; + } + } + } + + bool empty() { + typename mutex_type::scoped_lock l(my_mutex, false); + return my_successors.empty(); + } + + void clear() { + my_successors.clear(); +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + my_built_successors.clear(); +#endif + } + +#if !__TBB_PREVIEW_ASYNC_MSG + virtual task * try_put_task( const continue_msg &t ) = 0; +#endif // __TBB_PREVIEW_ASYNC_MSG + +}; // successor_cache< continue_msg > + +//! 
A cache of successors that are broadcast to +// TODO: make broadcast_cache type T-independent when async_msg becomes regular feature +template +class broadcast_cache : public successor_cache { + typedef M mutex_type; + typedef typename successor_cache::successors_type successors_type; + +public: + + broadcast_cache( ) {} + + // as above, but call try_put_task instead, and return the last task we received (if any) +#if __TBB_PREVIEW_ASYNC_MSG + template + task * try_put_task( const X &t ) { +#else + task * try_put_task( const T &t ) __TBB_override { +#endif // __TBB_PREVIEW_ASYNC_MSG + task * last_task = NULL; + bool upgraded = true; + typename mutex_type::scoped_lock l(this->my_mutex, upgraded); + typename successors_type::iterator i = this->my_successors.begin(); + while ( i != this->my_successors.end() ) { + task *new_task = (*i)->try_put_task(t); + // workaround for icc bug + graph& graph_ref = (*i)->graph_reference(); + last_task = combine_tasks(graph_ref, last_task, new_task); // enqueue if necessary + if(new_task) { + ++i; + } + else { // failed + if ( (*i)->register_predecessor(*this->my_owner) ) { + if (!upgraded) { + l.upgrade_to_writer(); + upgraded = true; + } + i = this->my_successors.erase(i); + } else { + ++i; + } + } + } + return last_task; + } + + // call try_put_task and return list of received tasks +#if __TBB_PREVIEW_ASYNC_MSG + template + bool gather_successful_try_puts( const X &t, task_list &tasks ) { +#else + bool gather_successful_try_puts( const T &t, task_list &tasks ) { +#endif // __TBB_PREVIEW_ASYNC_MSG + bool upgraded = true; + bool is_at_least_one_put_successful = false; + typename mutex_type::scoped_lock l(this->my_mutex, upgraded); + typename successors_type::iterator i = this->my_successors.begin(); + while ( i != this->my_successors.end() ) { + task * new_task = (*i)->try_put_task(t); + if(new_task) { + ++i; + if(new_task != SUCCESSFULLY_ENQUEUED) { + tasks.push_back(*new_task); + } + is_at_least_one_put_successful = true; + } + else { // failed + if ( (*i)->register_predecessor(*this->my_owner) ) { + if (!upgraded) { + l.upgrade_to_writer(); + upgraded = true; + } + i = this->my_successors.erase(i); + } else { + ++i; + } + } + } + return is_at_least_one_put_successful; + } +}; + +//! 
A cache of successors that are put in a round-robin fashion +// TODO: make round_robin_cache type T-independent when async_msg becomes regular feature +template +class round_robin_cache : public successor_cache { + typedef size_t size_type; + typedef M mutex_type; + typedef typename successor_cache::successors_type successors_type; + +public: + + round_robin_cache( ) {} + + size_type size() { + typename mutex_type::scoped_lock l(this->my_mutex, false); + return this->my_successors.size(); + } + +#if __TBB_PREVIEW_ASYNC_MSG + template + task * try_put_task( const X &t ) { +#else + task *try_put_task( const T &t ) __TBB_override { +#endif // __TBB_PREVIEW_ASYNC_MSG + bool upgraded = true; + typename mutex_type::scoped_lock l(this->my_mutex, upgraded); + typename successors_type::iterator i = this->my_successors.begin(); + while ( i != this->my_successors.end() ) { + task *new_task = (*i)->try_put_task(t); + if ( new_task ) { + return new_task; + } else { + if ( (*i)->register_predecessor(*this->my_owner) ) { + if (!upgraded) { + l.upgrade_to_writer(); + upgraded = true; + } + i = this->my_successors.erase(i); + } + else { + ++i; + } + } + } + return NULL; + } +}; + +} // namespace internal + +#endif // __TBB__flow_graph_cache_impl_H diff --git a/ohos/arm64-v8a/include/tbb/internal/_flow_graph_impl.h b/ohos/arm64-v8a/include/tbb/internal/_flow_graph_impl.h new file mode 100644 index 00000000..2f18676e --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/internal/_flow_graph_impl.h @@ -0,0 +1,547 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +#ifndef __TBB_flow_graph_impl_H +#define __TBB_flow_graph_impl_H + +#include "../tbb_stddef.h" +#include "../task.h" +#include "../task_arena.h" +#include "../flow_graph_abstractions.h" + +#if __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES +#include "../concurrent_priority_queue.h" +#endif + +#include + +#if TBB_DEPRECATED_FLOW_ENQUEUE +#define FLOW_SPAWN(a) tbb::task::enqueue((a)) +#else +#define FLOW_SPAWN(a) tbb::task::spawn((a)) +#endif + +#if __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES +#define __TBB_FLOW_GRAPH_PRIORITY_EXPR( expr ) expr +#define __TBB_FLOW_GRAPH_PRIORITY_ARG0( priority ) , priority +#define __TBB_FLOW_GRAPH_PRIORITY_ARG1( arg1, priority ) arg1, priority +#else +#define __TBB_FLOW_GRAPH_PRIORITY_EXPR( expr ) +#define __TBB_FLOW_GRAPH_PRIORITY_ARG0( priority ) +#define __TBB_FLOW_GRAPH_PRIORITY_ARG1( arg1, priority ) arg1 +#endif // __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES + +#if TBB_DEPRECATED_LIMITER_NODE_CONSTRUCTOR +#define __TBB_DEPRECATED_LIMITER_EXPR( expr ) expr +#define __TBB_DEPRECATED_LIMITER_ARG2( arg1, arg2 ) arg1, arg2 +#define __TBB_DEPRECATED_LIMITER_ARG4( arg1, arg2, arg3, arg4 ) arg1, arg3, arg4 +#else +#define __TBB_DEPRECATED_LIMITER_EXPR( expr ) +#define __TBB_DEPRECATED_LIMITER_ARG2( arg1, arg2 ) arg1 +#define __TBB_DEPRECATED_LIMITER_ARG4( arg1, arg2, arg3, arg4 ) arg1, arg2 +#endif // TBB_DEPRECATED_LIMITER_NODE_CONSTRUCTOR + +namespace tbb { +namespace flow { + +namespace internal { +static tbb::task * const SUCCESSFULLY_ENQUEUED = (task *)-1; +#if __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES +typedef unsigned int node_priority_t; +static const node_priority_t no_priority = node_priority_t(0); +#endif +} + +namespace interface10 { +class graph; +} + +namespace interface11 { + +using tbb::flow::internal::SUCCESSFULLY_ENQUEUED; + +#if __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES +using tbb::flow::internal::node_priority_t; +using tbb::flow::internal::no_priority; +//! Base class for tasks generated by graph nodes. +struct graph_task : public task { + graph_task( node_priority_t node_priority = no_priority ) : priority( node_priority ) {} + node_priority_t priority; +}; +#else +typedef task graph_task; +#endif /* __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES */ + +class graph_node; + +template +class graph_iterator { + friend class tbb::flow::interface10::graph; + friend class graph_node; +public: + typedef size_t size_type; + typedef GraphNodeType value_type; + typedef GraphNodeType* pointer; + typedef GraphNodeType& reference; + typedef const GraphNodeType& const_reference; + typedef std::forward_iterator_tag iterator_category; + + //! Default constructor + graph_iterator() : my_graph(NULL), current_node(NULL) {} + + //! Copy constructor + graph_iterator(const graph_iterator& other) : + my_graph(other.my_graph), current_node(other.current_node) + {} + + //! Assignment + graph_iterator& operator=(const graph_iterator& other) { + if (this != &other) { + my_graph = other.my_graph; + current_node = other.current_node; + } + return *this; + } + + //! Dereference + reference operator*() const; + + //! Dereference + pointer operator->() const; + + //! Equality + bool operator==(const graph_iterator& other) const { + return ((my_graph == other.my_graph) && (current_node == other.current_node)); + } + + //! Inequality + bool operator!=(const graph_iterator& other) const { return !(operator==(other)); } + + //! Pre-increment + graph_iterator& operator++() { + internal_forward(); + return *this; + } + + //! 
Post-increment + graph_iterator operator++(int) { + graph_iterator result = *this; + operator++(); + return result; + } + +private: + // the graph over which we are iterating + GraphContainerType *my_graph; + // pointer into my_graph's my_nodes list + pointer current_node; + + //! Private initializing constructor for begin() and end() iterators + graph_iterator(GraphContainerType *g, bool begin); + void internal_forward(); +}; // class graph_iterator + +// flags to modify the behavior of the graph reset(). Can be combined. +enum reset_flags { + rf_reset_protocol = 0, + rf_reset_bodies = 1 << 0, // delete the current node body, reset to a copy of the initial node body. + rf_clear_edges = 1 << 1 // delete edges +}; + +namespace internal { + +void activate_graph(tbb::flow::interface10::graph& g); +void deactivate_graph(tbb::flow::interface10::graph& g); +bool is_graph_active(tbb::flow::interface10::graph& g); +tbb::task& prioritize_task(tbb::flow::interface10::graph& g, tbb::task& arena_task); +void spawn_in_graph_arena(tbb::flow::interface10::graph& g, tbb::task& arena_task); +void enqueue_in_graph_arena(tbb::flow::interface10::graph &g, tbb::task& arena_task); +void add_task_to_graph_reset_list(tbb::flow::interface10::graph& g, tbb::task *tp); + +#if __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES +struct graph_task_comparator { + bool operator()(const graph_task* left, const graph_task* right) { + return left->priority < right->priority; + } +}; + +typedef tbb::concurrent_priority_queue graph_task_priority_queue_t; + +class priority_task_selector : public task { +public: + priority_task_selector(graph_task_priority_queue_t& priority_queue) + : my_priority_queue(priority_queue) {} + task* execute() __TBB_override { + graph_task* t = NULL; + bool result = my_priority_queue.try_pop(t); + __TBB_ASSERT_EX( result, "Number of critical tasks for scheduler and tasks" + " in graph's priority queue mismatched" ); + __TBB_ASSERT( t && t != SUCCESSFULLY_ENQUEUED, + "Incorrect task submitted to graph priority queue" ); + __TBB_ASSERT( t->priority != tbb::flow::internal::no_priority, + "Tasks from graph's priority queue must have priority" ); + task* t_next = t->execute(); + task::destroy(*t); + return t_next; + } +private: + graph_task_priority_queue_t& my_priority_queue; +}; +#endif /* __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES */ + +} + +} // namespace interfaceX +namespace interface10 { +//! 
The graph class +/** This class serves as a handle to the graph */ +class graph : tbb::internal::no_copy, public tbb::flow::graph_proxy { + friend class tbb::flow::interface11::graph_node; + + template< typename Body > + class run_task : public tbb::flow::interface11::graph_task { + public: + run_task(Body& body +#if __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES + , tbb::flow::interface11::node_priority_t node_priority = tbb::flow::interface11::no_priority + ) : tbb::flow::interface11::graph_task(node_priority), +#else + ) : +#endif + my_body(body) { } + tbb::task *execute() __TBB_override { + my_body(); + return NULL; + } + private: + Body my_body; + }; + + template< typename Receiver, typename Body > + class run_and_put_task : public tbb::flow::interface11::graph_task { + public: + run_and_put_task(Receiver &r, Body& body) : my_receiver(r), my_body(body) {} + tbb::task *execute() __TBB_override { + tbb::task *res = my_receiver.try_put_task(my_body()); + if (res == tbb::flow::interface11::SUCCESSFULLY_ENQUEUED) res = NULL; + return res; + } + private: + Receiver &my_receiver; + Body my_body; + }; + typedef std::list task_list_type; + + class wait_functor { + tbb::task* graph_root_task; + public: + wait_functor(tbb::task* t) : graph_root_task(t) {} + void operator()() const { graph_root_task->wait_for_all(); } + }; + + //! A functor that spawns a task + class spawn_functor : tbb::internal::no_assign { + tbb::task& spawn_task; + public: + spawn_functor(tbb::task& t) : spawn_task(t) {} + void operator()() const { + FLOW_SPAWN(spawn_task); + } + }; + + void prepare_task_arena(bool reinit = false) { + if (reinit) { + __TBB_ASSERT(my_task_arena, "task arena is NULL"); + my_task_arena->terminate(); + my_task_arena->initialize(tbb::task_arena::attach()); + } + else { + __TBB_ASSERT(my_task_arena == NULL, "task arena is not NULL"); + my_task_arena = new tbb::task_arena(tbb::task_arena::attach()); + } + if (!my_task_arena->is_active()) // failed to attach + my_task_arena->initialize(); // create a new, default-initialized arena + __TBB_ASSERT(my_task_arena->is_active(), "task arena is not active"); + } + +public: + //! Constructs a graph with isolated task_group_context + graph(); + + //! Constructs a graph with use_this_context as context + explicit graph(tbb::task_group_context& use_this_context); + + //! Destroys the graph. + /** Calls wait_for_all, then destroys the root task and context. */ + ~graph(); + +#if TBB_PREVIEW_FLOW_GRAPH_TRACE + void set_name(const char *name); +#endif + + void increment_wait_count() { + reserve_wait(); + } + + void decrement_wait_count() { + release_wait(); + } + + //! Used to register that an external entity may still interact with the graph. + /** The graph will not return from wait_for_all until a matching number of decrement_wait_count calls + is made. */ + void reserve_wait() __TBB_override; + + //! Deregisters an external entity that may have interacted with the graph. + /** The graph will not return from wait_for_all until all the number of decrement_wait_count calls + matches the number of increment_wait_count calls. */ + void release_wait() __TBB_override; + + //! Spawns a task that runs a body and puts its output to a specific receiver + /** The task is spawned as a child of the graph. This is useful for running tasks + that need to block a wait_for_all() on the graph. For example a one-off source. 
*/ + template< typename Receiver, typename Body > + void run(Receiver &r, Body body) { + if (tbb::flow::interface11::internal::is_graph_active(*this)) { + task* rtask = new (task::allocate_additional_child_of(*root_task())) + run_and_put_task< Receiver, Body >(r, body); + my_task_arena->execute(spawn_functor(*rtask)); + } + } + + //! Spawns a task that runs a function object + /** The task is spawned as a child of the graph. This is useful for running tasks + that need to block a wait_for_all() on the graph. For example a one-off source. */ + template< typename Body > + void run(Body body) { + if (tbb::flow::interface11::internal::is_graph_active(*this)) { + task* rtask = new (task::allocate_additional_child_of(*root_task())) run_task< Body >(body); + my_task_arena->execute(spawn_functor(*rtask)); + } + } + + //! Wait until graph is idle and decrement_wait_count calls equals increment_wait_count calls. + /** The waiting thread will go off and steal work while it is block in the wait_for_all. */ + void wait_for_all() { + cancelled = false; + caught_exception = false; + if (my_root_task) { +#if TBB_USE_EXCEPTIONS + try { +#endif + my_task_arena->execute(wait_functor(my_root_task)); +#if __TBB_TASK_GROUP_CONTEXT + cancelled = my_context->is_group_execution_cancelled(); +#endif +#if TBB_USE_EXCEPTIONS + } + catch (...) { + my_root_task->set_ref_count(1); + my_context->reset(); + caught_exception = true; + cancelled = true; + throw; + } +#endif +#if __TBB_TASK_GROUP_CONTEXT + // TODO: the "if" condition below is just a work-around to support the concurrent wait + // mode. The cancellation and exception mechanisms are still broken in this mode. + // Consider using task group not to re-implement the same functionality. + if (!(my_context->traits() & tbb::task_group_context::concurrent_wait)) { + my_context->reset(); // consistent with behavior in catch() +#endif + my_root_task->set_ref_count(1); +#if __TBB_TASK_GROUP_CONTEXT + } +#endif + } + } + + //! Returns the root task of the graph + tbb::task * root_task() { + return my_root_task; + } + + // ITERATORS + template + friend class tbb::flow::interface11::graph_iterator; + + // Graph iterator typedefs + typedef tbb::flow::interface11::graph_iterator iterator; + typedef tbb::flow::interface11::graph_iterator const_iterator; + + // Graph iterator constructors + //! start iterator + iterator begin(); + //! end iterator + iterator end(); + //! start const iterator + const_iterator begin() const; + //! end const iterator + const_iterator end() const; + //! start const iterator + const_iterator cbegin() const; + //! end const iterator + const_iterator cend() const; + + //! return status of graph execution + bool is_cancelled() { return cancelled; } + bool exception_thrown() { return caught_exception; } + + // thread-unsafe state reset. 
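As a usage-level illustration of the run()/wait_for_all() pair declared above: run() spawns the body as a child of the graph's root task inside the graph's arena, so wait_for_all() blocks until it completes. A minimal sketch against the public tbb::flow API:

#include "tbb/flow_graph.h"
#include <iostream>

int main() {
    tbb::flow::graph g;
    // The body runs as a child of the graph's root task, so the graph tracks it.
    g.run([] { std::cout << "one-off work attached to the graph\n"; });
    g.wait_for_all();   // returns only after the spawned body has finished
    return 0;
}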
+ void reset(tbb::flow::interface11::reset_flags f = tbb::flow::interface11::rf_reset_protocol); + +private: + tbb::task *my_root_task; +#if __TBB_TASK_GROUP_CONTEXT + tbb::task_group_context *my_context; +#endif + bool own_context; + bool cancelled; + bool caught_exception; + bool my_is_active; + task_list_type my_reset_task_list; + + tbb::flow::interface11::graph_node *my_nodes, *my_nodes_last; + + tbb::spin_mutex nodelist_mutex; + void register_node(tbb::flow::interface11::graph_node *n); + void remove_node(tbb::flow::interface11::graph_node *n); + + tbb::task_arena* my_task_arena; + +#if __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES + tbb::flow::interface11::internal::graph_task_priority_queue_t my_priority_queue; +#endif + + friend void tbb::flow::interface11::internal::activate_graph(graph& g); + friend void tbb::flow::interface11::internal::deactivate_graph(graph& g); + friend bool tbb::flow::interface11::internal::is_graph_active(graph& g); + friend tbb::task& tbb::flow::interface11::internal::prioritize_task(graph& g, tbb::task& arena_task); + friend void tbb::flow::interface11::internal::spawn_in_graph_arena(graph& g, tbb::task& arena_task); + friend void tbb::flow::interface11::internal::enqueue_in_graph_arena(graph &g, tbb::task& arena_task); + friend void tbb::flow::interface11::internal::add_task_to_graph_reset_list(graph& g, tbb::task *tp); + + friend class tbb::interface7::internal::task_arena_base; + +}; // class graph +} // namespace interface10 + +namespace interface11 { + +using tbb::flow::interface10::graph; + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET +namespace internal{ +class get_graph_helper; +} +#endif + +//! The base of all graph nodes. +class graph_node : tbb::internal::no_copy { + friend class graph; + template + friend class graph_iterator; + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + friend class internal::get_graph_helper; +#endif + +protected: + graph& my_graph; + graph_node *next, *prev; +public: + explicit graph_node(graph& g); + + virtual ~graph_node(); + +#if TBB_PREVIEW_FLOW_GRAPH_TRACE + virtual void set_name(const char *name) = 0; +#endif + +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + virtual void extract() = 0; +#endif + +protected: + // performs the reset on an individual node. + virtual void reset_node(reset_flags f = rf_reset_protocol) = 0; +}; // class graph_node + +namespace internal { + +inline void activate_graph(graph& g) { + g.my_is_active = true; +} + +inline void deactivate_graph(graph& g) { + g.my_is_active = false; +} + +inline bool is_graph_active(graph& g) { + return g.my_is_active; +} + +#if __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES +inline tbb::task& prioritize_task(graph& g, tbb::task& t) { + task* critical_task = &t; + // TODO: change flow graph's interfaces to work with graph_task type instead of tbb::task. + graph_task* gt = static_cast(&t); + if( gt->priority != no_priority ) { + //! Non-preemptive priority pattern. The original task is submitted as a work item to the + //! priority queue, and a new critical task is created to take and execute a work item with + //! the highest known priority. The reference counting responsibility is transferred (via + //! allocate_continuation) to the new task. + critical_task = new( gt->allocate_continuation() ) priority_task_selector(g.my_priority_queue); + tbb::internal::make_critical( *critical_task ); + g.my_priority_queue.push(gt); + } + return *critical_task; +} +#else +inline tbb::task& prioritize_task(graph&, tbb::task& t) { + return t; +} +#endif /* __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES */ + +//! 
Spawns a task inside graph arena +inline void spawn_in_graph_arena(graph& g, tbb::task& arena_task) { + if (is_graph_active(g)) { + graph::spawn_functor s_fn(prioritize_task(g, arena_task)); + __TBB_ASSERT(g.my_task_arena && g.my_task_arena->is_active(), NULL); + g.my_task_arena->execute(s_fn); + } +} + +//! Enqueues a task inside graph arena +inline void enqueue_in_graph_arena(graph &g, tbb::task& arena_task) { + if (is_graph_active(g)) { + __TBB_ASSERT( g.my_task_arena && g.my_task_arena->is_active(), "Is graph's arena initialized and active?" ); + task::enqueue(prioritize_task(g, arena_task), *g.my_task_arena); + } +} + +inline void add_task_to_graph_reset_list(graph& g, tbb::task *tp) { + g.my_reset_task_list.push_back(tp); +} + +} // namespace internal + +} // namespace interfaceX +} // namespace flow +} // namespace tbb + +#endif // __TBB_flow_graph_impl_H diff --git a/ohos/arm64-v8a/include/tbb/internal/_flow_graph_indexer_impl.h b/ohos/arm64-v8a/include/tbb/internal/_flow_graph_indexer_impl.h new file mode 100644 index 00000000..2900db91 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/internal/_flow_graph_indexer_impl.h @@ -0,0 +1,480 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB__flow_graph_indexer_impl_H +#define __TBB__flow_graph_indexer_impl_H + +#ifndef __TBB_flow_graph_H +#error Do not #include this internal file directly; use public TBB headers instead. +#endif + +#include "_flow_graph_types_impl.h" + +namespace internal { + + // Output of the indexer_node is a tbb::flow::tagged_msg, and will be of + // the form tagged_msg + // where the value of tag will indicate which result was put to the + // successor. 
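To make the tagged_msg comment above concrete, here is a minimal sketch of an indexer_node feeding one successor, assuming the public tbb::flow API (port indices become the message tag, and cast_to retrieves the stored value):

#include "tbb/flow_graph.h"
#include <iostream>

int main() {
    using namespace tbb::flow;
    graph g;
    indexer_node<int, float> idx(g);   // port 0 carries int, port 1 carries float
    function_node<indexer_node<int, float>::output_type> sink(g, serial,
        [](const indexer_node<int, float>::output_type& msg) {
            // The tag identifies which input port produced the value.
            if (msg.tag() == 0)
                std::cout << "int: " << cast_to<int>(msg) << "\n";
            else
                std::cout << "float: " << cast_to<float>(msg) << "\n";
        });
    make_edge(idx, sink);
    input_port<0>(idx).try_put(3);
    input_port<1>(idx).try_put(2.5f);
    g.wait_for_all();
    return 0;
}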
+ + template + task* do_try_put(const T &v, void *p) { + typename IndexerNodeBaseType::output_type o(K, v); + return reinterpret_cast(p)->try_put_task(&o); + } + + template + struct indexer_helper { + template + static inline void set_indexer_node_pointer(PortTuple &my_input, IndexerNodeBaseType *p, graph& g) { + typedef typename tuple_element::type T; + task *(*indexer_node_put_task)(const T&, void *) = do_try_put; + tbb::flow::get(my_input).set_up(p, indexer_node_put_task, g); + indexer_helper::template set_indexer_node_pointer(my_input, p, g); + } + template + static inline void reset_inputs(InputTuple &my_input, reset_flags f) { + indexer_helper::reset_inputs(my_input, f); + tbb::flow::get(my_input).reset_receiver(f); + } +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + template + static inline void extract(InputTuple &my_input) { + indexer_helper::extract(my_input); + tbb::flow::get(my_input).extract_receiver(); + } +#endif + }; + + template + struct indexer_helper { + template + static inline void set_indexer_node_pointer(PortTuple &my_input, IndexerNodeBaseType *p, graph& g) { + typedef typename tuple_element<0, TupleTypes>::type T; + task *(*indexer_node_put_task)(const T&, void *) = do_try_put; + tbb::flow::get<0>(my_input).set_up(p, indexer_node_put_task, g); + } + template + static inline void reset_inputs(InputTuple &my_input, reset_flags f) { + tbb::flow::get<0>(my_input).reset_receiver(f); + } +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + template + static inline void extract(InputTuple &my_input) { + tbb::flow::get<0>(my_input).extract_receiver(); + } +#endif + }; + + template + class indexer_input_port : public receiver { + private: + void* my_indexer_ptr; + typedef task* (* forward_function_ptr)(T const &, void* ); + forward_function_ptr my_try_put_task; +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + spin_mutex my_pred_mutex; + typedef typename receiver::built_predecessors_type built_predecessors_type; + built_predecessors_type my_built_predecessors; +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ + graph* my_graph; + public: +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + indexer_input_port() : my_pred_mutex(), my_graph(NULL) {} + indexer_input_port( const indexer_input_port & other) : receiver(), my_pred_mutex(), my_graph(other.my_graph) { + } +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ + void set_up(void* p, forward_function_ptr f, graph& g) { + my_indexer_ptr = p; + my_try_put_task = f; + my_graph = &g; + } +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + typedef typename receiver::predecessor_list_type predecessor_list_type; + typedef typename receiver::predecessor_type predecessor_type; + + built_predecessors_type &built_predecessors() __TBB_override { return my_built_predecessors; } + + size_t predecessor_count() __TBB_override { + spin_mutex::scoped_lock l(my_pred_mutex); + return my_built_predecessors.edge_count(); + } + void internal_add_built_predecessor(predecessor_type &p) __TBB_override { + spin_mutex::scoped_lock l(my_pred_mutex); + my_built_predecessors.add_edge(p); + } + void internal_delete_built_predecessor(predecessor_type &p) __TBB_override { + spin_mutex::scoped_lock l(my_pred_mutex); + my_built_predecessors.delete_edge(p); + } + void copy_predecessors( predecessor_list_type &v) __TBB_override { + spin_mutex::scoped_lock l(my_pred_mutex); + my_built_predecessors.copy_edges(v); + } +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ + protected: + template< typename R, typename B > friend class run_and_put_task; + template friend class internal::broadcast_cache; + 
template friend class internal::round_robin_cache; + task *try_put_task(const T &v) __TBB_override { + return my_try_put_task(v, my_indexer_ptr); + } + + graph& graph_reference() const __TBB_override { + return *my_graph; + } + + public: +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + void reset_receiver(reset_flags f) __TBB_override { if(f&rf_clear_edges) my_built_predecessors.clear(); } +#else + void reset_receiver(reset_flags /*f*/) __TBB_override { } +#endif + +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + void extract_receiver() { my_built_predecessors.receiver_extract(*this); } +#endif + }; + + template + class indexer_node_FE { + public: + static const int N = tbb::flow::tuple_size::value; + typedef OutputType output_type; + typedef InputTuple input_type; + + // Some versions of Intel(R) C++ Compiler fail to generate an implicit constructor for the class which has std::tuple as a member. + indexer_node_FE() : my_inputs() {} + + input_type &input_ports() { return my_inputs; } + protected: + input_type my_inputs; + }; + + //! indexer_node_base + template + class indexer_node_base : public graph_node, public indexer_node_FE, + public sender { + protected: + using graph_node::my_graph; + public: + static const size_t N = tbb::flow::tuple_size::value; + typedef OutputType output_type; + typedef StructTypes tuple_types; + typedef typename sender::successor_type successor_type; + typedef indexer_node_FE input_ports_type; +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + typedef typename sender::built_successors_type built_successors_type; + typedef typename sender::successor_list_type successor_list_type; +#endif + + private: + // ----------- Aggregator ------------ + enum op_type { reg_succ, rem_succ, try__put_task +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + , add_blt_succ, del_blt_succ, + blt_succ_cnt, blt_succ_cpy +#endif + }; + typedef indexer_node_base class_type; + + class indexer_node_base_operation : public aggregated_operation { + public: + char type; + union { + output_type const *my_arg; + successor_type *my_succ; + task *bypass_t; +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + size_t cnt_val; + successor_list_type *succv; +#endif + }; + indexer_node_base_operation(const output_type* e, op_type t) : + type(char(t)), my_arg(e) {} + indexer_node_base_operation(const successor_type &s, op_type t) : type(char(t)), + my_succ(const_cast(&s)) {} + indexer_node_base_operation(op_type t) : type(char(t)) {} + }; + + typedef internal::aggregating_functor handler_type; + friend class internal::aggregating_functor; + aggregator my_aggregator; + + void handle_operations(indexer_node_base_operation* op_list) { + indexer_node_base_operation *current; + while(op_list) { + current = op_list; + op_list = op_list->next; + switch(current->type) { + + case reg_succ: + my_successors.register_successor(*(current->my_succ)); + __TBB_store_with_release(current->status, SUCCEEDED); + break; + + case rem_succ: + my_successors.remove_successor(*(current->my_succ)); + __TBB_store_with_release(current->status, SUCCEEDED); + break; + case try__put_task: { + current->bypass_t = my_successors.try_put_task(*(current->my_arg)); + __TBB_store_with_release(current->status, SUCCEEDED); // return of try_put_task actual return value + } + break; +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + case add_blt_succ: + my_successors.internal_add_built_successor(*(current->my_succ)); + __TBB_store_with_release(current->status, SUCCEEDED); + break; + case del_blt_succ: + my_successors.internal_delete_built_successor(*(current->my_succ)); + 
__TBB_store_with_release(current->status, SUCCEEDED); + break; + case blt_succ_cnt: + current->cnt_val = my_successors.successor_count(); + __TBB_store_with_release(current->status, SUCCEEDED); + break; + case blt_succ_cpy: + my_successors.copy_successors(*(current->succv)); + __TBB_store_with_release(current->status, SUCCEEDED); + break; +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ + } + } + } + // ---------- end aggregator ----------- + public: + indexer_node_base(graph& g) : graph_node(g), input_ports_type() { + indexer_helper::set_indexer_node_pointer(this->my_inputs, this, g); + my_successors.set_owner(this); + my_aggregator.initialize_handler(handler_type(this)); + } + + indexer_node_base(const indexer_node_base& other) : graph_node(other.my_graph), input_ports_type(), sender() { + indexer_helper::set_indexer_node_pointer(this->my_inputs, this, other.my_graph); + my_successors.set_owner(this); + my_aggregator.initialize_handler(handler_type(this)); + } + + bool register_successor(successor_type &r) __TBB_override { + indexer_node_base_operation op_data(r, reg_succ); + my_aggregator.execute(&op_data); + return op_data.status == SUCCEEDED; + } + + bool remove_successor( successor_type &r) __TBB_override { + indexer_node_base_operation op_data(r, rem_succ); + my_aggregator.execute(&op_data); + return op_data.status == SUCCEEDED; + } + + task * try_put_task(output_type const *v) { // not a virtual method in this class + indexer_node_base_operation op_data(v, try__put_task); + my_aggregator.execute(&op_data); + return op_data.bypass_t; + } + +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + + built_successors_type &built_successors() __TBB_override { return my_successors.built_successors(); } + + void internal_add_built_successor( successor_type &r) __TBB_override { + indexer_node_base_operation op_data(r, add_blt_succ); + my_aggregator.execute(&op_data); + } + + void internal_delete_built_successor( successor_type &r) __TBB_override { + indexer_node_base_operation op_data(r, del_blt_succ); + my_aggregator.execute(&op_data); + } + + size_t successor_count() __TBB_override { + indexer_node_base_operation op_data(blt_succ_cnt); + my_aggregator.execute(&op_data); + return op_data.cnt_val; + } + + void copy_successors( successor_list_type &v) __TBB_override { + indexer_node_base_operation op_data(blt_succ_cpy); + op_data.succv = &v; + my_aggregator.execute(&op_data); + } + void extract() __TBB_override { + my_successors.built_successors().sender_extract(*this); + indexer_helper::extract(this->my_inputs); + } +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ + protected: + void reset_node(reset_flags f) __TBB_override { + if(f & rf_clear_edges) { + my_successors.clear(); + indexer_helper::reset_inputs(this->my_inputs,f); + } + } + + private: + broadcast_cache my_successors; + }; //indexer_node_base + + + template struct input_types; + + template + struct input_types<1, InputTuple> { + typedef typename tuple_element<0, InputTuple>::type first_type; + typedef typename internal::tagged_msg type; + }; + + template + struct input_types<2, InputTuple> { + typedef typename tuple_element<0, InputTuple>::type first_type; + typedef typename tuple_element<1, InputTuple>::type second_type; + typedef typename internal::tagged_msg type; + }; + + template + struct input_types<3, InputTuple> { + typedef typename tuple_element<0, InputTuple>::type first_type; + typedef typename tuple_element<1, InputTuple>::type second_type; + typedef typename tuple_element<2, InputTuple>::type third_type; + typedef typename 
internal::tagged_msg type; + }; + + template + struct input_types<4, InputTuple> { + typedef typename tuple_element<0, InputTuple>::type first_type; + typedef typename tuple_element<1, InputTuple>::type second_type; + typedef typename tuple_element<2, InputTuple>::type third_type; + typedef typename tuple_element<3, InputTuple>::type fourth_type; + typedef typename internal::tagged_msg type; + }; + + template + struct input_types<5, InputTuple> { + typedef typename tuple_element<0, InputTuple>::type first_type; + typedef typename tuple_element<1, InputTuple>::type second_type; + typedef typename tuple_element<2, InputTuple>::type third_type; + typedef typename tuple_element<3, InputTuple>::type fourth_type; + typedef typename tuple_element<4, InputTuple>::type fifth_type; + typedef typename internal::tagged_msg type; + }; + + template + struct input_types<6, InputTuple> { + typedef typename tuple_element<0, InputTuple>::type first_type; + typedef typename tuple_element<1, InputTuple>::type second_type; + typedef typename tuple_element<2, InputTuple>::type third_type; + typedef typename tuple_element<3, InputTuple>::type fourth_type; + typedef typename tuple_element<4, InputTuple>::type fifth_type; + typedef typename tuple_element<5, InputTuple>::type sixth_type; + typedef typename internal::tagged_msg type; + }; + + template + struct input_types<7, InputTuple> { + typedef typename tuple_element<0, InputTuple>::type first_type; + typedef typename tuple_element<1, InputTuple>::type second_type; + typedef typename tuple_element<2, InputTuple>::type third_type; + typedef typename tuple_element<3, InputTuple>::type fourth_type; + typedef typename tuple_element<4, InputTuple>::type fifth_type; + typedef typename tuple_element<5, InputTuple>::type sixth_type; + typedef typename tuple_element<6, InputTuple>::type seventh_type; + typedef typename internal::tagged_msg type; + }; + + + template + struct input_types<8, InputTuple> { + typedef typename tuple_element<0, InputTuple>::type first_type; + typedef typename tuple_element<1, InputTuple>::type second_type; + typedef typename tuple_element<2, InputTuple>::type third_type; + typedef typename tuple_element<3, InputTuple>::type fourth_type; + typedef typename tuple_element<4, InputTuple>::type fifth_type; + typedef typename tuple_element<5, InputTuple>::type sixth_type; + typedef typename tuple_element<6, InputTuple>::type seventh_type; + typedef typename tuple_element<7, InputTuple>::type eighth_type; + typedef typename internal::tagged_msg type; + }; + + + template + struct input_types<9, InputTuple> { + typedef typename tuple_element<0, InputTuple>::type first_type; + typedef typename tuple_element<1, InputTuple>::type second_type; + typedef typename tuple_element<2, InputTuple>::type third_type; + typedef typename tuple_element<3, InputTuple>::type fourth_type; + typedef typename tuple_element<4, InputTuple>::type fifth_type; + typedef typename tuple_element<5, InputTuple>::type sixth_type; + typedef typename tuple_element<6, InputTuple>::type seventh_type; + typedef typename tuple_element<7, InputTuple>::type eighth_type; + typedef typename tuple_element<8, InputTuple>::type nineth_type; + typedef typename internal::tagged_msg type; + }; + + template + struct input_types<10, InputTuple> { + typedef typename tuple_element<0, InputTuple>::type first_type; + typedef typename tuple_element<1, InputTuple>::type second_type; + typedef typename tuple_element<2, InputTuple>::type third_type; + typedef typename tuple_element<3, InputTuple>::type 
fourth_type; + typedef typename tuple_element<4, InputTuple>::type fifth_type; + typedef typename tuple_element<5, InputTuple>::type sixth_type; + typedef typename tuple_element<6, InputTuple>::type seventh_type; + typedef typename tuple_element<7, InputTuple>::type eighth_type; + typedef typename tuple_element<8, InputTuple>::type nineth_type; + typedef typename tuple_element<9, InputTuple>::type tenth_type; + typedef typename internal::tagged_msg type; + }; + + // type generators + template + struct indexer_types : public input_types::value, OutputTuple> { + static const int N = tbb::flow::tuple_size::value; + typedef typename input_types::type output_type; + typedef typename wrap_tuple_elements::type input_ports_type; + typedef internal::indexer_node_FE indexer_FE_type; + typedef internal::indexer_node_base indexer_base_type; + }; + + template + class unfolded_indexer_node : public indexer_types::indexer_base_type { + public: + typedef typename indexer_types::input_ports_type input_ports_type; + typedef OutputTuple tuple_types; + typedef typename indexer_types::output_type output_type; + private: + typedef typename indexer_types::indexer_base_type base_type; + public: + unfolded_indexer_node(graph& g) : base_type(g) {} + unfolded_indexer_node(const unfolded_indexer_node &other) : base_type(other) {} + }; + +} /* namespace internal */ + +#endif /* __TBB__flow_graph_indexer_impl_H */ diff --git a/ohos/arm64-v8a/include/tbb/internal/_flow_graph_item_buffer_impl.h b/ohos/arm64-v8a/include/tbb/internal/_flow_graph_item_buffer_impl.h new file mode 100644 index 00000000..e5a97d1c --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/internal/_flow_graph_item_buffer_impl.h @@ -0,0 +1,283 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB__flow_graph_item_buffer_impl_H +#define __TBB__flow_graph_item_buffer_impl_H + +#ifndef __TBB_flow_graph_H +#error Do not #include this internal file directly; use public TBB headers instead. +#endif + +#include "tbb/internal/_flow_graph_types_impl.h" // for aligned_pair + +// in namespace tbb::flow::interfaceX (included in _flow_graph_node_impl.h) + + //! Expandable buffer of items. The possible operations are push, pop, + //* tests for empty and so forth. No mutual exclusion is built in. + //* objects are constructed into and explicitly-destroyed. get_my_item gives + // a read-only reference to the item in the buffer. set_my_item may be called + // with either an empty or occupied slot. 
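The buffer declared below keeps each element in a raw, aligned slot paired with a state flag, constructing values in place and destroying them explicitly, exactly as the comment above says. A minimal stand-alone sketch of that slot technique, using C++11 std::aligned_storage in place of the library's own aligned_pair:

    #include <cassert>
    #include <new>          // placement new
    #include <type_traits>  // std::aligned_storage

    // Illustrative slot: raw aligned storage plus an occupancy flag, mirroring
    // the (storage, state) pairs behind set_my_item/get_my_item/destroy_item.
    template <typename T>
    struct slot {
        typename std::aligned_storage<sizeof(T), alignof(T)>::type storage;
        bool occupied;

        slot() : occupied(false) {}
        void set(const T& v) {                  // construct into the slot
            if (occupied) destroy();
            new (&storage) T(v);
            occupied = true;
        }
        const T& get() const {                  // read-only reference to the item
            assert(occupied);
            return *reinterpret_cast<const T*>(&storage);
        }
        void destroy() {                        // explicit destruction
            assert(occupied);
            reinterpret_cast<T*>(&storage)->~T();
            occupied = false;
        }
    };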
+ + using internal::aligned_pair; + using internal::alignment_of; + +namespace internal { + + template > + class item_buffer { + public: + typedef T item_type; + enum buffer_item_state { no_item=0, has_item=1, reserved_item=2 }; + protected: + typedef size_t size_type; + typedef typename aligned_pair::type buffer_item_type; + typedef typename tbb::internal::allocator_rebind::type allocator_type; + buffer_item_type *my_array; + size_type my_array_size; + static const size_type initial_buffer_size = 4; + size_type my_head; + size_type my_tail; + + bool buffer_empty() const { return my_head == my_tail; } + + buffer_item_type &item(size_type i) { + __TBB_ASSERT(!(size_type(&(my_array[i&(my_array_size-1)].second))%alignment_of::value),NULL); + __TBB_ASSERT(!(size_type(&(my_array[i&(my_array_size-1)].first))%alignment_of::value), NULL); + return my_array[i & (my_array_size - 1) ]; + } + + const buffer_item_type &item(size_type i) const { + __TBB_ASSERT(!(size_type(&(my_array[i&(my_array_size-1)].second))%alignment_of::value), NULL); + __TBB_ASSERT(!(size_type(&(my_array[i&(my_array_size-1)].first))%alignment_of::value), NULL); + return my_array[i & (my_array_size-1)]; + } + + bool my_item_valid(size_type i) const { return (i < my_tail) && (i >= my_head) && (item(i).second != no_item); } + bool my_item_reserved(size_type i) const { return item(i).second == reserved_item; } + + // object management in buffer + const item_type &get_my_item(size_t i) const { + __TBB_ASSERT(my_item_valid(i),"attempt to get invalid item"); + item_type *itm = (tbb::internal::punned_cast(&(item(i).first))); + return *(const item_type *)itm; + } + + // may be called with an empty slot or a slot that has already been constructed into. + void set_my_item(size_t i, const item_type &o) { + if(item(i).second != no_item) { + destroy_item(i); + } + new(&(item(i).first)) item_type(o); + item(i).second = has_item; + } + + // destructively-fetch an object from the buffer + void fetch_item(size_t i, item_type &o) { + __TBB_ASSERT(my_item_valid(i), "Trying to fetch an empty slot"); + o = get_my_item(i); // could have std::move assign semantics + destroy_item(i); + } + + // move an existing item from one slot to another. The moved-to slot must be unoccupied, + // the moved-from slot must exist and not be reserved. The after, from will be empty, + // to will be occupied but not reserved + void move_item(size_t to, size_t from) { + __TBB_ASSERT(!my_item_valid(to), "Trying to move to a non-empty slot"); + __TBB_ASSERT(my_item_valid(from), "Trying to move from an empty slot"); + set_my_item(to, get_my_item(from)); // could have std::move semantics + destroy_item(from); + + } + + // put an item in an empty slot. 
Return true if successful, else false + bool place_item(size_t here, const item_type &me) { +#if !TBB_DEPRECATED_SEQUENCER_DUPLICATES + if(my_item_valid(here)) return false; +#endif + set_my_item(here, me); + return true; + } + + // could be implemented with std::move semantics + void swap_items(size_t i, size_t j) { + __TBB_ASSERT(my_item_valid(i) && my_item_valid(j), "attempt to swap invalid item(s)"); + item_type temp = get_my_item(i); + set_my_item(i, get_my_item(j)); + set_my_item(j, temp); + } + + void destroy_item(size_type i) { + __TBB_ASSERT(my_item_valid(i), "destruction of invalid item"); + (tbb::internal::punned_cast(&(item(i).first)))->~item_type(); + item(i).second = no_item; + } + + // returns the front element + const item_type& front() const + { + __TBB_ASSERT(my_item_valid(my_head), "attempt to fetch head non-item"); + return get_my_item(my_head); + } + + // returns the back element + const item_type& back() const + { + __TBB_ASSERT(my_item_valid(my_tail - 1), "attempt to fetch head non-item"); + return get_my_item(my_tail - 1); + } + + // following methods are for reservation of the front of a buffer. + void reserve_item(size_type i) { __TBB_ASSERT(my_item_valid(i) && !my_item_reserved(i), "item cannot be reserved"); item(i).second = reserved_item; } + void release_item(size_type i) { __TBB_ASSERT(my_item_reserved(i), "item is not reserved"); item(i).second = has_item; } + + void destroy_front() { destroy_item(my_head); ++my_head; } + void destroy_back() { destroy_item(my_tail-1); --my_tail; } + + // we have to be able to test against a new tail value without changing my_tail + // grow_array doesn't work if we change my_tail when the old array is too small + size_type size(size_t new_tail = 0) { return (new_tail ? new_tail : my_tail) - my_head; } + size_type capacity() { return my_array_size; } + // sequencer_node does not use this method, so we don't + // need a version that passes in the new_tail value. + bool buffer_full() { return size() >= capacity(); } + + //! Grows the internal array. + void grow_my_array( size_t minimum_size ) { + // test that we haven't made the structure inconsistent. + __TBB_ASSERT(capacity() >= my_tail - my_head, "total items exceed capacity"); + size_type new_size = my_array_size ? 2*my_array_size : initial_buffer_size; + while( new_sizeback(); + destroy_back(); + return true; + } + + bool pop_front(item_type &v) { + if(!my_item_valid(my_head)) { + return false; + } + v = this->front(); + destroy_front(); + return true; + } + + // This is used both for reset and for grow_my_array. In the case of grow_my_array + // we want to retain the values of the head and tail. 
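The clean-up helper that follows is shared by reset and by array growth, and the comment above states the key constraint: when the backing array is replaced, the logical head and tail indices must survive. That works because every live element is re-placed at the same logical index masked by the (power-of-two) capacity. A hedged, stand-alone sketch of that idea, not the library code itself:

    #include <cstddef>
    #include <vector>

    // Grow a power-of-two ring buffer without disturbing the logical head/tail
    // indices: each live element keeps its logical index i and is re-placed at
    // i & (new_capacity - 1). Assumes the old capacity is a power of two.
    template <typename T>
    void grow_ring(std::vector<T>& ring, std::size_t head, std::size_t tail,
                   std::size_t minimum_size) {
        std::size_t new_cap = ring.empty() ? 4 : ring.size();
        while (new_cap < minimum_size) new_cap *= 2;
        std::vector<T> bigger(new_cap);
        for (std::size_t i = head; i < tail; ++i)
            bigger[i & (new_cap - 1)] = ring[i & (ring.size() - 1)];
        ring.swap(bigger);   // callers keep using the same head and tail values
    }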
+ void clean_up_buffer(bool reset_pointers) { + if (my_array) { + for( size_type i=my_head; i > + class reservable_item_buffer : public item_buffer { + protected: + using item_buffer::my_item_valid; + using item_buffer::my_head; + + public: + reservable_item_buffer() : item_buffer(), my_reserved(false) {} + void reset() {my_reserved = false; item_buffer::reset(); } + protected: + + bool reserve_front(T &v) { + if(my_reserved || !my_item_valid(this->my_head)) return false; + my_reserved = true; + // reserving the head + v = this->front(); + this->reserve_item(this->my_head); + return true; + } + + void consume_front() { + __TBB_ASSERT(my_reserved, "Attempt to consume a non-reserved item"); + this->destroy_front(); + my_reserved = false; + } + + void release_front() { + __TBB_ASSERT(my_reserved, "Attempt to release a non-reserved item"); + this->release_item(this->my_head); + my_reserved = false; + } + + bool my_reserved; + }; + +} // namespace internal + +#endif // __TBB__flow_graph_item_buffer_impl_H diff --git a/ohos/arm64-v8a/include/tbb/internal/_flow_graph_join_impl.h b/ohos/arm64-v8a/include/tbb/internal/_flow_graph_join_impl.h new file mode 100644 index 00000000..16837c7a --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/internal/_flow_graph_join_impl.h @@ -0,0 +1,2002 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB__flow_graph_join_impl_H +#define __TBB__flow_graph_join_impl_H + +#ifndef __TBB_flow_graph_H +#error Do not #include this internal file directly; use public TBB headers instead. +#endif + +namespace internal { + + struct forwarding_base : tbb::internal::no_assign { + forwarding_base(graph &g) : graph_ref(g) {} + virtual ~forwarding_base() {} + // decrement_port_count may create a forwarding task. If we cannot handle the task + // ourselves, ask decrement_port_count to deal with it. + virtual task * decrement_port_count(bool handle_task) = 0; + virtual void increment_port_count() = 0; + // moved here so input ports can queue tasks + graph& graph_ref; + }; + + // specialization that lets us keep a copy of the current_key for building results. + // KeyType can be a reference type. 
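The matching front-end built on the base defined next only releases a tuple once every input port has seen an item carrying the same key. For orientation, a sketch of how this surfaces in the public API through the key_matching policy; the record types and key fields here are invented for illustration:

    #include "tbb/flow_graph.h"

    struct order  { int id; double amount; };   // illustrative message types
    struct ticket { int id; const char* note; };

    int main() {
        using namespace tbb::flow;
        graph g;
        // A tuple is emitted only when both ports have received a message
        // whose key (here, the id field) matches.
        typedef join_node< tuple<order, ticket>, key_matching<int> > join_t;
        join_t j(g,
                 [](const order&  o) { return o.id; },
                 [](const ticket& t) { return t.id; });
        function_node< join_t::output_type > sink(g, serial,
            [](const join_t::output_type&) { /* consume the matched pair */ });
        make_edge(j, sink);
        order  o = { 42, 9.99 };
        ticket t = { 42, "paid" };
        input_port<0>(j).try_put(o);
        input_port<1>(j).try_put(t);
        g.wait_for_all();
        return 0;
    }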
+ template + struct matching_forwarding_base : public forwarding_base { + typedef typename tbb::internal::strip::type current_key_type; + matching_forwarding_base(graph &g) : forwarding_base(g) { } + virtual task * increment_key_count(current_key_type const & /*t*/, bool /*handle_task*/) = 0; // {return NULL;} + current_key_type current_key; // so ports can refer to FE's desired items + }; + + template< int N > + struct join_helper { + + template< typename TupleType, typename PortType > + static inline void set_join_node_pointer(TupleType &my_input, PortType *port) { + tbb::flow::get( my_input ).set_join_node_pointer(port); + join_helper::set_join_node_pointer( my_input, port ); + } + template< typename TupleType > + static inline void consume_reservations( TupleType &my_input ) { + tbb::flow::get( my_input ).consume(); + join_helper::consume_reservations( my_input ); + } + + template< typename TupleType > + static inline void release_my_reservation( TupleType &my_input ) { + tbb::flow::get( my_input ).release(); + } + + template + static inline void release_reservations( TupleType &my_input) { + join_helper::release_reservations(my_input); + release_my_reservation(my_input); + } + + template< typename InputTuple, typename OutputTuple > + static inline bool reserve( InputTuple &my_input, OutputTuple &out) { + if ( !tbb::flow::get( my_input ).reserve( tbb::flow::get( out ) ) ) return false; + if ( !join_helper::reserve( my_input, out ) ) { + release_my_reservation( my_input ); + return false; + } + return true; + } + + template + static inline bool get_my_item( InputTuple &my_input, OutputTuple &out) { + bool res = tbb::flow::get(my_input).get_item(tbb::flow::get(out) ); // may fail + return join_helper::get_my_item(my_input, out) && res; // do get on other inputs before returning + } + + template + static inline bool get_items(InputTuple &my_input, OutputTuple &out) { + return get_my_item(my_input, out); + } + + template + static inline void reset_my_port(InputTuple &my_input) { + join_helper::reset_my_port(my_input); + tbb::flow::get(my_input).reset_port(); + } + + template + static inline void reset_ports(InputTuple& my_input) { + reset_my_port(my_input); + } + + template + static inline void set_key_functors(InputTuple &my_input, KeyFuncTuple &my_key_funcs) { + tbb::flow::get(my_input).set_my_key_func(tbb::flow::get(my_key_funcs)); + tbb::flow::get(my_key_funcs) = NULL; + join_helper::set_key_functors(my_input, my_key_funcs); + } + + template< typename KeyFuncTuple> + static inline void copy_key_functors(KeyFuncTuple &my_inputs, KeyFuncTuple &other_inputs) { + if(tbb::flow::get(other_inputs).get_my_key_func()) { + tbb::flow::get(my_inputs).set_my_key_func(tbb::flow::get(other_inputs).get_my_key_func()->clone()); + } + join_helper::copy_key_functors(my_inputs, other_inputs); + } + + template + static inline void reset_inputs(InputTuple &my_input, reset_flags f) { + join_helper::reset_inputs(my_input, f); + tbb::flow::get(my_input).reset_receiver(f); + } + +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + template + static inline void extract_inputs(InputTuple &my_input) { + join_helper::extract_inputs(my_input); + tbb::flow::get(my_input).extract_receiver(); + } +#endif + }; // join_helper + + template< > + struct join_helper<1> { + + template< typename TupleType, typename PortType > + static inline void set_join_node_pointer(TupleType &my_input, PortType *port) { + tbb::flow::get<0>( my_input ).set_join_node_pointer(port); + } + + template< typename TupleType > + static inline void 
consume_reservations( TupleType &my_input ) { + tbb::flow::get<0>( my_input ).consume(); + } + + template< typename TupleType > + static inline void release_my_reservation( TupleType &my_input ) { + tbb::flow::get<0>( my_input ).release(); + } + + template + static inline void release_reservations( TupleType &my_input) { + release_my_reservation(my_input); + } + + template< typename InputTuple, typename OutputTuple > + static inline bool reserve( InputTuple &my_input, OutputTuple &out) { + return tbb::flow::get<0>( my_input ).reserve( tbb::flow::get<0>( out ) ); + } + + template + static inline bool get_my_item( InputTuple &my_input, OutputTuple &out) { + return tbb::flow::get<0>(my_input).get_item(tbb::flow::get<0>(out)); + } + + template + static inline bool get_items(InputTuple &my_input, OutputTuple &out) { + return get_my_item(my_input, out); + } + + template + static inline void reset_my_port(InputTuple &my_input) { + tbb::flow::get<0>(my_input).reset_port(); + } + + template + static inline void reset_ports(InputTuple& my_input) { + reset_my_port(my_input); + } + + template + static inline void set_key_functors(InputTuple &my_input, KeyFuncTuple &my_key_funcs) { + tbb::flow::get<0>(my_input).set_my_key_func(tbb::flow::get<0>(my_key_funcs)); + tbb::flow::get<0>(my_key_funcs) = NULL; + } + + template< typename KeyFuncTuple> + static inline void copy_key_functors(KeyFuncTuple &my_inputs, KeyFuncTuple &other_inputs) { + if(tbb::flow::get<0>(other_inputs).get_my_key_func()) { + tbb::flow::get<0>(my_inputs).set_my_key_func(tbb::flow::get<0>(other_inputs).get_my_key_func()->clone()); + } + } + template + static inline void reset_inputs(InputTuple &my_input, reset_flags f) { + tbb::flow::get<0>(my_input).reset_receiver(f); + } + +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + template + static inline void extract_inputs(InputTuple &my_input) { + tbb::flow::get<0>(my_input).extract_receiver(); + } +#endif + }; // join_helper<1> + + //! 
The two-phase join port + template< typename T > + class reserving_port : public receiver { + public: + typedef T input_type; + typedef typename receiver::predecessor_type predecessor_type; +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + typedef typename receiver::predecessor_list_type predecessor_list_type; + typedef typename receiver::built_predecessors_type built_predecessors_type; +#endif + private: + // ----------- Aggregator ------------ + enum op_type { reg_pred, rem_pred, res_item, rel_res, con_res +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + , add_blt_pred, del_blt_pred, blt_pred_cnt, blt_pred_cpy +#endif + }; + typedef reserving_port class_type; + + class reserving_port_operation : public aggregated_operation { + public: + char type; + union { + T *my_arg; + predecessor_type *my_pred; +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + size_t cnt_val; + predecessor_list_type *plist; +#endif + }; + reserving_port_operation(const T& e, op_type t) : + type(char(t)), my_arg(const_cast(&e)) {} + reserving_port_operation(const predecessor_type &s, op_type t) : type(char(t)), + my_pred(const_cast(&s)) {} + reserving_port_operation(op_type t) : type(char(t)) {} + }; + + typedef internal::aggregating_functor handler_type; + friend class internal::aggregating_functor; + aggregator my_aggregator; + + void handle_operations(reserving_port_operation* op_list) { + reserving_port_operation *current; + bool no_predecessors; + while(op_list) { + current = op_list; + op_list = op_list->next; + switch(current->type) { + case reg_pred: + no_predecessors = my_predecessors.empty(); + my_predecessors.add(*(current->my_pred)); + if ( no_predecessors ) { + (void) my_join->decrement_port_count(true); // may try to forward + } + __TBB_store_with_release(current->status, SUCCEEDED); + break; + case rem_pred: + my_predecessors.remove(*(current->my_pred)); + if(my_predecessors.empty()) my_join->increment_port_count(); + __TBB_store_with_release(current->status, SUCCEEDED); + break; + case res_item: + if ( reserved ) { + __TBB_store_with_release(current->status, FAILED); + } + else if ( my_predecessors.try_reserve( *(current->my_arg) ) ) { + reserved = true; + __TBB_store_with_release(current->status, SUCCEEDED); + } else { + if ( my_predecessors.empty() ) { + my_join->increment_port_count(); + } + __TBB_store_with_release(current->status, FAILED); + } + break; + case rel_res: + reserved = false; + my_predecessors.try_release( ); + __TBB_store_with_release(current->status, SUCCEEDED); + break; + case con_res: + reserved = false; + my_predecessors.try_consume( ); + __TBB_store_with_release(current->status, SUCCEEDED); + break; +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + case add_blt_pred: + my_predecessors.internal_add_built_predecessor(*(current->my_pred)); + __TBB_store_with_release(current->status, SUCCEEDED); + break; + case del_blt_pred: + my_predecessors.internal_delete_built_predecessor(*(current->my_pred)); + __TBB_store_with_release(current->status, SUCCEEDED); + break; + case blt_pred_cnt: + current->cnt_val = my_predecessors.predecessor_count(); + __TBB_store_with_release(current->status, SUCCEEDED); + break; + case blt_pred_cpy: + my_predecessors.copy_predecessors(*(current->plist)); + __TBB_store_with_release(current->status, SUCCEEDED); + break; +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ + } + } + } + + protected: + template< typename R, typename B > friend class run_and_put_task; + template friend class internal::broadcast_cache; + template friend class internal::round_robin_cache; + task *try_put_task( 
const T & ) __TBB_override { + return NULL; + } + + graph& graph_reference() const __TBB_override { + return my_join->graph_ref; + } + + public: + + //! Constructor + reserving_port() : reserved(false) { + my_join = NULL; + my_predecessors.set_owner( this ); + my_aggregator.initialize_handler(handler_type(this)); + } + + // copy constructor + reserving_port(const reserving_port& /* other */) : receiver() { + reserved = false; + my_join = NULL; + my_predecessors.set_owner( this ); + my_aggregator.initialize_handler(handler_type(this)); + } + + void set_join_node_pointer(forwarding_base *join) { + my_join = join; + } + + //! Add a predecessor + bool register_predecessor( predecessor_type &src ) __TBB_override { + reserving_port_operation op_data(src, reg_pred); + my_aggregator.execute(&op_data); + return op_data.status == SUCCEEDED; + } + + //! Remove a predecessor + bool remove_predecessor( predecessor_type &src ) __TBB_override { + reserving_port_operation op_data(src, rem_pred); + my_aggregator.execute(&op_data); + return op_data.status == SUCCEEDED; + } + + //! Reserve an item from the port + bool reserve( T &v ) { + reserving_port_operation op_data(v, res_item); + my_aggregator.execute(&op_data); + return op_data.status == SUCCEEDED; + } + + //! Release the port + void release( ) { + reserving_port_operation op_data(rel_res); + my_aggregator.execute(&op_data); + } + + //! Complete use of the port + void consume( ) { + reserving_port_operation op_data(con_res); + my_aggregator.execute(&op_data); + } + +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + built_predecessors_type &built_predecessors() __TBB_override { return my_predecessors.built_predecessors(); } + void internal_add_built_predecessor(predecessor_type &src) __TBB_override { + reserving_port_operation op_data(src, add_blt_pred); + my_aggregator.execute(&op_data); + } + + void internal_delete_built_predecessor(predecessor_type &src) __TBB_override { + reserving_port_operation op_data(src, del_blt_pred); + my_aggregator.execute(&op_data); + } + + size_t predecessor_count() __TBB_override { + reserving_port_operation op_data(blt_pred_cnt); + my_aggregator.execute(&op_data); + return op_data.cnt_val; + } + + void copy_predecessors(predecessor_list_type &l) __TBB_override { + reserving_port_operation op_data(blt_pred_cpy); + op_data.plist = &l; + my_aggregator.execute(&op_data); + } + + void extract_receiver() { + my_predecessors.built_predecessors().receiver_extract(*this); + } + +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ + + void reset_receiver( reset_flags f) __TBB_override { + if(f & rf_clear_edges) my_predecessors.clear(); + else + my_predecessors.reset(); + reserved = false; + __TBB_ASSERT(!(f&rf_clear_edges) || my_predecessors.empty(), "port edges not removed"); + } + + private: +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + friend class get_graph_helper; +#endif + + forwarding_base *my_join; + reservable_predecessor_cache< T, null_mutex > my_predecessors; + bool reserved; + }; // reserving_port + + //! 
queueing join_port + template + class queueing_port : public receiver, public item_buffer { + public: + typedef T input_type; + typedef typename receiver::predecessor_type predecessor_type; + typedef queueing_port class_type; +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + typedef typename receiver::built_predecessors_type built_predecessors_type; + typedef typename receiver::predecessor_list_type predecessor_list_type; +#endif + + // ----------- Aggregator ------------ + private: + enum op_type { get__item, res_port, try__put_task +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + , add_blt_pred, del_blt_pred, blt_pred_cnt, blt_pred_cpy +#endif + }; + + class queueing_port_operation : public aggregated_operation { + public: + char type; + T my_val; + T *my_arg; +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + predecessor_type *pred; + size_t cnt_val; + predecessor_list_type *plist; +#endif + task * bypass_t; + // constructor for value parameter + queueing_port_operation(const T& e, op_type t) : + type(char(t)), my_val(e) + , bypass_t(NULL) + {} + // constructor for pointer parameter + queueing_port_operation(const T* p, op_type t) : + type(char(t)), my_arg(const_cast(p)) + , bypass_t(NULL) + {} + // constructor with no parameter + queueing_port_operation(op_type t) : type(char(t)) + , bypass_t(NULL) + {} + }; + + typedef internal::aggregating_functor handler_type; + friend class internal::aggregating_functor; + aggregator my_aggregator; + + void handle_operations(queueing_port_operation* op_list) { + queueing_port_operation *current; + bool was_empty; + while(op_list) { + current = op_list; + op_list = op_list->next; + switch(current->type) { + case try__put_task: { + task *rtask = NULL; + was_empty = this->buffer_empty(); + this->push_back(current->my_val); + if (was_empty) rtask = my_join->decrement_port_count(false); + else + rtask = SUCCESSFULLY_ENQUEUED; + current->bypass_t = rtask; + __TBB_store_with_release(current->status, SUCCEEDED); + } + break; + case get__item: + if(!this->buffer_empty()) { + *(current->my_arg) = this->front(); + __TBB_store_with_release(current->status, SUCCEEDED); + } + else { + __TBB_store_with_release(current->status, FAILED); + } + break; + case res_port: + __TBB_ASSERT(this->my_item_valid(this->my_head), "No item to reset"); + this->destroy_front(); + if(this->my_item_valid(this->my_head)) { + (void)my_join->decrement_port_count(true); + } + __TBB_store_with_release(current->status, SUCCEEDED); + break; +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + case add_blt_pred: + my_built_predecessors.add_edge(*(current->pred)); + __TBB_store_with_release(current->status, SUCCEEDED); + break; + case del_blt_pred: + my_built_predecessors.delete_edge(*(current->pred)); + __TBB_store_with_release(current->status, SUCCEEDED); + break; + case blt_pred_cnt: + current->cnt_val = my_built_predecessors.edge_count(); + __TBB_store_with_release(current->status, SUCCEEDED); + break; + case blt_pred_cpy: + my_built_predecessors.copy_edges(*(current->plist)); + __TBB_store_with_release(current->status, SUCCEEDED); + break; +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ + } + } + } + // ------------ End Aggregator --------------- + + protected: + template< typename R, typename B > friend class run_and_put_task; + template friend class internal::broadcast_cache; + template friend class internal::round_robin_cache; + task *try_put_task(const T &v) __TBB_override { + queueing_port_operation op_data(v, try__put_task); + my_aggregator.execute(&op_data); + __TBB_ASSERT(op_data.status == SUCCEEDED || 
!op_data.bypass_t, "inconsistent return from aggregator"); + if(!op_data.bypass_t) return SUCCESSFULLY_ENQUEUED; + return op_data.bypass_t; + } + + graph& graph_reference() const __TBB_override { + return my_join->graph_ref; + } + + public: + + //! Constructor + queueing_port() : item_buffer() { + my_join = NULL; + my_aggregator.initialize_handler(handler_type(this)); + } + + //! copy constructor + queueing_port(const queueing_port& /* other */) : receiver(), item_buffer() { + my_join = NULL; + my_aggregator.initialize_handler(handler_type(this)); + } + + //! record parent for tallying available items + void set_join_node_pointer(forwarding_base *join) { + my_join = join; + } + + bool get_item( T &v ) { + queueing_port_operation op_data(&v, get__item); + my_aggregator.execute(&op_data); + return op_data.status == SUCCEEDED; + } + + // reset_port is called when item is accepted by successor, but + // is initiated by join_node. + void reset_port() { + queueing_port_operation op_data(res_port); + my_aggregator.execute(&op_data); + return; + } + +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + built_predecessors_type &built_predecessors() __TBB_override { return my_built_predecessors; } + + void internal_add_built_predecessor(predecessor_type &p) __TBB_override { + queueing_port_operation op_data(add_blt_pred); + op_data.pred = &p; + my_aggregator.execute(&op_data); + } + + void internal_delete_built_predecessor(predecessor_type &p) __TBB_override { + queueing_port_operation op_data(del_blt_pred); + op_data.pred = &p; + my_aggregator.execute(&op_data); + } + + size_t predecessor_count() __TBB_override { + queueing_port_operation op_data(blt_pred_cnt); + my_aggregator.execute(&op_data); + return op_data.cnt_val; + } + + void copy_predecessors(predecessor_list_type &l) __TBB_override { + queueing_port_operation op_data(blt_pred_cpy); + op_data.plist = &l; + my_aggregator.execute(&op_data); + } + + void extract_receiver() { + item_buffer::reset(); + my_built_predecessors.receiver_extract(*this); + } +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ + + void reset_receiver(reset_flags f) __TBB_override { + tbb::internal::suppress_unused_warning(f); + item_buffer::reset(); +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + if (f & rf_clear_edges) + my_built_predecessors.clear(); +#endif + } + + private: +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + friend class get_graph_helper; +#endif + + forwarding_base *my_join; +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + edge_container my_built_predecessors; +#endif + }; // queueing_port + +#include "_flow_graph_tagged_buffer_impl.h" + + template + struct count_element { + K my_key; + size_t my_value; + }; + + // method to access the key in the counting table + // the ref has already been removed from K + template< typename K > + struct key_to_count_functor { + typedef count_element table_item_type; + const K& operator()(const table_item_type& v) { return v.my_key; } + }; + + // the ports can have only one template parameter. 
We wrap the types needed in + // a traits type + template< class TraitsType > + class key_matching_port : + public receiver, + public hash_buffer< typename TraitsType::K, typename TraitsType::T, typename TraitsType::TtoK, + typename TraitsType::KHash > { + public: + typedef TraitsType traits; + typedef key_matching_port class_type; + typedef typename TraitsType::T input_type; + typedef typename TraitsType::K key_type; + typedef typename tbb::internal::strip::type noref_key_type; + typedef typename receiver::predecessor_type predecessor_type; + typedef typename TraitsType::TtoK type_to_key_func_type; + typedef typename TraitsType::KHash hash_compare_type; + typedef hash_buffer< key_type, input_type, type_to_key_func_type, hash_compare_type > buffer_type; +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + typedef typename receiver::built_predecessors_type built_predecessors_type; + typedef typename receiver::predecessor_list_type predecessor_list_type; +#endif + private: +// ----------- Aggregator ------------ + private: + enum op_type { try__put, get__item, res_port +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + , add_blt_pred, del_blt_pred, blt_pred_cnt, blt_pred_cpy +#endif + }; + + class key_matching_port_operation : public aggregated_operation { + public: + char type; + input_type my_val; + input_type *my_arg; +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + predecessor_type *pred; + size_t cnt_val; + predecessor_list_type *plist; +#endif + // constructor for value parameter + key_matching_port_operation(const input_type& e, op_type t) : + type(char(t)), my_val(e) {} + // constructor for pointer parameter + key_matching_port_operation(const input_type* p, op_type t) : + type(char(t)), my_arg(const_cast(p)) {} + // constructor with no parameter + key_matching_port_operation(op_type t) : type(char(t)) {} + }; + + typedef internal::aggregating_functor handler_type; + friend class internal::aggregating_functor; + aggregator my_aggregator; + + void handle_operations(key_matching_port_operation* op_list) { + key_matching_port_operation *current; + while(op_list) { + current = op_list; + op_list = op_list->next; + switch(current->type) { + case try__put: { + bool was_inserted = this->insert_with_key(current->my_val); + // return failure if a duplicate insertion occurs + __TBB_store_with_release(current->status, was_inserted ? 
SUCCEEDED : FAILED); + } + break; + case get__item: + // use current_key from FE for item + if(!this->find_with_key(my_join->current_key, *(current->my_arg))) { + __TBB_ASSERT(false, "Failed to find item corresponding to current_key."); + } + __TBB_store_with_release(current->status, SUCCEEDED); + break; + case res_port: + // use current_key from FE for item + this->delete_with_key(my_join->current_key); + __TBB_store_with_release(current->status, SUCCEEDED); + break; +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + case add_blt_pred: + my_built_predecessors.add_edge(*(current->pred)); + __TBB_store_with_release(current->status, SUCCEEDED); + break; + case del_blt_pred: + my_built_predecessors.delete_edge(*(current->pred)); + __TBB_store_with_release(current->status, SUCCEEDED); + break; + case blt_pred_cnt: + current->cnt_val = my_built_predecessors.edge_count(); + __TBB_store_with_release(current->status, SUCCEEDED); + break; + case blt_pred_cpy: + my_built_predecessors.copy_edges(*(current->plist)); + __TBB_store_with_release(current->status, SUCCEEDED); + break; +#endif + } + } + } +// ------------ End Aggregator --------------- + protected: + template< typename R, typename B > friend class run_and_put_task; + template friend class internal::broadcast_cache; + template friend class internal::round_robin_cache; + task *try_put_task(const input_type& v) __TBB_override { + key_matching_port_operation op_data(v, try__put); + task *rtask = NULL; + my_aggregator.execute(&op_data); + if(op_data.status == SUCCEEDED) { + rtask = my_join->increment_key_count((*(this->get_key_func()))(v), false); // may spawn + // rtask has to reflect the return status of the try_put + if(!rtask) rtask = SUCCESSFULLY_ENQUEUED; + } + return rtask; + } + + graph& graph_reference() const __TBB_override { + return my_join->graph_ref; + } + + public: + + key_matching_port() : receiver(), buffer_type() { + my_join = NULL; + my_aggregator.initialize_handler(handler_type(this)); + } + + // copy constructor + key_matching_port(const key_matching_port& /*other*/) : receiver(), buffer_type() { + my_join = NULL; + my_aggregator.initialize_handler(handler_type(this)); + } + + ~key_matching_port() { } + + void set_join_node_pointer(forwarding_base *join) { + my_join = dynamic_cast*>(join); + } + + void set_my_key_func(type_to_key_func_type *f) { this->set_key_func(f); } + + type_to_key_func_type* get_my_key_func() { return this->get_key_func(); } + + bool get_item( input_type &v ) { + // aggregator uses current_key from FE for Key + key_matching_port_operation op_data(&v, get__item); + my_aggregator.execute(&op_data); + return op_data.status == SUCCEEDED; + } + +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + built_predecessors_type &built_predecessors() __TBB_override { return my_built_predecessors; } + + void internal_add_built_predecessor(predecessor_type &p) __TBB_override { + key_matching_port_operation op_data(add_blt_pred); + op_data.pred = &p; + my_aggregator.execute(&op_data); + } + + void internal_delete_built_predecessor(predecessor_type &p) __TBB_override { + key_matching_port_operation op_data(del_blt_pred); + op_data.pred = &p; + my_aggregator.execute(&op_data); + } + + size_t predecessor_count() __TBB_override { + key_matching_port_operation op_data(blt_pred_cnt); + my_aggregator.execute(&op_data); + return op_data.cnt_val; + } + + void copy_predecessors(predecessor_list_type &l) __TBB_override { + key_matching_port_operation op_data(blt_pred_cpy); + op_data.plist = &l; + my_aggregator.execute(&op_data); + } +#endif + + // 
reset_port is called when item is accepted by successor, but + // is initiated by join_node. + void reset_port() { + key_matching_port_operation op_data(res_port); + my_aggregator.execute(&op_data); + return; + } + +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + void extract_receiver() { + buffer_type::reset(); + my_built_predecessors.receiver_extract(*this); + } +#endif + void reset_receiver(reset_flags f ) __TBB_override { + tbb::internal::suppress_unused_warning(f); + buffer_type::reset(); +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + if (f & rf_clear_edges) + my_built_predecessors.clear(); +#endif + } + + private: + // my_join forwarding base used to count number of inputs that + // received key. + matching_forwarding_base *my_join; +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + edge_container my_built_predecessors; +#endif + }; // key_matching_port + + using namespace graph_policy_namespace; + + template + class join_node_base; + + //! join_node_FE : implements input port policy + template + class join_node_FE; + + template + class join_node_FE : public forwarding_base { + public: + static const int N = tbb::flow::tuple_size::value; + typedef OutputTuple output_type; + typedef InputTuple input_type; + typedef join_node_base base_node_type; // for forwarding + + join_node_FE(graph &g) : forwarding_base(g), my_node(NULL) { + ports_with_no_inputs = N; + join_helper::set_join_node_pointer(my_inputs, this); + } + + join_node_FE(const join_node_FE& other) : forwarding_base((other.forwarding_base::graph_ref)), my_node(NULL) { + ports_with_no_inputs = N; + join_helper::set_join_node_pointer(my_inputs, this); + } + + void set_my_node(base_node_type *new_my_node) { my_node = new_my_node; } + + void increment_port_count() __TBB_override { + ++ports_with_no_inputs; + } + + // if all input_ports have predecessors, spawn forward to try and consume tuples + task * decrement_port_count(bool handle_task) __TBB_override { + if(ports_with_no_inputs.fetch_and_decrement() == 1) { + if(internal::is_graph_active(this->graph_ref)) { + task *rtask = new ( task::allocate_additional_child_of( *(this->graph_ref.root_task()) ) ) + forward_task_bypass(*my_node); + if(!handle_task) return rtask; + internal::spawn_in_graph_arena(this->graph_ref, *rtask); + } + } + return NULL; + } + + input_type &input_ports() { return my_inputs; } + + protected: + + void reset( reset_flags f) { + // called outside of parallel contexts + ports_with_no_inputs = N; + join_helper::reset_inputs(my_inputs, f); + } + +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + void extract( ) { + // called outside of parallel contexts + ports_with_no_inputs = N; + join_helper::extract_inputs(my_inputs); + } +#endif + + // all methods on input ports should be called under mutual exclusion from join_node_base. 
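Taken together with the queueing front-end that follows, this is the difference visible to users of join_node: a reserving join pulls items by reserving them from reservable predecessors (its ports reject a plain try_put), while a queueing join buffers whatever is pushed at each port. A small sketch against the public API, with illustrative values:

    #include "tbb/flow_graph.h"

    int main() {
        using namespace tbb::flow;
        graph g;

        // reserving: the join does not copy inputs eagerly; it reserves one
        // item from each (reservable) predecessor and consumes them together.
        buffer_node<int>   ints(g);
        buffer_node<float> floats(g);
        join_node< tuple<int, float>, reserving > rj(g);
        make_edge(ints,   input_port<0>(rj));
        make_edge(floats, input_port<1>(rj));

        // queueing (the default): each port keeps its own FIFO of pushed items.
        join_node< tuple<int, float>, queueing > qj(g);
        input_port<0>(qj).try_put(1);
        input_port<1>(qj).try_put(2.0f);

        ints.try_put(1);
        floats.try_put(2.0f);
        g.wait_for_all();
        return 0;
    }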
+ + bool tuple_build_may_succeed() { + return !ports_with_no_inputs; + } + + bool try_to_make_tuple(output_type &out) { + if(ports_with_no_inputs) return false; + return join_helper::reserve(my_inputs, out); + } + + void tuple_accepted() { + join_helper::consume_reservations(my_inputs); + } + void tuple_rejected() { + join_helper::release_reservations(my_inputs); + } + + input_type my_inputs; + base_node_type *my_node; + atomic ports_with_no_inputs; + }; // join_node_FE + + template + class join_node_FE : public forwarding_base { + public: + static const int N = tbb::flow::tuple_size::value; + typedef OutputTuple output_type; + typedef InputTuple input_type; + typedef join_node_base base_node_type; // for forwarding + + join_node_FE(graph &g) : forwarding_base(g), my_node(NULL) { + ports_with_no_items = N; + join_helper::set_join_node_pointer(my_inputs, this); + } + + join_node_FE(const join_node_FE& other) : forwarding_base((other.forwarding_base::graph_ref)), my_node(NULL) { + ports_with_no_items = N; + join_helper::set_join_node_pointer(my_inputs, this); + } + + // needed for forwarding + void set_my_node(base_node_type *new_my_node) { my_node = new_my_node; } + + void reset_port_count() { + ports_with_no_items = N; + } + + // if all input_ports have items, spawn forward to try and consume tuples + task * decrement_port_count(bool handle_task) __TBB_override + { + if(ports_with_no_items.fetch_and_decrement() == 1) { + if(internal::is_graph_active(this->graph_ref)) { + task *rtask = new ( task::allocate_additional_child_of( *(this->graph_ref.root_task()) ) ) + forward_task_bypass (*my_node); + if(!handle_task) return rtask; + internal::spawn_in_graph_arena(this->graph_ref, *rtask); + } + } + return NULL; + } + + void increment_port_count() __TBB_override { __TBB_ASSERT(false, NULL); } // should never be called + + input_type &input_ports() { return my_inputs; } + + protected: + + void reset( reset_flags f) { + reset_port_count(); + join_helper::reset_inputs(my_inputs, f ); + } + +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + void extract() { + reset_port_count(); + join_helper::extract_inputs(my_inputs); + } +#endif + // all methods on input ports should be called under mutual exclusion from join_node_base. + + bool tuple_build_may_succeed() { + return !ports_with_no_items; + } + + bool try_to_make_tuple(output_type &out) { + if(ports_with_no_items) return false; + return join_helper::get_items(my_inputs, out); + } + + void tuple_accepted() { + reset_port_count(); + join_helper::reset_ports(my_inputs); + } + void tuple_rejected() { + // nothing to do. + } + + input_type my_inputs; + base_node_type *my_node; + atomic ports_with_no_items; + }; // join_node_FE + + // key_matching join front-end. + template + class join_node_FE, InputTuple, OutputTuple> : public matching_forwarding_base, + // buffer of key value counts + public hash_buffer< // typedefed below to key_to_count_buffer_type + typename tbb::internal::strip::type&, // force ref type on K + count_element::type>, + internal::type_to_key_function_body< + count_element::type>, + typename tbb::internal::strip::type& >, + KHash >, + // buffer of output items + public item_buffer { + public: + static const int N = tbb::flow::tuple_size::value; + typedef OutputTuple output_type; + typedef InputTuple input_type; + typedef K key_type; + typedef typename tbb::internal::strip::type unref_key_type; + typedef KHash key_hash_compare; + // must use K without ref. 
+ typedef count_element count_element_type; + // method that lets us refer to the key of this type. + typedef key_to_count_functor key_to_count_func; + typedef internal::type_to_key_function_body< count_element_type, unref_key_type&> TtoK_function_body_type; + typedef internal::type_to_key_function_body_leaf TtoK_function_body_leaf_type; + // this is the type of the special table that keeps track of the number of discrete + // elements corresponding to each key that we've seen. + typedef hash_buffer< unref_key_type&, count_element_type, TtoK_function_body_type, key_hash_compare > + key_to_count_buffer_type; + typedef item_buffer output_buffer_type; + typedef join_node_base, InputTuple, OutputTuple> base_node_type; // for forwarding + typedef matching_forwarding_base forwarding_base_type; + +// ----------- Aggregator ------------ + // the aggregator is only needed to serialize the access to the hash table. + // and the output_buffer_type base class + private: + enum op_type { res_count, inc_count, may_succeed, try_make }; + typedef join_node_FE, InputTuple, OutputTuple> class_type; + + class key_matching_FE_operation : public aggregated_operation { + public: + char type; + unref_key_type my_val; + output_type* my_output; + task *bypass_t; + bool enqueue_task; + // constructor for value parameter + key_matching_FE_operation(const unref_key_type& e , bool q_task , op_type t) : type(char(t)), my_val(e), + my_output(NULL), bypass_t(NULL), enqueue_task(q_task) {} + key_matching_FE_operation(output_type *p, op_type t) : type(char(t)), my_output(p), bypass_t(NULL), + enqueue_task(true) {} + // constructor with no parameter + key_matching_FE_operation(op_type t) : type(char(t)), my_output(NULL), bypass_t(NULL), enqueue_task(true) {} + }; + + typedef internal::aggregating_functor handler_type; + friend class internal::aggregating_functor; + aggregator my_aggregator; + + // called from aggregator, so serialized + // returns a task pointer if the a task would have been enqueued but we asked that + // it be returned. Otherwise returns NULL. 
+ task * fill_output_buffer(unref_key_type &t, bool should_enqueue, bool handle_task) { + output_type l_out; + task *rtask = NULL; + bool do_fwd = should_enqueue && this->buffer_empty() && internal::is_graph_active(this->graph_ref); + this->current_key = t; + this->delete_with_key(this->current_key); // remove the key + if(join_helper::get_items(my_inputs, l_out)) { // <== call back + this->push_back(l_out); + if(do_fwd) { // we enqueue if receiving an item from predecessor, not if successor asks for item + rtask = new ( task::allocate_additional_child_of( *(this->graph_ref.root_task()) ) ) + forward_task_bypass(*my_node); + if(handle_task) { + internal::spawn_in_graph_arena(this->graph_ref, *rtask); + rtask = NULL; + } + do_fwd = false; + } + // retire the input values + join_helper::reset_ports(my_inputs); // <== call back + } + else { + __TBB_ASSERT(false, "should have had something to push"); + } + return rtask; + } + + void handle_operations(key_matching_FE_operation* op_list) { + key_matching_FE_operation *current; + while(op_list) { + current = op_list; + op_list = op_list->next; + switch(current->type) { + case res_count: // called from BE + { + this->destroy_front(); + __TBB_store_with_release(current->status, SUCCEEDED); + } + break; + case inc_count: { // called from input ports + count_element_type *p = 0; + unref_key_type &t = current->my_val; + bool do_enqueue = current->enqueue_task; + if(!(this->find_ref_with_key(t,p))) { + count_element_type ev; + ev.my_key = t; + ev.my_value = 0; + this->insert_with_key(ev); + if(!(this->find_ref_with_key(t,p))) { + __TBB_ASSERT(false, "should find key after inserting it"); + } + } + if(++(p->my_value) == size_t(N)) { + task *rtask = fill_output_buffer(t, true, do_enqueue); + __TBB_ASSERT(!rtask || !do_enqueue, "task should not be returned"); + current->bypass_t = rtask; + } + } + __TBB_store_with_release(current->status, SUCCEEDED); + break; + case may_succeed: // called from BE + __TBB_store_with_release(current->status, this->buffer_empty() ? 
FAILED : SUCCEEDED); + break; + case try_make: // called from BE + if(this->buffer_empty()) { + __TBB_store_with_release(current->status, FAILED); + } + else { + *(current->my_output) = this->front(); + __TBB_store_with_release(current->status, SUCCEEDED); + } + break; + } + } + } +// ------------ End Aggregator --------------- + + public: + template + join_node_FE(graph &g, FunctionTuple &TtoK_funcs) : forwarding_base_type(g), my_node(NULL) { + join_helper::set_join_node_pointer(my_inputs, this); + join_helper::set_key_functors(my_inputs, TtoK_funcs); + my_aggregator.initialize_handler(handler_type(this)); + TtoK_function_body_type *cfb = new TtoK_function_body_leaf_type(key_to_count_func()); + this->set_key_func(cfb); + } + + join_node_FE(const join_node_FE& other) : forwarding_base_type((other.forwarding_base_type::graph_ref)), key_to_count_buffer_type(), + output_buffer_type() { + my_node = NULL; + join_helper::set_join_node_pointer(my_inputs, this); + join_helper::copy_key_functors(my_inputs, const_cast(other.my_inputs)); + my_aggregator.initialize_handler(handler_type(this)); + TtoK_function_body_type *cfb = new TtoK_function_body_leaf_type(key_to_count_func()); + this->set_key_func(cfb); + } + + // needed for forwarding + void set_my_node(base_node_type *new_my_node) { my_node = new_my_node; } + + void reset_port_count() { // called from BE + key_matching_FE_operation op_data(res_count); + my_aggregator.execute(&op_data); + return; + } + + // if all input_ports have items, spawn forward to try and consume tuples + // return a task if we are asked and did create one. + task *increment_key_count(unref_key_type const & t, bool handle_task) __TBB_override { // called from input_ports + key_matching_FE_operation op_data(t, handle_task, inc_count); + my_aggregator.execute(&op_data); + return op_data.bypass_t; + } + + task *decrement_port_count(bool /*handle_task*/) __TBB_override { __TBB_ASSERT(false, NULL); return NULL; } + + void increment_port_count() __TBB_override { __TBB_ASSERT(false, NULL); } // should never be called + + input_type &input_ports() { return my_inputs; } + + protected: + + void reset( reset_flags f ) { + // called outside of parallel contexts + join_helper::reset_inputs(my_inputs, f); + + key_to_count_buffer_type::reset(); + output_buffer_type::reset(); + } + +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + void extract() { + // called outside of parallel contexts + join_helper::extract_inputs(my_inputs); + key_to_count_buffer_type::reset(); // have to reset the tag counts + output_buffer_type::reset(); // also the queue of outputs + // my_node->current_tag = NO_TAG; + } +#endif + // all methods on input ports should be called under mutual exclusion from join_node_base. + + bool tuple_build_may_succeed() { // called from back-end + key_matching_FE_operation op_data(may_succeed); + my_aggregator.execute(&op_data); + return op_data.status == SUCCEEDED; + } + + // cannot lock while calling back to input_ports. current_key will only be set + // and reset under the aggregator, so it will remain consistent. + bool try_to_make_tuple(output_type &out) { + key_matching_FE_operation op_data(&out,try_make); + my_aggregator.execute(&op_data); + return op_data.status == SUCCEEDED; + } + + void tuple_accepted() { + reset_port_count(); // reset current_key after ports reset. + } + + void tuple_rejected() { + // nothing to do. + } + + input_type my_inputs; // input ports + base_node_type *my_node; + }; // join_node_FE, InputTuple, OutputTuple> + + //! 
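+    // A minimal usage sketch of the public API these front-ends implement
+    // (Order and Shipment are hypothetical example types, not part of TBB):
+    //
+    //     #include "tbb/flow_graph.h"
+    //     #include <tuple>
+    //
+    //     struct Order    { int id; };
+    //     struct Shipment { int id; };
+    //
+    //     tbb::flow::graph g;
+    //     tbb::flow::join_node< std::tuple<Order, Shipment>,
+    //                           tbb::flow::key_matching<int> >
+    //         pair_up(g, [](const Order& o)    { return o.id; },
+    //                    [](const Shipment& s) { return s.id; });
+    //
+    //     // Items offered to input_port<0>(pair_up) and input_port<1>(pair_up)
+    //     // are emitted as one tuple once both per-key counts reach the tuple
+    //     // arity, which is exactly the bookkeeping done by the front-end above.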
join_node_base + template + class join_node_base : public graph_node, public join_node_FE, + public sender { + protected: + using graph_node::my_graph; + public: + typedef OutputTuple output_type; + + typedef typename sender::successor_type successor_type; + typedef join_node_FE input_ports_type; + using input_ports_type::tuple_build_may_succeed; + using input_ports_type::try_to_make_tuple; + using input_ports_type::tuple_accepted; + using input_ports_type::tuple_rejected; +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + typedef typename sender::built_successors_type built_successors_type; + typedef typename sender::successor_list_type successor_list_type; +#endif + + private: + // ----------- Aggregator ------------ + enum op_type { reg_succ, rem_succ, try__get, do_fwrd, do_fwrd_bypass +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + , add_blt_succ, del_blt_succ, blt_succ_cnt, blt_succ_cpy +#endif + }; + typedef join_node_base class_type; + + class join_node_base_operation : public aggregated_operation { + public: + char type; + union { + output_type *my_arg; + successor_type *my_succ; +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + size_t cnt_val; + successor_list_type *slist; +#endif + }; + task *bypass_t; + join_node_base_operation(const output_type& e, op_type t) : type(char(t)), + my_arg(const_cast(&e)), bypass_t(NULL) {} + join_node_base_operation(const successor_type &s, op_type t) : type(char(t)), + my_succ(const_cast(&s)), bypass_t(NULL) {} + join_node_base_operation(op_type t) : type(char(t)), bypass_t(NULL) {} + }; + + typedef internal::aggregating_functor handler_type; + friend class internal::aggregating_functor; + bool forwarder_busy; + aggregator my_aggregator; + + void handle_operations(join_node_base_operation* op_list) { + join_node_base_operation *current; + while(op_list) { + current = op_list; + op_list = op_list->next; + switch(current->type) { + case reg_succ: { + my_successors.register_successor(*(current->my_succ)); + if(tuple_build_may_succeed() && !forwarder_busy && internal::is_graph_active(my_graph)) { + task *rtask = new ( task::allocate_additional_child_of(*(my_graph.root_task())) ) + forward_task_bypass + >(*this); + internal::spawn_in_graph_arena(my_graph, *rtask); + forwarder_busy = true; + } + __TBB_store_with_release(current->status, SUCCEEDED); + } + break; + case rem_succ: + my_successors.remove_successor(*(current->my_succ)); + __TBB_store_with_release(current->status, SUCCEEDED); + break; + case try__get: + if(tuple_build_may_succeed()) { + if(try_to_make_tuple(*(current->my_arg))) { + tuple_accepted(); + __TBB_store_with_release(current->status, SUCCEEDED); + } + else __TBB_store_with_release(current->status, FAILED); + } + else __TBB_store_with_release(current->status, FAILED); + break; + case do_fwrd_bypass: { + bool build_succeeded; + task *last_task = NULL; + output_type out; + if(tuple_build_may_succeed()) { // checks output queue of FE + do { + build_succeeded = try_to_make_tuple(out); // fetch front_end of queue + if(build_succeeded) { + task *new_task = my_successors.try_put_task(out); + last_task = combine_tasks(my_graph, last_task, new_task); + if(new_task) { + tuple_accepted(); + } + else { + tuple_rejected(); + build_succeeded = false; + } + } + } while(build_succeeded); + } + current->bypass_t = last_task; + __TBB_store_with_release(current->status, SUCCEEDED); + forwarder_busy = false; + } + break; +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + case add_blt_succ: + my_successors.internal_add_built_successor(*(current->my_succ)); + 
__TBB_store_with_release(current->status, SUCCEEDED); + break; + case del_blt_succ: + my_successors.internal_delete_built_successor(*(current->my_succ)); + __TBB_store_with_release(current->status, SUCCEEDED); + break; + case blt_succ_cnt: + current->cnt_val = my_successors.successor_count(); + __TBB_store_with_release(current->status, SUCCEEDED); + break; + case blt_succ_cpy: + my_successors.copy_successors(*(current->slist)); + __TBB_store_with_release(current->status, SUCCEEDED); + break; +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ + } + } + } + // ---------- end aggregator ----------- + public: + join_node_base(graph &g) : graph_node(g), input_ports_type(g), forwarder_busy(false) { + my_successors.set_owner(this); + input_ports_type::set_my_node(this); + my_aggregator.initialize_handler(handler_type(this)); + } + + join_node_base(const join_node_base& other) : + graph_node(other.graph_node::my_graph), input_ports_type(other), + sender(), forwarder_busy(false), my_successors() { + my_successors.set_owner(this); + input_ports_type::set_my_node(this); + my_aggregator.initialize_handler(handler_type(this)); + } + + template + join_node_base(graph &g, FunctionTuple f) : graph_node(g), input_ports_type(g, f), forwarder_busy(false) { + my_successors.set_owner(this); + input_ports_type::set_my_node(this); + my_aggregator.initialize_handler(handler_type(this)); + } + + bool register_successor(successor_type &r) __TBB_override { + join_node_base_operation op_data(r, reg_succ); + my_aggregator.execute(&op_data); + return op_data.status == SUCCEEDED; + } + + bool remove_successor( successor_type &r) __TBB_override { + join_node_base_operation op_data(r, rem_succ); + my_aggregator.execute(&op_data); + return op_data.status == SUCCEEDED; + } + + bool try_get( output_type &v) __TBB_override { + join_node_base_operation op_data(v, try__get); + my_aggregator.execute(&op_data); + return op_data.status == SUCCEEDED; + } + +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + built_successors_type &built_successors() __TBB_override { return my_successors.built_successors(); } + + void internal_add_built_successor( successor_type &r) __TBB_override { + join_node_base_operation op_data(r, add_blt_succ); + my_aggregator.execute(&op_data); + } + + void internal_delete_built_successor( successor_type &r) __TBB_override { + join_node_base_operation op_data(r, del_blt_succ); + my_aggregator.execute(&op_data); + } + + size_t successor_count() __TBB_override { + join_node_base_operation op_data(blt_succ_cnt); + my_aggregator.execute(&op_data); + return op_data.cnt_val; + } + + void copy_successors(successor_list_type &l) __TBB_override { + join_node_base_operation op_data(blt_succ_cpy); + op_data.slist = &l; + my_aggregator.execute(&op_data); + } +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ + +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + void extract() __TBB_override { + input_ports_type::extract(); + my_successors.built_successors().sender_extract(*this); + } +#endif + + protected: + + void reset_node(reset_flags f) __TBB_override { + input_ports_type::reset(f); + if(f & rf_clear_edges) my_successors.clear(); + } + + private: + broadcast_cache my_successors; + + friend class forward_task_bypass< join_node_base >; + task *forward_task() { + join_node_base_operation op_data(do_fwrd_bypass); + my_aggregator.execute(&op_data); + return op_data.bypass_t; + } + + }; // join_node_base + + // join base class type generator + template class PT, typename OutputTuple, typename JP> + struct join_base { + typedef typename 
internal::join_node_base::type, OutputTuple> type; + }; + + template + struct join_base > { + typedef key_matching key_traits_type; + typedef K key_type; + typedef KHash key_hash_compare; + typedef typename internal::join_node_base< key_traits_type, + // ports type + typename wrap_key_tuple_elements::type, + OutputTuple > type; + }; + + //! unfolded_join_node : passes input_ports_type to join_node_base. We build the input port type + // using tuple_element. The class PT is the port type (reserving_port, queueing_port, key_matching_port) + // and should match the typename. + + template class PT, typename OutputTuple, typename JP> + class unfolded_join_node : public join_base::type { + public: + typedef typename wrap_tuple_elements::type input_ports_type; + typedef OutputTuple output_type; + private: + typedef join_node_base base_type; + public: + unfolded_join_node(graph &g) : base_type(g) {} + unfolded_join_node(const unfolded_join_node &other) : base_type(other) {} + }; + +#if __TBB_PREVIEW_MESSAGE_BASED_KEY_MATCHING + template + struct key_from_message_body { + K operator()(const T& t) const { + using tbb::flow::key_from_message; + return key_from_message(t); + } + }; + // Adds const to reference type + template + struct key_from_message_body { + const K& operator()(const T& t) const { + using tbb::flow::key_from_message; + return key_from_message(t); + } + }; +#endif /* __TBB_PREVIEW_MESSAGE_BASED_KEY_MATCHING */ + // key_matching unfolded_join_node. This must be a separate specialization because the constructors + // differ. + + template + class unfolded_join_node<2,key_matching_port,OutputTuple,key_matching > : public + join_base<2,key_matching_port,OutputTuple,key_matching >::type { + typedef typename tbb::flow::tuple_element<0, OutputTuple>::type T0; + typedef typename tbb::flow::tuple_element<1, OutputTuple>::type T1; + public: + typedef typename wrap_key_tuple_elements<2,key_matching_port,key_matching,OutputTuple>::type input_ports_type; + typedef OutputTuple output_type; + private: + typedef join_node_base, input_ports_type, output_type > base_type; + typedef typename internal::type_to_key_function_body *f0_p; + typedef typename internal::type_to_key_function_body *f1_p; + typedef typename tbb::flow::tuple< f0_p, f1_p > func_initializer_type; + public: +#if __TBB_PREVIEW_MESSAGE_BASED_KEY_MATCHING + unfolded_join_node(graph &g) : base_type(g, + func_initializer_type( + new internal::type_to_key_function_body_leaf >(key_from_message_body()), + new internal::type_to_key_function_body_leaf >(key_from_message_body()) + ) ) { + } +#endif /* __TBB_PREVIEW_MESSAGE_BASED_KEY_MATCHING */ + template + unfolded_join_node(graph &g, Body0 body0, Body1 body1) : base_type(g, + func_initializer_type( + new internal::type_to_key_function_body_leaf(body0), + new internal::type_to_key_function_body_leaf(body1) + ) ) { + __TBB_STATIC_ASSERT(tbb::flow::tuple_size::value == 2, "wrong number of body initializers"); + } + unfolded_join_node(const unfolded_join_node &other) : base_type(other) {} + }; + + template + class unfolded_join_node<3,key_matching_port,OutputTuple,key_matching > : public + join_base<3,key_matching_port,OutputTuple,key_matching >::type { + typedef typename tbb::flow::tuple_element<0, OutputTuple>::type T0; + typedef typename tbb::flow::tuple_element<1, OutputTuple>::type T1; + typedef typename tbb::flow::tuple_element<2, OutputTuple>::type T2; + public: + typedef typename wrap_key_tuple_elements<3,key_matching_port,key_matching,OutputTuple>::type input_ports_type; + typedef 
OutputTuple output_type; + private: + typedef join_node_base, input_ports_type, output_type > base_type; + typedef typename internal::type_to_key_function_body *f0_p; + typedef typename internal::type_to_key_function_body *f1_p; + typedef typename internal::type_to_key_function_body *f2_p; + typedef typename tbb::flow::tuple< f0_p, f1_p, f2_p > func_initializer_type; + public: +#if __TBB_PREVIEW_MESSAGE_BASED_KEY_MATCHING + unfolded_join_node(graph &g) : base_type(g, + func_initializer_type( + new internal::type_to_key_function_body_leaf >(key_from_message_body()), + new internal::type_to_key_function_body_leaf >(key_from_message_body()), + new internal::type_to_key_function_body_leaf >(key_from_message_body()) + ) ) { + } +#endif /* __TBB_PREVIEW_MESSAGE_BASED_KEY_MATCHING */ + template + unfolded_join_node(graph &g, Body0 body0, Body1 body1, Body2 body2) : base_type(g, + func_initializer_type( + new internal::type_to_key_function_body_leaf(body0), + new internal::type_to_key_function_body_leaf(body1), + new internal::type_to_key_function_body_leaf(body2) + ) ) { + __TBB_STATIC_ASSERT(tbb::flow::tuple_size::value == 3, "wrong number of body initializers"); + } + unfolded_join_node(const unfolded_join_node &other) : base_type(other) {} + }; + + template + class unfolded_join_node<4,key_matching_port,OutputTuple,key_matching > : public + join_base<4,key_matching_port,OutputTuple,key_matching >::type { + typedef typename tbb::flow::tuple_element<0, OutputTuple>::type T0; + typedef typename tbb::flow::tuple_element<1, OutputTuple>::type T1; + typedef typename tbb::flow::tuple_element<2, OutputTuple>::type T2; + typedef typename tbb::flow::tuple_element<3, OutputTuple>::type T3; + public: + typedef typename wrap_key_tuple_elements<4,key_matching_port,key_matching,OutputTuple>::type input_ports_type; + typedef OutputTuple output_type; + private: + typedef join_node_base, input_ports_type, output_type > base_type; + typedef typename internal::type_to_key_function_body *f0_p; + typedef typename internal::type_to_key_function_body *f1_p; + typedef typename internal::type_to_key_function_body *f2_p; + typedef typename internal::type_to_key_function_body *f3_p; + typedef typename tbb::flow::tuple< f0_p, f1_p, f2_p, f3_p > func_initializer_type; + public: +#if __TBB_PREVIEW_MESSAGE_BASED_KEY_MATCHING + unfolded_join_node(graph &g) : base_type(g, + func_initializer_type( + new internal::type_to_key_function_body_leaf >(key_from_message_body()), + new internal::type_to_key_function_body_leaf >(key_from_message_body()), + new internal::type_to_key_function_body_leaf >(key_from_message_body()), + new internal::type_to_key_function_body_leaf >(key_from_message_body()) + ) ) { + } +#endif /* __TBB_PREVIEW_MESSAGE_BASED_KEY_MATCHING */ + template + unfolded_join_node(graph &g, Body0 body0, Body1 body1, Body2 body2, Body3 body3) : base_type(g, + func_initializer_type( + new internal::type_to_key_function_body_leaf(body0), + new internal::type_to_key_function_body_leaf(body1), + new internal::type_to_key_function_body_leaf(body2), + new internal::type_to_key_function_body_leaf(body3) + ) ) { + __TBB_STATIC_ASSERT(tbb::flow::tuple_size::value == 4, "wrong number of body initializers"); + } + unfolded_join_node(const unfolded_join_node &other) : base_type(other) {} + }; + + template + class unfolded_join_node<5,key_matching_port,OutputTuple,key_matching > : public + join_base<5,key_matching_port,OutputTuple,key_matching >::type { + typedef typename tbb::flow::tuple_element<0, OutputTuple>::type T0; + typedef 
typename tbb::flow::tuple_element<1, OutputTuple>::type T1; + typedef typename tbb::flow::tuple_element<2, OutputTuple>::type T2; + typedef typename tbb::flow::tuple_element<3, OutputTuple>::type T3; + typedef typename tbb::flow::tuple_element<4, OutputTuple>::type T4; + public: + typedef typename wrap_key_tuple_elements<5,key_matching_port,key_matching,OutputTuple>::type input_ports_type; + typedef OutputTuple output_type; + private: + typedef join_node_base , input_ports_type, output_type > base_type; + typedef typename internal::type_to_key_function_body *f0_p; + typedef typename internal::type_to_key_function_body *f1_p; + typedef typename internal::type_to_key_function_body *f2_p; + typedef typename internal::type_to_key_function_body *f3_p; + typedef typename internal::type_to_key_function_body *f4_p; + typedef typename tbb::flow::tuple< f0_p, f1_p, f2_p, f3_p, f4_p > func_initializer_type; + public: +#if __TBB_PREVIEW_MESSAGE_BASED_KEY_MATCHING + unfolded_join_node(graph &g) : base_type(g, + func_initializer_type( + new internal::type_to_key_function_body_leaf >(key_from_message_body()), + new internal::type_to_key_function_body_leaf >(key_from_message_body()), + new internal::type_to_key_function_body_leaf >(key_from_message_body()), + new internal::type_to_key_function_body_leaf >(key_from_message_body()), + new internal::type_to_key_function_body_leaf >(key_from_message_body()) + ) ) { + } +#endif /* __TBB_PREVIEW_MESSAGE_BASED_KEY_MATCHING */ + template + unfolded_join_node(graph &g, Body0 body0, Body1 body1, Body2 body2, Body3 body3, Body4 body4) : base_type(g, + func_initializer_type( + new internal::type_to_key_function_body_leaf(body0), + new internal::type_to_key_function_body_leaf(body1), + new internal::type_to_key_function_body_leaf(body2), + new internal::type_to_key_function_body_leaf(body3), + new internal::type_to_key_function_body_leaf(body4) + ) ) { + __TBB_STATIC_ASSERT(tbb::flow::tuple_size::value == 5, "wrong number of body initializers"); + } + unfolded_join_node(const unfolded_join_node &other) : base_type(other) {} + }; + +#if __TBB_VARIADIC_MAX >= 6 + template + class unfolded_join_node<6,key_matching_port,OutputTuple,key_matching > : public + join_base<6,key_matching_port,OutputTuple,key_matching >::type { + typedef typename tbb::flow::tuple_element<0, OutputTuple>::type T0; + typedef typename tbb::flow::tuple_element<1, OutputTuple>::type T1; + typedef typename tbb::flow::tuple_element<2, OutputTuple>::type T2; + typedef typename tbb::flow::tuple_element<3, OutputTuple>::type T3; + typedef typename tbb::flow::tuple_element<4, OutputTuple>::type T4; + typedef typename tbb::flow::tuple_element<5, OutputTuple>::type T5; + public: + typedef typename wrap_key_tuple_elements<6,key_matching_port,key_matching,OutputTuple>::type input_ports_type; + typedef OutputTuple output_type; + private: + typedef join_node_base , input_ports_type, output_type > base_type; + typedef typename internal::type_to_key_function_body *f0_p; + typedef typename internal::type_to_key_function_body *f1_p; + typedef typename internal::type_to_key_function_body *f2_p; + typedef typename internal::type_to_key_function_body *f3_p; + typedef typename internal::type_to_key_function_body *f4_p; + typedef typename internal::type_to_key_function_body *f5_p; + typedef typename tbb::flow::tuple< f0_p, f1_p, f2_p, f3_p, f4_p, f5_p > func_initializer_type; + public: +#if __TBB_PREVIEW_MESSAGE_BASED_KEY_MATCHING + unfolded_join_node(graph &g) : base_type(g, + func_initializer_type( + new 
internal::type_to_key_function_body_leaf >(key_from_message_body()), + new internal::type_to_key_function_body_leaf >(key_from_message_body()), + new internal::type_to_key_function_body_leaf >(key_from_message_body()), + new internal::type_to_key_function_body_leaf >(key_from_message_body()), + new internal::type_to_key_function_body_leaf >(key_from_message_body()), + new internal::type_to_key_function_body_leaf >(key_from_message_body()) + ) ) { + } +#endif /* __TBB_PREVIEW_MESSAGE_BASED_KEY_MATCHING */ + template + unfolded_join_node(graph &g, Body0 body0, Body1 body1, Body2 body2, Body3 body3, Body4 body4, Body5 body5) + : base_type(g, func_initializer_type( + new internal::type_to_key_function_body_leaf(body0), + new internal::type_to_key_function_body_leaf(body1), + new internal::type_to_key_function_body_leaf(body2), + new internal::type_to_key_function_body_leaf(body3), + new internal::type_to_key_function_body_leaf(body4), + new internal::type_to_key_function_body_leaf(body5) + ) ) { + __TBB_STATIC_ASSERT(tbb::flow::tuple_size::value == 6, "wrong number of body initializers"); + } + unfolded_join_node(const unfolded_join_node &other) : base_type(other) {} + }; +#endif + +#if __TBB_VARIADIC_MAX >= 7 + template + class unfolded_join_node<7,key_matching_port,OutputTuple,key_matching > : public + join_base<7,key_matching_port,OutputTuple,key_matching >::type { + typedef typename tbb::flow::tuple_element<0, OutputTuple>::type T0; + typedef typename tbb::flow::tuple_element<1, OutputTuple>::type T1; + typedef typename tbb::flow::tuple_element<2, OutputTuple>::type T2; + typedef typename tbb::flow::tuple_element<3, OutputTuple>::type T3; + typedef typename tbb::flow::tuple_element<4, OutputTuple>::type T4; + typedef typename tbb::flow::tuple_element<5, OutputTuple>::type T5; + typedef typename tbb::flow::tuple_element<6, OutputTuple>::type T6; + public: + typedef typename wrap_key_tuple_elements<7,key_matching_port,key_matching,OutputTuple>::type input_ports_type; + typedef OutputTuple output_type; + private: + typedef join_node_base , input_ports_type, output_type > base_type; + typedef typename internal::type_to_key_function_body *f0_p; + typedef typename internal::type_to_key_function_body *f1_p; + typedef typename internal::type_to_key_function_body *f2_p; + typedef typename internal::type_to_key_function_body *f3_p; + typedef typename internal::type_to_key_function_body *f4_p; + typedef typename internal::type_to_key_function_body *f5_p; + typedef typename internal::type_to_key_function_body *f6_p; + typedef typename tbb::flow::tuple< f0_p, f1_p, f2_p, f3_p, f4_p, f5_p, f6_p > func_initializer_type; + public: +#if __TBB_PREVIEW_MESSAGE_BASED_KEY_MATCHING + unfolded_join_node(graph &g) : base_type(g, + func_initializer_type( + new internal::type_to_key_function_body_leaf >(key_from_message_body()), + new internal::type_to_key_function_body_leaf >(key_from_message_body()), + new internal::type_to_key_function_body_leaf >(key_from_message_body()), + new internal::type_to_key_function_body_leaf >(key_from_message_body()), + new internal::type_to_key_function_body_leaf >(key_from_message_body()), + new internal::type_to_key_function_body_leaf >(key_from_message_body()), + new internal::type_to_key_function_body_leaf >(key_from_message_body()) + ) ) { + } +#endif /* __TBB_PREVIEW_MESSAGE_BASED_KEY_MATCHING */ + template + unfolded_join_node(graph &g, Body0 body0, Body1 body1, Body2 body2, Body3 body3, Body4 body4, + Body5 body5, Body6 body6) : base_type(g, func_initializer_type( + new 
internal::type_to_key_function_body_leaf(body0), + new internal::type_to_key_function_body_leaf(body1), + new internal::type_to_key_function_body_leaf(body2), + new internal::type_to_key_function_body_leaf(body3), + new internal::type_to_key_function_body_leaf(body4), + new internal::type_to_key_function_body_leaf(body5), + new internal::type_to_key_function_body_leaf(body6) + ) ) { + __TBB_STATIC_ASSERT(tbb::flow::tuple_size::value == 7, "wrong number of body initializers"); + } + unfolded_join_node(const unfolded_join_node &other) : base_type(other) {} + }; +#endif + +#if __TBB_VARIADIC_MAX >= 8 + template + class unfolded_join_node<8,key_matching_port,OutputTuple,key_matching > : public + join_base<8,key_matching_port,OutputTuple,key_matching >::type { + typedef typename tbb::flow::tuple_element<0, OutputTuple>::type T0; + typedef typename tbb::flow::tuple_element<1, OutputTuple>::type T1; + typedef typename tbb::flow::tuple_element<2, OutputTuple>::type T2; + typedef typename tbb::flow::tuple_element<3, OutputTuple>::type T3; + typedef typename tbb::flow::tuple_element<4, OutputTuple>::type T4; + typedef typename tbb::flow::tuple_element<5, OutputTuple>::type T5; + typedef typename tbb::flow::tuple_element<6, OutputTuple>::type T6; + typedef typename tbb::flow::tuple_element<7, OutputTuple>::type T7; + public: + typedef typename wrap_key_tuple_elements<8,key_matching_port,key_matching,OutputTuple>::type input_ports_type; + typedef OutputTuple output_type; + private: + typedef join_node_base , input_ports_type, output_type > base_type; + typedef typename internal::type_to_key_function_body *f0_p; + typedef typename internal::type_to_key_function_body *f1_p; + typedef typename internal::type_to_key_function_body *f2_p; + typedef typename internal::type_to_key_function_body *f3_p; + typedef typename internal::type_to_key_function_body *f4_p; + typedef typename internal::type_to_key_function_body *f5_p; + typedef typename internal::type_to_key_function_body *f6_p; + typedef typename internal::type_to_key_function_body *f7_p; + typedef typename tbb::flow::tuple< f0_p, f1_p, f2_p, f3_p, f4_p, f5_p, f6_p, f7_p > func_initializer_type; + public: +#if __TBB_PREVIEW_MESSAGE_BASED_KEY_MATCHING + unfolded_join_node(graph &g) : base_type(g, + func_initializer_type( + new internal::type_to_key_function_body_leaf >(key_from_message_body()), + new internal::type_to_key_function_body_leaf >(key_from_message_body()), + new internal::type_to_key_function_body_leaf >(key_from_message_body()), + new internal::type_to_key_function_body_leaf >(key_from_message_body()), + new internal::type_to_key_function_body_leaf >(key_from_message_body()), + new internal::type_to_key_function_body_leaf >(key_from_message_body()), + new internal::type_to_key_function_body_leaf >(key_from_message_body()), + new internal::type_to_key_function_body_leaf >(key_from_message_body()) + ) ) { + } +#endif /* __TBB_PREVIEW_MESSAGE_BASED_KEY_MATCHING */ + template + unfolded_join_node(graph &g, Body0 body0, Body1 body1, Body2 body2, Body3 body3, Body4 body4, + Body5 body5, Body6 body6, Body7 body7) : base_type(g, func_initializer_type( + new internal::type_to_key_function_body_leaf(body0), + new internal::type_to_key_function_body_leaf(body1), + new internal::type_to_key_function_body_leaf(body2), + new internal::type_to_key_function_body_leaf(body3), + new internal::type_to_key_function_body_leaf(body4), + new internal::type_to_key_function_body_leaf(body5), + new internal::type_to_key_function_body_leaf(body6), + new 
internal::type_to_key_function_body_leaf(body7) + ) ) { + __TBB_STATIC_ASSERT(tbb::flow::tuple_size::value == 8, "wrong number of body initializers"); + } + unfolded_join_node(const unfolded_join_node &other) : base_type(other) {} + }; +#endif + +#if __TBB_VARIADIC_MAX >= 9 + template + class unfolded_join_node<9,key_matching_port,OutputTuple,key_matching > : public + join_base<9,key_matching_port,OutputTuple,key_matching >::type { + typedef typename tbb::flow::tuple_element<0, OutputTuple>::type T0; + typedef typename tbb::flow::tuple_element<1, OutputTuple>::type T1; + typedef typename tbb::flow::tuple_element<2, OutputTuple>::type T2; + typedef typename tbb::flow::tuple_element<3, OutputTuple>::type T3; + typedef typename tbb::flow::tuple_element<4, OutputTuple>::type T4; + typedef typename tbb::flow::tuple_element<5, OutputTuple>::type T5; + typedef typename tbb::flow::tuple_element<6, OutputTuple>::type T6; + typedef typename tbb::flow::tuple_element<7, OutputTuple>::type T7; + typedef typename tbb::flow::tuple_element<8, OutputTuple>::type T8; + public: + typedef typename wrap_key_tuple_elements<9,key_matching_port,key_matching,OutputTuple>::type input_ports_type; + typedef OutputTuple output_type; + private: + typedef join_node_base , input_ports_type, output_type > base_type; + typedef typename internal::type_to_key_function_body *f0_p; + typedef typename internal::type_to_key_function_body *f1_p; + typedef typename internal::type_to_key_function_body *f2_p; + typedef typename internal::type_to_key_function_body *f3_p; + typedef typename internal::type_to_key_function_body *f4_p; + typedef typename internal::type_to_key_function_body *f5_p; + typedef typename internal::type_to_key_function_body *f6_p; + typedef typename internal::type_to_key_function_body *f7_p; + typedef typename internal::type_to_key_function_body *f8_p; + typedef typename tbb::flow::tuple< f0_p, f1_p, f2_p, f3_p, f4_p, f5_p, f6_p, f7_p, f8_p > func_initializer_type; + public: +#if __TBB_PREVIEW_MESSAGE_BASED_KEY_MATCHING + unfolded_join_node(graph &g) : base_type(g, + func_initializer_type( + new internal::type_to_key_function_body_leaf >(key_from_message_body()), + new internal::type_to_key_function_body_leaf >(key_from_message_body()), + new internal::type_to_key_function_body_leaf >(key_from_message_body()), + new internal::type_to_key_function_body_leaf >(key_from_message_body()), + new internal::type_to_key_function_body_leaf >(key_from_message_body()), + new internal::type_to_key_function_body_leaf >(key_from_message_body()), + new internal::type_to_key_function_body_leaf >(key_from_message_body()), + new internal::type_to_key_function_body_leaf >(key_from_message_body()), + new internal::type_to_key_function_body_leaf >(key_from_message_body()) + ) ) { + } +#endif /* __TBB_PREVIEW_MESSAGE_BASED_KEY_MATCHING */ + template + unfolded_join_node(graph &g, Body0 body0, Body1 body1, Body2 body2, Body3 body3, Body4 body4, + Body5 body5, Body6 body6, Body7 body7, Body8 body8) : base_type(g, func_initializer_type( + new internal::type_to_key_function_body_leaf(body0), + new internal::type_to_key_function_body_leaf(body1), + new internal::type_to_key_function_body_leaf(body2), + new internal::type_to_key_function_body_leaf(body3), + new internal::type_to_key_function_body_leaf(body4), + new internal::type_to_key_function_body_leaf(body5), + new internal::type_to_key_function_body_leaf(body6), + new internal::type_to_key_function_body_leaf(body7), + new internal::type_to_key_function_body_leaf(body8) + ) ) { + 
__TBB_STATIC_ASSERT(tbb::flow::tuple_size::value == 9, "wrong number of body initializers"); + } + unfolded_join_node(const unfolded_join_node &other) : base_type(other) {} + }; +#endif + +#if __TBB_VARIADIC_MAX >= 10 + template + class unfolded_join_node<10,key_matching_port,OutputTuple,key_matching > : public + join_base<10,key_matching_port,OutputTuple,key_matching >::type { + typedef typename tbb::flow::tuple_element<0, OutputTuple>::type T0; + typedef typename tbb::flow::tuple_element<1, OutputTuple>::type T1; + typedef typename tbb::flow::tuple_element<2, OutputTuple>::type T2; + typedef typename tbb::flow::tuple_element<3, OutputTuple>::type T3; + typedef typename tbb::flow::tuple_element<4, OutputTuple>::type T4; + typedef typename tbb::flow::tuple_element<5, OutputTuple>::type T5; + typedef typename tbb::flow::tuple_element<6, OutputTuple>::type T6; + typedef typename tbb::flow::tuple_element<7, OutputTuple>::type T7; + typedef typename tbb::flow::tuple_element<8, OutputTuple>::type T8; + typedef typename tbb::flow::tuple_element<9, OutputTuple>::type T9; + public: + typedef typename wrap_key_tuple_elements<10,key_matching_port,key_matching,OutputTuple>::type input_ports_type; + typedef OutputTuple output_type; + private: + typedef join_node_base , input_ports_type, output_type > base_type; + typedef typename internal::type_to_key_function_body *f0_p; + typedef typename internal::type_to_key_function_body *f1_p; + typedef typename internal::type_to_key_function_body *f2_p; + typedef typename internal::type_to_key_function_body *f3_p; + typedef typename internal::type_to_key_function_body *f4_p; + typedef typename internal::type_to_key_function_body *f5_p; + typedef typename internal::type_to_key_function_body *f6_p; + typedef typename internal::type_to_key_function_body *f7_p; + typedef typename internal::type_to_key_function_body *f8_p; + typedef typename internal::type_to_key_function_body *f9_p; + typedef typename tbb::flow::tuple< f0_p, f1_p, f2_p, f3_p, f4_p, f5_p, f6_p, f7_p, f8_p, f9_p > func_initializer_type; + public: +#if __TBB_PREVIEW_MESSAGE_BASED_KEY_MATCHING + unfolded_join_node(graph &g) : base_type(g, + func_initializer_type( + new internal::type_to_key_function_body_leaf >(key_from_message_body()), + new internal::type_to_key_function_body_leaf >(key_from_message_body()), + new internal::type_to_key_function_body_leaf >(key_from_message_body()), + new internal::type_to_key_function_body_leaf >(key_from_message_body()), + new internal::type_to_key_function_body_leaf >(key_from_message_body()), + new internal::type_to_key_function_body_leaf >(key_from_message_body()), + new internal::type_to_key_function_body_leaf >(key_from_message_body()), + new internal::type_to_key_function_body_leaf >(key_from_message_body()), + new internal::type_to_key_function_body_leaf >(key_from_message_body()), + new internal::type_to_key_function_body_leaf >(key_from_message_body()) + ) ) { + } +#endif /* __TBB_PREVIEW_MESSAGE_BASED_KEY_MATCHING */ + template + unfolded_join_node(graph &g, Body0 body0, Body1 body1, Body2 body2, Body3 body3, Body4 body4, + Body5 body5, Body6 body6, Body7 body7, Body8 body8, Body9 body9) : base_type(g, func_initializer_type( + new internal::type_to_key_function_body_leaf(body0), + new internal::type_to_key_function_body_leaf(body1), + new internal::type_to_key_function_body_leaf(body2), + new internal::type_to_key_function_body_leaf(body3), + new internal::type_to_key_function_body_leaf(body4), + new internal::type_to_key_function_body_leaf(body5), + new 
internal::type_to_key_function_body_leaf(body6), + new internal::type_to_key_function_body_leaf(body7), + new internal::type_to_key_function_body_leaf(body8), + new internal::type_to_key_function_body_leaf(body9) + ) ) { + __TBB_STATIC_ASSERT(tbb::flow::tuple_size::value == 10, "wrong number of body initializers"); + } + unfolded_join_node(const unfolded_join_node &other) : base_type(other) {} + }; +#endif + + //! templated function to refer to input ports of the join node + template + typename tbb::flow::tuple_element::type &input_port(JNT &jn) { + return tbb::flow::get(jn.input_ports()); + } + +} +#endif // __TBB__flow_graph_join_impl_H + diff --git a/ohos/arm64-v8a/include/tbb/internal/_flow_graph_node_impl.h b/ohos/arm64-v8a/include/tbb/internal/_flow_graph_node_impl.h new file mode 100644 index 00000000..dbc56ecd --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/internal/_flow_graph_node_impl.h @@ -0,0 +1,971 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB__flow_graph_node_impl_H +#define __TBB__flow_graph_node_impl_H + +#ifndef __TBB_flow_graph_H +#error Do not #include this internal file directly; use public TBB headers instead. +#endif + +#include "_flow_graph_item_buffer_impl.h" + +//! @cond INTERNAL +namespace internal { + + using tbb::internal::aggregated_operation; + using tbb::internal::aggregating_functor; + using tbb::internal::aggregator; + + template< typename T, typename A > + class function_input_queue : public item_buffer { + public: + bool empty() const { + return this->buffer_empty(); + } + + const T& front() const { + return this->item_buffer::front(); + } + + bool pop( T& t ) { + return this->pop_front( t ); + } + + void pop() { + this->destroy_front(); + } + + bool push( T& t ) { + return this->push_back( t ); + } + }; + + //! Input and scheduling for a function node that takes a type Input as input + // The only up-ref is apply_body_impl, which should implement the function + // call and any handling of the result. + template< typename Input, typename Policy, typename A, typename ImplType > + class function_input_base : public receiver, tbb::internal::no_assign { + enum op_type {reg_pred, rem_pred, try_fwd, tryput_bypass, app_body_bypass, occupy_concurrency +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + , add_blt_pred, del_blt_pred, + blt_pred_cnt, blt_pred_cpy // create vector copies of preds and succs +#endif + }; + typedef function_input_base class_type; + + public: + + //! 
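+        // function_input_base is the input-side machinery shared by function_node
+        // and multifunction_node: it limits the number of concurrently running
+        // bodies to my_max_concurrency, parks items it cannot run yet (in my_queue
+        // under the queueing policy, or by pulling them later from cached
+        // predecessors under the rejecting policy), and serializes all of this
+        // bookkeeping through the aggregator defined further down.
+        //
+        // A minimal sketch of the public node this backs (the doubling body is
+        // illustrative only):
+        //
+        //     tbb::flow::graph g;
+        //     tbb::flow::function_node<int, int>
+        //         doubler(g, tbb::flow::serial, [](int v) { return 2 * v; });
+        //     // With the default queueing policy, inputs beyond the concurrency
+        //     // limit are buffered instead of being rejected back to the sender.
+        //     doubler.try_put(21);
+        //     g.wait_for_all();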
The input type of this receiver + typedef Input input_type; + typedef typename receiver::predecessor_type predecessor_type; + typedef predecessor_cache predecessor_cache_type; + typedef function_input_queue input_queue_type; + typedef typename tbb::internal::allocator_rebind::type queue_allocator_type; + __TBB_STATIC_ASSERT(!((internal::has_policy::value) && (internal::has_policy::value)), + "queueing and rejecting policies can't be specified simultaneously"); + +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + typedef typename predecessor_cache_type::built_predecessors_type built_predecessors_type; + typedef typename receiver::predecessor_list_type predecessor_list_type; +#endif + + //! Constructor for function_input_base + function_input_base( + graph &g, __TBB_FLOW_GRAPH_PRIORITY_ARG1(size_t max_concurrency, node_priority_t priority) + ) : my_graph_ref(g), my_max_concurrency(max_concurrency) + , __TBB_FLOW_GRAPH_PRIORITY_ARG1(my_concurrency(0), my_priority(priority)) + , my_queue(!internal::has_policy::value ? new input_queue_type() : NULL) + , forwarder_busy(false) + { + my_predecessors.set_owner(this); + my_aggregator.initialize_handler(handler_type(this)); + } + + //! Copy constructor + function_input_base( const function_input_base& src) + : receiver(), tbb::internal::no_assign() + , my_graph_ref(src.my_graph_ref), my_max_concurrency(src.my_max_concurrency) + , __TBB_FLOW_GRAPH_PRIORITY_ARG1(my_concurrency(0), my_priority(src.my_priority)) + , my_queue(src.my_queue ? new input_queue_type() : NULL), forwarder_busy(false) + { + my_predecessors.set_owner(this); + my_aggregator.initialize_handler(handler_type(this)); + } + + //! Destructor + // The queue is allocated by the constructor for {multi}function_node. + // TODO: pass the graph_buffer_policy to the base so it can allocate the queue instead. + // This would be an interface-breaking change. + virtual ~function_input_base() { + if ( my_queue ) delete my_queue; + } + + task* try_put_task( const input_type& t) __TBB_override { + return try_put_task_impl(t, internal::has_policy()); + } + + //! Adds src to the list of cached predecessors. + bool register_predecessor( predecessor_type &src ) __TBB_override { + operation_type op_data(reg_pred); + op_data.r = &src; + my_aggregator.execute(&op_data); + return true; + } + + //! Removes src from the list of cached predecessors. + bool remove_predecessor( predecessor_type &src ) __TBB_override { + operation_type op_data(rem_pred); + op_data.r = &src; + my_aggregator.execute(&op_data); + return true; + } + +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + //! Adds to list of predecessors added by make_edge + void internal_add_built_predecessor( predecessor_type &src) __TBB_override { + operation_type op_data(add_blt_pred); + op_data.r = &src; + my_aggregator.execute(&op_data); + } + + //! 
removes from to list of predecessors (used by remove_edge) + void internal_delete_built_predecessor( predecessor_type &src) __TBB_override { + operation_type op_data(del_blt_pred); + op_data.r = &src; + my_aggregator.execute(&op_data); + } + + size_t predecessor_count() __TBB_override { + operation_type op_data(blt_pred_cnt); + my_aggregator.execute(&op_data); + return op_data.cnt_val; + } + + void copy_predecessors(predecessor_list_type &v) __TBB_override { + operation_type op_data(blt_pred_cpy); + op_data.predv = &v; + my_aggregator.execute(&op_data); + } + + built_predecessors_type &built_predecessors() __TBB_override { + return my_predecessors.built_predecessors(); + } +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ + + protected: + + void reset_function_input_base( reset_flags f) { + my_concurrency = 0; + if(my_queue) { + my_queue->reset(); + } + reset_receiver(f); + forwarder_busy = false; + } + + graph& my_graph_ref; + const size_t my_max_concurrency; + size_t my_concurrency; + __TBB_FLOW_GRAPH_PRIORITY_EXPR( node_priority_t my_priority; ) + input_queue_type *my_queue; + predecessor_cache my_predecessors; + + void reset_receiver( reset_flags f) __TBB_override { + if( f & rf_clear_edges) my_predecessors.clear(); + else + my_predecessors.reset(); + __TBB_ASSERT(!(f & rf_clear_edges) || my_predecessors.empty(), "function_input_base reset failed"); + } + + graph& graph_reference() const __TBB_override { + return my_graph_ref; + } + + task* try_get_postponed_task(const input_type& i) { + operation_type op_data(i, app_body_bypass); // tries to pop an item or get_item + my_aggregator.execute(&op_data); + return op_data.bypass_t; + } + + private: + + friend class apply_body_task_bypass< class_type, input_type >; + friend class forward_task_bypass< class_type >; + + class operation_type : public aggregated_operation< operation_type > { + public: + char type; + union { + input_type *elem; + predecessor_type *r; +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + size_t cnt_val; + predecessor_list_type *predv; +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ + }; + tbb::task *bypass_t; + operation_type(const input_type& e, op_type t) : + type(char(t)), elem(const_cast(&e)) {} + operation_type(op_type t) : type(char(t)), r(NULL) {} + }; + + bool forwarder_busy; + typedef internal::aggregating_functor handler_type; + friend class internal::aggregating_functor; + aggregator< handler_type, operation_type > my_aggregator; + + task* perform_queued_requests() { + task* new_task = NULL; + if(my_queue) { + if(!my_queue->empty()) { + ++my_concurrency; + new_task = create_body_task(my_queue->front()); + + my_queue->pop(); + } + } + else { + input_type i; + if(my_predecessors.get_item(i)) { + ++my_concurrency; + new_task = create_body_task(i); + } + } + return new_task; + } + void handle_operations(operation_type *op_list) { + operation_type *tmp; + while (op_list) { + tmp = op_list; + op_list = op_list->next; + switch (tmp->type) { + case reg_pred: + my_predecessors.add(*(tmp->r)); + __TBB_store_with_release(tmp->status, SUCCEEDED); + if (!forwarder_busy) { + forwarder_busy = true; + spawn_forward_task(); + } + break; + case rem_pred: + my_predecessors.remove(*(tmp->r)); + __TBB_store_with_release(tmp->status, SUCCEEDED); + break; + case app_body_bypass: { + tmp->bypass_t = NULL; + __TBB_ASSERT(my_max_concurrency != 0, NULL); + --my_concurrency; + if(my_concurrencybypass_t = perform_queued_requests(); + + __TBB_store_with_release(tmp->status, SUCCEEDED); + } + break; + case tryput_bypass: 
internal_try_put_task(tmp); break; + case try_fwd: internal_forward(tmp); break; + case occupy_concurrency: + if (my_concurrency < my_max_concurrency) { + ++my_concurrency; + __TBB_store_with_release(tmp->status, SUCCEEDED); + } else { + __TBB_store_with_release(tmp->status, FAILED); + } + break; +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + case add_blt_pred: { + my_predecessors.internal_add_built_predecessor(*(tmp->r)); + __TBB_store_with_release(tmp->status, SUCCEEDED); + } + break; + case del_blt_pred: + my_predecessors.internal_delete_built_predecessor(*(tmp->r)); + __TBB_store_with_release(tmp->status, SUCCEEDED); + break; + case blt_pred_cnt: + tmp->cnt_val = my_predecessors.predecessor_count(); + __TBB_store_with_release(tmp->status, SUCCEEDED); + break; + case blt_pred_cpy: + my_predecessors.copy_predecessors( *(tmp->predv) ); + __TBB_store_with_release(tmp->status, SUCCEEDED); + break; +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ + } + } + } + + //! Put to the node, but return the task instead of enqueueing it + void internal_try_put_task(operation_type *op) { + __TBB_ASSERT(my_max_concurrency != 0, NULL); + if (my_concurrency < my_max_concurrency) { + ++my_concurrency; + task * new_task = create_body_task(*(op->elem)); + op->bypass_t = new_task; + __TBB_store_with_release(op->status, SUCCEEDED); + } else if ( my_queue && my_queue->push(*(op->elem)) ) { + op->bypass_t = SUCCESSFULLY_ENQUEUED; + __TBB_store_with_release(op->status, SUCCEEDED); + } else { + op->bypass_t = NULL; + __TBB_store_with_release(op->status, FAILED); + } + } + + //! Creates tasks for postponed messages if available and if concurrency allows + void internal_forward(operation_type *op) { + op->bypass_t = NULL; + if (my_concurrency < my_max_concurrency || !my_max_concurrency) + op->bypass_t = perform_queued_requests(); + if(op->bypass_t) + __TBB_store_with_release(op->status, SUCCEEDED); + else { + forwarder_busy = false; + __TBB_store_with_release(op->status, FAILED); + } + } + + task* internal_try_put_bypass( const input_type& t ) { + operation_type op_data(t, tryput_bypass); + my_aggregator.execute(&op_data); + if( op_data.status == internal::SUCCEEDED ) { + return op_data.bypass_t; + } + return NULL; + } + + task* try_put_task_impl( const input_type& t, /*lightweight=*/tbb::internal::true_type ) { + if( my_max_concurrency == 0 ) { + return apply_body_bypass(t); + } else { + operation_type check_op(t, occupy_concurrency); + my_aggregator.execute(&check_op); + if( check_op.status == internal::SUCCEEDED ) { + return apply_body_bypass(t); + } + return internal_try_put_bypass(t); + } + } + + task* try_put_task_impl( const input_type& t, /*lightweight=*/tbb::internal::false_type ) { + if( my_max_concurrency == 0 ) { + return create_body_task(t); + } else { + return internal_try_put_bypass(t); + } + } + + //! Applies the body to the provided input + // then decides if more work is available + task * apply_body_bypass( const input_type &i ) { + return static_cast(this)->apply_body_impl_bypass(i); + } + + //! allocates a task to apply a body + inline task * create_body_task( const input_type &input ) { + return (internal::is_graph_active(my_graph_ref)) ? + new( task::allocate_additional_child_of(*(my_graph_ref.root_task())) ) + apply_body_task_bypass < class_type, input_type >( + *this, __TBB_FLOW_GRAPH_PRIORITY_ARG1(input, my_priority)) + : NULL; + } + + //! 
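+        // Note on the *_bypass convention used here: instead of spawning the body
+        // or forward task directly, these helpers hand the task back to the caller
+        // so it can be executed immediately or merged with other returned tasks
+        // (see combine_tasks in forward_task below). SUCCESSFULLY_ENQUEUED is a
+        // sentinel value meaning "message consumed, nothing for the caller to run".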
This is executed by an enqueued task, the "forwarder" + task* forward_task() { + operation_type op_data(try_fwd); + task* rval = NULL; + do { + op_data.status = WAIT; + my_aggregator.execute(&op_data); + if(op_data.status == SUCCEEDED) { + task* ttask = op_data.bypass_t; + __TBB_ASSERT( ttask && ttask != SUCCESSFULLY_ENQUEUED, NULL ); + rval = combine_tasks(my_graph_ref, rval, ttask); + } + } while (op_data.status == SUCCEEDED); + return rval; + } + + inline task *create_forward_task() { + return (internal::is_graph_active(my_graph_ref)) ? + new( task::allocate_additional_child_of(*(my_graph_ref.root_task())) ) + forward_task_bypass< class_type >( __TBB_FLOW_GRAPH_PRIORITY_ARG1(*this, my_priority) ) + : NULL; + } + + //! Spawns a task that calls forward() + inline void spawn_forward_task() { + task* tp = create_forward_task(); + if(tp) { + internal::spawn_in_graph_arena(graph_reference(), *tp); + } + } + }; // function_input_base + + //! Implements methods for a function node that takes a type Input as input and sends + // a type Output to its successors. + template< typename Input, typename Output, typename Policy, typename A> + class function_input : public function_input_base > { + public: + typedef Input input_type; + typedef Output output_type; + typedef function_body function_body_type; + typedef function_input my_class; + typedef function_input_base base_type; + typedef function_input_queue input_queue_type; + + // constructor + template + function_input( + graph &g, size_t max_concurrency, + __TBB_FLOW_GRAPH_PRIORITY_ARG1(Body& body, node_priority_t priority) + ) : base_type(g, __TBB_FLOW_GRAPH_PRIORITY_ARG1(max_concurrency, priority)) + , my_body( new internal::function_body_leaf< input_type, output_type, Body>(body) ) + , my_init_body( new internal::function_body_leaf< input_type, output_type, Body>(body) ) { + } + + //! 
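+        // my_body is the live body; my_init_body keeps a pristine clone of the body
+        // the node was constructed with, so a reset with rf_reset_bodies and the
+        // copy constructor below restore or copy the original rather than a body
+        // that may have accumulated state while the graph ran.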
Copy constructor + function_input( const function_input& src ) : + base_type(src), + my_body( src.my_init_body->clone() ), + my_init_body(src.my_init_body->clone() ) { + } + + ~function_input() { + delete my_body; + delete my_init_body; + } + + template< typename Body > + Body copy_function_object() { + function_body_type &body_ref = *this->my_body; + return dynamic_cast< internal::function_body_leaf & >(body_ref).get_body(); + } + + output_type apply_body_impl( const input_type& i) { + // There is an extra copied needed to capture the + // body execution without the try_put + tbb::internal::fgt_begin_body( my_body ); + output_type v = (*my_body)(i); + tbb::internal::fgt_end_body( my_body ); + return v; + } + + //TODO: consider moving into the base class + task * apply_body_impl_bypass( const input_type &i) { + output_type v = apply_body_impl(i); +#if TBB_DEPRECATED_MESSAGE_FLOW_ORDER + task* successor_task = successors().try_put_task(v); +#endif + task* postponed_task = NULL; + if( base_type::my_max_concurrency != 0 ) { + postponed_task = base_type::try_get_postponed_task(i); + __TBB_ASSERT( !postponed_task || postponed_task != SUCCESSFULLY_ENQUEUED, NULL ); + } +#if TBB_DEPRECATED_MESSAGE_FLOW_ORDER + graph& g = base_type::my_graph_ref; + return combine_tasks(g, successor_task, postponed_task); +#else + if( postponed_task ) { + // make the task available for other workers since we do not know successors' + // execution policy + internal::spawn_in_graph_arena(base_type::graph_reference(), *postponed_task); + } + task* successor_task = successors().try_put_task(v); +#if _MSC_VER && !__INTEL_COMPILER +#pragma warning (push) +#pragma warning (disable: 4127) /* suppress conditional expression is constant */ +#endif + if(internal::has_policy::value) { +#if _MSC_VER && !__INTEL_COMPILER +#pragma warning (pop) +#endif + if(!successor_task) { + // Return confirmative status since current + // node's body has been executed anyway + successor_task = SUCCESSFULLY_ENQUEUED; + } + } + return successor_task; +#endif /* TBB_DEPRECATED_MESSAGE_FLOW_ORDER */ + } + + protected: + + void reset_function_input(reset_flags f) { + base_type::reset_function_input_base(f); + if(f & rf_reset_bodies) { + function_body_type *tmp = my_init_body->clone(); + delete my_body; + my_body = tmp; + } + } + + function_body_type *my_body; + function_body_type *my_init_body; + virtual broadcast_cache &successors() = 0; + + }; // function_input + + + // helper templates to clear the successor edges of the output ports of an multifunction_node + template struct clear_element { + template static void clear_this(P &p) { + (void)tbb::flow::get(p).successors().clear(); + clear_element::clear_this(p); + } + template static bool this_empty(P &p) { + if(tbb::flow::get(p).successors().empty()) + return clear_element::this_empty(p); + return false; + } + }; + + template<> struct clear_element<1> { + template static void clear_this(P &p) { + (void)tbb::flow::get<0>(p).successors().clear(); + } + template static bool this_empty(P &p) { + return tbb::flow::get<0>(p).successors().empty(); + } + }; + +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + // helper templates to extract the output ports of an multifunction_node from graph + template struct extract_element { + template static void extract_this(P &p) { + (void)tbb::flow::get(p).successors().built_successors().sender_extract(tbb::flow::get(p)); + extract_element::extract_this(p); + } + }; + + template<> struct extract_element<1> { + template static void extract_this(P &p) { + 
(void)tbb::flow::get<0>(p).successors().built_successors().sender_extract(tbb::flow::get<0>(p)); + } + }; +#endif + + template + struct init_output_ports { +#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT + template + static OutputTuple call(graph& g, const tbb::flow::tuple&) { + return OutputTuple(Args(g)...); + } +#else // __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT + template + static OutputTuple call(graph& g, const tbb::flow::tuple&) { + return OutputTuple(T1(g)); + } + + template + static OutputTuple call(graph& g, const tbb::flow::tuple&) { + return OutputTuple(T1(g), T2(g)); + } + + template + static OutputTuple call(graph& g, const tbb::flow::tuple&) { + return OutputTuple(T1(g), T2(g), T3(g)); + } + + template + static OutputTuple call(graph& g, const tbb::flow::tuple&) { + return OutputTuple(T1(g), T2(g), T3(g), T4(g)); + } + + template + static OutputTuple call(graph& g, const tbb::flow::tuple&) { + return OutputTuple(T1(g), T2(g), T3(g), T4(g), T5(g)); + } +#if __TBB_VARIADIC_MAX >= 6 + template + static OutputTuple call(graph& g, const tbb::flow::tuple&) { + return OutputTuple(T1(g), T2(g), T3(g), T4(g), T5(g), T6(g)); + } +#endif +#if __TBB_VARIADIC_MAX >= 7 + template + static OutputTuple call(graph& g, + const tbb::flow::tuple&) { + return OutputTuple(T1(g), T2(g), T3(g), T4(g), T5(g), T6(g), T7(g)); + } +#endif +#if __TBB_VARIADIC_MAX >= 8 + template + static OutputTuple call(graph& g, + const tbb::flow::tuple&) { + return OutputTuple(T1(g), T2(g), T3(g), T4(g), T5(g), T6(g), T7(g), T8(g)); + } +#endif +#if __TBB_VARIADIC_MAX >= 9 + template + static OutputTuple call(graph& g, + const tbb::flow::tuple&) { + return OutputTuple(T1(g), T2(g), T3(g), T4(g), T5(g), T6(g), T7(g), T8(g), T9(g)); + } +#endif +#if __TBB_VARIADIC_MAX >= 9 + template + static OutputTuple call(graph& g, + const tbb::flow::tuple&) { + return OutputTuple(T1(g), T2(g), T3(g), T4(g), T5(g), T6(g), T7(g), T8(g), T9(g), T10(g)); + } +#endif +#endif // __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT + }; // struct init_output_ports + + //! Implements methods for a function node that takes a type Input as input + // and has a tuple of output ports specified. + template< typename Input, typename OutputPortSet, typename Policy, typename A> + class multifunction_input : public function_input_base > { + public: + static const int N = tbb::flow::tuple_size::value; + typedef Input input_type; + typedef OutputPortSet output_ports_type; + typedef multifunction_body multifunction_body_type; + typedef multifunction_input my_class; + typedef function_input_base base_type; + typedef function_input_queue input_queue_type; + + // constructor + template + multifunction_input(graph &g, size_t max_concurrency, + __TBB_FLOW_GRAPH_PRIORITY_ARG1(Body& body, node_priority_t priority) + ) : base_type(g, __TBB_FLOW_GRAPH_PRIORITY_ARG1(max_concurrency, priority)) + , my_body( new internal::multifunction_body_leaf(body) ) + , my_init_body( new internal::multifunction_body_leaf(body) ) + , my_output_ports(init_output_ports::call(g, my_output_ports)){ + } + + //! 
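+        // Unlike function_input, a multifunction body is handed the whole tuple of
+        // output ports and writes its results itself, so apply_body_impl_bypass
+        // below has no single successor value to forward and simply reports
+        // SUCCESSFULLY_ENQUEUED. init_output_ports builds each port against the
+        // owning graph. A minimal sketch of the public node (names illustrative):
+        //
+        //     using mf_node = tbb::flow::multifunction_node<int, std::tuple<int, int> >;
+        //     tbb::flow::graph g;
+        //     mf_node splitter(g, tbb::flow::unlimited,
+        //         [](const int& v, mf_node::output_ports_type& ports) {
+        //             if (v % 2) std::get<0>(ports).try_put(v);  // odd values
+        //             else       std::get<1>(ports).try_put(v);  // even values
+        //         });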
Copy constructor + multifunction_input( const multifunction_input& src ) : + base_type(src), + my_body( src.my_init_body->clone() ), + my_init_body(src.my_init_body->clone() ), + my_output_ports( init_output_ports::call(src.my_graph_ref, my_output_ports) ) { + } + + ~multifunction_input() { + delete my_body; + delete my_init_body; + } + + template< typename Body > + Body copy_function_object() { + multifunction_body_type &body_ref = *this->my_body; + return *static_cast(dynamic_cast< internal::multifunction_body_leaf & >(body_ref).get_body_ptr()); + } + + // for multifunction nodes we do not have a single successor as such. So we just tell + // the task we were successful. + //TODO: consider moving common parts with implementation in function_input into separate function + task * apply_body_impl_bypass( const input_type &i) { + tbb::internal::fgt_begin_body( my_body ); + (*my_body)(i, my_output_ports); + tbb::internal::fgt_end_body( my_body ); + task* ttask = NULL; + if(base_type::my_max_concurrency != 0) { + ttask = base_type::try_get_postponed_task(i); + } + return ttask ? ttask : SUCCESSFULLY_ENQUEUED; + } + + output_ports_type &output_ports(){ return my_output_ports; } + + protected: +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + void extract() { + extract_element::extract_this(my_output_ports); + } +#endif + + void reset(reset_flags f) { + base_type::reset_function_input_base(f); + if(f & rf_clear_edges)clear_element::clear_this(my_output_ports); + if(f & rf_reset_bodies) { + multifunction_body_type *tmp = my_init_body->clone(); + delete my_body; + my_body = tmp; + } + __TBB_ASSERT(!(f & rf_clear_edges) || clear_element::this_empty(my_output_ports), "multifunction_node reset failed"); + } + + multifunction_body_type *my_body; + multifunction_body_type *my_init_body; + output_ports_type my_output_ports; + + }; // multifunction_input + + // template to refer to an output port of a multifunction_node + template + typename tbb::flow::tuple_element::type &output_port(MOP &op) { + return tbb::flow::get(op.output_ports()); + } + + inline void check_task_and_spawn(graph& g, task* t) { + if (t && t != SUCCESSFULLY_ENQUEUED) { + internal::spawn_in_graph_arena(g, *t); + } + } + + // helper structs for split_node + template + struct emit_element { + template + static task* emit_this(graph& g, const T &t, P &p) { + // TODO: consider to collect all the tasks in task_list and spawn them all at once + task* last_task = tbb::flow::get(p).try_put_task(tbb::flow::get(t)); + check_task_and_spawn(g, last_task); + return emit_element::emit_this(g,t,p); + } + }; + + template<> + struct emit_element<1> { + template + static task* emit_this(graph& g, const T &t, P &p) { + task* last_task = tbb::flow::get<0>(p).try_put_task(tbb::flow::get<0>(t)); + check_task_and_spawn(g, last_task); + return SUCCESSFULLY_ENQUEUED; + } + }; + + //! Implements methods for an executable node that takes continue_msg as input + template< typename Output, typename Policy> + class continue_input : public continue_receiver { + public: + + //! The input type of this receiver + typedef continue_msg input_type; + + //! 
The output type of this receiver + typedef Output output_type; + typedef function_body function_body_type; + typedef continue_input class_type; + + template< typename Body > + continue_input( graph &g, __TBB_FLOW_GRAPH_PRIORITY_ARG1(Body& body, node_priority_t priority) ) + : continue_receiver(__TBB_FLOW_GRAPH_PRIORITY_ARG1(/*number_of_predecessors=*/0, priority)) + , my_graph_ref(g) + , my_body( new internal::function_body_leaf< input_type, output_type, Body>(body) ) + , my_init_body( new internal::function_body_leaf< input_type, output_type, Body>(body) ) + { } + + template< typename Body > + continue_input( graph &g, int number_of_predecessors, + __TBB_FLOW_GRAPH_PRIORITY_ARG1(Body& body, node_priority_t priority) + ) : continue_receiver( __TBB_FLOW_GRAPH_PRIORITY_ARG1(number_of_predecessors, priority) ) + , my_graph_ref(g) + , my_body( new internal::function_body_leaf< input_type, output_type, Body>(body) ) + , my_init_body( new internal::function_body_leaf< input_type, output_type, Body>(body) ) + { } + + continue_input( const continue_input& src ) : continue_receiver(src), + my_graph_ref(src.my_graph_ref), + my_body( src.my_init_body->clone() ), + my_init_body( src.my_init_body->clone() ) {} + + ~continue_input() { + delete my_body; + delete my_init_body; + } + + template< typename Body > + Body copy_function_object() { + function_body_type &body_ref = *my_body; + return dynamic_cast< internal::function_body_leaf & >(body_ref).get_body(); + } + + void reset_receiver( reset_flags f) __TBB_override { + continue_receiver::reset_receiver(f); + if(f & rf_reset_bodies) { + function_body_type *tmp = my_init_body->clone(); + delete my_body; + my_body = tmp; + } + } + + protected: + + graph& my_graph_ref; + function_body_type *my_body; + function_body_type *my_init_body; + + virtual broadcast_cache &successors() = 0; + + friend class apply_body_task_bypass< class_type, continue_msg >; + + //! Applies the body to the provided input + task *apply_body_bypass( input_type ) { + // There is an extra copied needed to capture the + // body execution without the try_put + tbb::internal::fgt_begin_body( my_body ); + output_type v = (*my_body)( continue_msg() ); + tbb::internal::fgt_end_body( my_body ); + return successors().try_put_task( v ); + } + + task* execute() __TBB_override { + if(!internal::is_graph_active(my_graph_ref)) { + return NULL; + } +#if _MSC_VER && !__INTEL_COMPILER +#pragma warning (push) +#pragma warning (disable: 4127) /* suppress conditional expression is constant */ +#endif + if(internal::has_policy::value) { +#if _MSC_VER && !__INTEL_COMPILER +#pragma warning (pop) +#endif + return apply_body_bypass( continue_msg() ); + } + else { + return new ( task::allocate_additional_child_of( *(my_graph_ref.root_task()) ) ) + apply_body_task_bypass< class_type, continue_msg >( + *this, __TBB_FLOW_GRAPH_PRIORITY_ARG1(continue_msg(), my_priority) ); + } + } + + graph& graph_reference() const __TBB_override { + return my_graph_ref; + } + }; // continue_input + + //! 
Implements methods for both executable and function nodes that puts Output to its successors + template< typename Output > + class function_output : public sender { + public: + + template friend struct clear_element; + typedef Output output_type; + typedef typename sender::successor_type successor_type; + typedef broadcast_cache broadcast_cache_type; +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + typedef typename sender::built_successors_type built_successors_type; + typedef typename sender::successor_list_type successor_list_type; +#endif + + function_output( graph& g) : my_graph_ref(g) { my_successors.set_owner(this); } + function_output(const function_output & other) : sender(), my_graph_ref(other.my_graph_ref) { + my_successors.set_owner(this); + } + + //! Adds a new successor to this node + bool register_successor( successor_type &r ) __TBB_override { + successors().register_successor( r ); + return true; + } + + //! Removes a successor from this node + bool remove_successor( successor_type &r ) __TBB_override { + successors().remove_successor( r ); + return true; + } + +#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION + built_successors_type &built_successors() __TBB_override { return successors().built_successors(); } + + + void internal_add_built_successor( successor_type &r) __TBB_override { + successors().internal_add_built_successor( r ); + } + + void internal_delete_built_successor( successor_type &r) __TBB_override { + successors().internal_delete_built_successor( r ); + } + + size_t successor_count() __TBB_override { + return successors().successor_count(); + } + + void copy_successors( successor_list_type &v) __TBB_override { + successors().copy_successors(v); + } +#endif /* TBB_DEPRECATED_FLOW_NODE_EXTRACTION */ + + // for multifunction_node. The function_body that implements + // the node will have an input and an output tuple of ports. To put + // an item to a successor, the body should + // + // get(output_ports).try_put(output_value); + // + // if task pointer is returned will always spawn and return true, else + // return value will be bool returned from successors.try_put. + task *try_put_task(const output_type &i) { // not a virtual method in this class + return my_successors.try_put_task(i); + } + + broadcast_cache_type &successors() { return my_successors; } + + graph& graph_reference() const { return my_graph_ref; } + protected: + broadcast_cache_type my_successors; + graph& my_graph_ref; + }; // function_output + + template< typename Output > + class multifunction_output : public function_output { + public: + typedef Output output_type; + typedef function_output base_type; + using base_type::my_successors; + + multifunction_output(graph& g) : base_type(g) {my_successors.set_owner(this);} + multifunction_output( const multifunction_output& other) : base_type(other.my_graph_ref) { my_successors.set_owner(this); } + + bool try_put(const output_type &i) { + task *res = try_put_task(i); + if(!res) return false; + if(res != SUCCESSFULLY_ENQUEUED) { + FLOW_SPAWN(*res); // TODO: Spawn task inside arena + } + return true; + } + + using base_type::graph_reference; + + protected: + + task* try_put_task(const output_type &i) { + return my_successors.try_put_task(i); + } + + template friend struct emit_element; + + }; // multifunction_output + +//composite_node +#if __TBB_FLOW_GRAPH_CPP11_FEATURES + template + void add_nodes_impl(CompositeType*, bool) {} + + template< typename CompositeType, typename NodeType1, typename... 
NodeTypes > + void add_nodes_impl(CompositeType *c_node, bool visible, const NodeType1& n1, const NodeTypes&... n) { + void *addr = const_cast(&n1); + + fgt_alias_port(c_node, addr, visible); + add_nodes_impl(c_node, visible, n...); + } +#endif + +} // internal + +#endif // __TBB__flow_graph_node_impl_H diff --git a/ohos/arm64-v8a/include/tbb/internal/_flow_graph_node_set_impl.h b/ohos/arm64-v8a/include/tbb/internal/_flow_graph_node_set_impl.h new file mode 100644 index 00000000..f5ec6d0e --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/internal/_flow_graph_node_set_impl.h @@ -0,0 +1,269 @@ +/* + Copyright (c) 2019-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_flow_graph_node_set_impl_H +#define __TBB_flow_graph_node_set_impl_H + +#ifndef __TBB_flow_graph_H +#error Do not #include this internal file directly; use public TBB headers instead. +#endif + +// Included in namespace tbb::flow::interfaceX (in flow_graph.h) + +namespace internal { + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET +// Visual Studio 2019 reports an error while calling predecessor_selector::get and successor_selector::get +// Seems like the well-formed expression in trailing decltype is treated as ill-formed +// TODO: investigate problems with decltype in trailing return types or find the cross-platform solution +#define __TBB_MSVC_DISABLE_TRAILING_DECLTYPE (_MSC_VER >= 1900) + +namespace order { +struct undefined {}; +struct following {}; +struct preceding {}; +} + +class get_graph_helper { +public: + // TODO: consider making graph_reference() public and consistent interface to get a reference to the graph + // and remove get_graph_helper + template + static graph& get(const T& object) { + return get_impl(object, std::is_base_of()); + } + +private: + // Get graph from the object of type derived from graph_node + template + static graph& get_impl(const T& object, std::true_type) { + return static_cast(&object)->my_graph; + } + + template + static graph& get_impl(const T& object, std::false_type) { + return object.graph_reference(); + } +}; + +template +struct node_set { + typedef Order order_type; + + tbb::flow::tuple nodes; + node_set(Nodes&... ns) : nodes(ns...) 
{} + + template + node_set(const node_set& set) : nodes(set.nodes) {} + + graph& graph_reference() const { + return get_graph_helper::get(std::get<0>(nodes)); + } +}; + +namespace alias_helpers { +template using output_type = typename T::output_type; +template using output_ports_type = typename T::output_ports_type; +template using input_type = typename T::input_type; +template using input_ports_type = typename T::input_ports_type; +} // namespace alias_helpers + +template +using has_output_type = tbb::internal::supports; + +template +using has_input_type = tbb::internal::supports; + +template +using has_input_ports_type = tbb::internal::supports; + +template +using has_output_ports_type = tbb::internal::supports; + +template +struct is_sender : std::is_base_of, T> {}; + +template +struct is_receiver : std::is_base_of, T> {}; + +template +struct is_async_node : std::false_type {}; + +template +struct is_async_node> : std::true_type {}; + +template +node_set +follows(FirstPredecessor& first_predecessor, Predecessors&... predecessors) { + __TBB_STATIC_ASSERT((tbb::internal::conjunction, + has_output_type...>::value), + "Not all node's predecessors has output_type typedef"); + __TBB_STATIC_ASSERT((tbb::internal::conjunction, is_sender...>::value), + "Not all node's predecessors are senders"); + return node_set(first_predecessor, predecessors...); +} + +template +node_set +follows(node_set& predecessors_set) { + __TBB_STATIC_ASSERT((tbb::internal::conjunction...>::value), + "Not all nodes in the set has output_type typedef"); + __TBB_STATIC_ASSERT((tbb::internal::conjunction...>::value), + "Not all nodes in the set are senders"); + return node_set(predecessors_set); +} + +template +node_set +precedes(FirstSuccessor& first_successor, Successors&... successors) { + __TBB_STATIC_ASSERT((tbb::internal::conjunction, + has_input_type...>::value), + "Not all node's successors has input_type typedef"); + __TBB_STATIC_ASSERT((tbb::internal::conjunction, is_receiver...>::value), + "Not all node's successors are receivers"); + return node_set(first_successor, successors...); +} + +template +node_set +precedes(node_set& successors_set) { + __TBB_STATIC_ASSERT((tbb::internal::conjunction...>::value), + "Not all nodes in the set has input_type typedef"); + __TBB_STATIC_ASSERT((tbb::internal::conjunction...>::value), + "Not all nodes in the set are receivers"); + return node_set(successors_set); +} + +template +node_set +make_node_set(Node& first_node, Nodes&... 
nodes) { + return node_set(first_node, nodes...); +} + +template +class successor_selector { + template + static auto get_impl(NodeType& node, std::true_type) -> decltype(input_port(node)) { + return input_port(node); + } + + template + static NodeType& get_impl(NodeType& node, std::false_type) { return node; } + +public: + template +#if __TBB_MSVC_DISABLE_TRAILING_DECLTYPE + static auto& get(NodeType& node) +#else + static auto get(NodeType& node) -> decltype(get_impl(node, has_input_ports_type())) +#endif + { + return get_impl(node, has_input_ports_type()); + } +}; + +template +class predecessor_selector { + template + static auto internal_get(NodeType& node, std::true_type) -> decltype(output_port(node)) { + return output_port(node); + } + + template + static NodeType& internal_get(NodeType& node, std::false_type) { return node;} + + template +#if __TBB_MSVC_DISABLE_TRAILING_DECLTYPE + static auto& get_impl(NodeType& node, std::false_type) +#else + static auto get_impl(NodeType& node, std::false_type) -> decltype(internal_get(node, has_output_ports_type())) +#endif + { + return internal_get(node, has_output_ports_type()); + } + + template + static AsyncNode& get_impl(AsyncNode& node, std::true_type) { return node; } + +public: + template +#if __TBB_MSVC_DISABLE_TRAILING_DECLTYPE + static auto& get(NodeType& node) +#else + static auto get(NodeType& node) -> decltype(get_impl(node, is_async_node())) +#endif + { + return get_impl(node, is_async_node()); + } +}; + +template +class make_edges_helper { +public: + template + static void connect_predecessors(PredecessorsTuple& predecessors, NodeType& node) { + make_edge(std::get(predecessors), successor_selector::get(node)); + make_edges_helper::connect_predecessors(predecessors, node); + } + + template + static void connect_successors(NodeType& node, SuccessorsTuple& successors) { + make_edge(predecessor_selector::get(node), std::get(successors)); + make_edges_helper::connect_successors(node, successors); + } +}; + +template<> +struct make_edges_helper<0> { + template + static void connect_predecessors(PredecessorsTuple& predecessors, NodeType& node) { + make_edge(std::get<0>(predecessors), successor_selector<0>::get(node)); + } + + template + static void connect_successors(NodeType& node, SuccessorsTuple& successors) { + make_edge(predecessor_selector<0>::get(node), std::get<0>(successors)); + } +}; + +// TODO: consider adding an overload for making edges between node sets +template +void make_edges(const node_set& s, NodeType& node) { + const std::size_t SetSize = tbb::flow::tuple_size::value; + make_edges_helper::connect_predecessors(s.nodes, node); +} + +template +void make_edges(NodeType& node, const node_set& s) { + const std::size_t SetSize = tbb::flow::tuple_size::value; + make_edges_helper::connect_successors(node, s.nodes); +} + +template +void make_edges_in_order(const node_set& ns, NodeType& node) { + make_edges(ns, node); +} + +template +void make_edges_in_order(const node_set& ns, NodeType& node) { + make_edges(node, ns); +} + +#endif // __TBB_CPP11_PRESENT + +} // namespace internal + +#endif // __TBB_flow_graph_node_set_impl_H diff --git a/ohos/arm64-v8a/include/tbb/internal/_flow_graph_nodes_deduction.h b/ohos/arm64-v8a/include/tbb/internal/_flow_graph_nodes_deduction.h new file mode 100644 index 00000000..ffdfae2e --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/internal/_flow_graph_nodes_deduction.h @@ -0,0 +1,270 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the 
"License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_flow_graph_nodes_deduction_H +#define __TBB_flow_graph_nodes_deduction_H + +#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT + +namespace tbb { +namespace flow { +namespace interface11 { + +template +struct declare_body_types { + using input_type = Input; + using output_type = Output; +}; + +template struct body_types; + +template +struct body_types : declare_body_types {}; + +template +struct body_types : declare_body_types {}; + +template +struct body_types : declare_body_types {}; + +template +struct body_types : declare_body_types {}; + +template +struct body_types : declare_body_types {}; + +template +struct body_types : declare_body_types {}; + +template +using input_t = typename body_types::input_type; + +template +using output_t = typename body_types::output_type; + +template +auto decide_on_operator_overload(Output (T::*name)(const Input&) const)->decltype(name); + +template +auto decide_on_operator_overload(Output (T::*name)(const Input&))->decltype(name); + +template +auto decide_on_operator_overload(Output (T::*name)(Input&) const)->decltype(name); + +template +auto decide_on_operator_overload(Output (T::*name)(Input&))->decltype(name); + +template +auto decide_on_operator_overload(Output (*name)(const Input&))->decltype(name); + +template +auto decide_on_operator_overload(Output (*name)(Input&))->decltype(name); + +template +decltype(decide_on_operator_overload(&Body::operator())) decide_on_callable_type(int); + +template +decltype(decide_on_operator_overload(std::declval())) decide_on_callable_type(...); + +// Deduction guides for Flow Graph nodes +#if TBB_USE_SOURCE_NODE_AS_ALIAS +template +source_node(GraphOrSet&&, Body) +->source_node(0))>>; +#else +template +source_node(GraphOrSet&&, Body, bool = true) +->source_node(0))>>; +#endif + +template +input_node(GraphOrSet&&, Body, bool = true) +->input_node(0))>>; + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + +template +struct decide_on_set; + +template +struct decide_on_set> { + using type = typename Node::output_type; +}; + +template +struct decide_on_set> { + using type = typename Node::input_type; +}; + +template +using decide_on_set_t = typename decide_on_set>::type; + +template +broadcast_node(const NodeSet&) +->broadcast_node>; + +template +buffer_node(const NodeSet&) +->buffer_node>; + +template +queue_node(const NodeSet&) +->queue_node>; +#endif // __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + +template +sequencer_node(GraphOrProxy&&, Sequencer) +->sequencer_node(0))>>; + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET +template +priority_queue_node(const NodeSet&, const Compare&) +->priority_queue_node, Compare>; + +template +priority_queue_node(const NodeSet&) +->priority_queue_node, std::less>>; +#endif // __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + +template +struct join_key { + using type = Key; +}; + +template +struct join_key { + using type = T&; +}; + +template +using join_key_t = typename join_key::type; + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET +template +join_node(const node_set&, Policy) +->join_node, + Policy>; + +template +join_node(const 
node_set&, Policy) +->join_node; + +template +join_node(const node_set) +->join_node, + queueing>; + +template +join_node(const node_set) +->join_node; +#endif + +template +join_node(GraphOrProxy&&, Body, Bodies...) +->join_node(0))>, + input_t(0))>...>, + key_matching(0))>>>>; + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET +template +indexer_node(const node_set&) +->indexer_node; +#endif + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET +template +limiter_node(const NodeSet&, size_t) +->limiter_node>; + +template +split_node(const node_set&) +->split_node; + +template +split_node(const node_set&) +->split_node>; + +#endif + +template +function_node(GraphOrSet&&, + size_t, Body, + __TBB_FLOW_GRAPH_PRIORITY_ARG1(Policy, node_priority_t = tbb::flow::internal::no_priority)) +->function_node(0))>, + output_t(0))>, + Policy>; + +template +function_node(GraphOrSet&&, size_t, + __TBB_FLOW_GRAPH_PRIORITY_ARG1(Body, node_priority_t = tbb::flow::internal::no_priority)) +->function_node(0))>, + output_t(0))>, + queueing>; + +template +struct continue_output { + using type = Output; +}; + +template <> +struct continue_output { + using type = continue_msg; +}; + +template +using continue_output_t = typename continue_output::type; + +template +continue_node(GraphOrSet&&, Body, + __TBB_FLOW_GRAPH_PRIORITY_ARG1(Policy, node_priority_t = tbb::flow::internal::no_priority)) +->continue_node>, + Policy>; + +template +continue_node(GraphOrSet&&, + int, Body, + __TBB_FLOW_GRAPH_PRIORITY_ARG1(Policy, node_priority_t = tbb::flow::internal::no_priority)) +->continue_node>, + Policy>; + +template +continue_node(GraphOrSet&&, + __TBB_FLOW_GRAPH_PRIORITY_ARG1(Body, node_priority_t = tbb::flow::internal::no_priority)) +->continue_node>, + internal::Policy>; + +template +continue_node(GraphOrSet&&, int, + __TBB_FLOW_GRAPH_PRIORITY_ARG1(Body, node_priority_t = tbb::flow::internal::no_priority)) +->continue_node>, + internal::Policy>; + +#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET + +template +overwrite_node(const NodeSet&) +->overwrite_node>; + +template +write_once_node(const NodeSet&) +->write_once_node>; +#endif // __TBB_PREVIEW_FLOW_GRAPH_NODE_SET +} // namespace interfaceX +} // namespace flow +} // namespace tbb + +#endif // __TBB_CPP17_DEDUCTION_GUIDES_PRESENT +#endif // __TBB_flow_graph_nodes_deduction_H diff --git a/ohos/arm64-v8a/include/tbb/internal/_flow_graph_streaming_node.h b/ohos/arm64-v8a/include/tbb/internal/_flow_graph_streaming_node.h new file mode 100644 index 00000000..4ef990bf --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/internal/_flow_graph_streaming_node.h @@ -0,0 +1,743 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_flow_graph_streaming_H +#define __TBB_flow_graph_streaming_H + +#ifndef __TBB_flow_graph_H +#error Do not #include this internal file directly; use public TBB headers instead. 
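A minimal sketch of what the deduction guides above (from _flow_graph_nodes_deduction.h) enable, assuming a C++17 compiler where __TBB_CPP17_DEDUCTION_GUIDES_PRESENT is defined; the node names and lambda bodies are illustrative only, but the pattern is the one the guides are written for -- input and output types are recovered from the body's call operator via decide_on_callable_type / body_types:

#include <iostream>
#include "tbb/flow_graph.h"

int main() {
    tbb::flow::graph g;

    // Deduced as function_node<int, double>: the guide inspects the lambda's operator().
    tbb::flow::function_node square(g, tbb::flow::unlimited,
                                    [](const int& v) { return double(v) * v; });

    // Deduced as function_node<double, int>.
    tbb::flow::function_node print(g, tbb::flow::serial, [](const double& v) {
        std::cout << v << '\n';
        return 0;
    });

    tbb::flow::make_edge(square, print);
    square.try_put(3);
    g.wait_for_all();
    return 0;
}

When __TBB_PREVIEW_FLOW_GRAPH_NODE_SET is also enabled, the node_set helpers from _flow_graph_node_set_impl.h compose with these guides: a node may be constructed from follows(square) instead of g, and its predecessor edges are then made automatically by make_edges_in_order.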
+#endif + +#if __TBB_PREVIEW_STREAMING_NODE + +// Included in namespace tbb::flow::interfaceX (in flow_graph.h) + +namespace internal { + +template +struct port_ref_impl { + // "+1" since the port_ref range is a closed interval (includes its endpoints). + static const int size = N2 - N1 + 1; +}; + +} // internal + +// The purpose of the port_ref_impl is the pretty syntax: the deduction of a compile-time constant is processed from the return type. +// So it is possible to use this helper without parentheses, e.g. "port_ref<0>". +template +__TBB_DEPRECATED internal::port_ref_impl port_ref() { + return internal::port_ref_impl(); +}; + +namespace internal { + +template +struct num_arguments { + static const int value = 1; +}; + +template +struct num_arguments(*)()> { + static const int value = port_ref_impl::size; +}; + +template +struct num_arguments> { + static const int value = port_ref_impl::size; +}; + +template +void ignore_return_values( Args&&... ) {} + +template +T or_return_values( T&& t ) { return t; } +template +T or_return_values( T&& t, Rest&&... rest ) { + return t | or_return_values( std::forward(rest)... ); +} + +template +struct key_from_policy { + typedef size_t type; + typedef std::false_type is_key_matching; +}; + +template +struct key_from_policy< key_matching > { + typedef Key type; + typedef std::true_type is_key_matching; +}; + +template +struct key_from_policy< key_matching > { + typedef const Key &type; + typedef std::true_type is_key_matching; +}; + +template +class streaming_device_with_key { + Device my_device; + typename std::decay::type my_key; +public: + // TODO: investigate why default constructor is required + streaming_device_with_key() {} + streaming_device_with_key( const Device& d, Key k ) : my_device( d ), my_key( k ) {} + Key key() const { return my_key; } + const Device& device() const { return my_device; } +}; + +// --------- Kernel argument helpers --------- // +template +struct is_port_ref_impl { + typedef std::false_type type; +}; + +template +struct is_port_ref_impl< port_ref_impl > { + typedef std::true_type type; +}; + +template +struct is_port_ref_impl< port_ref_impl( * )() > { + typedef std::true_type type; +}; + +template +struct is_port_ref { + typedef typename is_port_ref_impl< typename tbb::internal::strip::type >::type type; +}; + +template +struct convert_and_call_impl; + +template +struct convert_and_call_impl { + static const size_t my_delta = 1; // Index 0 contains device + + template + static void doit(F& f, Tuple& t, A1& a1, Args1&... args1, Args2&... args2) { + convert_and_call_impl::doit_impl(typename is_port_ref::type(), f, t, a1, args1..., args2...); + } + template + static void doit_impl(std::false_type, F& f, Tuple& t, A1& a1, Args1&... args1, Args2&... args2) { + convert_and_call_impl::doit(f, t, args1..., args2..., a1); + } + template + static void doit_impl(std::true_type x, F& f, Tuple& t, port_ref_impl, Args1&... args1, Args2&... args2) { + convert_and_call_impl, Args1...>::doit_impl(x, f, t, port_ref(), args1..., + args2..., std::get(t)); + } + template + static void doit_impl(std::true_type, F& f, Tuple& t, port_ref_impl, Args1&... args1, Args2&... args2) { + convert_and_call_impl::doit(f, t, args1..., args2..., std::get(t)); + } + + template + static void doit_impl(std::true_type x, F& f, Tuple& t, port_ref_impl(* fn)(), Args1&... args1, Args2&... args2) { + doit_impl(x, f, t, fn(), args1..., args2...); + } + template + static void doit_impl(std::true_type x, F& f, Tuple& t, port_ref_impl(* fn)(), Args1&... 
args1, Args2&... args2) { + doit_impl(x, f, t, fn(), args1..., args2...); + } +}; + +template <> +struct convert_and_call_impl<> { + template + static void doit(F& f, Tuple&, Args2&... args2) { + f(args2...); + } +}; +// ------------------------------------------- // + +template +struct streaming_node_traits { + // Do not use 'using' instead of 'struct' because Microsoft Visual C++ 12.0 fails to compile. + template + struct async_msg_type { + typedef typename StreamFactory::template async_msg_type type; + }; + + typedef tuple< typename async_msg_type::type... > input_tuple; + typedef input_tuple output_tuple; + typedef tuple< streaming_device_with_key< typename StreamFactory::device_type, typename key_from_policy::type >, + typename async_msg_type::type... > kernel_input_tuple; + + // indexer_node parameters pack expansion workaround for VS2013 for streaming_node + typedef indexer_node< typename async_msg_type::type... > indexer_node_type; +}; + +// Default empty implementation +template +class kernel_executor_helper { + typedef typename StreamFactory::device_type device_type; + typedef typename StreamFactory::kernel_type kernel_type; + typedef KernelInputTuple kernel_input_tuple; +protected: + template + void enqueue_kernel_impl( kernel_input_tuple&, StreamFactory& factory, device_type device, const kernel_type& kernel, Args&... args ) const { + factory.send_kernel( device, kernel, args... ); + } +}; + +// Implementation for StreamFactory supporting range +template +class kernel_executor_helper::type > { + typedef typename StreamFactory::device_type device_type; + typedef typename StreamFactory::kernel_type kernel_type; + typedef KernelInputTuple kernel_input_tuple; + + typedef typename StreamFactory::range_type range_type; + + // Container for randge. It can contain either port references or real range. + struct range_wrapper { + virtual range_type get_range( const kernel_input_tuple &ip ) const = 0; + virtual range_wrapper *clone() const = 0; + virtual ~range_wrapper() {} + }; + + struct range_value : public range_wrapper { + range_value( const range_type& value ) : my_value(value) {} + + range_value( range_type&& value ) : my_value(std::move(value)) {} + + range_type get_range( const kernel_input_tuple & ) const __TBB_override { + return my_value; + } + + range_wrapper *clone() const __TBB_override { + return new range_value(my_value); + } + private: + range_type my_value; + }; + + template + struct range_mapper : public range_wrapper { + range_mapper() {} + + range_type get_range( const kernel_input_tuple &ip ) const __TBB_override { + // "+1" since get<0>(ip) is StreamFactory::device. + return get(ip).data(false); + } + + range_wrapper *clone() const __TBB_override { + return new range_mapper; + } + }; + +protected: + template + void enqueue_kernel_impl( kernel_input_tuple& ip, StreamFactory& factory, device_type device, const kernel_type& kernel, Args&... args ) const { + __TBB_ASSERT(my_range_wrapper, "Range is not set. Call set_range() before running streaming_node."); + factory.send_kernel( device, kernel, my_range_wrapper->get_range(ip), args... ); + } + +public: + kernel_executor_helper() : my_range_wrapper(NULL) {} + + kernel_executor_helper(const kernel_executor_helper& executor) : my_range_wrapper(executor.my_range_wrapper ? 
executor.my_range_wrapper->clone() : NULL) {} + + kernel_executor_helper(kernel_executor_helper&& executor) : my_range_wrapper(executor.my_range_wrapper) { + // Set moving holder mappers to NULL to prevent double deallocation + executor.my_range_wrapper = NULL; + } + + ~kernel_executor_helper() { + if (my_range_wrapper) delete my_range_wrapper; + } + + void set_range(const range_type& work_size) { + my_range_wrapper = new range_value(work_size); + } + + void set_range(range_type&& work_size) { + my_range_wrapper = new range_value(std::move(work_size)); + } + + template + void set_range(port_ref_impl) { + my_range_wrapper = new range_mapper; + } + + template + void set_range(port_ref_impl(*)()) { + my_range_wrapper = new range_mapper; + } + +private: + range_wrapper* my_range_wrapper; +}; + +} // internal + +/* +/---------------------------------------- streaming_node ------------------------------------\ +| | +| /--------------\ /----------------------\ /-----------\ /----------------------\ | +| | | | (device_with_key) O---O | | | | +| | | | | | | | | | +O---O indexer_node O---O device_selector_node O---O join_node O---O kernel_node O---O +| | | | (multifunction_node) | | | | (multifunction_node) | | +O---O | | O---O | | O---O +| \--------------/ \----------------------/ \-----------/ \----------------------/ | +| | +\--------------------------------------------------------------------------------------------/ +*/ +template +class __TBB_DEPRECATED streaming_node; + +template +class __TBB_DEPRECATED +streaming_node< tuple, JP, StreamFactory > + : public composite_node < typename internal::streaming_node_traits::input_tuple, + typename internal::streaming_node_traits::output_tuple > + , public internal::kernel_executor_helper< StreamFactory, typename internal::streaming_node_traits::kernel_input_tuple > +{ + typedef typename internal::streaming_node_traits::input_tuple input_tuple; + typedef typename internal::streaming_node_traits::output_tuple output_tuple; + typedef typename internal::key_from_policy::type key_type; +protected: + typedef typename StreamFactory::device_type device_type; + typedef typename StreamFactory::kernel_type kernel_type; +private: + typedef internal::streaming_device_with_key device_with_key_type; + typedef composite_node base_type; + static const size_t NUM_INPUTS = tuple_size::value; + static const size_t NUM_OUTPUTS = tuple_size::value; + + typedef typename internal::make_sequence::type input_sequence; + typedef typename internal::make_sequence::type output_sequence; + + typedef typename internal::streaming_node_traits::indexer_node_type indexer_node_type; + typedef typename indexer_node_type::output_type indexer_node_output_type; + typedef typename internal::streaming_node_traits::kernel_input_tuple kernel_input_tuple; + typedef multifunction_node device_selector_node; + typedef multifunction_node kernel_multifunction_node; + + template + typename base_type::input_ports_type get_input_ports( internal::sequence ) { + return std::tie( internal::input_port( my_indexer_node )... ); + } + + template + typename base_type::output_ports_type get_output_ports( internal::sequence ) { + return std::tie( internal::output_port( my_kernel_node )... 
); + } + + typename base_type::input_ports_type get_input_ports() { + return get_input_ports( input_sequence() ); + } + + typename base_type::output_ports_type get_output_ports() { + return get_output_ports( output_sequence() ); + } + + template + int make_Nth_edge() { + make_edge( internal::output_port( my_device_selector_node ), internal::input_port( my_join_node ) ); + return 0; + } + + template + void make_edges( internal::sequence ) { + make_edge( my_indexer_node, my_device_selector_node ); + make_edge( my_device_selector_node, my_join_node ); + internal::ignore_return_values( make_Nth_edge()... ); + make_edge( my_join_node, my_kernel_node ); + } + + void make_edges() { + make_edges( input_sequence() ); + } + + class device_selector_base { + public: + virtual void operator()( const indexer_node_output_type &v, typename device_selector_node::output_ports_type &op ) = 0; + virtual device_selector_base *clone( streaming_node &n ) const = 0; + virtual ~device_selector_base() {} + }; + + template + class device_selector : public device_selector_base, tbb::internal::no_assign { + public: + device_selector( UserFunctor uf, streaming_node &n, StreamFactory &f ) + : my_dispatch_funcs( create_dispatch_funcs( input_sequence() ) ) + , my_user_functor( uf ), my_node(n), my_factory( f ) + { + my_port_epoches.fill( 0 ); + } + + void operator()( const indexer_node_output_type &v, typename device_selector_node::output_ports_type &op ) __TBB_override { + (this->*my_dispatch_funcs[ v.tag() ])( my_port_epoches[ v.tag() ], v, op ); + __TBB_ASSERT( (tbb::internal::is_same_type::is_key_matching, std::false_type>::value) + || my_port_epoches[v.tag()] == 0, "Epoch is changed when key matching is requested" ); + } + + device_selector_base *clone( streaming_node &n ) const __TBB_override { + return new device_selector( my_user_functor, n, my_factory ); + } + private: + typedef void(device_selector::*send_and_put_fn_type)(size_t &, const indexer_node_output_type &, typename device_selector_node::output_ports_type &); + typedef std::array < send_and_put_fn_type, NUM_INPUTS > dispatch_funcs_type; + + template + static dispatch_funcs_type create_dispatch_funcs( internal::sequence ) { + dispatch_funcs_type dispatch = { { &device_selector::send_and_put_impl... 
} }; + return dispatch; + } + + template + key_type get_key( std::false_type, const T &, size_t &epoch ) { + __TBB_STATIC_ASSERT( (tbb::internal::is_same_type::value), "" ); + return epoch++; + } + + template + key_type get_key( std::true_type, const T &t, size_t &/*epoch*/ ) { + using tbb::flow::key_from_message; + return key_from_message( t ); + } + + template + void send_and_put_impl( size_t &epoch, const indexer_node_output_type &v, typename device_selector_node::output_ports_type &op ) { + typedef typename tuple_element::type::output_type elem_type; + elem_type e = internal::cast_to( v ); + device_type device = get_device( get_key( typename internal::key_from_policy::is_key_matching(), e, epoch ), get<0>( op ) ); + my_factory.send_data( device, e ); + get( op ).try_put( e ); + } + + template< typename DevicePort > + device_type get_device( key_type key, DevicePort& dp ) { + typename std::unordered_map::type, epoch_desc>::iterator it = my_devices.find( key ); + if ( it == my_devices.end() ) { + device_type d = my_user_functor( my_factory ); + std::tie( it, std::ignore ) = my_devices.insert( std::make_pair( key, d ) ); + bool res = dp.try_put( device_with_key_type( d, key ) ); + __TBB_ASSERT_EX( res, NULL ); + my_node.notify_new_device( d ); + } + epoch_desc &e = it->second; + device_type d = e.my_device; + if ( ++e.my_request_number == NUM_INPUTS ) my_devices.erase( it ); + return d; + } + + struct epoch_desc { + epoch_desc(device_type d ) : my_device( d ), my_request_number( 0 ) {} + device_type my_device; + size_t my_request_number; + }; + + std::unordered_map::type, epoch_desc> my_devices; + std::array my_port_epoches; + dispatch_funcs_type my_dispatch_funcs; + UserFunctor my_user_functor; + streaming_node &my_node; + StreamFactory &my_factory; + }; + + class device_selector_body { + public: + device_selector_body( device_selector_base *d ) : my_device_selector( d ) {} + + void operator()( const indexer_node_output_type &v, typename device_selector_node::output_ports_type &op ) { + (*my_device_selector)(v, op); + } + private: + device_selector_base *my_device_selector; + }; + + // TODO: investigate why copy-construction is disallowed + class args_storage_base : tbb::internal::no_copy { + public: + typedef typename kernel_multifunction_node::output_ports_type output_ports_type; + + virtual void enqueue( kernel_input_tuple &ip, output_ports_type &op, const streaming_node &n ) = 0; + virtual void send( device_type d ) = 0; + virtual args_storage_base *clone() const = 0; + virtual ~args_storage_base () {} + + protected: + args_storage_base( const kernel_type& kernel, StreamFactory &f ) + : my_kernel( kernel ), my_factory( f ) + {} + + args_storage_base( const args_storage_base &k ) + : tbb::internal::no_copy(), my_kernel( k.my_kernel ), my_factory( k.my_factory ) + {} + + const kernel_type my_kernel; + StreamFactory &my_factory; + }; + + template + class args_storage : public args_storage_base { + typedef typename args_storage_base::output_ports_type output_ports_type; + + // ---------- Update events helpers ---------- // + template + bool do_try_put( const kernel_input_tuple& ip, output_ports_type &op ) const { + const auto& t = get( ip ); + auto &port = get( op ); + return port.try_put( t ); + } + + template + bool do_try_put( const kernel_input_tuple& ip, output_ports_type &op, internal::sequence ) const { + return internal::or_return_values( do_try_put( ip, op )... 
); + } + + // ------------------------------------------- // + class run_kernel_func : tbb::internal::no_assign { + public: + run_kernel_func( kernel_input_tuple &ip, const streaming_node &node, const args_storage& storage ) + : my_kernel_func( ip, node, storage, get<0>(ip).device() ) {} + + // It is immpossible to use Args... because a function pointer cannot be casted to a function reference implicitly. + // Allow the compiler to deduce types for function pointers automatically. + template + void operator()( FnArgs&... args ) { + internal::convert_and_call_impl::doit( my_kernel_func, my_kernel_func.my_ip, args... ); + } + private: + struct kernel_func : tbb::internal::no_copy { + kernel_input_tuple &my_ip; + const streaming_node &my_node; + const args_storage& my_storage; + device_type my_device; + + kernel_func( kernel_input_tuple &ip, const streaming_node &node, const args_storage& storage, device_type device ) + : my_ip( ip ), my_node( node ), my_storage( storage ), my_device( device ) + {} + + template + void operator()( FnArgs&... args ) { + my_node.enqueue_kernel( my_ip, my_storage.my_factory, my_device, my_storage.my_kernel, args... ); + } + } my_kernel_func; + }; + + template + class run_finalize_func : tbb::internal::no_assign { + public: + run_finalize_func( kernel_input_tuple &ip, StreamFactory &factory, FinalizeFn fn ) + : my_ip( ip ), my_finalize_func( factory, get<0>(ip).device(), fn ) {} + + // It is immpossible to use Args... because a function pointer cannot be casted to a function reference implicitly. + // Allow the compiler to deduce types for function pointers automatically. + template + void operator()( FnArgs&... args ) { + internal::convert_and_call_impl::doit( my_finalize_func, my_ip, args... ); + } + private: + kernel_input_tuple &my_ip; + + struct finalize_func : tbb::internal::no_assign { + StreamFactory &my_factory; + device_type my_device; + FinalizeFn my_fn; + + finalize_func( StreamFactory &factory, device_type device, FinalizeFn fn ) + : my_factory(factory), my_device(device), my_fn(fn) {} + + template + void operator()( FnArgs&... args ) { + my_factory.finalize( my_device, my_fn, args... ); + } + } my_finalize_func; + }; + + template + static run_finalize_func make_run_finalize_func( kernel_input_tuple &ip, StreamFactory &factory, FinalizeFn fn ) { + return run_finalize_func( ip, factory, fn ); + } + + class send_func : tbb::internal::no_assign { + public: + send_func( StreamFactory &factory, device_type d ) + : my_factory(factory), my_device( d ) {} + + template + void operator()( FnArgs&... args ) { + my_factory.send_data( my_device, args... ); + } + private: + StreamFactory &my_factory; + device_type my_device; + }; + + public: + args_storage( const kernel_type& kernel, StreamFactory &f, Args&&... args ) + : args_storage_base( kernel, f ) + , my_args_pack( std::forward(args)... ) + {} + + args_storage( const args_storage &k ) : args_storage_base( k ), my_args_pack( k.my_args_pack ) {} + + args_storage( const args_storage_base &k, Args&&... args ) : args_storage_base( k ), my_args_pack( std::forward(args)... 
) {} + + void enqueue( kernel_input_tuple &ip, output_ports_type &op, const streaming_node &n ) __TBB_override { + // Make const qualified args_pack (from non-const) + const args_pack_type& const_args_pack = my_args_pack; + // factory.enqure_kernel() gets + // - 'ip' tuple elements by reference and updates it (and 'ip') with dependencies + // - arguments (from my_args_pack) by const-reference via const_args_pack + tbb::internal::call( run_kernel_func( ip, n, *this ), const_args_pack ); + + if (! do_try_put( ip, op, input_sequence() ) ) { + graph& g = n.my_graph; + // No one message was passed to successors so set a callback to extend the graph lifetime until the kernel completion. + g.increment_wait_count(); + + // factory.finalize() gets + // - 'ip' tuple elements by reference, so 'ip' might be changed + // - arguments (from my_args_pack) by const-reference via const_args_pack + tbb::internal::call( make_run_finalize_func(ip, this->my_factory, [&g] { + g.decrement_wait_count(); + }), const_args_pack ); + } + } + + void send( device_type d ) __TBB_override { + // factory.send() gets arguments by reference and updates these arguments with dependencies + // (it gets but usually ignores port_ref-s) + tbb::internal::call( send_func( this->my_factory, d ), my_args_pack ); + } + + args_storage_base *clone() const __TBB_override { + // Create new args_storage with copying constructor. + return new args_storage( *this ); + } + + private: + typedef tbb::internal::stored_pack args_pack_type; + args_pack_type my_args_pack; + }; + + // Body for kernel_multifunction_node. + class kernel_body : tbb::internal::no_assign { + public: + kernel_body( const streaming_node &node ) : my_node( node ) {} + + void operator()( kernel_input_tuple ip, typename args_storage_base::output_ports_type &op ) { + __TBB_ASSERT( (my_node.my_args_storage != NULL), "No arguments storage" ); + // 'ip' is passed by value to create local copy for updating inside enqueue_kernel() + my_node.my_args_storage->enqueue( ip, op, my_node ); + } + private: + const streaming_node &my_node; + }; + + template ::type > + struct wrap_to_async { + typedef T type; // Keep port_ref as it is + }; + + template + struct wrap_to_async { + typedef typename StreamFactory::template async_msg_type< typename tbb::internal::strip::type > type; + }; + + template + args_storage_base *make_args_storage(const args_storage_base& storage, Args&&... args) const { + // In this variadic template convert all simple types 'T' into 'async_msg_type' + return new args_storage(storage, std::forward(args)...); + } + + void notify_new_device( device_type d ) { + my_args_storage->send( d ); + } + + template + void enqueue_kernel( kernel_input_tuple& ip, StreamFactory& factory, device_type device, const kernel_type& kernel, Args&... args ) const { + this->enqueue_kernel_impl( ip, factory, device, kernel, args... ); + } + +public: + template + streaming_node( graph &g, const kernel_type& kernel, DeviceSelector d, StreamFactory &f ) + : base_type( g ) + , my_indexer_node( g ) + , my_device_selector( new device_selector( d, *this, f ) ) + , my_device_selector_node( g, serial, device_selector_body( my_device_selector ) ) + , my_join_node( g ) + , my_kernel_node( g, serial, kernel_body( *this ) ) + // By default, streaming_node maps all its ports to the kernel arguments on a one-to-one basis. 
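streaming_node assembles the indexer, device-selector, join and kernel nodes from the diagram above inside a composite_node and re-exports only their outermost ports via set_external_ports. A reduced sketch of that composition pattern with ordinary nodes, using the public composite_node API rather than the streaming machinery (the class name and bodies here are illustrative, not part of the library):

#include "tbb/flow_graph.h"

// Wraps two internal function_nodes behind a single-input/single-output facade,
// the same set_external_ports technique streaming_node uses.
class scale_then_offset
    : public tbb::flow::composite_node<tbb::flow::tuple<int>, tbb::flow::tuple<int> > {
    typedef tbb::flow::composite_node<tbb::flow::tuple<int>, tbb::flow::tuple<int> > base_type;
    tbb::flow::function_node<int, int> my_scale;
    tbb::flow::function_node<int, int> my_offset;
public:
    scale_then_offset(tbb::flow::graph& g)
        : base_type(g)
        , my_scale(g, tbb::flow::unlimited, [](int v) { return 2 * v; })
        , my_offset(g, tbb::flow::unlimited, [](int v) { return v + 1; })
    {
        tbb::flow::make_edge(my_scale, my_offset);
        // Expose my_scale as the composite's input port and my_offset as its output port.
        base_type::set_external_ports(base_type::input_ports_type(my_scale),
                                      base_type::output_ports_type(my_offset));
    }
};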
+ , my_args_storage( make_args_storage( args_storage<>(kernel, f), port_ref<0, NUM_INPUTS - 1>() ) ) + { + base_type::set_external_ports( get_input_ports(), get_output_ports() ); + make_edges(); + } + + streaming_node( const streaming_node &node ) + : base_type( node.my_graph ) + , my_indexer_node( node.my_indexer_node ) + , my_device_selector( node.my_device_selector->clone( *this ) ) + , my_device_selector_node( node.my_graph, serial, device_selector_body( my_device_selector ) ) + , my_join_node( node.my_join_node ) + , my_kernel_node( node.my_graph, serial, kernel_body( *this ) ) + , my_args_storage( node.my_args_storage->clone() ) + { + base_type::set_external_ports( get_input_ports(), get_output_ports() ); + make_edges(); + } + + streaming_node( streaming_node &&node ) + : base_type( node.my_graph ) + , my_indexer_node( std::move( node.my_indexer_node ) ) + , my_device_selector( node.my_device_selector->clone(*this) ) + , my_device_selector_node( node.my_graph, serial, device_selector_body( my_device_selector ) ) + , my_join_node( std::move( node.my_join_node ) ) + , my_kernel_node( node.my_graph, serial, kernel_body( *this ) ) + , my_args_storage( node.my_args_storage ) + { + base_type::set_external_ports( get_input_ports(), get_output_ports() ); + make_edges(); + // Set moving node mappers to NULL to prevent double deallocation. + node.my_args_storage = NULL; + } + + ~streaming_node() { + if ( my_args_storage ) delete my_args_storage; + if ( my_device_selector ) delete my_device_selector; + } + + template + void set_args( Args&&... args ) { + // Copy the base class of args_storage and create new storage for "Args...". + args_storage_base * const new_args_storage = make_args_storage( *my_args_storage, typename wrap_to_async::type(std::forward(args))...); + delete my_args_storage; + my_args_storage = new_args_storage; + } + +protected: + void reset_node( reset_flags = rf_reset_protocol ) __TBB_override { __TBB_ASSERT( false, "Not implemented yet" ); } + +private: + indexer_node_type my_indexer_node; + device_selector_base *my_device_selector; + device_selector_node my_device_selector_node; + join_node my_join_node; + kernel_multifunction_node my_kernel_node; + + args_storage_base *my_args_storage; +}; + +#endif // __TBB_PREVIEW_STREAMING_NODE +#endif // __TBB_flow_graph_streaming_H diff --git a/ohos/arm64-v8a/include/tbb/internal/_flow_graph_tagged_buffer_impl.h b/ohos/arm64-v8a/include/tbb/internal/_flow_graph_tagged_buffer_impl.h new file mode 100644 index 00000000..92291129 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/internal/_flow_graph_tagged_buffer_impl.h @@ -0,0 +1,249 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// a hash table buffer that can expand, and can support as many deletions as +// additions, list-based, with elements of list held in array (for destruction +// management), multiplicative hashing (like ets). No synchronization built-in. 
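A standalone, simplified sketch of the scheme just described may help: a chained hash table whose nodes are pooled and recycled through a free list, with the power-of-two bucket array doubling once the element count exceeds half the bucket count. This is an illustration only; the real hash_buffer below additionally routes key extraction through a ValueToKey functor, hashing and equality through HashCompare, and manages its arrays with an allocator for exception safety. The sketch assumes default-constructible Key and Value and, unlike insert_with_key, does not replace an existing entry.

#include <cstddef>
#include <deque>
#include <functional>
#include <vector>

template <typename Key, typename Value, typename Hash = std::hash<Key> >
class chained_hash_sketch {
    struct node { Key key; Value value; node* next; };

    std::vector<node*> my_buckets;   // chain heads; size is always a power of two
    std::deque<node>   my_pool;      // node storage with stable addresses
    node*              my_free_list; // nodes recycled by erase()
    std::size_t        my_count;
    Hash               my_hash;

    std::size_t slot(const Key& k, std::size_t nbuckets) const {
        return my_hash(k) & (nbuckets - 1);
    }

    void grow() {
        std::vector<node*> bigger(my_buckets.size() * 2); // zero-initialized heads
        for (std::size_t b = 0; b < my_buckets.size(); ++b) {
            for (node* n = my_buckets[b]; n; ) {          // relink every chain in place
                node* next = n->next;
                std::size_t s = slot(n->key, bigger.size());
                n->next = bigger[s];
                bigger[s] = n;
                n = next;
            }
        }
        my_buckets.swap(bigger);
    }

public:
    chained_hash_sketch() : my_buckets(8), my_free_list(NULL), my_count(0) {}

    void insert(const Key& k, const Value& v) {
        if (2 * (my_count + 1) > my_buckets.size()) grow();
        node* n = my_free_list;
        if (n) { my_free_list = n->next; }
        else   { my_pool.push_back(node()); n = &my_pool.back(); }
        n->key = k; n->value = v;
        std::size_t s = slot(k, my_buckets.size());
        n->next = my_buckets[s];
        my_buckets[s] = n;
        ++my_count;
    }

    bool find(const Key& k, Value& out) const {
        for (node* n = my_buckets[slot(k, my_buckets.size())]; n; n = n->next)
            if (n->key == k) { out = n->value; return true; }
        return false;
    }

    bool erase(const Key& k) {
        std::size_t s = slot(k, my_buckets.size());
        node* prev = NULL;
        for (node* n = my_buckets[s]; n; prev = n, n = n->next) {
            if (n->key == k) {
                if (prev) prev->next = n->next; else my_buckets[s] = n->next;
                n->next = my_free_list;          // return the node to the pool
                my_free_list = n;
                --my_count;
                return true;
            }
        }
        return false;
    }
};

Threading the free list through the nodes' own next pointers is the same trick hash_buffer plays below, where element_type's second field serves both as the chain link and as the free-list link.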
+// + +#ifndef __TBB__flow_graph_hash_buffer_impl_H +#define __TBB__flow_graph_hash_buffer_impl_H + +#ifndef __TBB_flow_graph_H +#error Do not #include this internal file directly; use public TBB headers instead. +#endif + +// included in namespace tbb::flow::interfaceX::internal + +// elements in the table are a simple list; we need pointer to next element to +// traverse the chain +template +struct buffer_element_type { + // the second parameter below is void * because we can't forward-declare the type + // itself, so we just reinterpret_cast below. + typedef typename aligned_pair::type type; +}; + +template + < + typename Key, // type of key within ValueType + typename ValueType, + typename ValueToKey, // abstract method that returns "const Key" or "const Key&" given ValueType + typename HashCompare, // has hash and equal + typename Allocator=tbb::cache_aligned_allocator< typename aligned_pair::type > + > +class hash_buffer : public HashCompare { +public: + static const size_t INITIAL_SIZE = 8; // initial size of the hash pointer table + typedef ValueType value_type; + typedef typename buffer_element_type< value_type >::type element_type; + typedef value_type *pointer_type; + typedef element_type *list_array_type; // array we manage manually + typedef list_array_type *pointer_array_type; + typedef typename Allocator::template rebind::other pointer_array_allocator_type; + typedef typename Allocator::template rebind::other elements_array_allocator; + typedef typename tbb::internal::strip::type Knoref; + +private: + ValueToKey *my_key; + size_t my_size; + size_t nelements; + pointer_array_type pointer_array; // pointer_array[my_size] + list_array_type elements_array; // elements_array[my_size / 2] + element_type* free_list; + + size_t mask() { return my_size - 1; } + + void set_up_free_list( element_type **p_free_list, list_array_type la, size_t sz) { + for(size_t i=0; i < sz - 1; ++i ) { // construct free list + la[i].second = &(la[i+1]); + } + la[sz-1].second = NULL; + *p_free_list = (element_type *)&(la[0]); + } + + // cleanup for exceptions + struct DoCleanup { + pointer_array_type *my_pa; + list_array_type *my_elements; + size_t my_size; + + DoCleanup(pointer_array_type &pa, list_array_type &my_els, size_t sz) : + my_pa(&pa), my_elements(&my_els), my_size(sz) { } + ~DoCleanup() { + if(my_pa) { + size_t dont_care = 0; + internal_free_buffer(*my_pa, *my_elements, my_size, dont_care); + } + } + }; + + // exception-safety requires we do all the potentially-throwing operations first + void grow_array() { + size_t new_size = my_size*2; + size_t new_nelements = nelements; // internal_free_buffer zeroes this + list_array_type new_elements_array = NULL; + pointer_array_type new_pointer_array = NULL; + list_array_type new_free_list = NULL; + { + DoCleanup my_cleanup(new_pointer_array, new_elements_array, new_size); + new_elements_array = elements_array_allocator().allocate(my_size); + new_pointer_array = pointer_array_allocator_type().allocate(new_size); + for(size_t i=0; i < new_size; ++i) new_pointer_array[i] = NULL; + set_up_free_list(&new_free_list, new_elements_array, my_size ); + + for(size_t i=0; i < my_size; ++i) { + for( element_type* op = pointer_array[i]; op; op = (element_type *)(op->second)) { + value_type *ov = reinterpret_cast(&(op->first)); + // could have std::move semantics + internal_insert_with_key(new_pointer_array, new_size, new_free_list, *ov); + } + } + my_cleanup.my_pa = NULL; + my_cleanup.my_elements = NULL; + } + + internal_free_buffer(pointer_array, elements_array, 
my_size, nelements); + free_list = new_free_list; + pointer_array = new_pointer_array; + elements_array = new_elements_array; + my_size = new_size; + nelements = new_nelements; + } + + // v should have perfect forwarding if std::move implemented. + // we use this method to move elements in grow_array, so can't use class fields + void internal_insert_with_key( element_type **p_pointer_array, size_t p_sz, list_array_type &p_free_list, + const value_type &v) { + size_t l_mask = p_sz-1; + __TBB_ASSERT(my_key, "Error: value-to-key functor not provided"); + size_t h = this->hash((*my_key)(v)) & l_mask; + __TBB_ASSERT(p_free_list, "Error: free list not set up."); + element_type* my_elem = p_free_list; p_free_list = (element_type *)(p_free_list->second); + (void) new(&(my_elem->first)) value_type(v); + my_elem->second = p_pointer_array[h]; + p_pointer_array[h] = my_elem; + } + + void internal_initialize_buffer() { + pointer_array = pointer_array_allocator_type().allocate(my_size); + for(size_t i = 0; i < my_size; ++i) pointer_array[i] = NULL; + elements_array = elements_array_allocator().allocate(my_size / 2); + set_up_free_list(&free_list, elements_array, my_size / 2); + } + + // made static so an enclosed class can use to properly dispose of the internals + static void internal_free_buffer( pointer_array_type &pa, list_array_type &el, size_t &sz, size_t &ne ) { + if(pa) { + for(size_t i = 0; i < sz; ++i ) { + element_type *p_next; + for( element_type *p = pa[i]; p; p = p_next) { + p_next = (element_type *)p->second; + internal::punned_cast(&(p->first))->~value_type(); + } + } + pointer_array_allocator_type().deallocate(pa, sz); + pa = NULL; + } + // Separate test (if allocation of pa throws, el may be allocated. + // but no elements will be constructed.) + if(el) { + elements_array_allocator().deallocate(el, sz / 2); + el = NULL; + } + sz = INITIAL_SIZE; + ne = 0; + } + +public: + hash_buffer() : my_key(NULL), my_size(INITIAL_SIZE), nelements(0) { + internal_initialize_buffer(); + } + + ~hash_buffer() { + internal_free_buffer(pointer_array, elements_array, my_size, nelements); + if(my_key) delete my_key; + } + + void reset() { + internal_free_buffer(pointer_array, elements_array, my_size, nelements); + internal_initialize_buffer(); + } + + // Take ownership of func object allocated with new. + // This method is only used internally, so can't be misused by user. + void set_key_func(ValueToKey *vtk) { my_key = vtk; } + // pointer is used to clone() + ValueToKey* get_key_func() { return my_key; } + + bool insert_with_key(const value_type &v) { + pointer_type p = NULL; + __TBB_ASSERT(my_key, "Error: value-to-key functor not provided"); + if(find_ref_with_key((*my_key)(v), p)) { + p->~value_type(); + (void) new(p) value_type(v); // copy-construct into the space + return false; + } + ++nelements; + if(nelements*2 > my_size) grow_array(); + internal_insert_with_key(pointer_array, my_size, free_list, v); + return true; + } + + // returns true and sets v to array element if found, else returns false. 
+ bool find_ref_with_key(const Knoref& k, pointer_type &v) { + size_t i = this->hash(k) & mask(); + for(element_type* p = pointer_array[i]; p; p = (element_type *)(p->second)) { + pointer_type pv = reinterpret_cast(&(p->first)); + __TBB_ASSERT(my_key, "Error: value-to-key functor not provided"); + if(this->equal((*my_key)(*pv), k)) { + v = pv; + return true; + } + } + return false; + } + + bool find_with_key( const Knoref& k, value_type &v) { + value_type *p; + if(find_ref_with_key(k, p)) { + v = *p; + return true; + } + else + return false; + } + + void delete_with_key(const Knoref& k) { + size_t h = this->hash(k) & mask(); + element_type* prev = NULL; + for(element_type* p = pointer_array[h]; p; prev = p, p = (element_type *)(p->second)) { + value_type *vp = reinterpret_cast(&(p->first)); + __TBB_ASSERT(my_key, "Error: value-to-key functor not provided"); + if(this->equal((*my_key)(*vp), k)) { + vp->~value_type(); + if(prev) prev->second = p->second; + else pointer_array[h] = (element_type *)(p->second); + p->second = free_list; + free_list = p; + --nelements; + return; + } + } + __TBB_ASSERT(false, "key not found for delete"); + } +}; +#endif // __TBB__flow_graph_hash_buffer_impl_H diff --git a/ohos/arm64-v8a/include/tbb/internal/_flow_graph_trace_impl.h b/ohos/arm64-v8a/include/tbb/internal/_flow_graph_trace_impl.h new file mode 100644 index 00000000..65809c39 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/internal/_flow_graph_trace_impl.h @@ -0,0 +1,364 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +#ifndef _FGT_GRAPH_TRACE_IMPL_H +#define _FGT_GRAPH_TRACE_IMPL_H + +#include "../tbb_profiling.h" +#if (_MSC_VER >= 1900) + #include +#endif + +namespace tbb { + namespace internal { + +#if TBB_USE_THREADING_TOOLS + #if TBB_PREVIEW_FLOW_GRAPH_TRACE + #if (_MSC_VER >= 1900) + #define CODEPTR() (_ReturnAddress()) + #elif __TBB_GCC_VERSION >= 40800 + #define CODEPTR() ( __builtin_return_address(0)) + #else + #define CODEPTR() NULL + #endif + #else + #define CODEPTR() NULL + #endif /* TBB_PREVIEW_FLOW_GRAPH_TRACE */ + +static inline void fgt_alias_port(void *node, void *p, bool visible) { + if(visible) + itt_relation_add( ITT_DOMAIN_FLOW, node, FLOW_NODE, __itt_relation_is_parent_of, p, FLOW_NODE ); + else + itt_relation_add( ITT_DOMAIN_FLOW, p, FLOW_NODE, __itt_relation_is_child_of, node, FLOW_NODE ); +} + +static inline void fgt_composite ( void* codeptr, void *node, void *graph ) { + itt_make_task_group( ITT_DOMAIN_FLOW, node, FLOW_NODE, graph, FLOW_GRAPH, FLOW_COMPOSITE_NODE ); + suppress_unused_warning( codeptr ); +#if TBB_PREVIEW_FLOW_GRAPH_TRACE + if (codeptr != NULL) { + register_node_addr(ITT_DOMAIN_FLOW, node, FLOW_NODE, CODE_ADDRESS, &codeptr); + } +#endif +} + +static inline void fgt_internal_alias_input_port( void *node, void *p, string_index name_index ) { + itt_make_task_group( ITT_DOMAIN_FLOW, p, FLOW_INPUT_PORT, node, FLOW_NODE, name_index ); + itt_relation_add( ITT_DOMAIN_FLOW, node, FLOW_NODE, __itt_relation_is_parent_of, p, FLOW_INPUT_PORT ); +} + +static inline void fgt_internal_alias_output_port( void *node, void *p, string_index name_index ) { + itt_make_task_group( ITT_DOMAIN_FLOW, p, FLOW_OUTPUT_PORT, node, FLOW_NODE, name_index ); + itt_relation_add( ITT_DOMAIN_FLOW, node, FLOW_NODE, __itt_relation_is_parent_of, p, FLOW_OUTPUT_PORT ); +} + +template +void alias_input_port(void *node, tbb::flow::receiver* port, string_index name_index) { + // TODO: Make fgt_internal_alias_input_port a function template? + fgt_internal_alias_input_port( node, port, name_index); +} + +template < typename PortsTuple, int N > +struct fgt_internal_input_alias_helper { + static void alias_port( void *node, PortsTuple &ports ) { + alias_input_port( node, &(tbb::flow::get(ports)), static_cast(FLOW_INPUT_PORT_0 + N - 1) ); + fgt_internal_input_alias_helper::alias_port( node, ports ); + } +}; + +template < typename PortsTuple > +struct fgt_internal_input_alias_helper { + static void alias_port( void * /* node */, PortsTuple & /* ports */ ) { } +}; + +template +void alias_output_port(void *node, tbb::flow::sender* port, string_index name_index) { + // TODO: Make fgt_internal_alias_output_port a function template? 
+ fgt_internal_alias_output_port( node, static_cast(port), name_index); +} + +template < typename PortsTuple, int N > +struct fgt_internal_output_alias_helper { + static void alias_port( void *node, PortsTuple &ports ) { + alias_output_port( node, &(tbb::flow::get(ports)), static_cast(FLOW_OUTPUT_PORT_0 + N - 1) ); + fgt_internal_output_alias_helper::alias_port( node, ports ); + } +}; + +template < typename PortsTuple > +struct fgt_internal_output_alias_helper { + static void alias_port( void * /*node*/, PortsTuple &/*ports*/ ) { + } +}; + +static inline void fgt_internal_create_input_port( void *node, void *p, string_index name_index ) { + itt_make_task_group( ITT_DOMAIN_FLOW, p, FLOW_INPUT_PORT, node, FLOW_NODE, name_index ); +} + +static inline void fgt_internal_create_output_port( void* codeptr, void *node, void *p, string_index name_index ) { + itt_make_task_group(ITT_DOMAIN_FLOW, p, FLOW_OUTPUT_PORT, node, FLOW_NODE, name_index); + suppress_unused_warning( codeptr ); +#if TBB_PREVIEW_FLOW_GRAPH_TRACE + if (codeptr != NULL) { + register_node_addr(ITT_DOMAIN_FLOW, node, FLOW_NODE, CODE_ADDRESS, &codeptr); + } +#endif +} + +template +void register_input_port(void *node, tbb::flow::receiver* port, string_index name_index) { + // TODO: Make fgt_internal_create_input_port a function template? + // In C++03 dependent name lookup from the template definition context + // works only for function declarations with external linkage: + // http://www.open-std.org/JTC1/SC22/WG21/docs/cwg_defects.html#561 + fgt_internal_create_input_port(node, static_cast(port), name_index); +} + +template < typename PortsTuple, int N > +struct fgt_internal_input_helper { + static void register_port( void *node, PortsTuple &ports ) { + register_input_port( node, &(tbb::flow::get(ports)), static_cast(FLOW_INPUT_PORT_0 + N - 1) ); + fgt_internal_input_helper::register_port( node, ports ); + } +}; + +template < typename PortsTuple > +struct fgt_internal_input_helper { + static void register_port( void *node, PortsTuple &ports ) { + register_input_port( node, &(tbb::flow::get<0>(ports)), FLOW_INPUT_PORT_0 ); + } +}; + +template +void register_output_port(void* codeptr, void *node, tbb::flow::sender* port, string_index name_index) { + // TODO: Make fgt_internal_create_output_port a function template? 
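// [Editorial illustration - not part of the original TBB source] The alias and
// register helper structs above walk a tuple of ports by compile-time recursion:
// the primary template handles element N-1 and then recurses on N-1, and the
// specialization for N == 1 terminates at element 0. A minimal stand-alone sketch
// of the same pattern over std::tuple (names are invented for the example and
// assume C++11):
#if 0   // illustrative sketch only; never compiled
#include <tuple>
#include <iostream>

template <typename Tuple, int N>
struct visit_each {
    static void apply(const Tuple& t) {
        std::cout << std::get<N - 1>(t) << '\n';  // handle element N-1 ...
        visit_each<Tuple, N - 1>::apply(t);       // ... then recurse toward element 0
    }
};

template <typename Tuple>
struct visit_each<Tuple, 1> {                     // base case stops the recursion
    static void apply(const Tuple& t) { std::cout << std::get<0>(t) << '\n'; }
};

// usage: visit_each<std::tuple<int, double>, 2>::apply(std::make_tuple(1, 2.5));
#endif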
+ fgt_internal_create_output_port( codeptr, node, static_cast(port), name_index); +} + +template < typename PortsTuple, int N > +struct fgt_internal_output_helper { + static void register_port( void* codeptr, void *node, PortsTuple &ports ) { + register_output_port( codeptr, node, &(tbb::flow::get(ports)), static_cast(FLOW_OUTPUT_PORT_0 + N - 1) ); + fgt_internal_output_helper::register_port( codeptr, node, ports ); + } +}; + +template < typename PortsTuple > +struct fgt_internal_output_helper { + static void register_port( void* codeptr, void *node, PortsTuple &ports ) { + register_output_port( codeptr, node, &(tbb::flow::get<0>(ports)), FLOW_OUTPUT_PORT_0 ); + } +}; + +template< typename NodeType > +void fgt_multioutput_node_desc( const NodeType *node, const char *desc ) { + void *addr = (void *)( static_cast< tbb::flow::receiver< typename NodeType::input_type > * >(const_cast< NodeType *>(node)) ); + itt_metadata_str_add( ITT_DOMAIN_FLOW, addr, FLOW_NODE, FLOW_OBJECT_NAME, desc ); +} + +template< typename NodeType > +void fgt_multiinput_multioutput_node_desc( const NodeType *node, const char *desc ) { + void *addr = const_cast(node); + itt_metadata_str_add( ITT_DOMAIN_FLOW, addr, FLOW_NODE, FLOW_OBJECT_NAME, desc ); +} + +template< typename NodeType > +static inline void fgt_node_desc( const NodeType *node, const char *desc ) { + void *addr = (void *)( static_cast< tbb::flow::sender< typename NodeType::output_type > * >(const_cast< NodeType *>(node)) ); + itt_metadata_str_add( ITT_DOMAIN_FLOW, addr, FLOW_NODE, FLOW_OBJECT_NAME, desc ); +} + +static inline void fgt_graph_desc( void *g, const char *desc ) { + itt_metadata_str_add( ITT_DOMAIN_FLOW, g, FLOW_GRAPH, FLOW_OBJECT_NAME, desc ); +} + +static inline void fgt_body( void *node, void *body ) { + itt_relation_add( ITT_DOMAIN_FLOW, body, FLOW_BODY, __itt_relation_is_child_of, node, FLOW_NODE ); +} + +template< int N, typename PortsTuple > +static inline void fgt_multioutput_node(void* codeptr, string_index t, void *g, void *input_port, PortsTuple &ports ) { + itt_make_task_group( ITT_DOMAIN_FLOW, input_port, FLOW_NODE, g, FLOW_GRAPH, t ); + fgt_internal_create_input_port( input_port, input_port, FLOW_INPUT_PORT_0 ); + fgt_internal_output_helper::register_port(codeptr, input_port, ports ); +} + +template< int N, typename PortsTuple > +static inline void fgt_multioutput_node_with_body( void* codeptr, string_index t, void *g, void *input_port, PortsTuple &ports, void *body ) { + itt_make_task_group( ITT_DOMAIN_FLOW, input_port, FLOW_NODE, g, FLOW_GRAPH, t ); + fgt_internal_create_input_port( input_port, input_port, FLOW_INPUT_PORT_0 ); + fgt_internal_output_helper::register_port( codeptr, input_port, ports ); + fgt_body( input_port, body ); +} + +template< int N, typename PortsTuple > +static inline void fgt_multiinput_node( void* codeptr, string_index t, void *g, PortsTuple &ports, void *output_port) { + itt_make_task_group( ITT_DOMAIN_FLOW, output_port, FLOW_NODE, g, FLOW_GRAPH, t ); + fgt_internal_create_output_port( codeptr, output_port, output_port, FLOW_OUTPUT_PORT_0 ); + fgt_internal_input_helper::register_port( output_port, ports ); +} + +static inline void fgt_multiinput_multioutput_node( void* codeptr, string_index t, void *n, void *g ) { + itt_make_task_group( ITT_DOMAIN_FLOW, n, FLOW_NODE, g, FLOW_GRAPH, t ); + suppress_unused_warning( codeptr ); +#if TBB_PREVIEW_FLOW_GRAPH_TRACE + if (codeptr != NULL) { + register_node_addr(ITT_DOMAIN_FLOW, n, FLOW_NODE, CODE_ADDRESS, &codeptr); + } +#endif +} + +static inline void fgt_node( 
void* codeptr, string_index t, void *g, void *output_port ) { + itt_make_task_group( ITT_DOMAIN_FLOW, output_port, FLOW_NODE, g, FLOW_GRAPH, t ); + fgt_internal_create_output_port( codeptr, output_port, output_port, FLOW_OUTPUT_PORT_0 ); +} + +static void fgt_node_with_body( void* codeptr, string_index t, void *g, void *output_port, void *body ) { + itt_make_task_group( ITT_DOMAIN_FLOW, output_port, FLOW_NODE, g, FLOW_GRAPH, t ); + fgt_internal_create_output_port(codeptr, output_port, output_port, FLOW_OUTPUT_PORT_0 ); + fgt_body( output_port, body ); +} + +static inline void fgt_node( void* codeptr, string_index t, void *g, void *input_port, void *output_port ) { + fgt_node( codeptr, t, g, output_port ); + fgt_internal_create_input_port( output_port, input_port, FLOW_INPUT_PORT_0 ); +} + +static inline void fgt_node_with_body( void* codeptr, string_index t, void *g, void *input_port, void *output_port, void *body ) { + fgt_node_with_body( codeptr, t, g, output_port, body ); + fgt_internal_create_input_port( output_port, input_port, FLOW_INPUT_PORT_0 ); +} + + +static inline void fgt_node( void* codeptr, string_index t, void *g, void *input_port, void *decrement_port, void *output_port ) { + fgt_node( codeptr, t, g, input_port, output_port ); + fgt_internal_create_input_port( output_port, decrement_port, FLOW_INPUT_PORT_1 ); +} + +static inline void fgt_make_edge( void *output_port, void *input_port ) { + itt_relation_add( ITT_DOMAIN_FLOW, output_port, FLOW_OUTPUT_PORT, __itt_relation_is_predecessor_to, input_port, FLOW_INPUT_PORT); +} + +static inline void fgt_remove_edge( void *output_port, void *input_port ) { + itt_relation_add( ITT_DOMAIN_FLOW, output_port, FLOW_OUTPUT_PORT, __itt_relation_is_sibling_of, input_port, FLOW_INPUT_PORT); +} + +static inline void fgt_graph( void *g ) { + itt_make_task_group( ITT_DOMAIN_FLOW, g, FLOW_GRAPH, NULL, FLOW_NULL, FLOW_GRAPH ); +} + +static inline void fgt_begin_body( void *body ) { + itt_task_begin( ITT_DOMAIN_FLOW, body, FLOW_BODY, NULL, FLOW_NULL, FLOW_BODY ); +} + +static inline void fgt_end_body( void * ) { + itt_task_end( ITT_DOMAIN_FLOW ); +} + +static inline void fgt_async_try_put_begin( void *node, void *port ) { + itt_task_begin( ITT_DOMAIN_FLOW, port, FLOW_OUTPUT_PORT, node, FLOW_NODE, FLOW_OUTPUT_PORT ); +} + +static inline void fgt_async_try_put_end( void *, void * ) { + itt_task_end( ITT_DOMAIN_FLOW ); +} + +static inline void fgt_async_reserve( void *node, void *graph ) { + itt_region_begin( ITT_DOMAIN_FLOW, node, FLOW_NODE, graph, FLOW_GRAPH, FLOW_NULL ); +} + +static inline void fgt_async_commit( void *node, void * /*graph*/) { + itt_region_end( ITT_DOMAIN_FLOW, node, FLOW_NODE ); +} + +static inline void fgt_reserve_wait( void *graph ) { + itt_region_begin( ITT_DOMAIN_FLOW, graph, FLOW_GRAPH, NULL, FLOW_NULL, FLOW_NULL ); +} + +static inline void fgt_release_wait( void *graph ) { + itt_region_end( ITT_DOMAIN_FLOW, graph, FLOW_GRAPH ); +} + +#else // TBB_USE_THREADING_TOOLS + +#define CODEPTR() NULL + +static inline void fgt_alias_port(void * /*node*/, void * /*p*/, bool /*visible*/ ) { } + +static inline void fgt_composite ( void* /*codeptr*/, void * /*node*/, void * /*graph*/ ) { } + +static inline void fgt_graph( void * /*g*/ ) { } + +template< typename NodeType > +static inline void fgt_multioutput_node_desc( const NodeType * /*node*/, const char * /*desc*/ ) { } + +template< typename NodeType > +static inline void fgt_node_desc( const NodeType * /*node*/, const char * /*desc*/ ) { } + +static inline void fgt_graph_desc( void 
* /*g*/, const char * /*desc*/ ) { } + +static inline void fgt_body( void * /*node*/, void * /*body*/ ) { } + +template< int N, typename PortsTuple > +static inline void fgt_multioutput_node( void* /*codeptr*/, string_index /*t*/, void * /*g*/, void * /*input_port*/, PortsTuple & /*ports*/ ) { } + +template< int N, typename PortsTuple > +static inline void fgt_multioutput_node_with_body( void* /*codeptr*/, string_index /*t*/, void * /*g*/, void * /*input_port*/, PortsTuple & /*ports*/, void * /*body*/ ) { } + +template< int N, typename PortsTuple > +static inline void fgt_multiinput_node( void* /*codeptr*/, string_index /*t*/, void * /*g*/, PortsTuple & /*ports*/, void * /*output_port*/ ) { } + +static inline void fgt_multiinput_multioutput_node( void* /*codeptr*/, string_index /*t*/, void * /*node*/, void * /*graph*/ ) { } + +static inline void fgt_node( void* /*codeptr*/, string_index /*t*/, void * /*g*/, void * /*output_port*/ ) { } +static inline void fgt_node( void* /*codeptr*/, string_index /*t*/, void * /*g*/, void * /*input_port*/, void * /*output_port*/ ) { } +static inline void fgt_node( void* /*codeptr*/, string_index /*t*/, void * /*g*/, void * /*input_port*/, void * /*decrement_port*/, void * /*output_port*/ ) { } + +static inline void fgt_node_with_body( void* /*codeptr*/, string_index /*t*/, void * /*g*/, void * /*output_port*/, void * /*body*/ ) { } +static inline void fgt_node_with_body( void* /*codeptr*/, string_index /*t*/, void * /*g*/, void * /*input_port*/, void * /*output_port*/, void * /*body*/ ) { } + +static inline void fgt_make_edge( void * /*output_port*/, void * /*input_port*/ ) { } +static inline void fgt_remove_edge( void * /*output_port*/, void * /*input_port*/ ) { } + +static inline void fgt_begin_body( void * /*body*/ ) { } +static inline void fgt_end_body( void * /*body*/) { } + +static inline void fgt_async_try_put_begin( void * /*node*/, void * /*port*/ ) { } +static inline void fgt_async_try_put_end( void * /*node*/ , void * /*port*/ ) { } +static inline void fgt_async_reserve( void * /*node*/, void * /*graph*/ ) { } +static inline void fgt_async_commit( void * /*node*/, void * /*graph*/ ) { } +static inline void fgt_reserve_wait( void * /*graph*/ ) { } +static inline void fgt_release_wait( void * /*graph*/ ) { } + +template< typename NodeType > +void fgt_multiinput_multioutput_node_desc( const NodeType * /*node*/, const char * /*desc*/ ) { } + +template < typename PortsTuple, int N > +struct fgt_internal_input_alias_helper { + static void alias_port( void * /*node*/, PortsTuple & /*ports*/ ) { } +}; + +template < typename PortsTuple, int N > +struct fgt_internal_output_alias_helper { + static void alias_port( void * /*node*/, PortsTuple & /*ports*/ ) { } +}; + +#endif // TBB_USE_THREADING_TOOLS + + } // namespace internal +} // namespace tbb + +#endif diff --git a/ohos/arm64-v8a/include/tbb/internal/_flow_graph_types_impl.h b/ohos/arm64-v8a/include/tbb/internal/_flow_graph_types_impl.h new file mode 100644 index 00000000..f374831b --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/internal/_flow_graph_types_impl.h @@ -0,0 +1,723 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB__flow_graph_types_impl_H +#define __TBB__flow_graph_types_impl_H + +#ifndef __TBB_flow_graph_H +#error Do not #include this internal file directly; use public TBB headers instead. +#endif + +// included in namespace tbb::flow::interfaceX + +namespace internal { + + // the change to key_matching (adding a K and KHash template parameter, making it a class) + // means we have to pass this data to the key_matching_port. All the ports have only one + // template parameter, so we have to wrap the following types in a trait: + // + // . K == key_type + // . KHash == hash and compare for Key + // . TtoK == function_body that given an object of T, returns its K + // . T == type accepted by port, and stored in the hash table + // + // The port will have an additional parameter on node construction, which is a function_body + // that accepts a const T& and returns a K which is the field in T which is its K. + template + struct KeyTrait { + typedef Kp K; + typedef Tp T; + typedef internal::type_to_key_function_body TtoK; + typedef KHashp KHash; + }; + + // wrap each element of a tuple in a template, and make a tuple of the result. + template class PT, typename TypeTuple> + struct wrap_tuple_elements; + + // A wrapper that generates the traits needed for each port of a key-matching join, + // and the type of the tuple of input ports. + template class PT, typename KeyTraits, typename TypeTuple> + struct wrap_key_tuple_elements; + +#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_VARIADIC_TUPLE_PRESENT + template class PT, typename... Args> + struct wrap_tuple_elements >{ + typedef typename tbb::flow::tuple... > type; + }; + + template class PT, typename KeyTraits, typename... Args> + struct wrap_key_tuple_elements > { + typedef typename KeyTraits::key_type K; + typedef typename KeyTraits::hash_compare_type KHash; + typedef typename tbb::flow::tuple >... 
> type; + }; +#else + template class PT, typename TypeTuple> + struct wrap_tuple_elements<1, PT, TypeTuple> { + typedef typename tbb::flow::tuple< + PT::type> > + type; + }; + + template class PT, typename KeyTraits, typename TypeTuple> + struct wrap_key_tuple_elements<1, PT, KeyTraits, TypeTuple > { + typedef typename KeyTraits::key_type K; + typedef typename KeyTraits::hash_compare_type KHash; + typedef KeyTrait::type> KeyTrait0; + typedef typename tbb::flow::tuple< PT > type; + }; + + template class PT, typename TypeTuple> + struct wrap_tuple_elements<2, PT, TypeTuple> { + typedef typename tbb::flow::tuple< + PT::type>, + PT::type> > + type; + }; + + template class PT, typename KeyTraits, typename TypeTuple> + struct wrap_key_tuple_elements<2, PT, KeyTraits, TypeTuple> { + typedef typename KeyTraits::key_type K; + typedef typename KeyTraits::hash_compare_type KHash; + typedef KeyTrait::type> KeyTrait0; + typedef KeyTrait::type> KeyTrait1; + typedef typename tbb::flow::tuple< PT, PT > type; + }; + + template class PT, typename TypeTuple> + struct wrap_tuple_elements<3, PT, TypeTuple> { + typedef typename tbb::flow::tuple< + PT::type>, + PT::type>, + PT::type> > + type; + }; + + template class PT, typename KeyTraits, typename TypeTuple> + struct wrap_key_tuple_elements<3, PT, KeyTraits, TypeTuple> { + typedef typename KeyTraits::key_type K; + typedef typename KeyTraits::hash_compare_type KHash; + typedef KeyTrait::type> KeyTrait0; + typedef KeyTrait::type> KeyTrait1; + typedef KeyTrait::type> KeyTrait2; + typedef typename tbb::flow::tuple< PT, PT, PT > type; + }; + + template class PT, typename TypeTuple> + struct wrap_tuple_elements<4, PT, TypeTuple> { + typedef typename tbb::flow::tuple< + PT::type>, + PT::type>, + PT::type>, + PT::type> > + type; + }; + + template class PT, typename KeyTraits, typename TypeTuple> + struct wrap_key_tuple_elements<4, PT, KeyTraits, TypeTuple> { + typedef typename KeyTraits::key_type K; + typedef typename KeyTraits::hash_compare_type KHash; + typedef KeyTrait::type> KeyTrait0; + typedef KeyTrait::type> KeyTrait1; + typedef KeyTrait::type> KeyTrait2; + typedef KeyTrait::type> KeyTrait3; + typedef typename tbb::flow::tuple< PT, PT, PT, + PT > type; + }; + + template class PT, typename TypeTuple> + struct wrap_tuple_elements<5, PT, TypeTuple> { + typedef typename tbb::flow::tuple< + PT::type>, + PT::type>, + PT::type>, + PT::type>, + PT::type> > + type; + }; + + template class PT, typename KeyTraits, typename TypeTuple> + struct wrap_key_tuple_elements<5, PT, KeyTraits, TypeTuple> { + typedef typename KeyTraits::key_type K; + typedef typename KeyTraits::hash_compare_type KHash; + typedef KeyTrait::type> KeyTrait0; + typedef KeyTrait::type> KeyTrait1; + typedef KeyTrait::type> KeyTrait2; + typedef KeyTrait::type> KeyTrait3; + typedef KeyTrait::type> KeyTrait4; + typedef typename tbb::flow::tuple< PT, PT, PT, + PT, PT > type; + }; + +#if __TBB_VARIADIC_MAX >= 6 + template class PT, typename TypeTuple> + struct wrap_tuple_elements<6, PT, TypeTuple> { + typedef typename tbb::flow::tuple< + PT::type>, + PT::type>, + PT::type>, + PT::type>, + PT::type>, + PT::type> > + type; + }; + + template class PT, typename KeyTraits, typename TypeTuple> + struct wrap_key_tuple_elements<6, PT, KeyTraits, TypeTuple> { + typedef typename KeyTraits::key_type K; + typedef typename KeyTraits::hash_compare_type KHash; + typedef KeyTrait::type> KeyTrait0; + typedef KeyTrait::type> KeyTrait1; + typedef KeyTrait::type> KeyTrait2; + typedef KeyTrait::type> KeyTrait3; + typedef 
KeyTrait::type> KeyTrait4; + typedef KeyTrait::type> KeyTrait5; + typedef typename tbb::flow::tuple< PT, PT, PT, PT, + PT, PT > type; + }; +#endif + +#if __TBB_VARIADIC_MAX >= 7 + template class PT, typename TypeTuple> + struct wrap_tuple_elements<7, PT, TypeTuple> { + typedef typename tbb::flow::tuple< + PT::type>, + PT::type>, + PT::type>, + PT::type>, + PT::type>, + PT::type>, + PT::type> > + type; + }; + + template class PT, typename KeyTraits, typename TypeTuple> + struct wrap_key_tuple_elements<7, PT, KeyTraits, TypeTuple> { + typedef typename KeyTraits::key_type K; + typedef typename KeyTraits::hash_compare_type KHash; + typedef KeyTrait::type> KeyTrait0; + typedef KeyTrait::type> KeyTrait1; + typedef KeyTrait::type> KeyTrait2; + typedef KeyTrait::type> KeyTrait3; + typedef KeyTrait::type> KeyTrait4; + typedef KeyTrait::type> KeyTrait5; + typedef KeyTrait::type> KeyTrait6; + typedef typename tbb::flow::tuple< PT, PT, PT, PT, + PT, PT, PT > type; + }; +#endif + +#if __TBB_VARIADIC_MAX >= 8 + template class PT, typename TypeTuple> + struct wrap_tuple_elements<8, PT, TypeTuple> { + typedef typename tbb::flow::tuple< + PT::type>, + PT::type>, + PT::type>, + PT::type>, + PT::type>, + PT::type>, + PT::type>, + PT::type> > + type; + }; + + template class PT, typename KeyTraits, typename TypeTuple> + struct wrap_key_tuple_elements<8, PT, KeyTraits, TypeTuple> { + typedef typename KeyTraits::key_type K; + typedef typename KeyTraits::hash_compare_type KHash; + typedef KeyTrait::type> KeyTrait0; + typedef KeyTrait::type> KeyTrait1; + typedef KeyTrait::type> KeyTrait2; + typedef KeyTrait::type> KeyTrait3; + typedef KeyTrait::type> KeyTrait4; + typedef KeyTrait::type> KeyTrait5; + typedef KeyTrait::type> KeyTrait6; + typedef KeyTrait::type> KeyTrait7; + typedef typename tbb::flow::tuple< PT, PT, PT, PT, + PT, PT, PT, PT > type; + }; +#endif + +#if __TBB_VARIADIC_MAX >= 9 + template class PT, typename TypeTuple> + struct wrap_tuple_elements<9, PT, TypeTuple> { + typedef typename tbb::flow::tuple< + PT::type>, + PT::type>, + PT::type>, + PT::type>, + PT::type>, + PT::type>, + PT::type>, + PT::type>, + PT::type> > + type; + }; + + template class PT, typename KeyTraits, typename TypeTuple> + struct wrap_key_tuple_elements<9, PT, KeyTraits, TypeTuple> { + typedef typename KeyTraits::key_type K; + typedef typename KeyTraits::hash_compare_type KHash; + typedef KeyTrait::type> KeyTrait0; + typedef KeyTrait::type> KeyTrait1; + typedef KeyTrait::type> KeyTrait2; + typedef KeyTrait::type> KeyTrait3; + typedef KeyTrait::type> KeyTrait4; + typedef KeyTrait::type> KeyTrait5; + typedef KeyTrait::type> KeyTrait6; + typedef KeyTrait::type> KeyTrait7; + typedef KeyTrait::type> KeyTrait8; + typedef typename tbb::flow::tuple< PT, PT, PT, PT, + PT, PT, PT, PT, PT > type; + }; +#endif + +#if __TBB_VARIADIC_MAX >= 10 + template class PT, typename TypeTuple> + struct wrap_tuple_elements<10, PT, TypeTuple> { + typedef typename tbb::flow::tuple< + PT::type>, + PT::type>, + PT::type>, + PT::type>, + PT::type>, + PT::type>, + PT::type>, + PT::type>, + PT::type>, + PT::type> > + type; + }; + + template class PT, typename KeyTraits, typename TypeTuple> + struct wrap_key_tuple_elements<10, PT, KeyTraits, TypeTuple> { + typedef typename KeyTraits::key_type K; + typedef typename KeyTraits::hash_compare_type KHash; + typedef KeyTrait::type> KeyTrait0; + typedef KeyTrait::type> KeyTrait1; + typedef KeyTrait::type> KeyTrait2; + typedef KeyTrait::type> KeyTrait3; + typedef KeyTrait::type> KeyTrait4; + typedef KeyTrait::type> 
KeyTrait5; + typedef KeyTrait::type> KeyTrait6; + typedef KeyTrait::type> KeyTrait7; + typedef KeyTrait::type> KeyTrait8; + typedef KeyTrait::type> KeyTrait9; + typedef typename tbb::flow::tuple< PT, PT, PT, PT, + PT, PT, PT, PT, PT, + PT > type; + }; +#endif +#endif /* __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_VARIADIC_TUPLE_PRESENT */ + +#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT + template< int... S > class sequence {}; + + template< int N, int... S > + struct make_sequence : make_sequence < N - 1, N - 1, S... > {}; + + template< int... S > + struct make_sequence < 0, S... > { + typedef sequence type; + }; +#endif /* __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT */ + +#if __TBB_INITIALIZER_LISTS_PRESENT + // Until C++14 std::initializer_list does not guarantee life time of contained objects. + template + class initializer_list_wrapper { + public: + typedef T value_type; + typedef const T& reference; + typedef const T& const_reference; + typedef size_t size_type; + + typedef T* iterator; + typedef const T* const_iterator; + + initializer_list_wrapper( std::initializer_list il ) __TBB_NOEXCEPT( true ) : my_begin( static_cast(malloc( il.size()*sizeof( T ) )) ) { + iterator dst = my_begin; + for ( typename std::initializer_list::const_iterator src = il.begin(); src != il.end(); ++src ) + new (dst++) T( *src ); + my_end = dst; + } + + initializer_list_wrapper( const initializer_list_wrapper& ilw ) __TBB_NOEXCEPT( true ) : my_begin( static_cast(malloc( ilw.size()*sizeof( T ) )) ) { + iterator dst = my_begin; + for ( typename std::initializer_list::const_iterator src = ilw.begin(); src != ilw.end(); ++src ) + new (dst++) T( *src ); + my_end = dst; + } + +#if __TBB_CPP11_RVALUE_REF_PRESENT + initializer_list_wrapper( initializer_list_wrapper&& ilw ) __TBB_NOEXCEPT( true ) : my_begin( ilw.my_begin ), my_end( ilw.my_end ) { + ilw.my_begin = ilw.my_end = NULL; + } +#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */ + + ~initializer_list_wrapper() { + if ( my_begin ) + free( my_begin ); + } + + const_iterator begin() const __TBB_NOEXCEPT(true) { return my_begin; } + const_iterator end() const __TBB_NOEXCEPT(true) { return my_end; } + size_t size() const __TBB_NOEXCEPT(true) { return (size_t)(my_end - my_begin); } + + private: + iterator my_begin; + iterator my_end; + }; +#endif /* __TBB_INITIALIZER_LISTS_PRESENT */ + +//! type mimicking std::pair but with trailing fill to ensure each element of an array +//* will have the correct alignment + template + struct type_plus_align { + char first[sizeof(T1)]; + T2 second; + char fill1[REM]; + }; + + template + struct type_plus_align { + char first[sizeof(T1)]; + T2 second; + }; + + template struct alignment_of { + typedef struct { char t; U padded; } test_alignment; + static const size_t value = sizeof(test_alignment) - sizeof(U); + }; + + // T1, T2 are actual types stored. The space defined for T1 in the type returned + // is a char array of the correct size. Type T2 should be trivially-constructible, + // T1 must be explicitly managed. + template + struct aligned_pair { + static const size_t t1_align = alignment_of::value; + static const size_t t2_align = alignment_of::value; + typedef type_plus_align just_pair; + static const size_t max_align = t1_align < t2_align ? t2_align : t1_align; + static const size_t extra_bytes = sizeof(just_pair) % max_align; + static const size_t remainder = extra_bytes ? 
max_align - extra_bytes : 0; + public: + typedef type_plus_align type; + }; // aligned_pair + +// support for variant type +// type we use when we're not storing a value +struct default_constructed { }; + +// type which contains another type, tests for what type is contained, and references to it. +// internal::Wrapper +// void CopyTo( void *newSpace) : builds a Wrapper copy of itself in newSpace + +// struct to allow us to copy and test the type of objects +struct WrapperBase { + virtual ~WrapperBase() {} + virtual void CopyTo(void* /*newSpace*/) const { } +}; + +// Wrapper contains a T, with the ability to test what T is. The Wrapper can be +// constructed from a T, can be copy-constructed from another Wrapper, and can be +// examined via value(), but not modified. +template +struct Wrapper: public WrapperBase { + typedef T value_type; + typedef T* pointer_type; +private: + T value_space; +public: + const value_type &value() const { return value_space; } + +private: + Wrapper(); + + // on exception will ensure the Wrapper will contain only a trivially-constructed object + struct _unwind_space { + pointer_type space; + _unwind_space(pointer_type p) : space(p) {} + ~_unwind_space() { + if(space) (void) new (space) Wrapper(default_constructed()); + } + }; +public: + explicit Wrapper( const T& other ) : value_space(other) { } + explicit Wrapper(const Wrapper& other) : value_space(other.value_space) { } + + void CopyTo(void* newSpace) const __TBB_override { + _unwind_space guard((pointer_type)newSpace); + (void) new(newSpace) Wrapper(value_space); + guard.space = NULL; + } + ~Wrapper() { } +}; + +// specialization for array objects +template +struct Wrapper : public WrapperBase { + typedef T value_type; + typedef T* pointer_type; + // space must be untyped. + typedef T ArrayType[N]; +private: + // The space is not of type T[N] because when copy-constructing, it would be + // default-initialized and then copied to in some fashion, resulting in two + // constructions and one destruction per element. If the type is char[ ], we + // placement new into each element, resulting in one construction per element. + static const size_t space_size = sizeof(ArrayType) / sizeof(char); + char value_space[space_size]; + + + // on exception will ensure the already-built objects will be destructed + // (the value_space is a char array, so it is already trivially-destructible.) 
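// [Editorial illustration - not part of the original TBB source] The unwind guard
// that follows implements a common exception-safety idiom: when placement-constructing
// N objects into raw storage, count how many were built so that a throwing copy
// constructor destroys only those, in reverse order. A minimal sketch of the idiom
// (hypothetical names, assuming C++11):
#if 0   // illustrative sketch only; never compiled
#include <new>
#include <cstddef>

template <typename T, std::size_t N>
void construct_array(void* raw, const T (&src)[N]) {
    T* dst = static_cast<T*>(raw);
    std::size_t built = 0;
    try {
        for (; built < N; ++built)
            new (dst + built) T(src[built]);      // copy construction may throw
    } catch (...) {
        while (built > 0)
            dst[--built].~T();                    // roll back already-built elements
        throw;
    }
}
#endif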
+ struct _unwind_class { + pointer_type space; + int already_built; + _unwind_class(pointer_type p) : space(p), already_built(0) {} + ~_unwind_class() { + if(space) { + for(size_t i = already_built; i > 0 ; --i ) space[i-1].~value_type(); + (void) new(space) Wrapper(default_constructed()); + } + } + }; +public: + const ArrayType &value() const { + char *vp = const_cast(value_space); + return reinterpret_cast(*vp); + } + +private: + Wrapper(); +public: + // have to explicitly construct because other decays to a const value_type* + explicit Wrapper(const ArrayType& other) { + _unwind_class guard((pointer_type)value_space); + pointer_type vp = reinterpret_cast(&value_space); + for(size_t i = 0; i < N; ++i ) { + (void) new(vp++) value_type(other[i]); + ++(guard.already_built); + } + guard.space = NULL; + } + explicit Wrapper(const Wrapper& other) : WrapperBase() { + // we have to do the heavy lifting to copy contents + _unwind_class guard((pointer_type)value_space); + pointer_type dp = reinterpret_cast(value_space); + pointer_type sp = reinterpret_cast(const_cast(other.value_space)); + for(size_t i = 0; i < N; ++i, ++dp, ++sp) { + (void) new(dp) value_type(*sp); + ++(guard.already_built); + } + guard.space = NULL; + } + + void CopyTo(void* newSpace) const __TBB_override { + (void) new(newSpace) Wrapper(*this); // exceptions handled in copy constructor + } + + ~Wrapper() { + // have to destroy explicitly in reverse order + pointer_type vp = reinterpret_cast(&value_space); + for(size_t i = N; i > 0 ; --i ) vp[i-1].~value_type(); + } +}; + +// given a tuple, return the type of the element that has the maximum alignment requirement. +// Given a tuple and that type, return the number of elements of the object with the max +// alignment requirement that is at least as big as the largest object in the tuple. + +template struct pick_one; +template struct pick_one { typedef T1 type; }; +template struct pick_one { typedef T2 type; }; + +template< template class Selector, typename T1, typename T2 > +struct pick_max { + typedef typename pick_one< (Selector::value > Selector::value), T1, T2 >::type type; +}; + +template struct size_of { static const int value = sizeof(T); }; + +template< size_t N, class Tuple, template class Selector > struct pick_tuple_max { + typedef typename pick_tuple_max::type LeftMaxType; + typedef typename tbb::flow::tuple_element::type ThisType; + typedef typename pick_max::type type; +}; + +template< class Tuple, template class Selector > struct pick_tuple_max<0, Tuple, Selector> { + typedef typename tbb::flow::tuple_element<0, Tuple>::type type; +}; + +// is the specified type included in a tuple? +template +struct is_element_of { + typedef typename tbb::flow::tuple_element::type T_i; + static const bool value = tbb::internal::is_same_type::value || is_element_of::value; +}; + +template +struct is_element_of { + typedef typename tbb::flow::tuple_element<0, Tuple>::type T_i; + static const bool value = tbb::internal::is_same_type::value; +}; + +// allow the construction of types that are listed tuple. If a disallowed type +// construction is written, a method involving this type is created. The +// type has no definition, so a syntax error is generated. 
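// [Editorial illustration - not part of the original TBB source] The guard below
// rejects types that are not members of the tuple by instantiating a deliberately
// undefined class, which is the C++03 way to force a compile error. A modern sketch
// of the same membership check, using static_assert instead of an undefined type
// (hypothetical names, assuming C++11):
#if 0   // illustrative sketch only; never compiled
#include <type_traits>
#include <new>

template <typename T, typename... Ts>
struct is_one_of : std::false_type {};

template <typename T, typename U, typename... Ts>
struct is_one_of<T, U, Ts...>
    : std::conditional<std::is_same<T, U>::value, std::true_type, is_one_of<T, Ts...>>::type {};

template <typename T, typename... Allowed>
void construct_checked(void* space, const T& x) {
    static_assert(is_one_of<T, Allowed...>::value, "type is not a member of the allowed set");
    new (space) T(x);   // only reached for allowed types
}
#endif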
+template struct ERROR_Type_Not_allowed_In_Tagged_Msg_Not_Member_Of_Tuple; + +template struct do_if; +template +struct do_if { + static void construct(void *mySpace, const T& x) { + (void) new(mySpace) Wrapper(x); + } +}; +template +struct do_if { + static void construct(void * /*mySpace*/, const T& x) { + // This method is instantiated when the type T does not match any of the + // element types in the Tuple in variant. + ERROR_Type_Not_allowed_In_Tagged_Msg_Not_Member_Of_Tuple::bad_type(x); + } +}; + +// Tuple tells us the allowed types that variant can hold. It determines the alignment of the space in +// Wrapper, and how big Wrapper is. +// +// the object can only be tested for type, and a read-only reference can be fetched by cast_to(). + +using tbb::internal::punned_cast; +struct tagged_null_type {}; +template +class tagged_msg { + typedef tbb::flow::tuple= 6 + , T5 + #endif + #if __TBB_VARIADIC_MAX >= 7 + , T6 + #endif + #if __TBB_VARIADIC_MAX >= 8 + , T7 + #endif + #if __TBB_VARIADIC_MAX >= 9 + , T8 + #endif + #if __TBB_VARIADIC_MAX >= 10 + , T9 + #endif + > Tuple; + +private: + class variant { + static const size_t N = tbb::flow::tuple_size::value; + typedef typename pick_tuple_max::type AlignType; + typedef typename pick_tuple_max::type MaxSizeType; + static const size_t MaxNBytes = (sizeof(Wrapper)+sizeof(AlignType)-1); + static const size_t MaxNElements = MaxNBytes/sizeof(AlignType); + typedef typename tbb::aligned_space SpaceType; + SpaceType my_space; + static const size_t MaxSize = sizeof(SpaceType); + + public: + variant() { (void) new(&my_space) Wrapper(default_constructed()); } + + template + variant( const T& x ) { + do_if::value>::construct(&my_space,x); + } + + variant(const variant& other) { + const WrapperBase * h = punned_cast(&(other.my_space)); + h->CopyTo(&my_space); + } + + // assignment must destroy and re-create the Wrapper type, as there is no way + // to create a Wrapper-to-Wrapper assign even if we find they agree in type. 
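// [Editorial illustration - not part of the original TBB source] Because the stored
// wrapper is only known through its WrapperBase interface, the variant's assignment
// operator that follows cannot forward to the contained type; it destroys the current
// wrapper and copy-constructs the right-hand side into the same raw storage. A minimal
// sketch of that destroy-then-placement-copy idiom (hypothetical names, assuming C++11):
#if 0   // illustrative sketch only; never compiled
#include <new>

struct base {
    virtual ~base() {}
    virtual void copy_to(void* space) const = 0;   // rebuild a copy of *this at 'space'
};

template <typename T>
struct holder : base {
    T value;
    explicit holder(const T& v) : value(v) {}
    void copy_to(void* space) const override { new (space) holder(*this); }
};

// 'storage' is assumed to currently hold some base-derived object
inline void assign_in_place(void* storage, const base& rhs) {
    static_cast<base*>(storage)->~base();   // destroy the old wrapper ...
    rhs.copy_to(storage);                   // ... then rebuild from rhs in the same storage
}
#endif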
+ void operator=( const variant& rhs ) { + if(&rhs != this) { + WrapperBase *h = punned_cast(&my_space); + h->~WrapperBase(); + const WrapperBase *ch = punned_cast(&(rhs.my_space)); + ch->CopyTo(&my_space); + } + } + + template + const U& variant_cast_to() const { + const Wrapper *h = dynamic_cast*>(punned_cast(&my_space)); + if(!h) { + tbb::internal::throw_exception(tbb::internal::eid_bad_tagged_msg_cast); + } + return h->value(); + } + template + bool variant_is_a() const { return dynamic_cast*>(punned_cast(&my_space)) != NULL; } + + bool variant_is_default_constructed() const {return variant_is_a();} + + ~variant() { + WrapperBase *h = punned_cast(&my_space); + h->~WrapperBase(); + } + }; //class variant + + TagType my_tag; + variant my_msg; + +public: + tagged_msg(): my_tag(TagType(~0)), my_msg(){} + + template + tagged_msg(T const &index, R const &value) : my_tag(index), my_msg(value) {} + + #if __TBB_CONST_REF_TO_ARRAY_TEMPLATE_PARAM_BROKEN + template + tagged_msg(T const &index, R (&value)[N]) : my_tag(index), my_msg(value) {} + #endif + + void set_tag(TagType const &index) {my_tag = index;} + TagType tag() const {return my_tag;} + + template + const V& cast_to() const {return my_msg.template variant_cast_to();} + + template + bool is_a() const {return my_msg.template variant_is_a();} + + bool is_default_constructed() const {return my_msg.variant_is_default_constructed();} +}; //class tagged_msg + +// template to simplify cast and test for tagged_msg in template contexts +template +const V& cast_to(T const &t) { return t.template cast_to(); } + +template +bool is_a(T const &t) { return t.template is_a(); } + +enum op_stat { WAIT = 0, SUCCEEDED, FAILED }; + +} // namespace internal + +#endif /* __TBB__flow_graph_types_impl_H */ diff --git a/ohos/arm64-v8a/include/tbb/internal/_mutex_padding.h b/ohos/arm64-v8a/include/tbb/internal/_mutex_padding.h new file mode 100644 index 00000000..d26f5f48 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/internal/_mutex_padding.h @@ -0,0 +1,98 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_mutex_padding_H +#define __TBB_mutex_padding_H + +// wrapper for padding mutexes to be alone on a cache line, without requiring they be allocated +// from a pool. Because we allow them to be defined anywhere they must be two cache lines in size. + + +namespace tbb { +namespace interface7 { +namespace internal { + +static const size_t cache_line_size = 64; + +// Pad a mutex to occupy a number of full cache lines sufficient to avoid false sharing +// with other data; space overhead is up to 2*cache_line_size-1. 
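// [Editorial illustration - not part of the original TBB source] The padded wrapper
// declared below over-allocates roughly two cache lines of raw storage and rounds the
// object address up to the next line boundary, so the wrapped mutex never shares a
// cache line with neighbouring data. A minimal sketch of the round-up arithmetic
// (assuming a 64-byte line; names are invented for the example):
#if 0   // illustrative sketch only; never compiled
#include <cstdint>
#include <cstddef>
#include <new>

template <typename T>
struct padded {
    static const std::size_t line = 64;
    char pad[((sizeof(T) + line - 1) / line + 1) * line];   // room for T plus one extra line

    T* get() {
        // round the padding's address up to the next 64-byte boundary
        return reinterpret_cast<T*>((reinterpret_cast<std::uintptr_t>(pad) | (line - 1)) + 1);
    }

    padded()  { new (get()) T(); }
    ~padded() { get()->~T(); }
};
#endif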
+template class padded_mutex; + +template +class padded_mutex : tbb::internal::mutex_copy_deprecated_and_disabled { + typedef long pad_type; + pad_type my_pad[((sizeof(Mutex)+cache_line_size-1)/cache_line_size+1)*cache_line_size/sizeof(pad_type)]; + + Mutex *impl() { return (Mutex *)((uintptr_t(this)|(cache_line_size-1))+1);} + +public: + static const bool is_rw_mutex = Mutex::is_rw_mutex; + static const bool is_recursive_mutex = Mutex::is_recursive_mutex; + static const bool is_fair_mutex = Mutex::is_fair_mutex; + + padded_mutex() { new(impl()) Mutex(); } + ~padded_mutex() { impl()->~Mutex(); } + + //! Represents acquisition of a mutex. + class scoped_lock : tbb::internal::no_copy { + typename Mutex::scoped_lock my_scoped_lock; + public: + scoped_lock() : my_scoped_lock() {} + scoped_lock( padded_mutex& m ) : my_scoped_lock(*m.impl()) { } + ~scoped_lock() { } + + void acquire( padded_mutex& m ) { my_scoped_lock.acquire(*m.impl()); } + bool try_acquire( padded_mutex& m ) { return my_scoped_lock.try_acquire(*m.impl()); } + void release() { my_scoped_lock.release(); } + }; +}; + +template +class padded_mutex : tbb::internal::mutex_copy_deprecated_and_disabled { + typedef long pad_type; + pad_type my_pad[((sizeof(Mutex)+cache_line_size-1)/cache_line_size+1)*cache_line_size/sizeof(pad_type)]; + + Mutex *impl() { return (Mutex *)((uintptr_t(this)|(cache_line_size-1))+1);} + +public: + static const bool is_rw_mutex = Mutex::is_rw_mutex; + static const bool is_recursive_mutex = Mutex::is_recursive_mutex; + static const bool is_fair_mutex = Mutex::is_fair_mutex; + + padded_mutex() { new(impl()) Mutex(); } + ~padded_mutex() { impl()->~Mutex(); } + + //! Represents acquisition of a mutex. + class scoped_lock : tbb::internal::no_copy { + typename Mutex::scoped_lock my_scoped_lock; + public: + scoped_lock() : my_scoped_lock() {} + scoped_lock( padded_mutex& m, bool write = true ) : my_scoped_lock(*m.impl(),write) { } + ~scoped_lock() { } + + void acquire( padded_mutex& m, bool write = true ) { my_scoped_lock.acquire(*m.impl(),write); } + bool try_acquire( padded_mutex& m, bool write = true ) { return my_scoped_lock.try_acquire(*m.impl(),write); } + bool upgrade_to_writer() { return my_scoped_lock.upgrade_to_writer(); } + bool downgrade_to_reader() { return my_scoped_lock.downgrade_to_reader(); } + void release() { my_scoped_lock.release(); } + }; +}; + +} // namespace internal +} // namespace interface7 +} // namespace tbb + +#endif /* __TBB_mutex_padding_H */ diff --git a/ohos/arm64-v8a/include/tbb/internal/_node_handle_impl.h b/ohos/arm64-v8a/include/tbb/internal/_node_handle_impl.h new file mode 100644 index 00000000..2088aa8b --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/internal/_node_handle_impl.h @@ -0,0 +1,168 @@ +/* + Copyright (c) 2019-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +#ifndef __TBB_node_handle_H +#define __TBB_node_handle_H + +#include "_allocator_traits.h" +#include "../tbb_config.h" + + +namespace tbb { + +// This classes must be declared here for correct friendly relationship +// TODO: Consider creation some internal class to access node_handle private fields without any friendly classes +namespace interface5 { +namespace internal { + template + class split_ordered_list; + template + class concurrent_unordered_base; +} +} + +namespace interface10{ +namespace internal { + template + class concurrent_skip_list; +} +} + +namespace internal { + +template +class node_handle_base { +public: + typedef Allocator allocator_type; +protected: + typedef Node node; + typedef tbb::internal::allocator_traits traits_type; +public: + + node_handle_base() : my_node(NULL), my_allocator() {} + node_handle_base(node_handle_base&& nh) : my_node(nh.my_node), + my_allocator(std::move(nh.my_allocator)) { + nh.my_node = NULL; + } + + bool empty() const { return my_node == NULL; } + explicit operator bool() const { return my_node != NULL; } + + ~node_handle_base() { internal_destroy(); } + + node_handle_base& operator=(node_handle_base&& nh) { + internal_destroy(); + my_node = nh.my_node; + typedef typename traits_type::propagate_on_container_move_assignment pocma_type; + tbb::internal::allocator_move_assignment(my_allocator, nh.my_allocator, pocma_type()); + nh.deactivate(); + return *this; + } + + void swap(node_handle_base& nh) { + std::swap(my_node, nh.my_node); + typedef typename traits_type::propagate_on_container_swap pocs_type; + tbb::internal::allocator_swap(my_allocator, nh.my_allocator, pocs_type()); + } + + allocator_type get_allocator() const { + return my_allocator; + } + +protected: + node_handle_base(node* n) : my_node(n) {} + + void internal_destroy() { + if(my_node) { + traits_type::destroy(my_allocator, my_node->storage()); + typename tbb::internal::allocator_rebind::type node_allocator; + node_allocator.deallocate(my_node, 1); + } + } + + void deactivate() { my_node = NULL; } + + node* my_node; + allocator_type my_allocator; +}; + +// node handle for maps +template +class node_handle : public node_handle_base { + typedef node_handle_base base_type; +public: + typedef Key key_type; + typedef typename Value::second_type mapped_type; + typedef typename base_type::allocator_type allocator_type; + + node_handle() : base_type() {} + + key_type& key() const { + __TBB_ASSERT(!this->empty(), "Cannot get key from the empty node_type object"); + return *const_cast(&(this->my_node->value().first)); + } + + mapped_type& mapped() const { + __TBB_ASSERT(!this->empty(), "Cannot get mapped value from the empty node_type object"); + return this->my_node->value().second; + } + +private: + template + friend class tbb::interface5::internal::split_ordered_list; + + template + friend class tbb::interface5::internal::concurrent_unordered_base; + + template + friend class tbb::interface10::internal::concurrent_skip_list; + + node_handle(typename base_type::node* n) : base_type(n) {} +}; + +// node handle for sets +template +class node_handle : public node_handle_base { + typedef node_handle_base base_type; +public: + typedef Key value_type; + typedef typename base_type::allocator_type allocator_type; + + node_handle() : base_type() {} + + value_type& value() const { + __TBB_ASSERT(!this->empty(), "Cannot get value from the empty node_type object"); + return *const_cast(&(this->my_node->value())); + } + +private: + template + friend class 
tbb::interface5::internal::split_ordered_list; + + template + friend class tbb::interface5::internal::concurrent_unordered_base; + + template + friend class tbb::interface10::internal::concurrent_skip_list; + + node_handle(typename base_type::node* n) : base_type(n) {} +}; + + +}// namespace internal +}// namespace tbb + +#endif /*__TBB_node_handle_H*/ diff --git a/ohos/arm64-v8a/include/tbb/internal/_range_iterator.h b/ohos/arm64-v8a/include/tbb/internal/_range_iterator.h new file mode 100644 index 00000000..df00e88d --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/internal/_range_iterator.h @@ -0,0 +1,66 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_range_iterator_H +#define __TBB_range_iterator_H + +#include "../tbb_stddef.h" + +#if __TBB_CPP11_STD_BEGIN_END_PRESENT && __TBB_CPP11_AUTO_PRESENT && __TBB_CPP11_DECLTYPE_PRESENT + #include +#endif + +namespace tbb { + // iterators to first and last elements of container + namespace internal { + +#if __TBB_CPP11_STD_BEGIN_END_PRESENT && __TBB_CPP11_AUTO_PRESENT && __TBB_CPP11_DECLTYPE_PRESENT + using std::begin; + using std::end; + template + auto first(Container& c)-> decltype(begin(c)) {return begin(c);} + + template + auto first(const Container& c)-> decltype(begin(c)) {return begin(c);} + + template + auto last(Container& c)-> decltype(begin(c)) {return end(c);} + + template + auto last(const Container& c)-> decltype(begin(c)) {return end(c);} +#else + template + typename Container::iterator first(Container& c) {return c.begin();} + + template + typename Container::const_iterator first(const Container& c) {return c.begin();} + + template + typename Container::iterator last(Container& c) {return c.end();} + + template + typename Container::const_iterator last(const Container& c) {return c.end();} +#endif + + template + T* first(T (&arr) [size]) {return arr;} + + template + T* last(T (&arr) [size]) {return arr + size;} + } //namespace internal +} //namespace tbb + +#endif // __TBB_range_iterator_H diff --git a/ohos/arm64-v8a/include/tbb/internal/_tbb_hash_compare_impl.h b/ohos/arm64-v8a/include/tbb/internal/_tbb_hash_compare_impl.h new file mode 100644 index 00000000..82f0df13 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/internal/_tbb_hash_compare_impl.h @@ -0,0 +1,105 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// must be included outside namespaces. 
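// [Editorial illustration - not part of the original TBB source] The first()/last()
// helpers in _range_iterator.h above are internal utilities that let generic code
// obtain begin/end iterators uniformly from containers and from built-in arrays.
// A small usage sketch, assuming C++11 and the helpers exactly as declared above:
#if 0   // illustrative sketch only; never compiled
#include <vector>
#include <numeric>

template <typename Sequence>
long sum(Sequence& seq) {
    // works for std::vector<int> and for int[N] alike
    return std::accumulate(tbb::internal::first(seq), tbb::internal::last(seq), 0L);
}

void demo() {
    std::vector<int> v(3, 1);
    int a[3] = {1, 2, 3};
    long s = sum(v) + sum(a);   // 3 + 6
    (void)s;
}
#endif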
+#ifndef __TBB_tbb_hash_compare_impl_H +#define __TBB_tbb_hash_compare_impl_H + +#include + +namespace tbb { +namespace interface5 { +namespace internal { + +// Template class for hash compare +template +class hash_compare +{ +public: + typedef Hasher hasher; + typedef Key_equality key_equal; + + hash_compare() {} + + hash_compare(Hasher a_hasher) : my_hash_object(a_hasher) {} + + hash_compare(Hasher a_hasher, Key_equality a_keyeq) : my_hash_object(a_hasher), my_key_compare_object(a_keyeq) {} + + size_t operator()(const Key& key) const { + return ((size_t)my_hash_object(key)); + } + + bool operator()(const Key& key1, const Key& key2) const { + // TODO: get rid of the result invertion + return (!my_key_compare_object(key1, key2)); + } + + Hasher my_hash_object; // The hash object + Key_equality my_key_compare_object; // The equality comparator object +}; + +//! Hash multiplier +static const size_t hash_multiplier = tbb::internal::select_size_t_constant<2654435769U, 11400714819323198485ULL>::value; + +} // namespace internal + +//! Hasher functions +template +__TBB_DEPRECATED_MSG("tbb::tbb_hasher is deprecated, use std::hash") inline size_t tbb_hasher( const T& t ) { + return static_cast( t ) * internal::hash_multiplier; +} +template +__TBB_DEPRECATED_MSG("tbb::tbb_hasher is deprecated, use std::hash") inline size_t tbb_hasher( P* ptr ) { + size_t const h = reinterpret_cast( ptr ); + return (h >> 3) ^ h; +} +template +__TBB_DEPRECATED_MSG("tbb::tbb_hasher is deprecated, use std::hash") inline size_t tbb_hasher( const std::basic_string& s ) { + size_t h = 0; + for( const E* c = s.c_str(); *c; ++c ) + h = static_cast(*c) ^ (h * internal::hash_multiplier); + return h; +} +template +__TBB_DEPRECATED_MSG("tbb::tbb_hasher is deprecated, use std::hash") inline size_t tbb_hasher( const std::pair& p ) { + return tbb_hasher(p.first) ^ tbb_hasher(p.second); +} + +} // namespace interface5 +using interface5::tbb_hasher; + +// Template class for hash compare +template +class __TBB_DEPRECATED_MSG("tbb::tbb_hash is deprecated, use std::hash") tbb_hash +{ +public: + tbb_hash() {} + + size_t operator()(const Key& key) const + { + return tbb_hasher(key); + } +}; + +//! hash_compare that is default argument for concurrent_hash_map +template +struct tbb_hash_compare { + static size_t hash( const Key& a ) { return tbb_hasher(a); } + static bool equal( const Key& a, const Key& b ) { return a == b; } +}; + +} // namespace tbb +#endif /* __TBB_tbb_hash_compare_impl_H */ diff --git a/ohos/arm64-v8a/include/tbb/internal/_tbb_strings.h b/ohos/arm64-v8a/include/tbb/internal/_tbb_strings.h new file mode 100644 index 00000000..df443f3f --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/internal/_tbb_strings.h @@ -0,0 +1,79 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +TBB_STRING_RESOURCE(FLOW_BROADCAST_NODE, "broadcast_node") +TBB_STRING_RESOURCE(FLOW_BUFFER_NODE, "buffer_node") +TBB_STRING_RESOURCE(FLOW_CONTINUE_NODE, "continue_node") +TBB_STRING_RESOURCE(FLOW_FUNCTION_NODE, "function_node") +TBB_STRING_RESOURCE(FLOW_JOIN_NODE_QUEUEING, "join_node (queueing)") +TBB_STRING_RESOURCE(FLOW_JOIN_NODE_RESERVING, "join_node (reserving)") +TBB_STRING_RESOURCE(FLOW_JOIN_NODE_TAG_MATCHING, "join_node (tag_matching)") +TBB_STRING_RESOURCE(FLOW_LIMITER_NODE, "limiter_node") +TBB_STRING_RESOURCE(FLOW_MULTIFUNCTION_NODE, "multifunction_node") +TBB_STRING_RESOURCE(FLOW_OR_NODE, "or_node") //no longer in use, kept for backward compatibility +TBB_STRING_RESOURCE(FLOW_OVERWRITE_NODE, "overwrite_node") +TBB_STRING_RESOURCE(FLOW_PRIORITY_QUEUE_NODE, "priority_queue_node") +TBB_STRING_RESOURCE(FLOW_QUEUE_NODE, "queue_node") +TBB_STRING_RESOURCE(FLOW_SEQUENCER_NODE, "sequencer_node") +TBB_STRING_RESOURCE(FLOW_SOURCE_NODE, "source_node") +TBB_STRING_RESOURCE(FLOW_SPLIT_NODE, "split_node") +TBB_STRING_RESOURCE(FLOW_WRITE_ONCE_NODE, "write_once_node") +TBB_STRING_RESOURCE(FLOW_BODY, "body") +TBB_STRING_RESOURCE(FLOW_GRAPH, "graph") +TBB_STRING_RESOURCE(FLOW_NODE, "node") +TBB_STRING_RESOURCE(FLOW_INPUT_PORT, "input_port") +TBB_STRING_RESOURCE(FLOW_INPUT_PORT_0, "input_port_0") +TBB_STRING_RESOURCE(FLOW_INPUT_PORT_1, "input_port_1") +TBB_STRING_RESOURCE(FLOW_INPUT_PORT_2, "input_port_2") +TBB_STRING_RESOURCE(FLOW_INPUT_PORT_3, "input_port_3") +TBB_STRING_RESOURCE(FLOW_INPUT_PORT_4, "input_port_4") +TBB_STRING_RESOURCE(FLOW_INPUT_PORT_5, "input_port_5") +TBB_STRING_RESOURCE(FLOW_INPUT_PORT_6, "input_port_6") +TBB_STRING_RESOURCE(FLOW_INPUT_PORT_7, "input_port_7") +TBB_STRING_RESOURCE(FLOW_INPUT_PORT_8, "input_port_8") +TBB_STRING_RESOURCE(FLOW_INPUT_PORT_9, "input_port_9") +TBB_STRING_RESOURCE(FLOW_OUTPUT_PORT, "output_port") +TBB_STRING_RESOURCE(FLOW_OUTPUT_PORT_0, "output_port_0") +TBB_STRING_RESOURCE(FLOW_OUTPUT_PORT_1, "output_port_1") +TBB_STRING_RESOURCE(FLOW_OUTPUT_PORT_2, "output_port_2") +TBB_STRING_RESOURCE(FLOW_OUTPUT_PORT_3, "output_port_3") +TBB_STRING_RESOURCE(FLOW_OUTPUT_PORT_4, "output_port_4") +TBB_STRING_RESOURCE(FLOW_OUTPUT_PORT_5, "output_port_5") +TBB_STRING_RESOURCE(FLOW_OUTPUT_PORT_6, "output_port_6") +TBB_STRING_RESOURCE(FLOW_OUTPUT_PORT_7, "output_port_7") +TBB_STRING_RESOURCE(FLOW_OUTPUT_PORT_8, "output_port_8") +TBB_STRING_RESOURCE(FLOW_OUTPUT_PORT_9, "output_port_9") +TBB_STRING_RESOURCE(FLOW_OBJECT_NAME, "object_name") +TBB_STRING_RESOURCE(FLOW_NULL, "null") +TBB_STRING_RESOURCE(FLOW_INDEXER_NODE, "indexer_node") +TBB_STRING_RESOURCE(FLOW_COMPOSITE_NODE, "composite_node") +TBB_STRING_RESOURCE(FLOW_ASYNC_NODE, "async_node") +TBB_STRING_RESOURCE(FLOW_OPENCL_NODE, "opencl_node") +TBB_STRING_RESOURCE(ALGORITHM, "tbb_algorithm") +TBB_STRING_RESOURCE(PARALLEL_FOR, "tbb_parallel_for") +TBB_STRING_RESOURCE(PARALLEL_DO, "tbb_parallel_do") +TBB_STRING_RESOURCE(PARALLEL_INVOKE, "tbb_parallel_invoke") +TBB_STRING_RESOURCE(PARALLEL_REDUCE, "tbb_parallel_reduce") +TBB_STRING_RESOURCE(PARALLEL_SCAN, "tbb_parallel_scan") +TBB_STRING_RESOURCE(PARALLEL_SORT, "tbb_parallel_sort") +TBB_STRING_RESOURCE(CUSTOM_CTX, "tbb_custom") +TBB_STRING_RESOURCE(FLOW_TASKS, "tbb_flow_graph") +TBB_STRING_RESOURCE(PARALLEL_FOR_TASK, "tbb_parallel_for_task") +// TODO: Drop following string prefix "fgt_" here and in FGA's collector +TBB_STRING_RESOURCE(USER_EVENT, "fgt_user_event") +#if __TBB_CPF_BUILD || (TBB_PREVIEW_FLOW_GRAPH_TRACE && TBB_USE_THREADING_TOOLS) 
+TBB_STRING_RESOURCE(CODE_ADDRESS, "code_address") +#endif diff --git a/ohos/arm64-v8a/include/tbb/internal/_tbb_trace_impl.h b/ohos/arm64-v8a/include/tbb/internal/_tbb_trace_impl.h new file mode 100644 index 00000000..38dc68cf --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/internal/_tbb_trace_impl.h @@ -0,0 +1,55 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef _FGT_TBB_TRACE_IMPL_H +#define _FGT_TBB_TRACE_IMPL_H + +#include "../tbb_profiling.h" + +namespace tbb { + namespace internal { + +#if TBB_PREVIEW_ALGORITHM_TRACE + static inline void fgt_algorithm( string_index t, void *algorithm, void *parent ) { + itt_make_task_group( ITT_DOMAIN_FLOW, algorithm, ALGORITHM, parent, ALGORITHM, t ); + } + static inline void fgt_begin_algorithm( string_index t, void *algorithm ) { + itt_task_begin( ITT_DOMAIN_FLOW, algorithm, ALGORITHM, NULL, FLOW_NULL, t ); + } + static inline void fgt_end_algorithm( void * ) { + itt_task_end( ITT_DOMAIN_FLOW ); + } + static inline void fgt_alg_begin_body( string_index t, void *body, void *algorithm ) { + itt_task_begin( ITT_DOMAIN_FLOW, body, FLOW_BODY, algorithm, ALGORITHM, t ); + } + static inline void fgt_alg_end_body( void * ) { + itt_task_end( ITT_DOMAIN_FLOW ); + } + +#else // TBB_PREVIEW_ALGORITHM_TRACE + + static inline void fgt_algorithm( string_index /*t*/, void * /*algorithm*/, void * /*parent*/ ) { } + static inline void fgt_begin_algorithm( string_index /*t*/, void * /*algorithm*/ ) { } + static inline void fgt_end_algorithm( void * ) { } + static inline void fgt_alg_begin_body( string_index /*t*/, void * /*body*/, void * /*algorithm*/ ) { } + static inline void fgt_alg_end_body( void * ) { } + +#endif // TBB_PREVIEW_ALGORITHM_TRACEE + + } // namespace internal +} // namespace tbb + +#endif diff --git a/ohos/arm64-v8a/include/tbb/internal/_tbb_windef.h b/ohos/arm64-v8a/include/tbb/internal/_tbb_windef.h new file mode 100644 index 00000000..b2eb9d5d --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/internal/_tbb_windef.h @@ -0,0 +1,69 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_tbb_windef_H +#error Do not #include this internal file directly; use public TBB headers instead. +#endif /* __TBB_tbb_windef_H */ + +// Check that the target Windows version has all API calls required for TBB. +// Do not increase the version in condition beyond 0x0500 without prior discussion! 
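// Illustrative sketch (an assumption about typical client code, not part of the original header):
// a project that needs to satisfy the version check below would pin the minimum Windows version
// before including any TBB header, for example:
//     #define _WIN32_WINNT 0x0501   // Windows XP or later
//     #include "tbb/tbb.h"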
+#if defined(_WIN32_WINNT) && _WIN32_WINNT<0x0501 +#error TBB is unable to run on old Windows versions; _WIN32_WINNT must be 0x0501 or greater. +#endif + +#if !defined(_MT) +#error TBB requires linkage with multithreaded C/C++ runtime library. \ + Choose multithreaded DLL runtime in project settings, or use /MD[d] compiler switch. +#endif + +// Workaround for the problem with MVSC headers failing to define namespace std +namespace std { + using ::size_t; using ::ptrdiff_t; +} + +#define __TBB_STRING_AUX(x) #x +#define __TBB_STRING(x) __TBB_STRING_AUX(x) + +// Default setting of TBB_USE_DEBUG +#ifdef TBB_USE_DEBUG +# if TBB_USE_DEBUG +# if !defined(_DEBUG) +# pragma message(__FILE__ "(" __TBB_STRING(__LINE__) ") : Warning: Recommend using /MDd if compiling with TBB_USE_DEBUG!=0") +# endif +# else +# if defined(_DEBUG) +# pragma message(__FILE__ "(" __TBB_STRING(__LINE__) ") : Warning: Recommend using /MD if compiling with TBB_USE_DEBUG==0") +# endif +# endif +#endif + +#if (__TBB_BUILD || __TBBMALLOC_BUILD || __TBBBIND_BUILD) && !defined(__TBB_NO_IMPLICIT_LINKAGE) +#define __TBB_NO_IMPLICIT_LINKAGE 1 +#endif + +#if _MSC_VER + #if !__TBB_NO_IMPLICIT_LINKAGE + #ifdef __TBB_LIB_NAME + #pragma comment(lib, __TBB_STRING(__TBB_LIB_NAME)) + #else + #ifdef _DEBUG + #pragma comment(lib, "tbb_debug.lib") + #else + #pragma comment(lib, "tbb.lib") + #endif + #endif + #endif +#endif diff --git a/ohos/arm64-v8a/include/tbb/internal/_template_helpers.h b/ohos/arm64-v8a/include/tbb/internal/_template_helpers.h new file mode 100644 index 00000000..197f77a1 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/internal/_template_helpers.h @@ -0,0 +1,284 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_template_helpers_H +#define __TBB_template_helpers_H + +#include +#include +#include "../tbb_config.h" +#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_TEMPLATE_ALIASES_PRESENT +#include +#endif +#if __TBB_CPP11_PRESENT +#include +#include // allocator_traits +#endif + +namespace tbb { namespace internal { + +//! Enables one or the other code branches +template struct enable_if {}; +template struct enable_if { typedef T type; }; + +//! Strips its template type argument from cv- and ref-qualifiers +template struct strip { typedef T type; }; +template struct strip { typedef T type; }; +template struct strip { typedef T type; }; +template struct strip { typedef T type; }; +template struct strip { typedef T type; }; +template struct strip { typedef T type; }; +template struct strip { typedef T type; }; +template struct strip { typedef T type; }; +//! Specialization for function pointers +template struct strip { typedef T(*type)(); }; +#if __TBB_CPP11_RVALUE_REF_PRESENT +template struct strip { typedef T type; }; +template struct strip { typedef T type; }; +template struct strip { typedef T type; }; +template struct strip { typedef T type; }; +#endif +//! 
Specialization for arrays converts to a corresponding pointer +template struct strip { typedef T* type; }; +template struct strip { typedef const T* type; }; +template struct strip { typedef volatile T* type; }; +template struct strip { typedef const volatile T* type; }; + +//! Detects whether two given types are the same +template struct is_same_type { static const bool value = false; }; +template struct is_same_type { static const bool value = true; }; + +template struct is_ref { static const bool value = false; }; +template struct is_ref { static const bool value = true; }; + +//! Partial support for std::is_integral +template struct is_integral_impl { static const bool value = false; }; +template<> struct is_integral_impl { static const bool value = true; }; +template<> struct is_integral_impl { static const bool value = true; }; +#if __TBB_CPP11_PRESENT +template<> struct is_integral_impl { static const bool value = true; }; +template<> struct is_integral_impl { static const bool value = true; }; +#endif +template<> struct is_integral_impl { static const bool value = true; }; +template<> struct is_integral_impl { static const bool value = true; }; +template<> struct is_integral_impl { static const bool value = true; }; +template<> struct is_integral_impl { static const bool value = true; }; +template<> struct is_integral_impl { static const bool value = true; }; + +template +struct is_integral : is_integral_impl::type> {}; + +#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT +//! std::void_t internal implementation (to avoid GCC < 4.7 "template aliases" absence) +template struct void_t { typedef void type; }; +#endif + +#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_TEMPLATE_ALIASES_PRESENT + +// Generic SFINAE helper for expression checks, based on the idea demonstrated in ISO C++ paper n4502 +template class... Checks> +struct supports_impl { typedef std::false_type type; }; +template class... Checks> +struct supports_impl...>::type, Checks...> { typedef std::true_type type; }; + +template class... Checks> +using supports = typename supports_impl::type; + +#endif /* __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_TEMPLATE_ALIASES_PRESENT */ + +#if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT + +//! Allows to store a function parameter pack as a variable and later pass it to another function +template< typename... Types > +struct stored_pack; + +template<> +struct stored_pack<> +{ + typedef stored_pack<> pack_type; + stored_pack() {} + + // Friend front-end functions + template< typename F, typename Pack > friend void call( F&& f, Pack&& p ); + template< typename Ret, typename F, typename Pack > friend Ret call_and_return( F&& f, Pack&& p ); + +protected: + // Ideally, ref-qualified non-static methods would be used, + // but that would greatly reduce the set of compilers where it works. + template< typename Ret, typename F, typename... Preceding > + static Ret call( F&& f, const pack_type& /*pack*/, Preceding&&... params ) { + return std::forward(f)( std::forward(params)... ); + } + template< typename Ret, typename F, typename... Preceding > + static Ret call( F&& f, pack_type&& /*pack*/, Preceding&&... params ) { + return std::forward(f)( std::forward(params)... ); + } +}; + +template< typename T, typename... Types > +struct stored_pack : stored_pack +{ + typedef stored_pack pack_type; + typedef stored_pack pack_remainder; + // Since lifetime of original values is out of control, copies should be made. 
+ // Thus references should be stripped away from the deduced type. + typename strip::type leftmost_value; + + // Here rvalue references act in the same way as forwarding references, + // as long as class template parameters were deduced via forwarding references. + stored_pack( T&& t, Types&&... types ) + : pack_remainder(std::forward(types)...), leftmost_value(std::forward(t)) {} + + // Friend front-end functions + template< typename F, typename Pack > friend void call( F&& f, Pack&& p ); + template< typename Ret, typename F, typename Pack > friend Ret call_and_return( F&& f, Pack&& p ); + +protected: + template< typename Ret, typename F, typename... Preceding > + static Ret call( F&& f, pack_type& pack, Preceding&&... params ) { + return pack_remainder::template call( + std::forward(f), static_cast(pack), + std::forward(params)... , pack.leftmost_value + ); + } + template< typename Ret, typename F, typename... Preceding > + static Ret call( F&& f, const pack_type& pack, Preceding&&... params ) { + return pack_remainder::template call( + std::forward(f), static_cast(pack), + std::forward(params)... , pack.leftmost_value + ); + } + template< typename Ret, typename F, typename... Preceding > + static Ret call( F&& f, pack_type&& pack, Preceding&&... params ) { + return pack_remainder::template call( + std::forward(f), static_cast(pack), + std::forward(params)... , std::move(pack.leftmost_value) + ); + } +}; + +//! Calls the given function with arguments taken from a stored_pack +template< typename F, typename Pack > +void call( F&& f, Pack&& p ) { + strip::type::template call( std::forward(f), std::forward(p) ); +} + +template< typename Ret, typename F, typename Pack > +Ret call_and_return( F&& f, Pack&& p ) { + return strip::type::template call( std::forward(f), std::forward(p) ); +} + +template< typename... Types > +stored_pack save_pack( Types&&... types ) { + return stored_pack( std::forward(types)... ); +} + +#endif /* __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT */ + +#if __TBB_CPP14_INTEGER_SEQUENCE_PRESENT + +using std::index_sequence; +using std::make_index_sequence; + +#elif __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_TEMPLATE_ALIASES_PRESENT + +template class index_sequence {}; + +template +struct make_index_sequence_impl : make_index_sequence_impl < N - 1, N - 1, S... 
> {}; + +template +struct make_index_sequence_impl <0, S...> { + using type = index_sequence; +}; + +template +using make_index_sequence = typename tbb::internal::make_index_sequence_impl::type; + +#endif /* __TBB_CPP14_INTEGER_SEQUENCE_PRESENT */ + +#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT +template +struct conjunction; + +template +struct conjunction + : std::conditional, First>::type {}; + +template +struct conjunction : T {}; + +template<> +struct conjunction<> : std::true_type {}; + +#endif + +#if __TBB_CPP11_PRESENT + +template< typename Iter > +using iterator_value_t = typename std::iterator_traits::value_type; + +template< typename Iter > +using iterator_key_t = typename std::remove_const::first_type>::type; + +template< typename Iter > +using iterator_mapped_t = typename iterator_value_t::second_type; + +template< typename A > using value_type = typename A::value_type; +template< typename A > using alloc_ptr_t = typename std::allocator_traits::pointer; +template< typename A > using has_allocate = decltype(std::declval&>() = std::declval().allocate(0)); +template< typename A > using has_deallocate = decltype(std::declval().deallocate(std::declval>(), 0)); + +// value_type should be checked first because it can be used in other checks (via allocator_traits) +template< typename T > +using is_allocator = supports; + +#if __TBB_CPP14_VARIABLE_TEMPLATES_PRESENT + +template< typename T > +static constexpr bool is_allocator_v = is_allocator::value; + +#endif /*__TBB_CPP14_VARIABLE_TEMPLATES */ + +template< std::size_t N, typename... Args > +struct pack_element { + using type = void; +}; + +template< std::size_t N, typename T, typename... Args > +struct pack_element { + using type = typename pack_element::type; +}; + +template< typename T, typename... Args > +struct pack_element<0, T, Args...> { + using type = T; +}; + +template< std::size_t N, typename... Args > +using pack_element_t = typename pack_element::type; + +// Helper alias for heterogeneous lookup functions in containers +// template parameter K and std::conditional are needed to provide immediate context +// and postpone getting is_trasparent from the compare functor until method instantiation. +template +using is_transparent = typename std::conditional::type::is_transparent; + +#endif /* __TBB_CPP11_PRESENT */ + +} } // namespace internal, namespace tbb + +#endif /* __TBB_template_helpers_H */ diff --git a/ohos/arm64-v8a/include/tbb/internal/_warning_suppress_disable_notice.h b/ohos/arm64-v8a/include/tbb/internal/_warning_suppress_disable_notice.h new file mode 100644 index 00000000..1b536463 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/internal/_warning_suppress_disable_notice.h @@ -0,0 +1,27 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +#if __TBB_SUPPRESS_INTERNAL_DEPRECATED_MESSAGES + +#if __INTEL_COMPILER || _MSC_VER +#pragma warning( pop ) +#elif __GNUC__ +#pragma GCC diagnostic pop +#elif __clang__ +#pragma clang diagnostic pop +#endif + +#endif // __TBB_SUPPRESS_INTERNAL_DEPRECATED_MESSAGES diff --git a/ohos/arm64-v8a/include/tbb/internal/_warning_suppress_enable_notice.h b/ohos/arm64-v8a/include/tbb/internal/_warning_suppress_enable_notice.h new file mode 100644 index 00000000..3b927231 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/internal/_warning_suppress_enable_notice.h @@ -0,0 +1,32 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#include "../tbb_config.h" + +#if __TBB_SUPPRESS_INTERNAL_DEPRECATED_MESSAGES + +#if _MSC_VER || __INTEL_COMPILER +#pragma warning( push ) +#pragma warning( disable: 4996 ) +#elif __GNUC__ +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" +#elif __clang__ +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wdeprecated-declarations" +#endif + +#endif // __TBB_SUPPRESS_INTERNAL_DEPRECATED_MESSAGES diff --git a/ohos/arm64-v8a/include/tbb/internal/_x86_eliding_mutex_impl.h b/ohos/arm64-v8a/include/tbb/internal/_x86_eliding_mutex_impl.h new file mode 100644 index 00000000..11be329d --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/internal/_x86_eliding_mutex_impl.h @@ -0,0 +1,144 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB__x86_eliding_mutex_impl_H +#define __TBB__x86_eliding_mutex_impl_H + +#ifndef __TBB_spin_mutex_H +#error Do not #include this internal file directly; use public TBB headers instead. +#endif + +#if ( __TBB_x86_32 || __TBB_x86_64 ) + +namespace tbb { +namespace interface7 { +namespace internal { + +template +class padded_mutex; + +//! An eliding lock that occupies a single byte. +/** A x86_eliding_mutex is an HLE-enabled spin mutex. It is recommended to + put the mutex on a cache line that is not shared by the data it protects. + It should be used for locking short critical sections where the lock is + contended but the data it protects are not. If zero-initialized, the + mutex is considered unheld. + @ingroup synchronization */ +class x86_eliding_mutex : tbb::internal::mutex_copy_deprecated_and_disabled { + //! 0 if lock is released, 1 if lock is acquired. + __TBB_atomic_flag flag; + + friend class padded_mutex; + +public: + //! Construct unacquired lock. + /** Equivalent to zero-initialization of *this. 
*/ + x86_eliding_mutex() : flag(0) {} + +// bug in gcc 3.x.x causes syntax error in spite of the friend declaration above. +// Make the scoped_lock public in that case. +#if __TBB_USE_X86_ELIDING_MUTEX || __TBB_GCC_VERSION < 40000 +#else + // by default we will not provide the scoped_lock interface. The user + // should use the padded version of the mutex. scoped_lock is used in + // padded_mutex template. +private: +#endif + // scoped_lock in padded_mutex<> is the interface to use. + //! Represents acquisition of a mutex. + class scoped_lock : tbb::internal::no_copy { + private: + //! Points to currently held mutex, or NULL if no lock is held. + x86_eliding_mutex* my_mutex; + + public: + //! Construct without acquiring a mutex. + scoped_lock() : my_mutex(NULL) {} + + //! Construct and acquire lock on a mutex. + scoped_lock( x86_eliding_mutex& m ) : my_mutex(NULL) { acquire(m); } + + //! Acquire lock. + void acquire( x86_eliding_mutex& m ) { + __TBB_ASSERT( !my_mutex, "already holding a lock" ); + + my_mutex=&m; + my_mutex->lock(); + } + + //! Try acquiring lock (non-blocking) + /** Return true if lock acquired; false otherwise. */ + bool try_acquire( x86_eliding_mutex& m ) { + __TBB_ASSERT( !my_mutex, "already holding a lock" ); + + bool result = m.try_lock(); + if( result ) { + my_mutex = &m; + } + return result; + } + + //! Release lock + void release() { + __TBB_ASSERT( my_mutex, "release on scoped_lock that is not holding a lock" ); + + my_mutex->unlock(); + my_mutex = NULL; + } + + //! Destroy lock. If holding a lock, releases the lock first. + ~scoped_lock() { + if( my_mutex ) { + release(); + } + } + }; +#if __TBB_USE_X86_ELIDING_MUTEX || __TBB_GCC_VERSION < 40000 +#else +public: +#endif /* __TBB_USE_X86_ELIDING_MUTEX */ + + // Mutex traits + static const bool is_rw_mutex = false; + static const bool is_recursive_mutex = false; + static const bool is_fair_mutex = false; + + // ISO C++0x compatibility methods + + //! Acquire lock + void lock() { + __TBB_LockByteElided(flag); + } + + //! Try acquiring lock (non-blocking) + /** Return true if lock acquired; false otherwise. */ + bool try_lock() { + return __TBB_TryLockByteElided(flag); + } + + //! Release lock + void unlock() { + __TBB_UnlockByteElided( flag ); + } +}; // end of x86_eliding_mutex + +} // namespace internal +} // namespace interface7 +} // namespace tbb + +#endif /* ( __TBB_x86_32 || __TBB_x86_64 ) */ + +#endif /* __TBB__x86_eliding_mutex_impl_H */ diff --git a/ohos/arm64-v8a/include/tbb/internal/_x86_rtm_rw_mutex_impl.h b/ohos/arm64-v8a/include/tbb/internal/_x86_rtm_rw_mutex_impl.h new file mode 100644 index 00000000..9373aaa0 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/internal/_x86_rtm_rw_mutex_impl.h @@ -0,0 +1,223 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB__x86_rtm_rw_mutex_impl_H +#define __TBB__x86_rtm_rw_mutex_impl_H + +#ifndef __TBB_spin_rw_mutex_H +#error Do not #include this internal file directly; use public TBB headers instead. 
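// Illustrative note (an assumption drawn from the guard above, not from the TBB sources): this
// implementation header is meant to be reached only through the public mutex header, which defines
// the expected guard macro before pulling it in, for example:
//     #include "tbb/spin_rw_mutex.h"   // defines __TBB_spin_rw_mutex_H, then includes this file
// Including the _impl header directly trips the #error above.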
+#endif + +#if __TBB_TSX_AVAILABLE + +#include "../tbb_stddef.h" +#include "../tbb_machine.h" +#include "../tbb_profiling.h" +#include "../spin_rw_mutex.h" + +namespace tbb { +namespace interface8 { +namespace internal { + +enum RTM_type { + RTM_not_in_mutex, + RTM_transacting_reader, + RTM_transacting_writer, + RTM_real_reader, + RTM_real_writer +}; + +static const unsigned long speculation_granularity = 64; + +//! Fast, unfair, spinning speculation-enabled reader-writer lock with backoff and +// writer-preference +/** @ingroup synchronization */ +class x86_rtm_rw_mutex: private spin_rw_mutex { +#if __TBB_USE_X86_RTM_RW_MUTEX || __TBB_GCC_VERSION < 40000 +// bug in gcc 3.x.x causes syntax error in spite of the friend declaration below. +// Make the scoped_lock public in that case. +public: +#else +private: +#endif + friend class interface7::internal::padded_mutex; + class scoped_lock; // should be private + friend class scoped_lock; +private: + //! @cond INTERNAL + + //! Internal construct unacquired mutex. + void __TBB_EXPORTED_METHOD internal_construct(); + + //! Internal acquire write lock. + // only_speculate == true if we're doing a try_lock, else false. + void __TBB_EXPORTED_METHOD internal_acquire_writer(x86_rtm_rw_mutex::scoped_lock&, bool only_speculate=false); + + //! Internal acquire read lock. + // only_speculate == true if we're doing a try_lock, else false. + void __TBB_EXPORTED_METHOD internal_acquire_reader(x86_rtm_rw_mutex::scoped_lock&, bool only_speculate=false); + + //! Internal upgrade reader to become a writer. + bool __TBB_EXPORTED_METHOD internal_upgrade( x86_rtm_rw_mutex::scoped_lock& ); + + //! Out of line code for downgrading a writer to a reader. + bool __TBB_EXPORTED_METHOD internal_downgrade( x86_rtm_rw_mutex::scoped_lock& ); + + //! Internal try_acquire write lock. + bool __TBB_EXPORTED_METHOD internal_try_acquire_writer( x86_rtm_rw_mutex::scoped_lock& ); + + //! Internal release lock. + void __TBB_EXPORTED_METHOD internal_release( x86_rtm_rw_mutex::scoped_lock& ); + + static x86_rtm_rw_mutex* internal_get_mutex( const spin_rw_mutex::scoped_lock& lock ) + { + return static_cast( lock.mutex ); + } + static void internal_set_mutex( spin_rw_mutex::scoped_lock& lock, spin_rw_mutex* mtx ) + { + lock.mutex = mtx; + } + //! @endcond +public: + //! Construct unacquired mutex. + x86_rtm_rw_mutex() { + w_flag = false; +#if TBB_USE_THREADING_TOOLS + internal_construct(); +#endif + } + +#if TBB_USE_ASSERT + //! Empty destructor. + ~x86_rtm_rw_mutex() {} +#endif /* TBB_USE_ASSERT */ + + // Mutex traits + static const bool is_rw_mutex = true; + static const bool is_recursive_mutex = false; + static const bool is_fair_mutex = false; + +#if __TBB_USE_X86_RTM_RW_MUTEX || __TBB_GCC_VERSION < 40000 +#else + // by default we will not provide the scoped_lock interface. The user + // should use the padded version of the mutex. scoped_lock is used in + // padded_mutex template. +private: +#endif + //! The scoped locking pattern + /** It helps to avoid the common problem of forgetting to release lock. + It also nicely provides the "node" for queuing locks. */ + // Speculation-enabled scoped lock for spin_rw_mutex + // The idea is to be able to reuse the acquire/release methods of spin_rw_mutex + // and its scoped lock wherever possible. The only way to use a speculative lock is to use + // a scoped_lock. 
(because transaction_state must be local) + + class scoped_lock : tbb::internal::no_copy { + friend class x86_rtm_rw_mutex; + spin_rw_mutex::scoped_lock my_scoped_lock; + + RTM_type transaction_state; + + public: + //! Construct lock that has not acquired a mutex. + /** Equivalent to zero-initialization of *this. */ + scoped_lock() : my_scoped_lock(), transaction_state(RTM_not_in_mutex) { + } + + //! Acquire lock on given mutex. + scoped_lock( x86_rtm_rw_mutex& m, bool write = true ) : my_scoped_lock(), + transaction_state(RTM_not_in_mutex) { + acquire(m, write); + } + + //! Release lock (if lock is held). + ~scoped_lock() { + if(transaction_state != RTM_not_in_mutex) release(); + } + + //! Acquire lock on given mutex. + void acquire( x86_rtm_rw_mutex& m, bool write = true ) { + if( write ) m.internal_acquire_writer(*this); + else m.internal_acquire_reader(*this); + } + + //! Release lock + void release() { + x86_rtm_rw_mutex* mutex = x86_rtm_rw_mutex::internal_get_mutex(my_scoped_lock); + __TBB_ASSERT( mutex, "lock is not acquired" ); + __TBB_ASSERT( transaction_state!=RTM_not_in_mutex, "lock is not acquired" ); + return mutex->internal_release(*this); + } + + //! Upgrade reader to become a writer. + /** Returns whether the upgrade happened without releasing and re-acquiring the lock */ + bool upgrade_to_writer() { + x86_rtm_rw_mutex* mutex = x86_rtm_rw_mutex::internal_get_mutex(my_scoped_lock); + __TBB_ASSERT( mutex, "lock is not acquired" ); + if (transaction_state == RTM_transacting_writer || transaction_state == RTM_real_writer) + return true; // Already a writer + return mutex->internal_upgrade(*this); + } + + //! Downgrade writer to become a reader. + /** Returns whether the downgrade happened without releasing and re-acquiring the lock */ + bool downgrade_to_reader() { + x86_rtm_rw_mutex* mutex = x86_rtm_rw_mutex::internal_get_mutex(my_scoped_lock); + __TBB_ASSERT( mutex, "lock is not acquired" ); + if (transaction_state == RTM_transacting_reader || transaction_state == RTM_real_reader) + return true; // Already a reader + return mutex->internal_downgrade(*this); + } + + //! Attempt to acquire mutex. + /** returns true if successful. */ + bool try_acquire( x86_rtm_rw_mutex& m, bool write = true ) { +#if TBB_USE_ASSERT + x86_rtm_rw_mutex* mutex = x86_rtm_rw_mutex::internal_get_mutex(my_scoped_lock); + __TBB_ASSERT( !mutex, "lock is already acquired" ); +#endif + // have to assign m to our mutex. + // cannot set the mutex, because try_acquire in spin_rw_mutex depends on it being NULL. + if(write) return m.internal_try_acquire_writer(*this); + // speculatively acquire the lock. If this fails, do try_acquire on the spin_rw_mutex. + m.internal_acquire_reader(*this, /*only_speculate=*/true); + if(transaction_state == RTM_transacting_reader) return true; + if( my_scoped_lock.try_acquire(m, false)) { + transaction_state = RTM_real_reader; + return true; + } + return false; + } + + }; // class x86_rtm_rw_mutex::scoped_lock + + // ISO C++0x compatibility methods not provided because we cannot maintain + // state about whether a thread is in a transaction. + +private: + char pad[speculation_granularity-sizeof(spin_rw_mutex)]; // padding + + // If true, writer holds the spin_rw_mutex. 
+ tbb::atomic w_flag; // want this on a separate cache line + +}; // x86_rtm_rw_mutex + +} // namespace internal +} // namespace interface8 +} // namespace tbb + +#endif /* __TBB_TSX_AVAILABLE */ +#endif /* __TBB__x86_rtm_rw_mutex_impl_H */ diff --git a/ohos/arm64-v8a/include/tbb/iterators.h b/ohos/arm64-v8a/include/tbb/iterators.h new file mode 100644 index 00000000..2d4da9c9 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/iterators.h @@ -0,0 +1,326 @@ +/* + Copyright (c) 2017-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_iterators_H +#define __TBB_iterators_H + +#include +#include + +#include "tbb_config.h" +#include "tbb_stddef.h" + +#if __TBB_CPP11_PRESENT + +#include + +namespace tbb { + +template +class counting_iterator { + __TBB_STATIC_ASSERT(std::numeric_limits::is_integer, "Cannot instantiate counting_iterator with a non-integer type"); +public: + typedef typename std::make_signed::type difference_type; + typedef IntType value_type; + typedef const IntType* pointer; + typedef const IntType& reference; + typedef std::random_access_iterator_tag iterator_category; + + counting_iterator() : my_counter() {} + explicit counting_iterator(IntType init) : my_counter(init) {} + + reference operator*() const { return my_counter; } + value_type operator[](difference_type i) const { return *(*this + i); } + + difference_type operator-(const counting_iterator& it) const { return my_counter - it.my_counter; } + + counting_iterator& operator+=(difference_type forward) { my_counter += forward; return *this; } + counting_iterator& operator-=(difference_type backward) { return *this += -backward; } + counting_iterator& operator++() { return *this += 1; } + counting_iterator& operator--() { return *this -= 1; } + + counting_iterator operator++(int) { + counting_iterator it(*this); + ++(*this); + return it; + } + counting_iterator operator--(int) { + counting_iterator it(*this); + --(*this); + return it; + } + + counting_iterator operator-(difference_type backward) const { return counting_iterator(my_counter - backward); } + counting_iterator operator+(difference_type forward) const { return counting_iterator(my_counter + forward); } + friend counting_iterator operator+(difference_type forward, const counting_iterator it) { return it + forward; } + + bool operator==(const counting_iterator& it) const { return *this - it == 0; } + bool operator!=(const counting_iterator& it) const { return !(*this == it); } + bool operator<(const counting_iterator& it) const {return *this - it < 0; } + bool operator>(const counting_iterator& it) const { return it < *this; } + bool operator<=(const counting_iterator& it) const { return !(*this > it); } + bool operator>=(const counting_iterator& it) const { return !(*this < it); } + +private: + IntType my_counter; +}; +} //namespace tbb + + +#include + +#include "internal/_template_helpers.h" // index_sequence, make_index_sequence + +namespace tbb { +namespace internal { + +template +struct tuple_util { + template + static void 
increment(TupleType& it, DifferenceType forward) { + std::get(it) += forward; + tuple_util::increment(it, forward); + } + template + static bool check_sync(const TupleType& it1, const TupleType& it2, DifferenceType val) { + if(std::get(it1) - std::get(it2) != val) + return false; + return tuple_util::check_sync(it1, it2, val); + } +}; + +template<> +struct tuple_util<0> { + template + static void increment(TupleType&, DifferenceType) {} + template + static bool check_sync(const TupleType&, const TupleType&, DifferenceType) { return true;} +}; + +template +struct make_references { + template + TupleReturnType operator()(const TupleType& t, tbb::internal::index_sequence) { + return std::tie( *std::get(t)... ); + } +}; + +// A simple wrapper over a tuple of references. +// The class is designed to hold a temporary tuple of reference +// after dereferencing a zip_iterator; in particular, it is needed +// to swap these rvalue tuples. Any other usage is not supported. +template +struct tuplewrapper : public std::tuple::value, T&&>::type...> { + // In the context of this class, T is a reference, so T&& is a "forwarding reference" + typedef std::tuple base_type; + // Construct from the result of std::tie + tuplewrapper(const base_type& in) : base_type(in) {} +#if __INTEL_COMPILER + // ICC cannot generate copy ctor & assignment + tuplewrapper(const tuplewrapper& rhs) : base_type(rhs) {} + tuplewrapper& operator=(const tuplewrapper& rhs) { + *this = base_type(rhs); + return *this; + } +#endif + // Assign any tuple convertible to std::tuple: *it = a_tuple; + template + tuplewrapper& operator=(const std::tuple& other) { + base_type::operator=(other); + return *this; + } +#if _LIBCPP_VERSION + // (Necessary for libc++ tuples) Convert to a tuple of values: v = *it; + operator std::tuple::type...>() { return base_type(*this); } +#endif + // Swap rvalue tuples: swap(*it1,*it2); + friend void swap(tuplewrapper&& a, tuplewrapper&& b) { + std::swap(a,b); + } +}; + +} //namespace internal + +template +class zip_iterator { + __TBB_STATIC_ASSERT(sizeof...(Types)>0, "Cannot instantiate zip_iterator with empty template parameter pack"); + static const std::size_t num_types = sizeof...(Types); + typedef std::tuple it_types; +public: + typedef typename std::make_signed::type difference_type; + typedef std::tuple::value_type...> value_type; +#if __INTEL_COMPILER && __INTEL_COMPILER < 1800 && _MSC_VER + typedef std::tuple::reference...> reference; +#else + typedef tbb::internal::tuplewrapper::reference...> reference; +#endif + typedef std::tuple::pointer...> pointer; + typedef std::random_access_iterator_tag iterator_category; + + zip_iterator() : my_it() {} + explicit zip_iterator(Types... 
args) : my_it(std::make_tuple(args...)) {} + zip_iterator(const zip_iterator& input) : my_it(input.my_it) {} + zip_iterator& operator=(const zip_iterator& input) { + my_it = input.my_it; + return *this; + } + + reference operator*() const { + return tbb::internal::make_references()(my_it, tbb::internal::make_index_sequence()); + } + reference operator[](difference_type i) const { return *(*this + i); } + + difference_type operator-(const zip_iterator& it) const { + __TBB_ASSERT(internal::tuple_util::check_sync(my_it, it.my_it, std::get<0>(my_it) - std::get<0>(it.my_it)), + "Components of zip_iterator are not synchronous"); + return std::get<0>(my_it) - std::get<0>(it.my_it); + } + + zip_iterator& operator+=(difference_type forward) { + internal::tuple_util::increment(my_it, forward); + return *this; + } + zip_iterator& operator-=(difference_type backward) { return *this += -backward; } + zip_iterator& operator++() { return *this += 1; } + zip_iterator& operator--() { return *this -= 1; } + + zip_iterator operator++(int) { + zip_iterator it(*this); + ++(*this); + return it; + } + zip_iterator operator--(int) { + zip_iterator it(*this); + --(*this); + return it; + } + + zip_iterator operator-(difference_type backward) const { + zip_iterator it(*this); + return it -= backward; + } + zip_iterator operator+(difference_type forward) const { + zip_iterator it(*this); + return it += forward; + } + friend zip_iterator operator+(difference_type forward, const zip_iterator& it) { return it + forward; } + + bool operator==(const zip_iterator& it) const { + return *this - it == 0; + } + it_types base() const { return my_it; } + + bool operator!=(const zip_iterator& it) const { return !(*this == it); } + bool operator<(const zip_iterator& it) const { return *this - it < 0; } + bool operator>(const zip_iterator& it) const { return it < *this; } + bool operator<=(const zip_iterator& it) const { return !(*this > it); } + bool operator>=(const zip_iterator& it) const { return !(*this < it); } +private: + it_types my_it; +}; + +template +zip_iterator make_zip_iterator(T... 
args) { return zip_iterator(args...); } + +template +class transform_iterator { +public: + typedef typename std::iterator_traits::value_type value_type; + typedef typename std::iterator_traits::difference_type difference_type; +#if __TBB_CPP17_INVOKE_RESULT_PRESENT + typedef typename std::invoke_result::reference>::type reference; +#else + typedef typename std::result_of::reference)>::type reference; +#endif + typedef typename std::iterator_traits::pointer pointer; + typedef typename std::random_access_iterator_tag iterator_category; + + transform_iterator(Iter it, UnaryFunc unary_func) : my_it(it), my_unary_func(unary_func) { + __TBB_STATIC_ASSERT((std::is_same::iterator_category, + std::random_access_iterator_tag>::value), "Random access iterator required."); + } + transform_iterator(const transform_iterator& input) : my_it(input.my_it), my_unary_func(input.my_unary_func) { } + transform_iterator& operator=(const transform_iterator& input) { + my_it = input.my_it; + return *this; + } + reference operator*() const { + return my_unary_func(*my_it); + } + reference operator[](difference_type i) const { + return *(*this + i); + } + transform_iterator& operator++() { + ++my_it; + return *this; + } + transform_iterator& operator--() { + --my_it; + return *this; + } + transform_iterator operator++(int) { + transform_iterator it(*this); + ++(*this); + return it; + } + transform_iterator operator--(int) { + transform_iterator it(*this); + --(*this); + return it; + } + transform_iterator operator+(difference_type forward) const { + return { my_it + forward, my_unary_func }; + } + transform_iterator operator-(difference_type backward) const { + return { my_it - backward, my_unary_func }; + } + transform_iterator& operator+=(difference_type forward) { + my_it += forward; + return *this; + } + transform_iterator& operator-=(difference_type backward) { + my_it -= backward; + return *this; + } + friend transform_iterator operator+(difference_type forward, const transform_iterator& it) { + return it + forward; + } + difference_type operator-(const transform_iterator& it) const { + return my_it - it.my_it; + } + bool operator==(const transform_iterator& it) const { return *this - it == 0; } + bool operator!=(const transform_iterator& it) const { return !(*this == it); } + bool operator<(const transform_iterator& it) const { return *this - it < 0; } + bool operator>(const transform_iterator& it) const { return it < *this; } + bool operator<=(const transform_iterator& it) const { return !(*this > it); } + bool operator>=(const transform_iterator& it) const { return !(*this < it); } + + Iter base() const { return my_it; } +private: + Iter my_it; + const UnaryFunc my_unary_func; +}; + +template +transform_iterator make_transform_iterator(Iter it, UnaryFunc unary_func) { + return transform_iterator(it, unary_func); +} + +} //namespace tbb + +#endif //__TBB_CPP11_PRESENT + +#endif /* __TBB_iterators_H */ diff --git a/ohos/arm64-v8a/include/tbb/machine/gcc_arm.h b/ohos/arm64-v8a/include/tbb/machine/gcc_arm.h new file mode 100644 index 00000000..284a3f9e --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/machine/gcc_arm.h @@ -0,0 +1,216 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* + Platform isolation layer for the ARMv7-a architecture. +*/ + +#ifndef __TBB_machine_H +#error Do not include this file directly; include tbb_machine.h instead +#endif + +#if __ARM_ARCH_7A__ + +#include +#include + +#define __TBB_WORDSIZE 4 + +// Traditionally ARM is little-endian. +// Note that, since only the layout of aligned 32-bit words is of interest, +// any apparent PDP-endianness of 32-bit words at half-word alignment or +// any little-endian ordering of big-endian 32-bit words in 64-bit quantities +// may be disregarded for this setting. +#if __BIG_ENDIAN__ || (defined(__BYTE_ORDER__) && __BYTE_ORDER__==__ORDER_BIG_ENDIAN__) + #define __TBB_ENDIANNESS __TBB_ENDIAN_BIG +#elif __LITTLE_ENDIAN__ || (defined(__BYTE_ORDER__) && __BYTE_ORDER__==__ORDER_LITTLE_ENDIAN__) + #define __TBB_ENDIANNESS __TBB_ENDIAN_LITTLE +#elif defined(__BYTE_ORDER__) + #define __TBB_ENDIANNESS __TBB_ENDIAN_UNSUPPORTED +#else + #define __TBB_ENDIANNESS __TBB_ENDIAN_DETECT +#endif + + +#define __TBB_compiler_fence() __asm__ __volatile__("": : :"memory") +#define __TBB_full_memory_fence() __asm__ __volatile__("dmb ish": : :"memory") +#define __TBB_control_consistency_helper() __TBB_full_memory_fence() +#define __TBB_acquire_consistency_helper() __TBB_full_memory_fence() +#define __TBB_release_consistency_helper() __TBB_full_memory_fence() + +//-------------------------------------------------- +// Compare and swap +//-------------------------------------------------- + +/** + * Atomic CAS for 32 bit values, if *ptr==comparand, then *ptr=value, returns *ptr + * @param ptr pointer to value in memory to be swapped with value if *ptr==comparand + * @param value value to assign *ptr to if *ptr==comparand + * @param comparand value to compare with *ptr + * @return value originally in memory at ptr, regardless of success +*/ +static inline int32_t __TBB_machine_cmpswp4(volatile void *ptr, int32_t value, int32_t comparand ) +{ + int32_t oldval, res; + + __TBB_full_memory_fence(); + + do { + __asm__ __volatile__( + "ldrex %1, [%3]\n" + "mov %0, #0\n" + "cmp %1, %4\n" + "it eq\n" + "strexeq %0, %5, [%3]\n" + : "=&r" (res), "=&r" (oldval), "+Qo" (*(volatile int32_t*)ptr) + : "r" ((volatile int32_t *)ptr), "Ir" (comparand), "r" (value) + : "cc"); + } while (res); + + __TBB_full_memory_fence(); + + return oldval; +} + +/** + * Atomic CAS for 64 bit values, if *ptr==comparand, then *ptr=value, returns *ptr + * @param ptr pointer to value in memory to be swapped with value if *ptr==comparand + * @param value value to assign *ptr to if *ptr==comparand + * @param comparand value to compare with *ptr + * @return value originally in memory at ptr, regardless of success + */ +static inline int64_t __TBB_machine_cmpswp8(volatile void *ptr, int64_t value, int64_t comparand ) +{ + int64_t oldval; + int32_t res; + + __TBB_full_memory_fence(); + + do { + __asm__ __volatile__( + "mov %0, #0\n" + "ldrexd %1, %H1, [%3]\n" + "cmp %1, %4\n" + "it eq\n" + "cmpeq %H1, %H4\n" + "it eq\n" + "strexdeq %0, %5, %H5, [%3]" + : "=&r" (res), "=&r" (oldval), "+Qo" (*(volatile int64_t*)ptr) + : "r" ((volatile int64_t *)ptr), "r" (comparand), 
"r" (value) + : "cc"); + } while (res); + + __TBB_full_memory_fence(); + + return oldval; +} + +static inline int32_t __TBB_machine_fetchadd4(volatile void* ptr, int32_t addend) +{ + unsigned long tmp; + int32_t result, tmp2; + + __TBB_full_memory_fence(); + + __asm__ __volatile__( +"1: ldrex %0, [%4]\n" +" add %3, %0, %5\n" +" strex %1, %3, [%4]\n" +" cmp %1, #0\n" +" bne 1b\n" + : "=&r" (result), "=&r" (tmp), "+Qo" (*(volatile int32_t*)ptr), "=&r"(tmp2) + : "r" ((volatile int32_t *)ptr), "Ir" (addend) + : "cc"); + + __TBB_full_memory_fence(); + + return result; +} + +static inline int64_t __TBB_machine_fetchadd8(volatile void *ptr, int64_t addend) +{ + unsigned long tmp; + int64_t result, tmp2; + + __TBB_full_memory_fence(); + + __asm__ __volatile__( +"1: ldrexd %0, %H0, [%4]\n" +" adds %3, %0, %5\n" +" adc %H3, %H0, %H5\n" +" strexd %1, %3, %H3, [%4]\n" +" cmp %1, #0\n" +" bne 1b" + : "=&r" (result), "=&r" (tmp), "+Qo" (*(volatile int64_t*)ptr), "=&r"(tmp2) + : "r" ((volatile int64_t *)ptr), "r" (addend) + : "cc"); + + + __TBB_full_memory_fence(); + + return result; +} + +namespace tbb { +namespace internal { + template + struct machine_load_store_relaxed { + static inline T load ( const volatile T& location ) { + const T value = location; + + /* + * An extra memory barrier is required for errata #761319 + * Please see http://infocenter.arm.com/help/topic/com.arm.doc.uan0004a + */ + __TBB_acquire_consistency_helper(); + return value; + } + + static inline void store ( volatile T& location, T value ) { + location = value; + } + }; +}} // namespaces internal, tbb + +// Machine specific atomic operations + +#define __TBB_CompareAndSwap4(P,V,C) __TBB_machine_cmpswp4(P,V,C) +#define __TBB_CompareAndSwap8(P,V,C) __TBB_machine_cmpswp8(P,V,C) + +// Use generics for some things +#define __TBB_USE_GENERIC_PART_WORD_CAS 1 +#define __TBB_USE_GENERIC_PART_WORD_FETCH_ADD 1 +#define __TBB_USE_GENERIC_PART_WORD_FETCH_STORE 1 +#define __TBB_USE_GENERIC_FETCH_STORE 1 +#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE 1 +#define __TBB_USE_GENERIC_DWORD_LOAD_STORE 1 +#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1 +#elif defined __aarch64__ +// Generic gcc implementations are fine for ARMv8-a except __TBB_PAUSE. +#include "gcc_generic.h" +#else +#error compilation requires an ARMv7-a or ARMv8-a architecture. +#endif // __ARM_ARCH_7A__ + +inline void __TBB_machine_pause (int32_t delay) +{ + while(delay>0) + { + __asm__ __volatile__("yield" ::: "memory"); + delay--; + } +} +#define __TBB_Pause(V) __TBB_machine_pause(V) diff --git a/ohos/arm64-v8a/include/tbb/machine/gcc_generic.h b/ohos/arm64-v8a/include/tbb/machine/gcc_generic.h new file mode 100644 index 00000000..afc6f8c6 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/machine/gcc_generic.h @@ -0,0 +1,233 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +#if !defined(__TBB_machine_H) || defined(__TBB_machine_gcc_generic_H) +#error Do not #include this internal file directly; use public TBB headers instead. +#endif + +#define __TBB_machine_gcc_generic_H + +#include +#include + +#define __TBB_WORDSIZE __SIZEOF_POINTER__ + +#if __TBB_GCC_64BIT_ATOMIC_BUILTINS_BROKEN + #define __TBB_64BIT_ATOMICS 0 +#endif + +/** FPU control setting not available for non-Intel architectures on Android **/ +#if (__ANDROID__ || __OHOS__) && __TBB_generic_arch + #define __TBB_CPU_CTL_ENV_PRESENT 0 +#endif + +// __BYTE_ORDER__ is used in accordance with http://gcc.gnu.org/onlinedocs/cpp/Common-Predefined-Macros.html, +// but __BIG_ENDIAN__ or __LITTLE_ENDIAN__ may be more commonly found instead. +#if __BIG_ENDIAN__ || (defined(__BYTE_ORDER__) && __BYTE_ORDER__==__ORDER_BIG_ENDIAN__) + #define __TBB_ENDIANNESS __TBB_ENDIAN_BIG +#elif __LITTLE_ENDIAN__ || (defined(__BYTE_ORDER__) && __BYTE_ORDER__==__ORDER_LITTLE_ENDIAN__) + #define __TBB_ENDIANNESS __TBB_ENDIAN_LITTLE +#elif defined(__BYTE_ORDER__) + #define __TBB_ENDIANNESS __TBB_ENDIAN_UNSUPPORTED +#else + #define __TBB_ENDIANNESS __TBB_ENDIAN_DETECT +#endif + +#if __TBB_GCC_VERSION < 40700 +// Use __sync_* builtins + +/** As this generic implementation has absolutely no information about underlying + hardware, its performance most likely will be sub-optimal because of full memory + fence usages where a more lightweight synchronization means (or none at all) + could suffice. Thus if you use this header to enable TBB on a new platform, + consider forking it and relaxing below helpers as appropriate. **/ +#define __TBB_acquire_consistency_helper() __sync_synchronize() +#define __TBB_release_consistency_helper() __sync_synchronize() +#define __TBB_full_memory_fence() __sync_synchronize() +#define __TBB_control_consistency_helper() __sync_synchronize() + +#define __TBB_MACHINE_DEFINE_ATOMICS(S,T) \ +inline T __TBB_machine_cmpswp##S( volatile void *ptr, T value, T comparand ) { \ + return __sync_val_compare_and_swap(reinterpret_cast(ptr),comparand,value); \ +} \ +inline T __TBB_machine_fetchadd##S( volatile void *ptr, T value ) { \ + return __sync_fetch_and_add(reinterpret_cast(ptr),value); \ +} + +#define __TBB_USE_GENERIC_FETCH_STORE 1 + +#else +// __TBB_GCC_VERSION >= 40700; use __atomic_* builtins available since gcc 4.7 + +#define __TBB_compiler_fence() __asm__ __volatile__("": : :"memory") +// Acquire and release fence intrinsics in GCC might miss compiler fence. +// Adding it at both sides of an intrinsic, as we do not know what reordering can be made. 
+#define __TBB_acquire_consistency_helper() __TBB_compiler_fence(); __atomic_thread_fence(__ATOMIC_ACQUIRE); __TBB_compiler_fence() +#define __TBB_release_consistency_helper() __TBB_compiler_fence(); __atomic_thread_fence(__ATOMIC_RELEASE); __TBB_compiler_fence() +#define __TBB_full_memory_fence() __atomic_thread_fence(__ATOMIC_SEQ_CST) +#define __TBB_control_consistency_helper() __TBB_acquire_consistency_helper() + +#define __TBB_MACHINE_DEFINE_ATOMICS(S,T) \ +inline T __TBB_machine_cmpswp##S( volatile void *ptr, T value, T comparand ) { \ + (void)__atomic_compare_exchange_n(reinterpret_cast(ptr), &comparand, value, \ + false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); \ + return comparand; \ +} \ +inline T __TBB_machine_fetchadd##S( volatile void *ptr, T value ) { \ + return __atomic_fetch_add(reinterpret_cast(ptr), value, __ATOMIC_SEQ_CST); \ +} \ +inline T __TBB_machine_fetchstore##S( volatile void *ptr, T value ) { \ + return __atomic_exchange_n(reinterpret_cast(ptr), value, __ATOMIC_SEQ_CST); \ +} + +#endif // __TBB_GCC_VERSION < 40700 + +__TBB_MACHINE_DEFINE_ATOMICS(1,int8_t) +__TBB_MACHINE_DEFINE_ATOMICS(2,int16_t) +__TBB_MACHINE_DEFINE_ATOMICS(4,int32_t) +__TBB_MACHINE_DEFINE_ATOMICS(8,int64_t) + +#undef __TBB_MACHINE_DEFINE_ATOMICS + +typedef unsigned char __TBB_Flag; +typedef __TBB_atomic __TBB_Flag __TBB_atomic_flag; + +#if __TBB_GCC_VERSION < 40700 +// Use __sync_* builtins + +// Use generic machine_load_store functions if there are no builtin atomics +#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE 1 +#define __TBB_USE_GENERIC_RELAXED_LOAD_STORE 1 +#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1 + +static inline void __TBB_machine_or( volatile void *ptr, uintptr_t addend ) { + __sync_fetch_and_or(reinterpret_cast(ptr),addend); +} + +static inline void __TBB_machine_and( volatile void *ptr, uintptr_t addend ) { + __sync_fetch_and_and(reinterpret_cast(ptr),addend); +} + +inline bool __TBB_machine_try_lock_byte( __TBB_atomic_flag &flag ) { + return __sync_lock_test_and_set(&flag,1)==0; +} + +inline void __TBB_machine_unlock_byte( __TBB_atomic_flag &flag ) { + __sync_lock_release(&flag); +} + +#else +// __TBB_GCC_VERSION >= 40700; use __atomic_* builtins available since gcc 4.7 + +static inline void __TBB_machine_or( volatile void *ptr, uintptr_t addend ) { + __atomic_fetch_or(reinterpret_cast(ptr),addend,__ATOMIC_SEQ_CST); +} + +static inline void __TBB_machine_and( volatile void *ptr, uintptr_t addend ) { + __atomic_fetch_and(reinterpret_cast(ptr),addend,__ATOMIC_SEQ_CST); +} + +inline bool __TBB_machine_try_lock_byte( __TBB_atomic_flag &flag ) { + return !__atomic_test_and_set(&flag,__ATOMIC_ACQUIRE); +} + +inline void __TBB_machine_unlock_byte( __TBB_atomic_flag &flag ) { + __atomic_clear(&flag,__ATOMIC_RELEASE); +} + +namespace tbb { namespace internal { + +/** GCC atomic operation intrinsics might miss compiler fence. + Adding it after load-with-acquire, before store-with-release, and + on both sides of sequentially consistent operations is sufficient for correctness. 
**/ + +template +inline T __TBB_machine_atomic_load( const volatile T& location) { + if (MemOrder == __ATOMIC_SEQ_CST) __TBB_compiler_fence(); + T value = __atomic_load_n(&location, MemOrder); + if (MemOrder != __ATOMIC_RELAXED) __TBB_compiler_fence(); + return value; +} + +template +inline void __TBB_machine_atomic_store( volatile T& location, T value) { + if (MemOrder != __ATOMIC_RELAXED) __TBB_compiler_fence(); + __atomic_store_n(&location, value, MemOrder); + if (MemOrder == __ATOMIC_SEQ_CST) __TBB_compiler_fence(); +} + +template +struct machine_load_store { + static T load_with_acquire ( const volatile T& location ) { + return __TBB_machine_atomic_load(location); + } + static void store_with_release ( volatile T &location, T value ) { + __TBB_machine_atomic_store(location, value); + } +}; + +template +struct machine_load_store_relaxed { + static inline T load ( const volatile T& location ) { + return __TBB_machine_atomic_load(location); + } + static inline void store ( volatile T& location, T value ) { + __TBB_machine_atomic_store(location, value); + } +}; + +template +struct machine_load_store_seq_cst { + static T load ( const volatile T& location ) { + return __TBB_machine_atomic_load(location); + } + static void store ( volatile T &location, T value ) { + __TBB_machine_atomic_store(location, value); + } +}; + +}} // namespace tbb::internal + +#endif // __TBB_GCC_VERSION < 40700 + +// Machine specific atomic operations +#define __TBB_AtomicOR(P,V) __TBB_machine_or(P,V) +#define __TBB_AtomicAND(P,V) __TBB_machine_and(P,V) + +#define __TBB_TryLockByte __TBB_machine_try_lock_byte +#define __TBB_UnlockByte __TBB_machine_unlock_byte + +// __builtin_clz counts the number of leading zeroes +namespace tbb{ namespace internal { namespace gcc_builtins { + inline int clz(unsigned int x){ return __builtin_clz(x); } + inline int clz(unsigned long int x){ return __builtin_clzl(x); } + inline int clz(unsigned long long int x){ return __builtin_clzll(x); } +}}} +// logarithm is the index of the most significant non-zero bit +static inline intptr_t __TBB_machine_lg( uintptr_t x ) { + // If P is a power of 2 and x +static inline intptr_t __TBB_machine_lg( T x ) { + __TBB_ASSERT(x>0, "The logarithm of a non-positive value is undefined."); + uintptr_t j, i = x; + __asm__("bsr %1,%0" : "=r"(j) : "r"(i)); + return j; +} +#define __TBB_Log2(V) __TBB_machine_lg(V) +#endif /* !__TBB_Log2 */ + +#ifndef __TBB_Pause +//TODO: check if raising a ratio of pause instructions to loop control instructions +//(via e.g. loop unrolling) gives any benefit for HT. E.g, the current implementation +//does about 2 CPU-consuming instructions for every pause instruction. Perhaps for +//high pause counts it should use an unrolled loop to raise the ratio, and thus free +//up more integer cycles for the other hyperthread. On the other hand, if the loop is +//unrolled too far, it won't fit in the core's loop cache, and thus take away +//instruction decode slots from the other hyperthread. 
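// A minimal sketch of the unrolled variant discussed in the TODO above (illustrative only; the
// helper name and the unroll factor of 4 are assumptions, not part of TBB): issuing several pause
// instructions per loop iteration raises the pause-to-loop-control-instruction ratio.
//     static inline void __TBB_machine_pause_unrolled( int32_t delay ) {
//         for ( int32_t i = 0; i < delay; i += 4 )
//             __asm__ __volatile__( "pause; pause; pause; pause;" );
//     }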
+ +//TODO: check if use of gcc __builtin_ia32_pause intrinsic gives a "some how" better performing code +static inline void __TBB_machine_pause( int32_t delay ) { + for (int32_t i = 0; i < delay; i++) { + __asm__ __volatile__("pause;"); + } + return; +} +#define __TBB_Pause(V) __TBB_machine_pause(V) +#endif /* !__TBB_Pause */ + +namespace tbb { namespace internal { typedef uint64_t machine_tsc_t; } } +static inline tbb::internal::machine_tsc_t __TBB_machine_time_stamp() { +#if __INTEL_COMPILER + return _rdtsc(); +#else + tbb::internal::uint32_t hi, lo; + __asm__ __volatile__("rdtsc" : "=d"(hi), "=a"(lo)); + return (tbb::internal::machine_tsc_t( hi ) << 32) | lo; +#endif +} +#define __TBB_time_stamp() __TBB_machine_time_stamp() + +// API to retrieve/update FPU control setting +#ifndef __TBB_CPU_CTL_ENV_PRESENT +#define __TBB_CPU_CTL_ENV_PRESENT 1 +namespace tbb { +namespace internal { +class cpu_ctl_env { +private: + int mxcsr; + short x87cw; + static const int MXCSR_CONTROL_MASK = ~0x3f; /* all except last six status bits */ +public: + bool operator!=( const cpu_ctl_env& ctl ) const { return mxcsr != ctl.mxcsr || x87cw != ctl.x87cw; } + void get_env() { + #if __TBB_ICC_12_0_INL_ASM_FSTCW_BROKEN + cpu_ctl_env loc_ctl; + __asm__ __volatile__ ( + "stmxcsr %0\n\t" + "fstcw %1" + : "=m"(loc_ctl.mxcsr), "=m"(loc_ctl.x87cw) + ); + *this = loc_ctl; + #else + __asm__ __volatile__ ( + "stmxcsr %0\n\t" + "fstcw %1" + : "=m"(mxcsr), "=m"(x87cw) + ); + #endif + mxcsr &= MXCSR_CONTROL_MASK; + } + void set_env() const { + __asm__ __volatile__ ( + "ldmxcsr %0\n\t" + "fldcw %1" + : : "m"(mxcsr), "m"(x87cw) + ); + } +}; +} // namespace internal +} // namespace tbb +#endif /* !__TBB_CPU_CTL_ENV_PRESENT */ + +#include "gcc_itsx.h" + +#endif /* __TBB_machine_gcc_ia32_common_H */ diff --git a/ohos/arm64-v8a/include/tbb/machine/gcc_itsx.h b/ohos/arm64-v8a/include/tbb/machine/gcc_itsx.h new file mode 100644 index 00000000..5e93a202 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/machine/gcc_itsx.h @@ -0,0 +1,119 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#if !defined(__TBB_machine_H) || defined(__TBB_machine_gcc_itsx_H) +#error Do not #include this internal file directly; use public TBB headers instead. 
+#endif + +#define __TBB_machine_gcc_itsx_H + +#define __TBB_OP_XACQUIRE 0xF2 +#define __TBB_OP_XRELEASE 0xF3 +#define __TBB_OP_LOCK 0xF0 + +#define __TBB_STRINGIZE_INTERNAL(arg) #arg +#define __TBB_STRINGIZE(arg) __TBB_STRINGIZE_INTERNAL(arg) + +#ifdef __TBB_x86_64 +#define __TBB_r_out "=r" +#else +#define __TBB_r_out "=q" +#endif + +inline static uint8_t __TBB_machine_try_lock_elided( volatile uint8_t* lk ) +{ + uint8_t value = 1; + __asm__ volatile (".byte " __TBB_STRINGIZE(__TBB_OP_XACQUIRE)"; lock; xchgb %0, %1;" + : __TBB_r_out(value), "=m"(*lk) : "0"(value), "m"(*lk) : "memory" ); + return uint8_t(value^1); +} + +inline static void __TBB_machine_try_lock_elided_cancel() +{ + // 'pause' instruction aborts HLE/RTM transactions + __asm__ volatile ("pause\n" : : : "memory" ); +} + +inline static void __TBB_machine_unlock_elided( volatile uint8_t* lk ) +{ + __asm__ volatile (".byte " __TBB_STRINGIZE(__TBB_OP_XRELEASE)"; movb $0, %0" + : "=m"(*lk) : "m"(*lk) : "memory" ); +} + +#if __TBB_TSX_INTRINSICS_PRESENT +#include + +#define __TBB_machine_is_in_transaction _xtest +#define __TBB_machine_begin_transaction _xbegin +#define __TBB_machine_end_transaction _xend +#define __TBB_machine_transaction_conflict_abort() _xabort(0xff) + +#else + +/*! + * Check if the instruction is executed in a transaction or not + */ +inline static bool __TBB_machine_is_in_transaction() +{ + int8_t res = 0; +#if __TBB_x86_32 + __asm__ volatile (".byte 0x0F; .byte 0x01; .byte 0xD6;\n" + "setz %0" : "=q"(res) : : "memory" ); +#else + __asm__ volatile (".byte 0x0F; .byte 0x01; .byte 0xD6;\n" + "setz %0" : "=r"(res) : : "memory" ); +#endif + return res==0; +} + +/*! + * Enter speculative execution mode. + * @return -1 on success + * abort cause ( or 0 ) on abort + */ +inline static uint32_t __TBB_machine_begin_transaction() +{ + uint32_t res = ~uint32_t(0); // success value + __asm__ volatile ("1: .byte 0xC7; .byte 0xF8;\n" // XBEGIN + " .long 2f-1b-6\n" // 2f-1b == difference in addresses of start + // of XBEGIN and the MOVL + // 2f - 1b - 6 == that difference minus the size of the + // XBEGIN instruction. This is the abort offset to + // 2: below. + " jmp 3f\n" // success (leave -1 in res) + "2: movl %%eax,%0\n" // store failure code in res + "3:" + :"=r"(res):"0"(res):"memory","%eax"); + return res; +} + +/*! + * Attempt to commit/end transaction + */ +inline static void __TBB_machine_end_transaction() +{ + __asm__ volatile (".byte 0x0F; .byte 0x01; .byte 0xD5" :::"memory"); // XEND +} + +/* + * aborts with code 0xFF (lock already held) + */ +inline static void __TBB_machine_transaction_conflict_abort() +{ + __asm__ volatile (".byte 0xC6; .byte 0xF8; .byte 0xFF" :::"memory"); +} + +#endif /* __TBB_TSX_INTRINSICS_PRESENT */ diff --git a/ohos/arm64-v8a/include/tbb/machine/ibm_aix51.h b/ohos/arm64-v8a/include/tbb/machine/ibm_aix51.h new file mode 100644 index 00000000..c8246848 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/machine/ibm_aix51.h @@ -0,0 +1,66 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +// TODO: revise by comparing with mac_ppc.h + +#if !defined(__TBB_machine_H) || defined(__TBB_machine_ibm_aix51_H) +#error Do not #include this internal file directly; use public TBB headers instead. +#endif + +#define __TBB_machine_ibm_aix51_H + +#define __TBB_WORDSIZE 8 +#define __TBB_ENDIANNESS __TBB_ENDIAN_BIG // assumption based on operating system + +#include +#include +#include + +extern "C" { +int32_t __TBB_machine_cas_32 (volatile void* ptr, int32_t value, int32_t comparand); +int64_t __TBB_machine_cas_64 (volatile void* ptr, int64_t value, int64_t comparand); +void __TBB_machine_flush (); +void __TBB_machine_lwsync (); +void __TBB_machine_isync (); +} + +// Mapping of old entry point names retained for the sake of backward binary compatibility +#define __TBB_machine_cmpswp4 __TBB_machine_cas_32 +#define __TBB_machine_cmpswp8 __TBB_machine_cas_64 + +#define __TBB_Yield() sched_yield() + +#define __TBB_USE_GENERIC_PART_WORD_CAS 1 +#define __TBB_USE_GENERIC_FETCH_ADD 1 +#define __TBB_USE_GENERIC_FETCH_STORE 1 +#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE 1 +#define __TBB_USE_GENERIC_RELAXED_LOAD_STORE 1 +#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1 + +#if __GNUC__ + #define __TBB_control_consistency_helper() __asm__ __volatile__( "isync": : :"memory") + #define __TBB_acquire_consistency_helper() __asm__ __volatile__("lwsync": : :"memory") + #define __TBB_release_consistency_helper() __asm__ __volatile__("lwsync": : :"memory") + #define __TBB_full_memory_fence() __asm__ __volatile__( "sync": : :"memory") +#else + // IBM C++ Compiler does not support inline assembly + // TODO: Since XL 9.0 or earlier GCC syntax is supported. Replace with more + // lightweight implementation (like in mac_ppc.h) + #define __TBB_control_consistency_helper() __TBB_machine_isync () + #define __TBB_acquire_consistency_helper() __TBB_machine_lwsync () + #define __TBB_release_consistency_helper() __TBB_machine_lwsync () + #define __TBB_full_memory_fence() __TBB_machine_flush () +#endif diff --git a/ohos/arm64-v8a/include/tbb/machine/icc_generic.h b/ohos/arm64-v8a/include/tbb/machine/icc_generic.h new file mode 100644 index 00000000..c4675b8e --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/machine/icc_generic.h @@ -0,0 +1,258 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#if !defined(__TBB_machine_H) || defined(__TBB_machine_icc_generic_H) +#error Do not #include this internal file directly; use public TBB headers instead. +#endif + +#if ! __TBB_ICC_BUILTIN_ATOMICS_PRESENT + #error "Intel(R) C++ Compiler of at least 12.0 version is needed to use ICC intrinsics port" +#endif + +#define __TBB_machine_icc_generic_H + +//ICC mimics the "native" target compiler +#if _MSC_VER + #include "msvc_ia32_common.h" +#else + #include "gcc_ia32_common.h" +#endif + +//TODO: Make __TBB_WORDSIZE macro optional for ICC intrinsics port. 
+//As compiler intrinsics are used for all the operations it is possible to do. + +#if __TBB_x86_32 + #define __TBB_WORDSIZE 4 +#else + #define __TBB_WORDSIZE 8 +#endif +#define __TBB_ENDIANNESS __TBB_ENDIAN_LITTLE + +//__TBB_compiler_fence() defined just in case, as it seems not to be used on its own anywhere else +#ifndef __TBB_compiler_fence +#if _MSC_VER + //TODO: any way to use same intrinsics on windows and linux? + #pragma intrinsic(_ReadWriteBarrier) + #define __TBB_compiler_fence() _ReadWriteBarrier() +#else + #define __TBB_compiler_fence() __asm__ __volatile__("": : :"memory") +#endif +#endif + +#ifndef __TBB_full_memory_fence +#if _MSC_VER + //TODO: any way to use same intrinsics on windows and linux? + #pragma intrinsic(_mm_mfence) + #define __TBB_full_memory_fence() _mm_mfence() +#else + #define __TBB_full_memory_fence() __asm__ __volatile__("mfence": : :"memory") +#endif +#endif + +#ifndef __TBB_control_consistency_helper +#define __TBB_control_consistency_helper() __TBB_compiler_fence() +#endif + +namespace tbb { namespace internal { +//TODO: is there any way to reuse definition of memory_order enum from ICC instead of copy paste. +//however it seems unlikely that ICC will silently change exact enum values, as they are defined +//in the ISO exactly like this. +//TODO: add test that exact values of the enum are same as in the ISO C++11 +typedef enum memory_order { + memory_order_relaxed, memory_order_consume, memory_order_acquire, + memory_order_release, memory_order_acq_rel, memory_order_seq_cst +} memory_order; + +namespace icc_intrinsics_port { + template + T convert_argument(T value){ + return value; + } + //The overload below is needed to have explicit conversion of pointer to void* in argument list. + //compiler bug? + //TODO: add according broken macro and recheck with ICC 13.0 if the overload is still needed + template + void* convert_argument(T* value){ + return (void*)value; + } +} +//TODO: code below is a bit repetitive, consider simplifying it +template +struct machine_load_store { + static T load_with_acquire ( const volatile T& location ) { + return __atomic_load_explicit(&location, memory_order_acquire); + } + static void store_with_release ( volatile T &location, T value ) { + __atomic_store_explicit(&location, icc_intrinsics_port::convert_argument(value), memory_order_release); + } +}; + +template +struct machine_load_store_relaxed { + static inline T load ( const T& location ) { + return __atomic_load_explicit(&location, memory_order_relaxed); + } + static inline void store ( T& location, T value ) { + __atomic_store_explicit(&location, icc_intrinsics_port::convert_argument(value), memory_order_relaxed); + } +}; + +template +struct machine_load_store_seq_cst { + static T load ( const volatile T& location ) { + return __atomic_load_explicit(&location, memory_order_seq_cst); + } + + static void store ( volatile T &location, T value ) { + __atomic_store_explicit(&location, value, memory_order_seq_cst); + } +}; + +}} // namespace tbb::internal + +namespace tbb{ namespace internal { namespace icc_intrinsics_port{ + typedef enum memory_order_map { + relaxed = memory_order_relaxed, + acquire = memory_order_acquire, + release = memory_order_release, + full_fence= memory_order_seq_cst + } memory_order_map; +}}}// namespace tbb::internal + +#define __TBB_MACHINE_DEFINE_ATOMICS(S,T,M) \ +inline T __TBB_machine_cmpswp##S##M( volatile void *ptr, T value, T comparand ) { \ + __atomic_compare_exchange_strong_explicit( \ + (T*)ptr \ + ,&comparand \ + ,value \ + , 
tbb::internal::icc_intrinsics_port::M \ + , tbb::internal::icc_intrinsics_port::M); \ + return comparand; \ +} \ + \ +inline T __TBB_machine_fetchstore##S##M(volatile void *ptr, T value) { \ + return __atomic_exchange_explicit((T*)ptr, value, tbb::internal::icc_intrinsics_port::M); \ +} \ + \ +inline T __TBB_machine_fetchadd##S##M(volatile void *ptr, T value) { \ + return __atomic_fetch_add_explicit((T*)ptr, value, tbb::internal::icc_intrinsics_port::M); \ +} \ + +__TBB_MACHINE_DEFINE_ATOMICS(1,tbb::internal::int8_t, full_fence) +__TBB_MACHINE_DEFINE_ATOMICS(1,tbb::internal::int8_t, acquire) +__TBB_MACHINE_DEFINE_ATOMICS(1,tbb::internal::int8_t, release) +__TBB_MACHINE_DEFINE_ATOMICS(1,tbb::internal::int8_t, relaxed) + +__TBB_MACHINE_DEFINE_ATOMICS(2,tbb::internal::int16_t, full_fence) +__TBB_MACHINE_DEFINE_ATOMICS(2,tbb::internal::int16_t, acquire) +__TBB_MACHINE_DEFINE_ATOMICS(2,tbb::internal::int16_t, release) +__TBB_MACHINE_DEFINE_ATOMICS(2,tbb::internal::int16_t, relaxed) + +__TBB_MACHINE_DEFINE_ATOMICS(4,tbb::internal::int32_t, full_fence) +__TBB_MACHINE_DEFINE_ATOMICS(4,tbb::internal::int32_t, acquire) +__TBB_MACHINE_DEFINE_ATOMICS(4,tbb::internal::int32_t, release) +__TBB_MACHINE_DEFINE_ATOMICS(4,tbb::internal::int32_t, relaxed) + +__TBB_MACHINE_DEFINE_ATOMICS(8,tbb::internal::int64_t, full_fence) +__TBB_MACHINE_DEFINE_ATOMICS(8,tbb::internal::int64_t, acquire) +__TBB_MACHINE_DEFINE_ATOMICS(8,tbb::internal::int64_t, release) +__TBB_MACHINE_DEFINE_ATOMICS(8,tbb::internal::int64_t, relaxed) + + +#undef __TBB_MACHINE_DEFINE_ATOMICS + +#define __TBB_USE_FENCED_ATOMICS 1 + +namespace tbb { namespace internal { +#if __TBB_FORCE_64BIT_ALIGNMENT_BROKEN +__TBB_MACHINE_DEFINE_LOAD8_GENERIC_FENCED(full_fence) +__TBB_MACHINE_DEFINE_STORE8_GENERIC_FENCED(full_fence) + +__TBB_MACHINE_DEFINE_LOAD8_GENERIC_FENCED(acquire) +__TBB_MACHINE_DEFINE_STORE8_GENERIC_FENCED(release) + +__TBB_MACHINE_DEFINE_LOAD8_GENERIC_FENCED(relaxed) +__TBB_MACHINE_DEFINE_STORE8_GENERIC_FENCED(relaxed) + +template +struct machine_load_store { + static T load_with_acquire ( const volatile T& location ) { + if( tbb::internal::is_aligned(&location,8)) { + return __atomic_load_explicit(&location, memory_order_acquire); + } else { + return __TBB_machine_generic_load8acquire(&location); + } + } + static void store_with_release ( volatile T &location, T value ) { + if( tbb::internal::is_aligned(&location,8)) { + __atomic_store_explicit(&location, icc_intrinsics_port::convert_argument(value), memory_order_release); + } else { + return __TBB_machine_generic_store8release(&location,value); + } + } +}; + +template +struct machine_load_store_relaxed { + static T load( const volatile T& location ) { + if( tbb::internal::is_aligned(&location,8)) { + return __atomic_load_explicit(&location, memory_order_relaxed); + } else { + return __TBB_machine_generic_load8relaxed(&location); + } + } + static void store( volatile T &location, T value ) { + if( tbb::internal::is_aligned(&location,8)) { + __atomic_store_explicit(&location, icc_intrinsics_port::convert_argument(value), memory_order_relaxed); + } else { + return __TBB_machine_generic_store8relaxed(&location,value); + } + } +}; + +template +struct machine_load_store_seq_cst { + static T load ( const volatile T& location ) { + if( tbb::internal::is_aligned(&location,8)) { + return __atomic_load_explicit(&location, memory_order_seq_cst); + } else { + return __TBB_machine_generic_load8full_fence(&location); + } + + } + + static void store ( volatile T &location, T value ) { + if( 
tbb::internal::is_aligned(&location,8)) { + __atomic_store_explicit(&location, value, memory_order_seq_cst); + } else { + return __TBB_machine_generic_store8full_fence(&location,value); + } + + } +}; + +#endif +}} // namespace tbb::internal +template +inline void __TBB_machine_OR( T *operand, T addend ) { + __atomic_fetch_or_explicit(operand, addend, tbb::internal::memory_order_seq_cst); +} + +template +inline void __TBB_machine_AND( T *operand, T addend ) { + __atomic_fetch_and_explicit(operand, addend, tbb::internal::memory_order_seq_cst); +} + diff --git a/ohos/arm64-v8a/include/tbb/machine/linux_common.h b/ohos/arm64-v8a/include/tbb/machine/linux_common.h new file mode 100644 index 00000000..9dc6c813 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/machine/linux_common.h @@ -0,0 +1,105 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_machine_H +#error Do not #include this internal file directly; use public TBB headers instead. +#endif + +#include +#define __TBB_Yield() sched_yield() + +#include +/* Futex definitions */ +#include + +#if defined(SYS_futex) +/* This header file is included for Linux and some other systems that may support futexes.*/ + +#define __TBB_USE_FUTEX 1 + +#if defined(__has_include) +#define __TBB_has_include __has_include +#else +#define __TBB_has_include(x) 0 +#endif + +/* +If available, use typical headers where futex API is defined. While Linux and OpenBSD +are known to provide such headers, other systems might have them as well. +*/ +#if defined(__linux__) || __TBB_has_include() +#include +#elif defined(__OpenBSD__) || __TBB_has_include() +#include +#endif + +#include +#include + +/* +Some systems might not define the macros or use different names. In such case we expect +the actual parameter values to match Linux: 0 for wait, 1 for wake. +*/ +#if defined(FUTEX_WAIT_PRIVATE) +#define __TBB_FUTEX_WAIT FUTEX_WAIT_PRIVATE +#elif defined(FUTEX_WAIT) +#define __TBB_FUTEX_WAIT FUTEX_WAIT +#else +#define __TBB_FUTEX_WAIT 0 +#endif + +#if defined(FUTEX_WAKE_PRIVATE) +#define __TBB_FUTEX_WAKE FUTEX_WAKE_PRIVATE +#elif defined(FUTEX_WAKE) +#define __TBB_FUTEX_WAKE FUTEX_WAKE +#else +#define __TBB_FUTEX_WAKE 1 +#endif + +#ifndef __TBB_ASSERT +#error machine specific headers must be included after tbb_stddef.h +#endif + +namespace tbb { + +namespace internal { + +inline int futex_wait( void *futex, int comparand ) { + int r = syscall( SYS_futex,futex,__TBB_FUTEX_WAIT,comparand,NULL,NULL,0 ); +#if TBB_USE_ASSERT + int e = errno; + __TBB_ASSERT( r==0||r==EWOULDBLOCK||(r==-1&&(e==EAGAIN||e==EINTR)), "futex_wait failed." ); +#endif /* TBB_USE_ASSERT */ + return r; +} + +inline int futex_wakeup_one( void *futex ) { + int r = ::syscall( SYS_futex,futex,__TBB_FUTEX_WAKE,1,NULL,NULL,0 ); + __TBB_ASSERT( r==0||r==1, "futex_wakeup_one: more than one thread woken up?" 
); + return r; +} + +inline int futex_wakeup_all( void *futex ) { + int r = ::syscall( SYS_futex,futex,__TBB_FUTEX_WAKE,INT_MAX,NULL,NULL,0 ); + __TBB_ASSERT( r>=0, "futex_wakeup_all: error in waking up threads" ); + return r; +} + +} /* namespace internal */ + +} /* namespace tbb */ + +#endif /* SYS_futex */ diff --git a/ohos/arm64-v8a/include/tbb/machine/linux_ia32.h b/ohos/arm64-v8a/include/tbb/machine/linux_ia32.h new file mode 100644 index 00000000..3942d8bf --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/machine/linux_ia32.h @@ -0,0 +1,228 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#if !defined(__TBB_machine_H) || defined(__TBB_machine_linux_ia32_H) +#error Do not #include this internal file directly; use public TBB headers instead. +#endif + +#define __TBB_machine_linux_ia32_H + +#include +#include "gcc_ia32_common.h" + +#define __TBB_WORDSIZE 4 +#define __TBB_ENDIANNESS __TBB_ENDIAN_LITTLE + +#define __TBB_compiler_fence() __asm__ __volatile__("": : :"memory") +#define __TBB_control_consistency_helper() __TBB_compiler_fence() +#define __TBB_acquire_consistency_helper() __TBB_compiler_fence() +#define __TBB_release_consistency_helper() __TBB_compiler_fence() +#define __TBB_full_memory_fence() __asm__ __volatile__("mfence": : :"memory") + +#if __TBB_ICC_ASM_VOLATILE_BROKEN +#define __TBB_VOLATILE +#else +#define __TBB_VOLATILE volatile +#endif + +#define __TBB_MACHINE_DEFINE_ATOMICS(S,T,X,R) \ +static inline T __TBB_machine_cmpswp##S (volatile void *ptr, T value, T comparand ) \ +{ \ + T result; \ + \ + __asm__ __volatile__("lock\ncmpxchg" X " %2,%1" \ + : "=a"(result), "=m"(*(__TBB_VOLATILE T*)ptr) \ + : "q"(value), "0"(comparand), "m"(*(__TBB_VOLATILE T*)ptr) \ + : "memory"); \ + return result; \ +} \ + \ +static inline T __TBB_machine_fetchadd##S(volatile void *ptr, T addend) \ +{ \ + T result; \ + __asm__ __volatile__("lock\nxadd" X " %0,%1" \ + : R (result), "=m"(*(__TBB_VOLATILE T*)ptr) \ + : "0"(addend), "m"(*(__TBB_VOLATILE T*)ptr) \ + : "memory"); \ + return result; \ +} \ + \ +static inline T __TBB_machine_fetchstore##S(volatile void *ptr, T value) \ +{ \ + T result; \ + __asm__ __volatile__("lock\nxchg" X " %0,%1" \ + : R (result), "=m"(*(__TBB_VOLATILE T*)ptr) \ + : "0"(value), "m"(*(__TBB_VOLATILE T*)ptr) \ + : "memory"); \ + return result; \ +} \ + +__TBB_MACHINE_DEFINE_ATOMICS(1,int8_t,"","=q") +__TBB_MACHINE_DEFINE_ATOMICS(2,int16_t,"","=r") +__TBB_MACHINE_DEFINE_ATOMICS(4,int32_t,"l","=r") + +#if __INTEL_COMPILER +#pragma warning( push ) +// reference to EBX in a function requiring stack alignment +#pragma warning( disable: 998 ) +#endif + +#if __TBB_GCC_CAS8_BUILTIN_INLINING_BROKEN +#define __TBB_IA32_CAS8_NOINLINE __attribute__ ((noinline)) +#else +#define __TBB_IA32_CAS8_NOINLINE +#endif + +static inline __TBB_IA32_CAS8_NOINLINE int64_t __TBB_machine_cmpswp8 (volatile void *ptr, int64_t value, int64_t comparand ) { +//TODO: remove the extra part of condition once __TBB_GCC_BUILTIN_ATOMICS_PRESENT is lowered to gcc 
version 4.1.2 +#if (__TBB_GCC_BUILTIN_ATOMICS_PRESENT || (__TBB_GCC_VERSION >= 40102)) && !__TBB_GCC_64BIT_ATOMIC_BUILTINS_BROKEN + return __sync_val_compare_and_swap( reinterpret_cast(ptr), comparand, value ); +#else /* !__TBB_GCC_BUILTIN_ATOMICS_PRESENT */ + //TODO: look like ICC 13.0 has some issues with this code, investigate it more deeply + int64_t result; + union { + int64_t i64; + int32_t i32[2]; + }; + i64 = value; +#if __PIC__ + /* compiling position-independent code */ + // EBX register preserved for compliance with position-independent code rules on IA32 + int32_t tmp; + __asm__ __volatile__ ( + "movl %%ebx,%2\n\t" + "movl %5,%%ebx\n\t" +#if __GNUC__==3 + "lock\n\t cmpxchg8b %1\n\t" +#else + "lock\n\t cmpxchg8b (%3)\n\t" +#endif + "movl %2,%%ebx" + : "=A"(result) + , "=m"(*(__TBB_VOLATILE int64_t *)ptr) + , "=m"(tmp) +#if __GNUC__==3 + : "m"(*(__TBB_VOLATILE int64_t *)ptr) +#else + : "SD"(ptr) +#endif + , "0"(comparand) + , "m"(i32[0]), "c"(i32[1]) + : "memory" +#if __INTEL_COMPILER + ,"ebx" +#endif + ); +#else /* !__PIC__ */ + __asm__ __volatile__ ( + "lock\n\t cmpxchg8b %1\n\t" + : "=A"(result), "=m"(*(__TBB_VOLATILE int64_t *)ptr) + : "m"(*(__TBB_VOLATILE int64_t *)ptr) + , "0"(comparand) + , "b"(i32[0]), "c"(i32[1]) + : "memory" + ); +#endif /* __PIC__ */ + return result; +#endif /* !__TBB_GCC_BUILTIN_ATOMICS_PRESENT */ +} + +#undef __TBB_IA32_CAS8_NOINLINE + +#if __INTEL_COMPILER +#pragma warning( pop ) +#endif // warning 998 is back + +static inline void __TBB_machine_or( volatile void *ptr, uint32_t addend ) { + __asm__ __volatile__("lock\norl %1,%0" : "=m"(*(__TBB_VOLATILE uint32_t *)ptr) : "r"(addend), "m"(*(__TBB_VOLATILE uint32_t *)ptr) : "memory"); +} + +static inline void __TBB_machine_and( volatile void *ptr, uint32_t addend ) { + __asm__ __volatile__("lock\nandl %1,%0" : "=m"(*(__TBB_VOLATILE uint32_t *)ptr) : "r"(addend), "m"(*(__TBB_VOLATILE uint32_t *)ptr) : "memory"); +} + +//TODO: Check if it possible and profitable for IA-32 architecture on (Linux* and Windows*) +//to use of 64-bit load/store via floating point registers together with full fence +//for sequentially consistent load/store, instead of CAS. + +#if __clang__ +#define __TBB_fildq "fildll" +#define __TBB_fistpq "fistpll" +#else +#define __TBB_fildq "fildq" +#define __TBB_fistpq "fistpq" +#endif + +static inline int64_t __TBB_machine_aligned_load8 (const volatile void *ptr) { + __TBB_ASSERT(tbb::internal::is_aligned(ptr,8),"__TBB_machine_aligned_load8 should be used with 8 byte aligned locations only \n"); + int64_t result; + __asm__ __volatile__ ( __TBB_fildq " %1\n\t" + __TBB_fistpq " %0" : "=m"(result) : "m"(*(const __TBB_VOLATILE uint64_t*)ptr) : "memory" ); + return result; +} + +static inline void __TBB_machine_aligned_store8 (volatile void *ptr, int64_t value ) { + __TBB_ASSERT(tbb::internal::is_aligned(ptr,8),"__TBB_machine_aligned_store8 should be used with 8 byte aligned locations only \n"); + // Aligned store + __asm__ __volatile__ ( __TBB_fildq " %1\n\t" + __TBB_fistpq " %0" : "=m"(*(__TBB_VOLATILE int64_t*)ptr) : "m"(value) : "memory" ); +} + +static inline int64_t __TBB_machine_load8 (const volatile void *ptr) { +#if __TBB_FORCE_64BIT_ALIGNMENT_BROKEN + if( tbb::internal::is_aligned(ptr,8)) { +#endif + return __TBB_machine_aligned_load8(ptr); +#if __TBB_FORCE_64BIT_ALIGNMENT_BROKEN + } else { + // Unaligned load + return __TBB_machine_cmpswp8(const_cast(ptr),0,0); + } +#endif +} + +//! 
Handles misaligned 8-byte store +/** Defined in tbb_misc.cpp */ +extern "C" void __TBB_machine_store8_slow( volatile void *ptr, int64_t value ); +extern "C" void __TBB_machine_store8_slow_perf_warning( volatile void *ptr ); + +static inline void __TBB_machine_store8(volatile void *ptr, int64_t value) { +#if __TBB_FORCE_64BIT_ALIGNMENT_BROKEN + if( tbb::internal::is_aligned(ptr,8)) { +#endif + __TBB_machine_aligned_store8(ptr,value); +#if __TBB_FORCE_64BIT_ALIGNMENT_BROKEN + } else { + // Unaligned store +#if TBB_USE_PERFORMANCE_WARNINGS + __TBB_machine_store8_slow_perf_warning(ptr); +#endif /* TBB_USE_PERFORMANCE_WARNINGS */ + __TBB_machine_store8_slow(ptr,value); + } +#endif +} + +// Machine specific atomic operations +#define __TBB_AtomicOR(P,V) __TBB_machine_or(P,V) +#define __TBB_AtomicAND(P,V) __TBB_machine_and(P,V) + +#define __TBB_USE_GENERIC_DWORD_FETCH_ADD 1 +#define __TBB_USE_GENERIC_DWORD_FETCH_STORE 1 +#define __TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE 1 +#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE 1 +#define __TBB_USE_GENERIC_RELAXED_LOAD_STORE 1 +#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1 + diff --git a/ohos/arm64-v8a/include/tbb/machine/linux_ia64.h b/ohos/arm64-v8a/include/tbb/machine/linux_ia64.h new file mode 100644 index 00000000..28b2bc41 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/machine/linux_ia64.h @@ -0,0 +1,177 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#if !defined(__TBB_machine_H) || defined(__TBB_machine_linux_ia64_H) +#error Do not #include this internal file directly; use public TBB headers instead. +#endif + +#define __TBB_machine_linux_ia64_H + +#include +#include + +#define __TBB_WORDSIZE 8 +#define __TBB_ENDIANNESS __TBB_ENDIAN_LITTLE + +#if __INTEL_COMPILER + #define __TBB_compiler_fence() + #define __TBB_control_consistency_helper() __TBB_compiler_fence() + #define __TBB_acquire_consistency_helper() + #define __TBB_release_consistency_helper() + #define __TBB_full_memory_fence() __mf() +#else + #define __TBB_compiler_fence() __asm__ __volatile__("": : :"memory") + #define __TBB_control_consistency_helper() __TBB_compiler_fence() + // Even though GCC imbues volatile loads with acquire semantics, it sometimes moves + // loads over the acquire fence. The following helpers stop such incorrect code motion. + #define __TBB_acquire_consistency_helper() __TBB_compiler_fence() + #define __TBB_release_consistency_helper() __TBB_compiler_fence() + #define __TBB_full_memory_fence() __asm__ __volatile__("mf": : :"memory") +#endif /* !__INTEL_COMPILER */ + +// Most of the functions will be in a .s file +// TODO: revise dynamic_link, memory pools and etc. if the library dependency is removed. 
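+
+// Editorial sketch (hypothetical helpers, not part of the original TBB
+// sources): how the acquire/release helpers defined above are meant to be
+// paired.  A writer publishes data before setting a flag; a reader checks the
+// flag and only then touches the data.  On this port both helpers are plain
+// compiler fences, in line with the note above about volatile accesses.
+static inline void example_publish( volatile int& flag, int& payload, int value ) {
+    payload = value;                      // write the payload first
+    __TBB_release_consistency_helper();   // keep the payload write above the flag write
+    flag = 1;                             // then make it visible
+}
+static inline bool example_try_consume( const volatile int& flag, const int& payload, int& out ) {
+    if ( !flag ) return false;
+    __TBB_acquire_consistency_helper();   // keep the payload read below the flag read
+    out = payload;
+    return true;
+}
+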
+ +extern "C" { + int8_t __TBB_machine_fetchadd1__TBB_full_fence (volatile void *ptr, int8_t addend); + int8_t __TBB_machine_fetchadd1acquire(volatile void *ptr, int8_t addend); + int8_t __TBB_machine_fetchadd1release(volatile void *ptr, int8_t addend); + + int16_t __TBB_machine_fetchadd2__TBB_full_fence (volatile void *ptr, int16_t addend); + int16_t __TBB_machine_fetchadd2acquire(volatile void *ptr, int16_t addend); + int16_t __TBB_machine_fetchadd2release(volatile void *ptr, int16_t addend); + + int32_t __TBB_machine_fetchadd4__TBB_full_fence (volatile void *ptr, int32_t value); + int32_t __TBB_machine_fetchadd4acquire(volatile void *ptr, int32_t addend); + int32_t __TBB_machine_fetchadd4release(volatile void *ptr, int32_t addend); + + int64_t __TBB_machine_fetchadd8__TBB_full_fence (volatile void *ptr, int64_t value); + int64_t __TBB_machine_fetchadd8acquire(volatile void *ptr, int64_t addend); + int64_t __TBB_machine_fetchadd8release(volatile void *ptr, int64_t addend); + + int8_t __TBB_machine_fetchstore1__TBB_full_fence (volatile void *ptr, int8_t value); + int8_t __TBB_machine_fetchstore1acquire(volatile void *ptr, int8_t value); + int8_t __TBB_machine_fetchstore1release(volatile void *ptr, int8_t value); + + int16_t __TBB_machine_fetchstore2__TBB_full_fence (volatile void *ptr, int16_t value); + int16_t __TBB_machine_fetchstore2acquire(volatile void *ptr, int16_t value); + int16_t __TBB_machine_fetchstore2release(volatile void *ptr, int16_t value); + + int32_t __TBB_machine_fetchstore4__TBB_full_fence (volatile void *ptr, int32_t value); + int32_t __TBB_machine_fetchstore4acquire(volatile void *ptr, int32_t value); + int32_t __TBB_machine_fetchstore4release(volatile void *ptr, int32_t value); + + int64_t __TBB_machine_fetchstore8__TBB_full_fence (volatile void *ptr, int64_t value); + int64_t __TBB_machine_fetchstore8acquire(volatile void *ptr, int64_t value); + int64_t __TBB_machine_fetchstore8release(volatile void *ptr, int64_t value); + + int8_t __TBB_machine_cmpswp1__TBB_full_fence (volatile void *ptr, int8_t value, int8_t comparand); + int8_t __TBB_machine_cmpswp1acquire(volatile void *ptr, int8_t value, int8_t comparand); + int8_t __TBB_machine_cmpswp1release(volatile void *ptr, int8_t value, int8_t comparand); + + int16_t __TBB_machine_cmpswp2__TBB_full_fence (volatile void *ptr, int16_t value, int16_t comparand); + int16_t __TBB_machine_cmpswp2acquire(volatile void *ptr, int16_t value, int16_t comparand); + int16_t __TBB_machine_cmpswp2release(volatile void *ptr, int16_t value, int16_t comparand); + + int32_t __TBB_machine_cmpswp4__TBB_full_fence (volatile void *ptr, int32_t value, int32_t comparand); + int32_t __TBB_machine_cmpswp4acquire(volatile void *ptr, int32_t value, int32_t comparand); + int32_t __TBB_machine_cmpswp4release(volatile void *ptr, int32_t value, int32_t comparand); + + int64_t __TBB_machine_cmpswp8__TBB_full_fence (volatile void *ptr, int64_t value, int64_t comparand); + int64_t __TBB_machine_cmpswp8acquire(volatile void *ptr, int64_t value, int64_t comparand); + int64_t __TBB_machine_cmpswp8release(volatile void *ptr, int64_t value, int64_t comparand); + + int64_t __TBB_machine_lg(uint64_t value); + void __TBB_machine_pause(int32_t delay); + bool __TBB_machine_trylockbyte( volatile unsigned char &ptr ); + int64_t __TBB_machine_lockbyte( volatile unsigned char &ptr ); + + //! Retrieves the current RSE backing store pointer. IA64 specific. 
+ void* __TBB_get_bsp(); + + int32_t __TBB_machine_load1_relaxed(const void *ptr); + int32_t __TBB_machine_load2_relaxed(const void *ptr); + int32_t __TBB_machine_load4_relaxed(const void *ptr); + int64_t __TBB_machine_load8_relaxed(const void *ptr); + + void __TBB_machine_store1_relaxed(void *ptr, int32_t value); + void __TBB_machine_store2_relaxed(void *ptr, int32_t value); + void __TBB_machine_store4_relaxed(void *ptr, int32_t value); + void __TBB_machine_store8_relaxed(void *ptr, int64_t value); +} // extern "C" + +// Mapping old entry points to the names corresponding to the new full_fence identifier. +#define __TBB_machine_fetchadd1full_fence __TBB_machine_fetchadd1__TBB_full_fence +#define __TBB_machine_fetchadd2full_fence __TBB_machine_fetchadd2__TBB_full_fence +#define __TBB_machine_fetchadd4full_fence __TBB_machine_fetchadd4__TBB_full_fence +#define __TBB_machine_fetchadd8full_fence __TBB_machine_fetchadd8__TBB_full_fence +#define __TBB_machine_fetchstore1full_fence __TBB_machine_fetchstore1__TBB_full_fence +#define __TBB_machine_fetchstore2full_fence __TBB_machine_fetchstore2__TBB_full_fence +#define __TBB_machine_fetchstore4full_fence __TBB_machine_fetchstore4__TBB_full_fence +#define __TBB_machine_fetchstore8full_fence __TBB_machine_fetchstore8__TBB_full_fence +#define __TBB_machine_cmpswp1full_fence __TBB_machine_cmpswp1__TBB_full_fence +#define __TBB_machine_cmpswp2full_fence __TBB_machine_cmpswp2__TBB_full_fence +#define __TBB_machine_cmpswp4full_fence __TBB_machine_cmpswp4__TBB_full_fence +#define __TBB_machine_cmpswp8full_fence __TBB_machine_cmpswp8__TBB_full_fence + +// Mapping relaxed operations to the entry points implementing them. +/** On IA64 RMW operations implicitly have acquire semantics. Thus one cannot + actually have completely relaxed RMW operation here. 
**/ +#define __TBB_machine_fetchadd1relaxed __TBB_machine_fetchadd1acquire +#define __TBB_machine_fetchadd2relaxed __TBB_machine_fetchadd2acquire +#define __TBB_machine_fetchadd4relaxed __TBB_machine_fetchadd4acquire +#define __TBB_machine_fetchadd8relaxed __TBB_machine_fetchadd8acquire +#define __TBB_machine_fetchstore1relaxed __TBB_machine_fetchstore1acquire +#define __TBB_machine_fetchstore2relaxed __TBB_machine_fetchstore2acquire +#define __TBB_machine_fetchstore4relaxed __TBB_machine_fetchstore4acquire +#define __TBB_machine_fetchstore8relaxed __TBB_machine_fetchstore8acquire +#define __TBB_machine_cmpswp1relaxed __TBB_machine_cmpswp1acquire +#define __TBB_machine_cmpswp2relaxed __TBB_machine_cmpswp2acquire +#define __TBB_machine_cmpswp4relaxed __TBB_machine_cmpswp4acquire +#define __TBB_machine_cmpswp8relaxed __TBB_machine_cmpswp8acquire + +#define __TBB_MACHINE_DEFINE_ATOMICS(S,V) \ + template \ + struct machine_load_store_relaxed { \ + static inline T load ( const T& location ) { \ + return (T)__TBB_machine_load##S##_relaxed(&location); \ + } \ + static inline void store ( T& location, T value ) { \ + __TBB_machine_store##S##_relaxed(&location, (V)value); \ + } \ + } + +namespace tbb { +namespace internal { + __TBB_MACHINE_DEFINE_ATOMICS(1,int8_t); + __TBB_MACHINE_DEFINE_ATOMICS(2,int16_t); + __TBB_MACHINE_DEFINE_ATOMICS(4,int32_t); + __TBB_MACHINE_DEFINE_ATOMICS(8,int64_t); +}} // namespaces internal, tbb + +#undef __TBB_MACHINE_DEFINE_ATOMICS + +#define __TBB_USE_FENCED_ATOMICS 1 +#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE 1 +#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1 + +// Definition of Lock functions +#define __TBB_TryLockByte(P) __TBB_machine_trylockbyte(P) +#define __TBB_LockByte(P) __TBB_machine_lockbyte(P) + +// Definition of other utility functions +#define __TBB_Pause(V) __TBB_machine_pause(V) +#define __TBB_Log2(V) __TBB_machine_lg(V) diff --git a/ohos/arm64-v8a/include/tbb/machine/linux_intel64.h b/ohos/arm64-v8a/include/tbb/machine/linux_intel64.h new file mode 100644 index 00000000..907ead52 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/machine/linux_intel64.h @@ -0,0 +1,92 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#if !defined(__TBB_machine_H) || defined(__TBB_machine_linux_intel64_H) +#error Do not #include this internal file directly; use public TBB headers instead. 
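+
+// Editorial note (sketch, not part of the original TBB sources): a consequence
+// of the relaxed->acquire mapping at the end of linux_ia64.h above is that a
+// "relaxed" RMW on that port still pays for acquire ordering.  For example, a
+// counter bump that only needs atomicity,
+//
+//     __TBB_machine_fetchadd4relaxed( &hit_count, 1 );   // hypothetical call site
+//
+// dispatches to __TBB_machine_fetchadd4acquire; the extra ordering is harmless
+// for correctness but cannot be avoided on IA64.
+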
+#endif + +#define __TBB_machine_linux_intel64_H + +#include +#include "gcc_ia32_common.h" + +#define __TBB_WORDSIZE 8 +#define __TBB_ENDIANNESS __TBB_ENDIAN_LITTLE + +#define __TBB_compiler_fence() __asm__ __volatile__("": : :"memory") +#define __TBB_control_consistency_helper() __TBB_compiler_fence() +#define __TBB_acquire_consistency_helper() __TBB_compiler_fence() +#define __TBB_release_consistency_helper() __TBB_compiler_fence() + +#ifndef __TBB_full_memory_fence +#define __TBB_full_memory_fence() __asm__ __volatile__("mfence": : :"memory") +#endif + +#define __TBB_MACHINE_DEFINE_ATOMICS(S,T,X) \ +static inline T __TBB_machine_cmpswp##S (volatile void *ptr, T value, T comparand ) \ +{ \ + T result; \ + \ + __asm__ __volatile__("lock\ncmpxchg" X " %2,%1" \ + : "=a"(result), "=m"(*(volatile T*)ptr) \ + : "q"(value), "0"(comparand), "m"(*(volatile T*)ptr) \ + : "memory"); \ + return result; \ +} \ + \ +static inline T __TBB_machine_fetchadd##S(volatile void *ptr, T addend) \ +{ \ + T result; \ + __asm__ __volatile__("lock\nxadd" X " %0,%1" \ + : "=r"(result),"=m"(*(volatile T*)ptr) \ + : "0"(addend), "m"(*(volatile T*)ptr) \ + : "memory"); \ + return result; \ +} \ + \ +static inline T __TBB_machine_fetchstore##S(volatile void *ptr, T value) \ +{ \ + T result; \ + __asm__ __volatile__("lock\nxchg" X " %0,%1" \ + : "=r"(result),"=m"(*(volatile T*)ptr) \ + : "0"(value), "m"(*(volatile T*)ptr) \ + : "memory"); \ + return result; \ +} \ + +__TBB_MACHINE_DEFINE_ATOMICS(1,int8_t,"") +__TBB_MACHINE_DEFINE_ATOMICS(2,int16_t,"") +__TBB_MACHINE_DEFINE_ATOMICS(4,int32_t,"") +__TBB_MACHINE_DEFINE_ATOMICS(8,int64_t,"q") + +#undef __TBB_MACHINE_DEFINE_ATOMICS + +static inline void __TBB_machine_or( volatile void *ptr, uint64_t value ) { + __asm__ __volatile__("lock\norq %1,%0" : "=m"(*(volatile uint64_t*)ptr) : "r"(value), "m"(*(volatile uint64_t*)ptr) : "memory"); +} + +static inline void __TBB_machine_and( volatile void *ptr, uint64_t value ) { + __asm__ __volatile__("lock\nandq %1,%0" : "=m"(*(volatile uint64_t*)ptr) : "r"(value), "m"(*(volatile uint64_t*)ptr) : "memory"); +} + +#define __TBB_AtomicOR(P,V) __TBB_machine_or(P,V) +#define __TBB_AtomicAND(P,V) __TBB_machine_and(P,V) + +#define __TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE 1 +#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE 1 +#define __TBB_USE_GENERIC_RELAXED_LOAD_STORE 1 +#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1 + diff --git a/ohos/arm64-v8a/include/tbb/machine/mac_ppc.h b/ohos/arm64-v8a/include/tbb/machine/mac_ppc.h new file mode 100644 index 00000000..2eb5ad3a --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/machine/mac_ppc.h @@ -0,0 +1,309 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#if !defined(__TBB_machine_H) || defined(__TBB_machine_gcc_power_H) +#error Do not #include this internal file directly; use public TBB headers instead. +#endif + +#define __TBB_machine_gcc_power_H + +#include +#include + +// TODO: rename to gcc_power.h? 
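+
+// Editorial note (sketch, not part of the original TBB sources): the
+// lwarx/stwcx. sequences defined below implement the usual compare-and-swap
+// contract.  Ignoring atomicity, that contract is simply
+//
+//     old = *ptr;  if ( old == comparand ) *ptr = value;  return old;
+//
+// with the load/store pair made atomic by the reservation (the "bne- 0b"
+// retries when the reservation is lost), "sync" ordering prior accesses, and
+// the trailing "isync" giving the operation acquire semantics.
+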
+// This file is for Power Architecture with compilers supporting GNU inline-assembler syntax (currently GNU g++ and IBM XL). +// Note that XL V9.0 (sometimes?) has trouble dealing with empty input and/or clobber lists, so they should be avoided. + +#if __powerpc64__ || __ppc64__ + // IBM XL documents __powerpc64__ (and __PPC64__). + // Apple documents __ppc64__ (with __ppc__ only on 32-bit). + #define __TBB_WORDSIZE 8 +#else + #define __TBB_WORDSIZE 4 +#endif + +// Traditionally Power Architecture is big-endian. +// Little-endian could be just an address manipulation (compatibility with TBB not verified), +// or normal little-endian (on more recent systems). Embedded PowerPC systems may support +// page-specific endianness, but then one endianness must be hidden from TBB so that it still sees only one. +#if __BIG_ENDIAN__ || (defined(__BYTE_ORDER__) && __BYTE_ORDER__==__ORDER_BIG_ENDIAN__) + #define __TBB_ENDIANNESS __TBB_ENDIAN_BIG +#elif __LITTLE_ENDIAN__ || (defined(__BYTE_ORDER__) && __BYTE_ORDER__==__ORDER_LITTLE_ENDIAN__) + #define __TBB_ENDIANNESS __TBB_ENDIAN_LITTLE +#elif defined(__BYTE_ORDER__) + #define __TBB_ENDIANNESS __TBB_ENDIAN_UNSUPPORTED +#else + #define __TBB_ENDIANNESS __TBB_ENDIAN_DETECT +#endif + +// On Power Architecture, (lock-free) 64-bit atomics require 64-bit hardware: +#if __TBB_WORDSIZE==8 + // Do not change the following definition, because TBB itself will use 64-bit atomics in 64-bit builds. + #define __TBB_64BIT_ATOMICS 1 +#elif __bgp__ + // Do not change the following definition, because this is known 32-bit hardware. + #define __TBB_64BIT_ATOMICS 0 +#else + // To enable 64-bit atomics in 32-bit builds, set the value below to 1 instead of 0. + // You must make certain that the program will only use them on actual 64-bit hardware + // (which typically means that the entire program is only executed on such hardware), + // because their implementation involves machine instructions that are illegal elsewhere. + // The setting can be chosen independently per compilation unit, + // which also means that TBB itself does not need to be rebuilt. + // Alternatively (but only for the current architecture and TBB version), + // override the default as a predefined macro when invoking the compiler. + #ifndef __TBB_64BIT_ATOMICS + #define __TBB_64BIT_ATOMICS 0 + #endif +#endif + +inline int32_t __TBB_machine_cmpswp4 (volatile void *ptr, int32_t value, int32_t comparand ) +{ + int32_t result; + + __asm__ __volatile__("sync\n" + "0:\n\t" + "lwarx %[res],0,%[ptr]\n\t" /* load w/ reservation */ + "cmpw %[res],%[cmp]\n\t" /* compare against comparand */ + "bne- 1f\n\t" /* exit if not same */ + "stwcx. %[val],0,%[ptr]\n\t" /* store new value */ + "bne- 0b\n" /* retry if reservation lost */ + "1:\n\t" /* the exit */ + "isync" + : [res]"=&r"(result) + , "+m"(* (int32_t*) ptr) /* redundant with "memory" */ + : [ptr]"r"(ptr) + , [val]"r"(value) + , [cmp]"r"(comparand) + : "memory" /* compiler full fence */ + , "cr0" /* clobbered by cmp and/or stwcx. */ + ); + return result; +} + +#if __TBB_WORDSIZE==8 + +inline int64_t __TBB_machine_cmpswp8 (volatile void *ptr, int64_t value, int64_t comparand ) +{ + int64_t result; + __asm__ __volatile__("sync\n" + "0:\n\t" + "ldarx %[res],0,%[ptr]\n\t" /* load w/ reservation */ + "cmpd %[res],%[cmp]\n\t" /* compare against comparand */ + "bne- 1f\n\t" /* exit if not same */ + "stdcx. 
%[val],0,%[ptr]\n\t" /* store new value */ + "bne- 0b\n" /* retry if reservation lost */ + "1:\n\t" /* the exit */ + "isync" + : [res]"=&r"(result) + , "+m"(* (int64_t*) ptr) /* redundant with "memory" */ + : [ptr]"r"(ptr) + , [val]"r"(value) + , [cmp]"r"(comparand) + : "memory" /* compiler full fence */ + , "cr0" /* clobbered by cmp and/or stdcx. */ + ); + return result; +} + +#elif __TBB_64BIT_ATOMICS /* && __TBB_WORDSIZE==4 */ + +inline int64_t __TBB_machine_cmpswp8 (volatile void *ptr, int64_t value, int64_t comparand ) +{ + int64_t result; + int64_t value_register, comparand_register, result_register; // dummy variables to allocate registers + __asm__ __volatile__("sync\n\t" + "ld %[val],%[valm]\n\t" + "ld %[cmp],%[cmpm]\n" + "0:\n\t" + "ldarx %[res],0,%[ptr]\n\t" /* load w/ reservation */ + "cmpd %[res],%[cmp]\n\t" /* compare against comparand */ + "bne- 1f\n\t" /* exit if not same */ + "stdcx. %[val],0,%[ptr]\n\t" /* store new value */ + "bne- 0b\n" /* retry if reservation lost */ + "1:\n\t" /* the exit */ + "std %[res],%[resm]\n\t" + "isync" + : [resm]"=m"(result) + , [res] "=&r"( result_register) + , [val] "=&r"( value_register) + , [cmp] "=&r"(comparand_register) + , "+m"(* (int64_t*) ptr) /* redundant with "memory" */ + : [ptr] "r"(ptr) + , [valm]"m"(value) + , [cmpm]"m"(comparand) + : "memory" /* compiler full fence */ + , "cr0" /* clobbered by cmpd and/or stdcx. */ + ); + return result; +} + +#endif /* __TBB_WORDSIZE==4 && __TBB_64BIT_ATOMICS */ + +#define __TBB_MACHINE_DEFINE_LOAD_STORE(S,ldx,stx,cmpx) \ + template \ + struct machine_load_store { \ + static inline T load_with_acquire(const volatile T& location) { \ + T result; \ + __asm__ __volatile__(ldx " %[res],0(%[ptr])\n" \ + "0:\n\t" \ + cmpx " %[res],%[res]\n\t" \ + "bne- 0b\n\t" \ + "isync" \ + : [res]"=r"(result) \ + : [ptr]"b"(&location) /* cannot use register 0 here */ \ + , "m"(location) /* redundant with "memory" */ \ + : "memory" /* compiler acquire fence */ \ + , "cr0" /* clobbered by cmpw/cmpd */); \ + return result; \ + } \ + static inline void store_with_release(volatile T &location, T value) { \ + __asm__ __volatile__("lwsync\n\t" \ + stx " %[val],0(%[ptr])" \ + : "=m"(location) /* redundant with "memory" */ \ + : [ptr]"b"(&location) /* cannot use register 0 here */ \ + , [val]"r"(value) \ + : "memory"/*compiler release fence*/ /*(cr0 not affected)*/); \ + } \ + }; \ + \ + template \ + struct machine_load_store_relaxed { \ + static inline T load (const __TBB_atomic T& location) { \ + T result; \ + __asm__ __volatile__(ldx " %[res],0(%[ptr])" \ + : [res]"=r"(result) \ + : [ptr]"b"(&location) /* cannot use register 0 here */ \ + , "m"(location) \ + ); /*(no compiler fence)*/ /*(cr0 not affected)*/ \ + return result; \ + } \ + static inline void store (__TBB_atomic T &location, T value) { \ + __asm__ __volatile__(stx " %[val],0(%[ptr])" \ + : "=m"(location) \ + : [ptr]"b"(&location) /* cannot use register 0 here */ \ + , [val]"r"(value) \ + ); /*(no compiler fence)*/ /*(cr0 not affected)*/ \ + } \ + }; + +namespace tbb { +namespace internal { + __TBB_MACHINE_DEFINE_LOAD_STORE(1,"lbz","stb","cmpw") + __TBB_MACHINE_DEFINE_LOAD_STORE(2,"lhz","sth","cmpw") + __TBB_MACHINE_DEFINE_LOAD_STORE(4,"lwz","stw","cmpw") + +#if __TBB_WORDSIZE==8 + + __TBB_MACHINE_DEFINE_LOAD_STORE(8,"ld" ,"std","cmpd") + +#elif __TBB_64BIT_ATOMICS /* && __TBB_WORDSIZE==4 */ + + template + struct machine_load_store { + static inline T load_with_acquire(const volatile T& location) { + T result; + T result_register; // dummy variable to 
allocate a register + __asm__ __volatile__("ld %[res],0(%[ptr])\n\t" + "std %[res],%[resm]\n" + "0:\n\t" + "cmpd %[res],%[res]\n\t" + "bne- 0b\n\t" + "isync" + : [resm]"=m"(result) + , [res]"=&r"(result_register) + : [ptr]"b"(&location) /* cannot use register 0 here */ + , "m"(location) /* redundant with "memory" */ + : "memory" /* compiler acquire fence */ + , "cr0" /* clobbered by cmpd */); + return result; + } + + static inline void store_with_release(volatile T &location, T value) { + T value_register; // dummy variable to allocate a register + __asm__ __volatile__("lwsync\n\t" + "ld %[val],%[valm]\n\t" + "std %[val],0(%[ptr])" + : "=m"(location) /* redundant with "memory" */ + , [val]"=&r"(value_register) + : [ptr]"b"(&location) /* cannot use register 0 here */ + , [valm]"m"(value) + : "memory"/*compiler release fence*/ /*(cr0 not affected)*/); + } + }; + + struct machine_load_store_relaxed { + static inline T load (const volatile T& location) { + T result; + T result_register; // dummy variable to allocate a register + __asm__ __volatile__("ld %[res],0(%[ptr])\n\t" + "std %[res],%[resm]" + : [resm]"=m"(result) + , [res]"=&r"(result_register) + : [ptr]"b"(&location) /* cannot use register 0 here */ + , "m"(location) + ); /*(no compiler fence)*/ /*(cr0 not affected)*/ + return result; + } + + static inline void store (volatile T &location, T value) { + T value_register; // dummy variable to allocate a register + __asm__ __volatile__("ld %[val],%[valm]\n\t" + "std %[val],0(%[ptr])" + : "=m"(location) + , [val]"=&r"(value_register) + : [ptr]"b"(&location) /* cannot use register 0 here */ + , [valm]"m"(value) + ); /*(no compiler fence)*/ /*(cr0 not affected)*/ + } + }; + #define __TBB_machine_load_store_relaxed_8 + +#endif /* __TBB_WORDSIZE==4 && __TBB_64BIT_ATOMICS */ + +}} // namespaces internal, tbb + +#undef __TBB_MACHINE_DEFINE_LOAD_STORE + +#define __TBB_USE_GENERIC_PART_WORD_CAS 1 +#define __TBB_USE_GENERIC_FETCH_ADD 1 +#define __TBB_USE_GENERIC_FETCH_STORE 1 +#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1 + +#define __TBB_control_consistency_helper() __asm__ __volatile__("isync": : :"memory") +#define __TBB_full_memory_fence() __asm__ __volatile__( "sync": : :"memory") + +static inline intptr_t __TBB_machine_lg( uintptr_t x ) { + __TBB_ASSERT(x, "__TBB_Log2(0) undefined"); + // cntlzd/cntlzw starts counting at 2^63/2^31 (ignoring any higher-order bits), and does not affect cr0 +#if __TBB_WORDSIZE==8 + __asm__ __volatile__ ("cntlzd %0,%0" : "+r"(x)); + return 63-static_cast(x); +#else + __asm__ __volatile__ ("cntlzw %0,%0" : "+r"(x)); + return 31-static_cast(x); +#endif +} +#define __TBB_Log2(V) __TBB_machine_lg(V) + +// Assumes implicit alignment for any 32-bit value +typedef uint32_t __TBB_Flag; +#define __TBB_Flag __TBB_Flag + +inline bool __TBB_machine_trylockbyte( __TBB_atomic __TBB_Flag &flag ) { + return __TBB_machine_cmpswp4(&flag,1,0)==0; +} +#define __TBB_TryLockByte(P) __TBB_machine_trylockbyte(P) diff --git a/ohos/arm64-v8a/include/tbb/machine/macos_common.h b/ohos/arm64-v8a/include/tbb/machine/macos_common.h new file mode 100644 index 00000000..87bb5e3e --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/machine/macos_common.h @@ -0,0 +1,129 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#if !defined(__TBB_machine_H) || defined(__TBB_machine_macos_common_H) +#error Do not #include this internal file directly; use public TBB headers instead. +#endif + +#define __TBB_machine_macos_common_H + +#include +#define __TBB_Yield() sched_yield() + +// __TBB_HardwareConcurrency + +#include +#include + +static inline int __TBB_macos_available_cpu() { + int name[2] = {CTL_HW, HW_AVAILCPU}; + int ncpu; + size_t size = sizeof(ncpu); + sysctl( name, 2, &ncpu, &size, NULL, 0 ); + return ncpu; +} + +#define __TBB_HardwareConcurrency() __TBB_macos_available_cpu() + +#ifndef __TBB_full_memory_fence + // TBB has not recognized the architecture (none of the architecture abstraction + // headers was included). + #define __TBB_UnknownArchitecture 1 +#endif + +#if __TBB_UnknownArchitecture +// Implementation of atomic operations based on OS provided primitives +#include + +static inline int64_t __TBB_machine_cmpswp8_OsX(volatile void *ptr, int64_t value, int64_t comparand) +{ + __TBB_ASSERT( tbb::internal::is_aligned(ptr,8), "address not properly aligned for macOS* atomics"); + int64_t* address = (int64_t*)ptr; + while( !OSAtomicCompareAndSwap64Barrier(comparand, value, address) ){ +#if __TBB_WORDSIZE==8 + int64_t snapshot = *address; +#else + int64_t snapshot = OSAtomicAdd64( 0, address ); +#endif + if( snapshot!=comparand ) return snapshot; + } + return comparand; +} + +#define __TBB_machine_cmpswp8 __TBB_machine_cmpswp8_OsX + +#endif /* __TBB_UnknownArchitecture */ + +#if __TBB_UnknownArchitecture + +#ifndef __TBB_WORDSIZE +#define __TBB_WORDSIZE __SIZEOF_POINTER__ +#endif + +#ifdef __TBB_ENDIANNESS + // Already determined based on hardware architecture. +#elif __BIG_ENDIAN__ + #define __TBB_ENDIANNESS __TBB_ENDIAN_BIG +#elif __LITTLE_ENDIAN__ + #define __TBB_ENDIANNESS __TBB_ENDIAN_LITTLE +#else + #define __TBB_ENDIANNESS __TBB_ENDIAN_UNSUPPORTED +#endif + +/** As this generic implementation has absolutely no information about underlying + hardware, its performance most likely will be sub-optimal because of full memory + fence usages where a more lightweight synchronization means (or none at all) + could suffice. Thus if you use this header to enable TBB on a new platform, + consider forking it and relaxing below helpers as appropriate. 
**/ +#define __TBB_control_consistency_helper() OSMemoryBarrier() +#define __TBB_acquire_consistency_helper() OSMemoryBarrier() +#define __TBB_release_consistency_helper() OSMemoryBarrier() +#define __TBB_full_memory_fence() OSMemoryBarrier() + +static inline int32_t __TBB_machine_cmpswp4(volatile void *ptr, int32_t value, int32_t comparand) +{ + __TBB_ASSERT( tbb::internal::is_aligned(ptr,4), "address not properly aligned for macOS atomics"); + int32_t* address = (int32_t*)ptr; + while( !OSAtomicCompareAndSwap32Barrier(comparand, value, address) ){ + int32_t snapshot = *address; + if( snapshot!=comparand ) return snapshot; + } + return comparand; +} + +static inline int32_t __TBB_machine_fetchadd4(volatile void *ptr, int32_t addend) +{ + __TBB_ASSERT( tbb::internal::is_aligned(ptr,4), "address not properly aligned for macOS atomics"); + return OSAtomicAdd32Barrier(addend, (int32_t*)ptr) - addend; +} + +static inline int64_t __TBB_machine_fetchadd8(volatile void *ptr, int64_t addend) +{ + __TBB_ASSERT( tbb::internal::is_aligned(ptr,8), "address not properly aligned for macOS atomics"); + return OSAtomicAdd64Barrier(addend, (int64_t*)ptr) - addend; +} + +#define __TBB_USE_GENERIC_PART_WORD_CAS 1 +#define __TBB_USE_GENERIC_PART_WORD_FETCH_ADD 1 +#define __TBB_USE_GENERIC_FETCH_STORE 1 +#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE 1 +#define __TBB_USE_GENERIC_RELAXED_LOAD_STORE 1 +#if __TBB_WORDSIZE == 4 + #define __TBB_USE_GENERIC_DWORD_LOAD_STORE 1 +#endif +#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1 + +#endif /* __TBB_UnknownArchitecture */ diff --git a/ohos/arm64-v8a/include/tbb/machine/mic_common.h b/ohos/arm64-v8a/include/tbb/machine/mic_common.h new file mode 100644 index 00000000..8c844f1d --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/machine/mic_common.h @@ -0,0 +1,53 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_mic_common_H +#define __TBB_mic_common_H + +#ifndef __TBB_machine_H +#error Do not #include this internal file directly; use public TBB headers instead. +#endif + +#if ! __TBB_DEFINE_MIC + #error mic_common.h should be included only when building for Intel(R) Many Integrated Core Architecture +#endif + +#ifndef __TBB_PREFETCHING +#define __TBB_PREFETCHING 1 +#endif +#if __TBB_PREFETCHING +#include +#define __TBB_cl_prefetch(p) _mm_prefetch((const char*)p, _MM_HINT_T1) +#define __TBB_cl_evict(p) _mm_clevict(p, _MM_HINT_T1) +#endif + +/** Intel(R) Many Integrated Core Architecture does not support mfence and pause instructions **/ +#define __TBB_full_memory_fence() __asm__ __volatile__("lock; addl $0,(%%rsp)":::"memory") +#define __TBB_Pause(x) _mm_delay_32(16*(x)) +#define __TBB_STEALING_PAUSE 1500/16 +#include +#define __TBB_Yield() sched_yield() + +/** Specifics **/ +#define __TBB_STEALING_ABORT_ON_CONTENTION 1 +#define __TBB_YIELD2P 1 +#define __TBB_HOARD_NONLOCAL_TASKS 1 + +#if ! 
( __FreeBSD__ || __linux__ ) + #error Intel(R) Many Integrated Core Compiler does not define __FreeBSD__ or __linux__ anymore. Check for the __TBB_XXX_BROKEN defined under __FreeBSD__ or __linux__. +#endif /* ! ( __FreeBSD__ || __linux__ ) */ + +#endif /* __TBB_mic_common_H */ diff --git a/ohos/arm64-v8a/include/tbb/machine/msvc_armv7.h b/ohos/arm64-v8a/include/tbb/machine/msvc_armv7.h new file mode 100644 index 00000000..e83c077e --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/machine/msvc_armv7.h @@ -0,0 +1,167 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#if !defined(__TBB_machine_H) || defined(__TBB_msvc_armv7_H) +#error Do not #include this internal file directly; use public TBB headers instead. +#endif + +#define __TBB_msvc_armv7_H + +#include +#include + +#define __TBB_WORDSIZE 4 + +#define __TBB_ENDIANNESS __TBB_ENDIAN_UNSUPPORTED + +#if defined(TBB_WIN32_USE_CL_BUILTINS) +// We can test this on _M_IX86 +#pragma intrinsic(_ReadWriteBarrier) +#pragma intrinsic(_mm_mfence) +#define __TBB_compiler_fence() _ReadWriteBarrier() +#define __TBB_full_memory_fence() _mm_mfence() +#define __TBB_control_consistency_helper() __TBB_compiler_fence() +#define __TBB_acquire_consistency_helper() __TBB_compiler_fence() +#define __TBB_release_consistency_helper() __TBB_compiler_fence() +#else +//Now __dmb(_ARM_BARRIER_SY) is used for both compiler and memory fences +//This might be changed later after testing +#define __TBB_compiler_fence() __dmb(_ARM_BARRIER_SY) +#define __TBB_full_memory_fence() __dmb(_ARM_BARRIER_SY) +#define __TBB_control_consistency_helper() __TBB_compiler_fence() +#define __TBB_acquire_consistency_helper() __TBB_full_memory_fence() +#define __TBB_release_consistency_helper() __TBB_full_memory_fence() +#endif + +//-------------------------------------------------- +// Compare and swap +//-------------------------------------------------- + +/** + * Atomic CAS for 32 bit values, if *ptr==comparand, then *ptr=value, returns *ptr + * @param ptr pointer to value in memory to be swapped with value if *ptr==comparand + * @param value value to assign *ptr to if *ptr==comparand + * @param comparand value to compare with *ptr + * @return value originally in memory at ptr, regardless of success +*/ + +#define __TBB_MACHINE_DEFINE_ATOMICS_CMPSWP(S,T,F) \ +inline T __TBB_machine_cmpswp##S( volatile void *ptr, T value, T comparand ) { \ + return _InterlockedCompareExchange##F(reinterpret_cast(ptr),value,comparand); \ +} \ + +#define __TBB_MACHINE_DEFINE_ATOMICS_FETCHADD(S,T,F) \ +inline T __TBB_machine_fetchadd##S( volatile void *ptr, T value ) { \ + return _InterlockedExchangeAdd##F(reinterpret_cast(ptr),value); \ +} \ + +__TBB_MACHINE_DEFINE_ATOMICS_CMPSWP(1,char,8) +__TBB_MACHINE_DEFINE_ATOMICS_CMPSWP(2,short,16) +__TBB_MACHINE_DEFINE_ATOMICS_CMPSWP(4,long,) +__TBB_MACHINE_DEFINE_ATOMICS_CMPSWP(8,__int64,64) +__TBB_MACHINE_DEFINE_ATOMICS_FETCHADD(4,long,) +#if defined(TBB_WIN32_USE_CL_BUILTINS) +// No _InterlockedExchangeAdd64 intrinsic on 
_M_IX86 +#define __TBB_64BIT_ATOMICS 0 +#else +__TBB_MACHINE_DEFINE_ATOMICS_FETCHADD(8,__int64,64) +#endif + +inline void __TBB_machine_pause (int32_t delay ) +{ + while(delay>0) + { + __TBB_compiler_fence(); + delay--; + } +} + +// API to retrieve/update FPU control setting +#define __TBB_CPU_CTL_ENV_PRESENT 1 + +namespace tbb { +namespace internal { + +template +struct machine_load_store_relaxed { + static inline T load ( const volatile T& location ) { + const T value = location; + + /* + * An extra memory barrier is required for errata #761319 + * Please see http://infocenter.arm.com/help/topic/com.arm.doc.uan0004a + */ + __TBB_acquire_consistency_helper(); + return value; + } + + static inline void store ( volatile T& location, T value ) { + location = value; + } +}; + +class cpu_ctl_env { +private: + unsigned int my_ctl; +public: + bool operator!=( const cpu_ctl_env& ctl ) const { return my_ctl != ctl.my_ctl; } + void get_env() { my_ctl = _control87(0, 0); } + void set_env() const { _control87( my_ctl, ~0U ); } +}; + +} // namespace internal +} // namespaces tbb + +// Machine specific atomic operations +#define __TBB_CompareAndSwap4(P,V,C) __TBB_machine_cmpswp4(P,V,C) +#define __TBB_CompareAndSwap8(P,V,C) __TBB_machine_cmpswp8(P,V,C) +#define __TBB_Pause(V) __TBB_machine_pause(V) + +// Use generics for some things +#define __TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE 1 +#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE 1 +#define __TBB_USE_GENERIC_PART_WORD_FETCH_ADD 1 +#define __TBB_USE_GENERIC_PART_WORD_FETCH_STORE 1 +#define __TBB_USE_GENERIC_FETCH_STORE 1 +#define __TBB_USE_GENERIC_DWORD_LOAD_STORE 1 +#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1 + +#if defined(TBB_WIN32_USE_CL_BUILTINS) +#if !__TBB_WIN8UI_SUPPORT +extern "C" __declspec(dllimport) int __stdcall SwitchToThread( void ); +#define __TBB_Yield() SwitchToThread() +#else +#include +#define __TBB_Yield() std::this_thread::yield() +#endif +#else +#define __TBB_Yield() __yield() +#endif + +// Machine specific atomic operations +#define __TBB_AtomicOR(P,V) __TBB_machine_OR(P,V) +#define __TBB_AtomicAND(P,V) __TBB_machine_AND(P,V) + +template +inline void __TBB_machine_OR( T1 *operand, T2 addend ) { + _InterlockedOr((long volatile *)operand, (long)addend); +} + +template +inline void __TBB_machine_AND( T1 *operand, T2 addend ) { + _InterlockedAnd((long volatile *)operand, (long)addend); +} + diff --git a/ohos/arm64-v8a/include/tbb/machine/msvc_ia32_common.h b/ohos/arm64-v8a/include/tbb/machine/msvc_ia32_common.h new file mode 100644 index 00000000..2e17836d --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/machine/msvc_ia32_common.h @@ -0,0 +1,275 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#if !defined(__TBB_machine_H) || defined(__TBB_machine_msvc_ia32_common_H) +#error Do not #include this internal file directly; use public TBB headers instead. 
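The __TBB_machine_cmpswp*/fetchadd* primitives in the machine headers above all follow the contract documented in msvc_armv7.h: return the value originally in memory, whether or not the store happened. A minimal sketch of the retry loop that a generic read-modify-write operation builds on such a primitive; std::atomic stands in for the platform intrinsic, and this is not TBB's own code:

    #include <atomic>

    // Same contract as __TBB_machine_cmpswp4: returns the old value regardless of success.
    static long cmpswp(std::atomic<long>& location, long value, long comparand) {
        location.compare_exchange_strong(comparand, value);  // on failure, comparand receives the current value
        return comparand;
    }

    static long fetch_and_add(std::atomic<long>& location, long addend) {
        long old_value = location.load();
        for (;;) {
            long seen = cmpswp(location, old_value + addend, old_value);
            if (seen == old_value)
                return old_value;   // CAS succeeded; return the pre-addition value
            old_value = seen;       // another thread won; retry against the fresh value
        }
    }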
+#endif + +#define __TBB_machine_msvc_ia32_common_H + +#include + +//TODO: consider moving this macro to tbb_config.h and using where MSVC asm is used +#if !_M_X64 || __INTEL_COMPILER + #define __TBB_X86_MSVC_INLINE_ASM_AVAILABLE 1 +#else + //MSVC in x64 mode does not accept inline assembler + #define __TBB_X86_MSVC_INLINE_ASM_AVAILABLE 0 + #define __TBB_NO_X86_MSVC_INLINE_ASM_MSG "The compiler being used is not supported (outdated?)" +#endif + +#if _M_X64 + #define __TBB_r(reg_name) r##reg_name + #define __TBB_W(name) name##64 + namespace tbb { namespace internal { namespace msvc_intrinsics { + typedef __int64 word; + }}} +#else + #define __TBB_r(reg_name) e##reg_name + #define __TBB_W(name) name + namespace tbb { namespace internal { namespace msvc_intrinsics { + typedef long word; + }}} +#endif + +#if __TBB_MSVC_PART_WORD_INTERLOCKED_INTRINSICS_PRESENT + // S is the operand size in bytes, B is the suffix for intrinsics for that size + #define __TBB_MACHINE_DEFINE_ATOMICS(S,B,T,U) \ + __pragma(intrinsic( _InterlockedCompareExchange##B )) \ + static inline T __TBB_machine_cmpswp##S ( volatile void * ptr, U value, U comparand ) { \ + return _InterlockedCompareExchange##B ( (T*)ptr, value, comparand ); \ + } \ + __pragma(intrinsic( _InterlockedExchangeAdd##B )) \ + static inline T __TBB_machine_fetchadd##S ( volatile void * ptr, U addend ) { \ + return _InterlockedExchangeAdd##B ( (T*)ptr, addend ); \ + } \ + __pragma(intrinsic( _InterlockedExchange##B )) \ + static inline T __TBB_machine_fetchstore##S ( volatile void * ptr, U value ) { \ + return _InterlockedExchange##B ( (T*)ptr, value ); \ + } + + // Atomic intrinsics for 1, 2, and 4 bytes are available for x86 & x64 + __TBB_MACHINE_DEFINE_ATOMICS(1,8,char,__int8) + __TBB_MACHINE_DEFINE_ATOMICS(2,16,short,__int16) + __TBB_MACHINE_DEFINE_ATOMICS(4,,long,__int32) + + #if __TBB_WORDSIZE==8 + __TBB_MACHINE_DEFINE_ATOMICS(8,64,__int64,__int64) + #endif + + #undef __TBB_MACHINE_DEFINE_ATOMICS +#endif /* __TBB_MSVC_PART_WORD_INTERLOCKED_INTRINSICS_PRESENT */ + +#if _MSC_VER>=1300 || __INTEL_COMPILER>=1100 + #pragma intrinsic(_ReadWriteBarrier) + #pragma intrinsic(_mm_mfence) + #define __TBB_compiler_fence() _ReadWriteBarrier() + #define __TBB_full_memory_fence() _mm_mfence() +#elif __TBB_X86_MSVC_INLINE_ASM_AVAILABLE + #define __TBB_compiler_fence() __asm { __asm nop } + #define __TBB_full_memory_fence() __asm { __asm mfence } +#else + #error Unsupported compiler; define __TBB_{control,acquire,release}_consistency_helper to support it +#endif + +#define __TBB_control_consistency_helper() __TBB_compiler_fence() +#define __TBB_acquire_consistency_helper() __TBB_compiler_fence() +#define __TBB_release_consistency_helper() __TBB_compiler_fence() + +#if (_MSC_VER>=1300) || (__INTEL_COMPILER) + #pragma intrinsic(_mm_pause) + namespace tbb { namespace internal { namespace msvc_intrinsics { + static inline void pause (uintptr_t delay ) { + for (;delay>0; --delay ) + _mm_pause(); + } + }}} + #define __TBB_Pause(V) tbb::internal::msvc_intrinsics::pause(V) + #define __TBB_SINGLE_PAUSE _mm_pause() +#else + #if !__TBB_X86_MSVC_INLINE_ASM_AVAILABLE + #error __TBB_NO_X86_MSVC_INLINE_ASM_MSG + #endif + namespace tbb { namespace internal { namespace msvc_inline_asm + static inline void pause (uintptr_t delay ) { + _asm + { + mov __TBB_r(ax), delay + __TBB_L1: + pause + add __TBB_r(ax), -1 + jne __TBB_L1 + } + return; + } + }}} + #define __TBB_Pause(V) tbb::internal::msvc_inline_asm::pause(V) + #define __TBB_SINGLE_PAUSE __asm pause +#endif + +#if 
(_MSC_VER>=1400 && !__INTEL_COMPILER) || (__INTEL_COMPILER>=1200) +// MSVC did not have this intrinsic prior to VC8. +// ICL 11.1 fails to compile a TBB example if __TBB_Log2 uses the intrinsic. + #pragma intrinsic(__TBB_W(_BitScanReverse)) + namespace tbb { namespace internal { namespace msvc_intrinsics { + static inline uintptr_t lg_bsr( uintptr_t i ){ + unsigned long j; + __TBB_W(_BitScanReverse)( &j, i ); + return j; + } + }}} + #define __TBB_Log2(V) tbb::internal::msvc_intrinsics::lg_bsr(V) +#else + #if !__TBB_X86_MSVC_INLINE_ASM_AVAILABLE + #error __TBB_NO_X86_MSVC_INLINE_ASM_MSG + #endif + namespace tbb { namespace internal { namespace msvc_inline_asm { + static inline uintptr_t lg_bsr( uintptr_t i ){ + uintptr_t j; + __asm + { + bsr __TBB_r(ax), i + mov j, __TBB_r(ax) + } + return j; + } + }}} + #define __TBB_Log2(V) tbb::internal::msvc_inline_asm::lg_bsr(V) +#endif + +#if _MSC_VER>=1400 + #pragma intrinsic(__TBB_W(_InterlockedOr)) + #pragma intrinsic(__TBB_W(_InterlockedAnd)) + namespace tbb { namespace internal { namespace msvc_intrinsics { + static inline void lock_or( volatile void *operand, intptr_t addend ){ + __TBB_W(_InterlockedOr)((volatile word*)operand, addend); + } + static inline void lock_and( volatile void *operand, intptr_t addend ){ + __TBB_W(_InterlockedAnd)((volatile word*)operand, addend); + } + }}} + #define __TBB_AtomicOR(P,V) tbb::internal::msvc_intrinsics::lock_or(P,V) + #define __TBB_AtomicAND(P,V) tbb::internal::msvc_intrinsics::lock_and(P,V) +#else + #if !__TBB_X86_MSVC_INLINE_ASM_AVAILABLE + #error __TBB_NO_X86_MSVC_INLINE_ASM_MSG + #endif + namespace tbb { namespace internal { namespace msvc_inline_asm { + static inline void lock_or( volatile void *operand, __int32 addend ) { + __asm + { + mov eax, addend + mov edx, [operand] + lock or [edx], eax + } + } + static inline void lock_and( volatile void *operand, __int32 addend ) { + __asm + { + mov eax, addend + mov edx, [operand] + lock and [edx], eax + } + } + }}} + #define __TBB_AtomicOR(P,V) tbb::internal::msvc_inline_asm::lock_or(P,V) + #define __TBB_AtomicAND(P,V) tbb::internal::msvc_inline_asm::lock_and(P,V) +#endif + +#pragma intrinsic(__rdtsc) +namespace tbb { namespace internal { typedef uint64_t machine_tsc_t; } } +static inline tbb::internal::machine_tsc_t __TBB_machine_time_stamp() { + return __rdtsc(); +} +#define __TBB_time_stamp() __TBB_machine_time_stamp() + +// API to retrieve/update FPU control setting +#define __TBB_CPU_CTL_ENV_PRESENT 1 + +namespace tbb { namespace internal { class cpu_ctl_env; } } +#if __TBB_X86_MSVC_INLINE_ASM_AVAILABLE + inline void __TBB_get_cpu_ctl_env ( tbb::internal::cpu_ctl_env* ctl ) { + __asm { + __asm mov __TBB_r(ax), ctl + __asm stmxcsr [__TBB_r(ax)] + __asm fstcw [__TBB_r(ax)+4] + } + } + inline void __TBB_set_cpu_ctl_env ( const tbb::internal::cpu_ctl_env* ctl ) { + __asm { + __asm mov __TBB_r(ax), ctl + __asm ldmxcsr [__TBB_r(ax)] + __asm fldcw [__TBB_r(ax)+4] + } + } +#else + extern "C" { + void __TBB_EXPORTED_FUNC __TBB_get_cpu_ctl_env ( tbb::internal::cpu_ctl_env* ); + void __TBB_EXPORTED_FUNC __TBB_set_cpu_ctl_env ( const tbb::internal::cpu_ctl_env* ); + } +#endif + +namespace tbb { +namespace internal { +class cpu_ctl_env { +private: + int mxcsr; + short x87cw; + static const int MXCSR_CONTROL_MASK = ~0x3f; /* all except last six status bits */ +public: + bool operator!=( const cpu_ctl_env& ctl ) const { return mxcsr != ctl.mxcsr || x87cw != ctl.x87cw; } + void get_env() { + __TBB_get_cpu_ctl_env( this ); + mxcsr &= MXCSR_CONTROL_MASK; + } + void 
set_env() const { __TBB_set_cpu_ctl_env( this ); } +}; +} // namespace internal +} // namespace tbb + +#if !__TBB_WIN8UI_SUPPORT +extern "C" __declspec(dllimport) int __stdcall SwitchToThread( void ); +#define __TBB_Yield() SwitchToThread() +#else +#include +#define __TBB_Yield() std::this_thread::yield() +#endif + +#undef __TBB_r +#undef __TBB_W +#undef __TBB_word + +extern "C" { + __int8 __TBB_EXPORTED_FUNC __TBB_machine_try_lock_elided (volatile void* ptr); + void __TBB_EXPORTED_FUNC __TBB_machine_unlock_elided (volatile void* ptr); + + // 'pause' instruction aborts HLE/RTM transactions + inline static void __TBB_machine_try_lock_elided_cancel() { __TBB_SINGLE_PAUSE; } + +#if __TBB_TSX_INTRINSICS_PRESENT + #define __TBB_machine_is_in_transaction _xtest + #define __TBB_machine_begin_transaction _xbegin + #define __TBB_machine_end_transaction _xend + // The value (0xFF) below comes from the + // Intel(R) 64 and IA-32 Architectures Optimization Reference Manual 12.4.5 lock not free + #define __TBB_machine_transaction_conflict_abort() _xabort(0xFF) +#else + __int8 __TBB_EXPORTED_FUNC __TBB_machine_is_in_transaction(); + unsigned __int32 __TBB_EXPORTED_FUNC __TBB_machine_begin_transaction(); + void __TBB_EXPORTED_FUNC __TBB_machine_end_transaction(); + void __TBB_EXPORTED_FUNC __TBB_machine_transaction_conflict_abort(); +#endif /* __TBB_TSX_INTRINSICS_PRESENT */ +} diff --git a/ohos/arm64-v8a/include/tbb/machine/sunos_sparc.h b/ohos/arm64-v8a/include/tbb/machine/sunos_sparc.h new file mode 100644 index 00000000..b5864ba7 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/machine/sunos_sparc.h @@ -0,0 +1,199 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + + +#if !defined(__TBB_machine_H) || defined(__TBB_machine_sunos_sparc_H) +#error Do not #include this internal file directly; use public TBB headers instead. +#endif + +#define __TBB_machine_sunos_sparc_H + +#include +#include + +#define __TBB_WORDSIZE 8 +// Big endian is assumed for SPARC. +// While hardware may support page-specific bi-endianness, only big endian pages may be exposed to TBB +#define __TBB_ENDIANNESS __TBB_ENDIAN_BIG + +/** To those working on SPARC hardware. Consider relaxing acquire and release + consistency helpers to no-op (as this port covers TSO mode only). 
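msvc_ia32_common.h above declares the try_lock_elided/begin_transaction entry points used for Intel TSX lock elision. A rough sketch of the idea, assuming a TSX-capable CPU, <immintrin.h>, and compilation with -mrtm; this is not TBB's implementation (that lives in the prebuilt libraries), only an illustration of the pattern:

    #include <immintrin.h>
    #include <atomic>

    static std::atomic<long> lock_word{0};                 // 0 = free, 1 = held

    bool try_lock_elided() {
        unsigned status = _xbegin();                       // start a hardware transaction
        if (status == _XBEGIN_STARTED) {
            if (lock_word.load(std::memory_order_relaxed) == 0)
                return true;                               // critical section runs transactionally
            _xabort(0xFF);                                 // lock busy: abort, as with the 0xFF code above
        }
        long expected = 0;                                 // no transaction: take the lock for real
        return lock_word.compare_exchange_strong(expected, 1);
    }

    void unlock_elided() {
        if (_xtest()) _xend();                             // commit if we are inside a transaction
        else          lock_word.store(0, std::memory_order_release);
    }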
**/ +#define __TBB_compiler_fence() __asm__ __volatile__ ("": : :"memory") +#define __TBB_control_consistency_helper() __TBB_compiler_fence() +#define __TBB_acquire_consistency_helper() __TBB_compiler_fence() +#define __TBB_release_consistency_helper() __TBB_compiler_fence() +#define __TBB_full_memory_fence() __asm__ __volatile__("membar #LoadLoad|#LoadStore|#StoreStore|#StoreLoad": : : "memory") + +//-------------------------------------------------- +// Compare and swap +//-------------------------------------------------- + +/** + * Atomic CAS for 32 bit values, if *ptr==comparand, then *ptr=value, returns *ptr + * @param ptr pointer to value in memory to be swapped with value if *ptr==comparand + * @param value value to assign *ptr to if *ptr==comparand + * @param comparand value to compare with *ptr + ( @return value originally in memory at ptr, regardless of success +*/ +static inline int32_t __TBB_machine_cmpswp4(volatile void *ptr, int32_t value, int32_t comparand ){ + int32_t result; + __asm__ __volatile__( + "cas\t[%5],%4,%1" + : "=m"(*(int32_t *)ptr), "=r"(result) + : "m"(*(int32_t *)ptr), "1"(value), "r"(comparand), "r"(ptr) + : "memory"); + return result; +} + +/** + * Atomic CAS for 64 bit values, if *ptr==comparand, then *ptr=value, returns *ptr + * @param ptr pointer to value in memory to be swapped with value if *ptr==comparand + * @param value value to assign *ptr to if *ptr==comparand + * @param comparand value to compare with *ptr + ( @return value originally in memory at ptr, regardless of success + */ +static inline int64_t __TBB_machine_cmpswp8(volatile void *ptr, int64_t value, int64_t comparand ){ + int64_t result; + __asm__ __volatile__( + "casx\t[%5],%4,%1" + : "=m"(*(int64_t *)ptr), "=r"(result) + : "m"(*(int64_t *)ptr), "1"(value), "r"(comparand), "r"(ptr) + : "memory"); + return result; +} + +//--------------------------------------------------- +// Fetch and add +//--------------------------------------------------- + +/** + * Atomic fetch and add for 32 bit values, in this case implemented by continuously checking success of atomicity + * @param ptr pointer to value to add addend to + * @param addened value to add to *ptr + * @return value at ptr before addened was added + */ +static inline int32_t __TBB_machine_fetchadd4(volatile void *ptr, int32_t addend){ + int32_t result; + __asm__ __volatile__ ( + "0:\t add\t %3, %4, %0\n" // do addition + "\t cas\t [%2], %3, %0\n" // cas to store result in memory + "\t cmp\t %3, %0\n" // check if value from memory is original + "\t bne,a,pn\t %%icc, 0b\n" // if not try again + "\t mov %0, %3\n" // use branch delay slot to move new value in memory to be added + : "=&r"(result), "=m"(*(int32_t *)ptr) + : "r"(ptr), "r"(*(int32_t *)ptr), "r"(addend), "m"(*(int32_t *)ptr) + : "ccr", "memory"); + return result; +} + +/** + * Atomic fetch and add for 64 bit values, in this case implemented by continuously checking success of atomicity + * @param ptr pointer to value to add addend to + * @param addened value to add to *ptr + * @return value at ptr before addened was added + */ +static inline int64_t __TBB_machine_fetchadd8(volatile void *ptr, int64_t addend){ + int64_t result; + __asm__ __volatile__ ( + "0:\t add\t %3, %4, %0\n" // do addition + "\t casx\t [%2], %3, %0\n" // cas to store result in memory + "\t cmp\t %3, %0\n" // check if value from memory is original + "\t bne,a,pn\t %%xcc, 0b\n" // if not try again + "\t mov %0, %3\n" // use branch delay slot to move new value in memory to be added + : "=&r"(result), 
"=m"(*(int64_t *)ptr) + : "r"(ptr), "r"(*(int64_t *)ptr), "r"(addend), "m"(*(int64_t *)ptr) + : "ccr", "memory"); + return result; +} + +//-------------------------------------------------------- +// Logarithm (base two, integer) +//-------------------------------------------------------- + +static inline int64_t __TBB_machine_lg( uint64_t x ) { + __TBB_ASSERT(x, "__TBB_Log2(0) undefined"); + uint64_t count; + // one hot encode + x |= (x >> 1); + x |= (x >> 2); + x |= (x >> 4); + x |= (x >> 8); + x |= (x >> 16); + x |= (x >> 32); + // count 1's + __asm__ ("popc %1, %0" : "=r"(count) : "r"(x) ); + return count-1; +} + +//-------------------------------------------------------- + +static inline void __TBB_machine_or( volatile void *ptr, uint64_t value ) { + __asm__ __volatile__ ( + "0:\t or\t %2, %3, %%g1\n" // do operation + "\t casx\t [%1], %2, %%g1\n" // cas to store result in memory + "\t cmp\t %2, %%g1\n" // check if value from memory is original + "\t bne,a,pn\t %%xcc, 0b\n" // if not try again + "\t mov %%g1, %2\n" // use branch delay slot to move new value in memory to be added + : "=m"(*(int64_t *)ptr) + : "r"(ptr), "r"(*(int64_t *)ptr), "r"(value), "m"(*(int64_t *)ptr) + : "ccr", "g1", "memory"); +} + +static inline void __TBB_machine_and( volatile void *ptr, uint64_t value ) { + __asm__ __volatile__ ( + "0:\t and\t %2, %3, %%g1\n" // do operation + "\t casx\t [%1], %2, %%g1\n" // cas to store result in memory + "\t cmp\t %2, %%g1\n" // check if value from memory is original + "\t bne,a,pn\t %%xcc, 0b\n" // if not try again + "\t mov %%g1, %2\n" // use branch delay slot to move new value in memory to be added + : "=m"(*(int64_t *)ptr) + : "r"(ptr), "r"(*(int64_t *)ptr), "r"(value), "m"(*(int64_t *)ptr) + : "ccr", "g1", "memory"); +} + + +static inline void __TBB_machine_pause( int32_t delay ) { + // do nothing, inlined, doesn't matter +} + +// put 0xff in memory location, return memory value, +// generic trylockbyte puts 0x01, however this is fine +// because all that matters is that 0 is unlocked +static inline bool __TBB_machine_trylockbyte(unsigned char &flag){ + unsigned char result; + __asm__ __volatile__ ( + "ldstub\t [%2], %0\n" + : "=r"(result), "=m"(flag) + : "r"(&flag), "m"(flag) + : "memory"); + return result == 0; +} + +#define __TBB_USE_GENERIC_PART_WORD_CAS 1 +#define __TBB_USE_GENERIC_PART_WORD_FETCH_ADD 1 +#define __TBB_USE_GENERIC_FETCH_STORE 1 +#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE 1 +#define __TBB_USE_GENERIC_RELAXED_LOAD_STORE 1 +#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1 + +#define __TBB_AtomicOR(P,V) __TBB_machine_or(P,V) +#define __TBB_AtomicAND(P,V) __TBB_machine_and(P,V) + +// Definition of other functions +#define __TBB_Pause(V) __TBB_machine_pause(V) +#define __TBB_Log2(V) __TBB_machine_lg(V) + +#define __TBB_TryLockByte(P) __TBB_machine_trylockbyte(P) diff --git a/ohos/arm64-v8a/include/tbb/machine/windows_api.h b/ohos/arm64-v8a/include/tbb/machine/windows_api.h new file mode 100644 index 00000000..54987915 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/machine/windows_api.h @@ -0,0 +1,65 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_machine_windows_api_H +#define __TBB_machine_windows_api_H + +#if _WIN32 || _WIN64 + +#include + +#if _WIN32_WINNT < 0x0600 +// The following Windows API function is declared explicitly; +// otherwise it fails to compile by VS2005. +#if !defined(WINBASEAPI) || (_WIN32_WINNT < 0x0501 && _MSC_VER == 1400) +#define __TBB_WINBASEAPI extern "C" +#else +#define __TBB_WINBASEAPI WINBASEAPI +#endif +__TBB_WINBASEAPI BOOL WINAPI TryEnterCriticalSection( LPCRITICAL_SECTION ); +__TBB_WINBASEAPI BOOL WINAPI InitializeCriticalSectionAndSpinCount( LPCRITICAL_SECTION, DWORD ); +// Overloading WINBASEAPI macro and using local functions missing in Windows XP/2003 +#define InitializeCriticalSectionEx inlineInitializeCriticalSectionEx +#define CreateSemaphoreEx inlineCreateSemaphoreEx +#define CreateEventEx inlineCreateEventEx +inline BOOL WINAPI inlineInitializeCriticalSectionEx( LPCRITICAL_SECTION lpCriticalSection, DWORD dwSpinCount, DWORD ) +{ + return InitializeCriticalSectionAndSpinCount( lpCriticalSection, dwSpinCount ); +} +inline HANDLE WINAPI inlineCreateSemaphoreEx( LPSECURITY_ATTRIBUTES lpSemaphoreAttributes, LONG lInitialCount, LONG lMaximumCount, LPCTSTR lpName, DWORD, DWORD ) +{ + return CreateSemaphore( lpSemaphoreAttributes, lInitialCount, lMaximumCount, lpName ); +} +inline HANDLE WINAPI inlineCreateEventEx( LPSECURITY_ATTRIBUTES lpEventAttributes, LPCTSTR lpName, DWORD dwFlags, DWORD ) +{ + BOOL manual_reset = dwFlags&0x00000001 ? TRUE : FALSE; // CREATE_EVENT_MANUAL_RESET + BOOL initial_set = dwFlags&0x00000002 ? TRUE : FALSE; // CREATE_EVENT_INITIAL_SET + return CreateEvent( lpEventAttributes, manual_reset, initial_set, lpName ); +} +#endif + +#if defined(RTL_SRWLOCK_INIT) +#ifndef __TBB_USE_SRWLOCK +// TODO: turn it on when bug 1952 will be fixed +#define __TBB_USE_SRWLOCK 0 +#endif +#endif + +#else +#error tbb/machine/windows_api.h should only be used for Windows based platforms +#endif // _WIN32 || _WIN64 + +#endif // __TBB_machine_windows_api_H diff --git a/ohos/arm64-v8a/include/tbb/machine/windows_ia32.h b/ohos/arm64-v8a/include/tbb/machine/windows_ia32.h new file mode 100644 index 00000000..62968226 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/machine/windows_ia32.h @@ -0,0 +1,105 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#if !defined(__TBB_machine_H) || defined(__TBB_machine_windows_ia32_H) +#error Do not #include this internal file directly; use public TBB headers instead. 
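__TBB_machine_lg in sunos_sparc.h above computes floor(log2(x)) by smearing the highest set bit into all lower positions and then counting ones with the SPARC popc instruction. The same trick in portable form, assuming the GCC/Clang __builtin_popcountll builtin as a stand-in for popc:

    #include <cstdint>
    #include <cassert>

    static inline int64_t floor_log2(uint64_t x) {
        assert(x != 0);                      // log2(0) is undefined, as the __TBB_ASSERT above notes
        x |= (x >> 1);
        x |= (x >> 2);
        x |= (x >> 4);
        x |= (x >> 8);
        x |= (x >> 16);
        x |= (x >> 32);                      // x is now 2^(k+1)-1 for any input in [2^k, 2^(k+1))
        return __builtin_popcountll(x) - 1;  // count of set bits, minus one
    }
    // floor_log2(1) == 0, floor_log2(8) == 3, floor_log2(1000) == 9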
+#endif + +#define __TBB_machine_windows_ia32_H + +#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) + // Workaround for overzealous compiler warnings in /Wp64 mode + #pragma warning (push) + #pragma warning (disable: 4244 4267) +#endif + +#include "msvc_ia32_common.h" + +#define __TBB_WORDSIZE 4 +#define __TBB_ENDIANNESS __TBB_ENDIAN_LITTLE + +extern "C" { + __int64 __TBB_EXPORTED_FUNC __TBB_machine_cmpswp8 (volatile void *ptr, __int64 value, __int64 comparand ); + __int64 __TBB_EXPORTED_FUNC __TBB_machine_fetchadd8 (volatile void *ptr, __int64 addend ); + __int64 __TBB_EXPORTED_FUNC __TBB_machine_fetchstore8 (volatile void *ptr, __int64 value ); + void __TBB_EXPORTED_FUNC __TBB_machine_store8 (volatile void *ptr, __int64 value ); + __int64 __TBB_EXPORTED_FUNC __TBB_machine_load8 (const volatile void *ptr); +} + +#if !__TBB_MSVC_PART_WORD_INTERLOCKED_INTRINSICS_PRESENT + +#define __TBB_MACHINE_DEFINE_ATOMICS(S,T,U,A,C) \ +static inline T __TBB_machine_cmpswp##S ( volatile void * ptr, U value, U comparand ) { \ + T result; \ + volatile T *p = (T *)ptr; \ + __asm \ + { \ + __asm mov edx, p \ + __asm mov C , value \ + __asm mov A , comparand \ + __asm lock cmpxchg [edx], C \ + __asm mov result, A \ + } \ + return result; \ +} \ +\ +static inline T __TBB_machine_fetchadd##S ( volatile void * ptr, U addend ) { \ + T result; \ + volatile T *p = (T *)ptr; \ + __asm \ + { \ + __asm mov edx, p \ + __asm mov A, addend \ + __asm lock xadd [edx], A \ + __asm mov result, A \ + } \ + return result; \ +}\ +\ +static inline T __TBB_machine_fetchstore##S ( volatile void * ptr, U value ) { \ + T result; \ + volatile T *p = (T *)ptr; \ + __asm \ + { \ + __asm mov edx, p \ + __asm mov A, value \ + __asm lock xchg [edx], A \ + __asm mov result, A \ + } \ + return result; \ +} + + +__TBB_MACHINE_DEFINE_ATOMICS(1, __int8, __int8, al, cl) +__TBB_MACHINE_DEFINE_ATOMICS(2, __int16, __int16, ax, cx) +__TBB_MACHINE_DEFINE_ATOMICS(4, ptrdiff_t, ptrdiff_t, eax, ecx) + +#undef __TBB_MACHINE_DEFINE_ATOMICS + +#endif /* __TBB_MSVC_PART_WORD_INTERLOCKED_INTRINSICS_PRESENT */ + +//TODO: Check if it possible and profitable for IA-32 architecture on (Linux and Windows) +//to use of 64-bit load/store via floating point registers together with full fence +//for sequentially consistent load/store, instead of CAS. +#define __TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE 1 +#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE 1 +#define __TBB_USE_GENERIC_RELAXED_LOAD_STORE 1 +#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1 + + +#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) + #pragma warning (pop) +#endif // warnings 4244, 4267 are back diff --git a/ohos/arm64-v8a/include/tbb/machine/windows_intel64.h b/ohos/arm64-v8a/include/tbb/machine/windows_intel64.h new file mode 100644 index 00000000..6e2a2cc7 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/machine/windows_intel64.h @@ -0,0 +1,70 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
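The fetchstore operations defined for windows_ia32.h above are atomic exchanges (lock xchg). A minimal sketch, not tbb::spin_mutex, of the classic test-and-set spinlock such an exchange primitive enables, written with std::atomic:

    #include <atomic>

    class tas_lock {
        std::atomic<int> flag{0};                 // 0 = free, 1 = held
    public:
        void lock() {
            while (flag.exchange(1, std::memory_order_acquire)) {
                // spin until the previous owner stores 0
            }
        }
        void unlock() { flag.store(0, std::memory_order_release); }
    };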
+*/ + +#if !defined(__TBB_machine_H) || defined(__TBB_machine_windows_intel64_H) +#error Do not #include this internal file directly; use public TBB headers instead. +#endif + +#define __TBB_machine_windows_intel64_H + +#define __TBB_WORDSIZE 8 +#define __TBB_ENDIANNESS __TBB_ENDIAN_LITTLE + +#include "msvc_ia32_common.h" + +#if !__TBB_MSVC_PART_WORD_INTERLOCKED_INTRINSICS_PRESENT + +#include +#pragma intrinsic(_InterlockedCompareExchange,_InterlockedExchangeAdd,_InterlockedExchange) +#pragma intrinsic(_InterlockedCompareExchange64,_InterlockedExchangeAdd64,_InterlockedExchange64) + +// ATTENTION: if you ever change argument types in machine-specific primitives, +// please take care of atomic_word<> specializations in tbb/atomic.h +extern "C" { + __int8 __TBB_EXPORTED_FUNC __TBB_machine_cmpswp1 (volatile void *ptr, __int8 value, __int8 comparand ); + __int8 __TBB_EXPORTED_FUNC __TBB_machine_fetchadd1 (volatile void *ptr, __int8 addend ); + __int8 __TBB_EXPORTED_FUNC __TBB_machine_fetchstore1 (volatile void *ptr, __int8 value ); + __int16 __TBB_EXPORTED_FUNC __TBB_machine_cmpswp2 (volatile void *ptr, __int16 value, __int16 comparand ); + __int16 __TBB_EXPORTED_FUNC __TBB_machine_fetchadd2 (volatile void *ptr, __int16 addend ); + __int16 __TBB_EXPORTED_FUNC __TBB_machine_fetchstore2 (volatile void *ptr, __int16 value ); +} + +inline long __TBB_machine_cmpswp4 (volatile void *ptr, __int32 value, __int32 comparand ) { + return _InterlockedCompareExchange( (long*)ptr, value, comparand ); +} +inline long __TBB_machine_fetchadd4 (volatile void *ptr, __int32 addend ) { + return _InterlockedExchangeAdd( (long*)ptr, addend ); +} +inline long __TBB_machine_fetchstore4 (volatile void *ptr, __int32 value ) { + return _InterlockedExchange( (long*)ptr, value ); +} + +inline __int64 __TBB_machine_cmpswp8 (volatile void *ptr, __int64 value, __int64 comparand ) { + return _InterlockedCompareExchange64( (__int64*)ptr, value, comparand ); +} +inline __int64 __TBB_machine_fetchadd8 (volatile void *ptr, __int64 addend ) { + return _InterlockedExchangeAdd64( (__int64*)ptr, addend ); +} +inline __int64 __TBB_machine_fetchstore8 (volatile void *ptr, __int64 value ) { + return _InterlockedExchange64( (__int64*)ptr, value ); +} + +#endif /* __TBB_MSVC_PART_WORD_INTERLOCKED_INTRINSICS_PRESENT */ + +#define __TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE 1 +#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE 1 +#define __TBB_USE_GENERIC_RELAXED_LOAD_STORE 1 +#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1 diff --git a/ohos/arm64-v8a/include/tbb/memory_pool.h b/ohos/arm64-v8a/include/tbb/memory_pool.h new file mode 100644 index 00000000..99a31d6a --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/memory_pool.h @@ -0,0 +1,275 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
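__TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE above means a sequentially consistent store is implemented as an exchange whose result is discarded, relying on the implicit full fence of a locked xchg on x86. A sketch of the idea with std::atomic standing in for the intrinsics:

    #include <atomic>

    static std::atomic<long> location{0};

    void full_fenced_store(long value) {
        (void)location.exchange(value, std::memory_order_seq_cst);  // result intentionally ignored
    }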
+*/ + +#ifndef __TBB_memory_pool_H +#define __TBB_memory_pool_H + +#if !TBB_PREVIEW_MEMORY_POOL +#error Set TBB_PREVIEW_MEMORY_POOL to include memory_pool.h +#endif +/** @file */ + +#include "scalable_allocator.h" +#include // std::bad_alloc +#include // std::runtime_error, std::invalid_argument +// required in C++03 to construct std::runtime_error and std::invalid_argument +#include +#if __TBB_ALLOCATOR_CONSTRUCT_VARIADIC +#include // std::forward +#endif + +#if __TBB_EXTRA_DEBUG +#define __TBBMALLOC_ASSERT ASSERT +#else +#define __TBBMALLOC_ASSERT(a,b) ((void)0) +#endif + +namespace tbb { +namespace interface6 { +//! @cond INTERNAL +namespace internal { + +//! Base of thread-safe pool allocator for variable-size requests +class pool_base : tbb::internal::no_copy { + // Pool interface is separate from standard allocator classes because it has + // to maintain internal state, no copy or assignment. Move and swap are possible. +public: + //! Reset pool to reuse its memory (free all objects at once) + void recycle() { rml::pool_reset(my_pool); } + + //! The "malloc" analogue to allocate block of memory of size bytes + void *malloc(size_t size) { return rml::pool_malloc(my_pool, size); } + + //! The "free" analogue to discard a previously allocated piece of memory. + void free(void* ptr) { rml::pool_free(my_pool, ptr); } + + //! The "realloc" analogue complementing pool_malloc. + // Enables some low-level optimization possibilities + void *realloc(void* ptr, size_t size) { + return rml::pool_realloc(my_pool, ptr, size); + } + +protected: + //! destroy pool - must be called in a child class + void destroy() { rml::pool_destroy(my_pool); } + + rml::MemoryPool *my_pool; +}; + +} // namespace internal +//! @endcond + +#if _MSC_VER && !defined(__INTEL_COMPILER) + // Workaround for erroneous "unreferenced parameter" warning in method destroy. + #pragma warning (push) + #pragma warning (disable: 4100) +#endif + +//! Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5 +/** @ingroup memory_allocation */ +template +class memory_pool_allocator { +protected: + typedef P pool_type; + pool_type *my_pool; + template + friend class memory_pool_allocator; + template + friend bool operator==( const memory_pool_allocator& a, const memory_pool_allocator& b); + template + friend bool operator!=( const memory_pool_allocator& a, const memory_pool_allocator& b); +public: + typedef typename tbb::internal::allocator_type::value_type value_type; + typedef value_type* pointer; + typedef const value_type* const_pointer; + typedef value_type& reference; + typedef const value_type& const_reference; + typedef size_t size_type; + typedef ptrdiff_t difference_type; + template struct rebind { + typedef memory_pool_allocator other; + }; + + explicit memory_pool_allocator(pool_type &pool) throw() : my_pool(&pool) {} + memory_pool_allocator(const memory_pool_allocator& src) throw() : my_pool(src.my_pool) {} + template + memory_pool_allocator(const memory_pool_allocator& src) throw() : my_pool(src.my_pool) {} + + pointer address(reference x) const { return &x; } + const_pointer address(const_reference x) const { return &x; } + + //! Allocate space for n objects. + pointer allocate( size_type n, const void* /*hint*/ = 0) { + pointer p = static_cast( my_pool->malloc( n*sizeof(value_type) ) ); + if (!p) + tbb::internal::throw_exception(std::bad_alloc()); + return p; + } + //! Free previously allocated block of memory. + void deallocate( pointer p, size_type ) { + my_pool->free(p); + } + //! 
Largest value for which method allocate might succeed. + size_type max_size() const throw() { + size_type max = static_cast(-1) / sizeof (value_type); + return (max > 0 ? max : 1); + } + //! Copy-construct value at location pointed to by p. +#if __TBB_ALLOCATOR_CONSTRUCT_VARIADIC + template + void construct(U *p, Args&&... args) + { ::new((void *)p) U(std::forward(args)...); } +#else // __TBB_ALLOCATOR_CONSTRUCT_VARIADIC +#if __TBB_CPP11_RVALUE_REF_PRESENT + void construct( pointer p, value_type&& value ) {::new((void*)(p)) value_type(std::move(value));} +#endif + void construct( pointer p, const value_type& value ) { ::new((void*)(p)) value_type(value); } +#endif // __TBB_ALLOCATOR_CONSTRUCT_VARIADIC + + //! Destroy value at location pointed to by p. + void destroy( pointer p ) { p->~value_type(); } + +}; + +#if _MSC_VER && !defined(__INTEL_COMPILER) + #pragma warning (pop) +#endif // warning 4100 is back + +//! Analogous to std::allocator, as defined in ISO C++ Standard, Section 20.4.1 +/** @ingroup memory_allocation */ +template +class memory_pool_allocator { +public: + typedef P pool_type; + typedef void* pointer; + typedef const void* const_pointer; + typedef void value_type; + template struct rebind { + typedef memory_pool_allocator other; + }; + + explicit memory_pool_allocator( pool_type &pool) throw() : my_pool(&pool) {} + memory_pool_allocator( const memory_pool_allocator& src) throw() : my_pool(src.my_pool) {} + template + memory_pool_allocator(const memory_pool_allocator& src) throw() : my_pool(src.my_pool) {} + +protected: + pool_type *my_pool; + template + friend class memory_pool_allocator; + template + friend bool operator==( const memory_pool_allocator& a, const memory_pool_allocator& b); + template + friend bool operator!=( const memory_pool_allocator& a, const memory_pool_allocator& b); +}; + +template +inline bool operator==( const memory_pool_allocator& a, const memory_pool_allocator& b) {return a.my_pool==b.my_pool;} + +template +inline bool operator!=( const memory_pool_allocator& a, const memory_pool_allocator& b) {return a.my_pool!=b.my_pool;} + + +//! Thread-safe growable pool allocator for variable-size requests +template +class memory_pool : public internal::pool_base { + Alloc my_alloc; // TODO: base-class optimization + static void *allocate_request(intptr_t pool_id, size_t & bytes); + static int deallocate_request(intptr_t pool_id, void*, size_t raw_bytes); + +public: + //! construct pool with underlying allocator + explicit memory_pool(const Alloc &src = Alloc()); + + //! destroy pool + ~memory_pool() { destroy(); } // call the callbacks first and destroy my_alloc latter + +}; + +class fixed_pool : public internal::pool_base { + void *my_buffer; + size_t my_size; + inline static void *allocate_request(intptr_t pool_id, size_t & bytes); + +public: + //! construct pool with underlying allocator + inline fixed_pool(void *buf, size_t size); + //! 
destroy pool + ~fixed_pool() { destroy(); } +}; + +//////////////// Implementation /////////////// + +template +memory_pool::memory_pool(const Alloc &src) : my_alloc(src) { + rml::MemPoolPolicy args(allocate_request, deallocate_request, + sizeof(typename Alloc::value_type)); + rml::MemPoolError res = rml::pool_create_v1(intptr_t(this), &args, &my_pool); + if (res!=rml::POOL_OK) + tbb::internal::throw_exception(std::runtime_error("Can't create pool")); +} +template +void *memory_pool::allocate_request(intptr_t pool_id, size_t & bytes) { + memory_pool &self = *reinterpret_cast*>(pool_id); + const size_t unit_size = sizeof(typename Alloc::value_type); + __TBBMALLOC_ASSERT( 0 == bytes%unit_size, NULL); + void *ptr; + __TBB_TRY { ptr = self.my_alloc.allocate( bytes/unit_size ); } + __TBB_CATCH(...) { return 0; } + return ptr; +} +#if __TBB_MSVC_UNREACHABLE_CODE_IGNORED + // Workaround for erroneous "unreachable code" warning in the template below. + // Specific for VC++ 17-18 compiler + #pragma warning (push) + #pragma warning (disable: 4702) +#endif +template +int memory_pool::deallocate_request(intptr_t pool_id, void* raw_ptr, size_t raw_bytes) { + memory_pool &self = *reinterpret_cast*>(pool_id); + const size_t unit_size = sizeof(typename Alloc::value_type); + __TBBMALLOC_ASSERT( 0 == raw_bytes%unit_size, NULL); + self.my_alloc.deallocate( static_cast(raw_ptr), raw_bytes/unit_size ); + return 0; +} +#if __TBB_MSVC_UNREACHABLE_CODE_IGNORED + #pragma warning (pop) +#endif +inline fixed_pool::fixed_pool(void *buf, size_t size) : my_buffer(buf), my_size(size) { + if (!buf || !size) + // TODO: improve support for mode with exceptions disabled + tbb::internal::throw_exception(std::invalid_argument("Zero in parameter is invalid")); + rml::MemPoolPolicy args(allocate_request, 0, size, /*fixedPool=*/true); + rml::MemPoolError res = rml::pool_create_v1(intptr_t(this), &args, &my_pool); + if (res!=rml::POOL_OK) + tbb::internal::throw_exception(std::runtime_error("Can't create pool")); +} +inline void *fixed_pool::allocate_request(intptr_t pool_id, size_t & bytes) { + fixed_pool &self = *reinterpret_cast(pool_id); + __TBBMALLOC_ASSERT(0 != self.my_size, "The buffer must not be used twice."); + bytes = self.my_size; + self.my_size = 0; // remember that buffer has been used + return self.my_buffer; +} + +} //namespace interface6 +using interface6::memory_pool_allocator; +using interface6::memory_pool; +using interface6::fixed_pool; +} //namespace tbb + +#undef __TBBMALLOC_ASSERT +#endif// __TBB_memory_pool_H diff --git a/ohos/arm64-v8a/include/tbb/mutex.h b/ohos/arm64-v8a/include/tbb/mutex.h new file mode 100644 index 00000000..94269f3d --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/mutex.h @@ -0,0 +1,246 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
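A usage sketch consistent with the preview pool API defined in memory_pool.h above; TBB_PREVIEW_MEMORY_POOL must be set before the include, as the header requires, and the buffer size below is only illustrative:

    #define TBB_PREVIEW_MEMORY_POOL 1
    #include "tbb/memory_pool.h"
    #include <vector>

    int main() {
        // Growable pool backed by std::allocator<char>, used through a standard container.
        tbb::memory_pool<std::allocator<char> > pool;
        typedef tbb::memory_pool_allocator<int> pool_alloc_t;
        pool_alloc_t alloc(pool);
        std::vector<int, pool_alloc_t> v(alloc);
        v.push_back(42);

        // Fixed-size pool carved out of a caller-supplied buffer.
        static char buffer[1024 * 1024];
        tbb::fixed_pool fpool(buffer, sizeof(buffer));
        void* p = fpool.malloc(256);
        fpool.free(p);
        return 0;
    }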
+*/ + +#include "internal/_deprecated_header_message_guard.h" + +#if !defined(__TBB_show_deprecation_message_mutex_H) && defined(__TBB_show_deprecated_header_message) +#define __TBB_show_deprecation_message_mutex_H +#pragma message("TBB Warning: tbb/mutex.h is deprecated. For details, please see Deprecated Features appendix in the TBB reference manual.") +#endif + +#if defined(__TBB_show_deprecated_header_message) +#undef __TBB_show_deprecated_header_message +#endif + +#ifndef __TBB_mutex_H +#define __TBB_mutex_H + +#define __TBB_mutex_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + +#if _WIN32||_WIN64 +#include "machine/windows_api.h" +#else +#include +#endif /* _WIN32||_WIN64 */ + +#include +#include "aligned_space.h" +#include "tbb_stddef.h" +#include "tbb_profiling.h" + +namespace tbb { + +//! Wrapper around the platform's native lock. +/** @ingroup synchronization */ +class __TBB_DEPRECATED_IN_VERBOSE_MODE_MSG("tbb::critical_section is deprecated, use std::mutex") mutex : internal::mutex_copy_deprecated_and_disabled { +public: + //! Construct unacquired mutex. + mutex() { +#if TBB_USE_ASSERT || TBB_USE_THREADING_TOOLS + internal_construct(); +#else + #if _WIN32||_WIN64 + InitializeCriticalSectionEx(&impl, 4000, 0); + #else + int error_code = pthread_mutex_init(&impl,NULL); + if( error_code ) + tbb::internal::handle_perror(error_code,"mutex: pthread_mutex_init failed"); + #endif /* _WIN32||_WIN64*/ +#endif /* TBB_USE_ASSERT */ + }; + + ~mutex() { +#if TBB_USE_ASSERT + internal_destroy(); +#else + #if _WIN32||_WIN64 + DeleteCriticalSection(&impl); + #else + pthread_mutex_destroy(&impl); + + #endif /* _WIN32||_WIN64 */ +#endif /* TBB_USE_ASSERT */ + }; + + class scoped_lock; + friend class scoped_lock; + + //! The scoped locking pattern + /** It helps to avoid the common problem of forgetting to release lock. + It also nicely provides the "node" for queuing locks. */ + class scoped_lock : internal::no_copy { + public: + //! Construct lock that has not acquired a mutex. + scoped_lock() : my_mutex(NULL) {}; + + //! Acquire lock on given mutex. + scoped_lock( mutex& mutex ) { + acquire( mutex ); + } + + //! Release lock (if lock is held). + ~scoped_lock() { + if( my_mutex ) + release(); + } + + //! Acquire lock on given mutex. + void acquire( mutex& mutex ) { +#if TBB_USE_ASSERT + internal_acquire(mutex); +#else + mutex.lock(); + my_mutex = &mutex; +#endif /* TBB_USE_ASSERT */ + } + + //! Try acquire lock on given mutex. + bool try_acquire( mutex& mutex ) { +#if TBB_USE_ASSERT + return internal_try_acquire (mutex); +#else + bool result = mutex.try_lock(); + if( result ) + my_mutex = &mutex; + return result; +#endif /* TBB_USE_ASSERT */ + } + + //! Release lock + void release() { +#if TBB_USE_ASSERT + internal_release (); +#else + my_mutex->unlock(); + my_mutex = NULL; +#endif /* TBB_USE_ASSERT */ + } + + private: + //! The pointer to the current mutex to work + mutex* my_mutex; + + //! All checks from acquire using mutex.state were moved here + void __TBB_EXPORTED_METHOD internal_acquire( mutex& m ); + + //! All checks from try_acquire using mutex.state were moved here + bool __TBB_EXPORTED_METHOD internal_try_acquire( mutex& m ); + + //! All checks from release using mutex.state were moved here + void __TBB_EXPORTED_METHOD internal_release(); + + friend class mutex; + }; + + // Mutex traits + static const bool is_rw_mutex = false; + static const bool is_recursive_mutex = false; + static const bool is_fair_mutex = false; + + // ISO C++0x compatibility methods + + //! 
Acquire lock + void lock() { +#if TBB_USE_ASSERT + aligned_space tmp; + new(tmp.begin()) scoped_lock(*this); +#else + #if _WIN32||_WIN64 + EnterCriticalSection(&impl); + #else + int error_code = pthread_mutex_lock(&impl); + if( error_code ) + tbb::internal::handle_perror(error_code,"mutex: pthread_mutex_lock failed"); + #endif /* _WIN32||_WIN64 */ +#endif /* TBB_USE_ASSERT */ + } + + //! Try acquiring lock (non-blocking) + /** Return true if lock acquired; false otherwise. */ + bool try_lock() { +#if TBB_USE_ASSERT + aligned_space tmp; + scoped_lock& s = *tmp.begin(); + s.my_mutex = NULL; + return s.internal_try_acquire(*this); +#else + #if _WIN32||_WIN64 + return TryEnterCriticalSection(&impl)!=0; + #else + return pthread_mutex_trylock(&impl)==0; + #endif /* _WIN32||_WIN64 */ +#endif /* TBB_USE_ASSERT */ + } + + //! Release lock + void unlock() { +#if TBB_USE_ASSERT + aligned_space tmp; + scoped_lock& s = *tmp.begin(); + s.my_mutex = this; + s.internal_release(); +#else + #if _WIN32||_WIN64 + LeaveCriticalSection(&impl); + #else + pthread_mutex_unlock(&impl); + #endif /* _WIN32||_WIN64 */ +#endif /* TBB_USE_ASSERT */ + } + + //! Return native_handle + #if _WIN32||_WIN64 + typedef LPCRITICAL_SECTION native_handle_type; + #else + typedef pthread_mutex_t* native_handle_type; + #endif + native_handle_type native_handle() { return (native_handle_type) &impl; } + + enum state_t { + INITIALIZED=0x1234, + DESTROYED=0x789A, + HELD=0x56CD + }; +private: +#if _WIN32||_WIN64 + CRITICAL_SECTION impl; + enum state_t state; +#else + pthread_mutex_t impl; +#endif /* _WIN32||_WIN64 */ + + //! All checks from mutex constructor using mutex.state were moved here + void __TBB_EXPORTED_METHOD internal_construct(); + + //! All checks from mutex destructor using mutex.state were moved here + void __TBB_EXPORTED_METHOD internal_destroy(); + +#if _WIN32||_WIN64 +public: + //! Set the internal state + void set_state( state_t to ) { state = to; } +#endif +}; + +__TBB_DEFINE_PROFILING_SET_NAME(mutex) + +} // namespace tbb + +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_mutex_H_include_area + +#endif /* __TBB_mutex_H */ diff --git a/ohos/arm64-v8a/include/tbb/null_mutex.h b/ohos/arm64-v8a/include/tbb/null_mutex.h new file mode 100644 index 00000000..3c7bad1a --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/null_mutex.h @@ -0,0 +1,50 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_null_mutex_H +#define __TBB_null_mutex_H + +#include "tbb_stddef.h" + +namespace tbb { + +//! A mutex which does nothing +/** A null_mutex does no operation and simulates success. + @ingroup synchronization */ +class null_mutex : internal::mutex_copy_deprecated_and_disabled { +public: + //! Represents acquisition of a mutex. 
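Typical use of the deprecated tbb::mutex wrapper defined above goes through scoped_lock, which releases in its destructor; new code should prefer std::mutex with std::lock_guard, as the deprecation message suggests. A short sketch:

    #include "tbb/mutex.h"

    static tbb::mutex g_mutex;
    static int g_counter = 0;

    void bump() {
        tbb::mutex::scoped_lock lock(g_mutex);  // acquired here, released when lock leaves scope
        ++g_counter;
    }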
+ class scoped_lock : internal::no_copy { + public: + scoped_lock() {} + scoped_lock( null_mutex& ) {} + ~scoped_lock() {} + void acquire( null_mutex& ) {} + bool try_acquire( null_mutex& ) { return true; } + void release() {} + }; + + null_mutex() {} + + // Mutex traits + static const bool is_rw_mutex = false; + static const bool is_recursive_mutex = true; + static const bool is_fair_mutex = true; +}; + +} + +#endif /* __TBB_null_mutex_H */ diff --git a/ohos/arm64-v8a/include/tbb/null_rw_mutex.h b/ohos/arm64-v8a/include/tbb/null_rw_mutex.h new file mode 100644 index 00000000..f1ea4df6 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/null_rw_mutex.h @@ -0,0 +1,52 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_null_rw_mutex_H +#define __TBB_null_rw_mutex_H + +#include "tbb_stddef.h" + +namespace tbb { + +//! A rw mutex which does nothing +/** A null_rw_mutex is a rw mutex that does nothing and simulates successful operation. + @ingroup synchronization */ +class null_rw_mutex : internal::mutex_copy_deprecated_and_disabled { +public: + //! Represents acquisition of a mutex. + class scoped_lock : internal::no_copy { + public: + scoped_lock() {} + scoped_lock( null_rw_mutex& , bool = true ) {} + ~scoped_lock() {} + void acquire( null_rw_mutex& , bool = true ) {} + bool upgrade_to_writer() { return true; } + bool downgrade_to_reader() { return true; } + bool try_acquire( null_rw_mutex& , bool = true ) { return true; } + void release() {} + }; + + null_rw_mutex() {} + + // Mutex traits + static const bool is_rw_mutex = true; + static const bool is_recursive_mutex = true; + static const bool is_fair_mutex = true; +}; + +} + +#endif /* __TBB_null_rw_mutex_H */ diff --git a/ohos/arm64-v8a/include/tbb/parallel_do.h b/ohos/arm64-v8a/include/tbb/parallel_do.h new file mode 100644 index 00000000..1b63e279 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/parallel_do.h @@ -0,0 +1,553 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_parallel_do_H +#define __TBB_parallel_do_H + +#define __TBB_parallel_do_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + +#include "internal/_range_iterator.h" +#include "internal/_template_helpers.h" +#include "task.h" +#include "aligned_space.h" +#include + +namespace tbb { +namespace interface9 { +//! @cond INTERNAL +namespace internal { + template class parallel_do_feeder_impl; +} // namespace internal +//! @endcond + +//! 
Class the user supplied algorithm body uses to add new tasks +/** \param Item Work item type **/ + template + class parallel_do_feeder: ::tbb::internal::no_copy + { + parallel_do_feeder() {} + virtual ~parallel_do_feeder () {} + virtual void internal_add_copy( const Item& item ) = 0; +#if __TBB_CPP11_RVALUE_REF_PRESENT + virtual void internal_add_move( Item&& item ) = 0; +#endif + template friend class internal::parallel_do_feeder_impl; + public: + //! Add a work item to a running parallel_do. + void add( const Item& item ) {internal_add_copy(item);} +#if __TBB_CPP11_RVALUE_REF_PRESENT + void add( Item&& item ) {internal_add_move(std::move(item));} +#endif + }; + +//! @cond INTERNAL +namespace internal { + template class do_group_task; + + //! For internal use only. + /** Selects one of the two possible forms of function call member operator. + @ingroup algorithms **/ + template + class parallel_do_operator_selector + { + typedef parallel_do_feeder Feeder; + template + static void internal_call( const Body& obj, __TBB_FORWARDING_REF(A1) arg1, A2&, void (Body::*)(CvItem) const ) { + obj(tbb::internal::forward(arg1)); + } + template + static void internal_call( const Body& obj, __TBB_FORWARDING_REF(A1) arg1, A2& arg2, void (Body::*)(CvItem, parallel_do_feeder&) const ) { + obj(tbb::internal::forward(arg1), arg2); + } + template + static void internal_call( const Body& obj, __TBB_FORWARDING_REF(A1) arg1, A2&, void (Body::*)(CvItem&) const ) { + obj(arg1); + } + template + static void internal_call( const Body& obj, __TBB_FORWARDING_REF(A1) arg1, A2& arg2, void (Body::*)(CvItem&, parallel_do_feeder&) const ) { + obj(arg1, arg2); + } + public: + template + static void call( const Body& obj, __TBB_FORWARDING_REF(A1) arg1, A2& arg2 ) + { + internal_call( obj, tbb::internal::forward(arg1), arg2, &Body::operator() ); + } + }; + + //! For internal use only. + /** Executes one iteration of a do. + @ingroup algorithms */ + template + class do_iteration_task: public task + { + typedef parallel_do_feeder_impl feeder_type; + + Item my_value; + feeder_type& my_feeder; + + do_iteration_task( const Item& value, feeder_type& feeder ) : + my_value(value), my_feeder(feeder) + {} + +#if __TBB_CPP11_RVALUE_REF_PRESENT + do_iteration_task( Item&& value, feeder_type& feeder ) : + my_value(std::move(value)), my_feeder(feeder) + {} +#endif + + task* execute() __TBB_override + { + parallel_do_operator_selector::call(*my_feeder.my_body, tbb::internal::move(my_value), my_feeder); + return NULL; + } + + template friend class parallel_do_feeder_impl; + }; // class do_iteration_task + + template + class do_iteration_task_iter: public task + { + typedef parallel_do_feeder_impl feeder_type; + + Iterator my_iter; + feeder_type& my_feeder; + + do_iteration_task_iter( const Iterator& iter, feeder_type& feeder ) : + my_iter(iter), my_feeder(feeder) + {} + + task* execute() __TBB_override + { + parallel_do_operator_selector::call(*my_feeder.my_body, *my_iter, my_feeder); + return NULL; + } + + template friend class do_group_task_forward; + template friend class do_group_task_input; + template friend class do_task_iter; + }; // class do_iteration_task_iter + + //! For internal use only. + /** Implements new task adding procedure. 
+ @ingroup algorithms **/ + template + class parallel_do_feeder_impl : public parallel_do_feeder + { +#if __TBB_CPP11_RVALUE_REF_PRESENT + //Avoiding use of copy constructor in a virtual method if the type does not support it + void internal_add_copy_impl(std::true_type, const Item& item) { + typedef do_iteration_task iteration_type; + iteration_type& t = *new (task::allocate_additional_child_of(*my_barrier)) iteration_type(item, *this); + task::spawn(t); + } + void internal_add_copy_impl(std::false_type, const Item&) { + __TBB_ASSERT(false, "Overloading for r-value reference doesn't work or it's not movable and not copyable object"); + } + void internal_add_copy( const Item& item ) __TBB_override + { +#if __TBB_CPP11_IS_COPY_CONSTRUCTIBLE_PRESENT + internal_add_copy_impl(typename std::is_copy_constructible::type(), item); +#else + internal_add_copy_impl(std::true_type(), item); +#endif + } + void internal_add_move( Item&& item ) __TBB_override + { + typedef do_iteration_task iteration_type; + iteration_type& t = *new (task::allocate_additional_child_of(*my_barrier)) iteration_type(std::move(item), *this); + task::spawn(t); + } +#else /* ! __TBB_CPP11_RVALUE_REF_PRESENT */ + void internal_add_copy(const Item& item) __TBB_override { + typedef do_iteration_task iteration_type; + iteration_type& t = *new (task::allocate_additional_child_of(*my_barrier)) iteration_type(item, *this); + task::spawn(t); + } +#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */ + public: + const Body* my_body; + empty_task* my_barrier; + + parallel_do_feeder_impl() + { + my_barrier = new( task::allocate_root() ) empty_task(); + __TBB_ASSERT(my_barrier, "root task allocation failed"); + } + +#if __TBB_TASK_GROUP_CONTEXT + parallel_do_feeder_impl(tbb::task_group_context &context) + { + my_barrier = new( task::allocate_root(context) ) empty_task(); + __TBB_ASSERT(my_barrier, "root task allocation failed"); + } +#endif + + ~parallel_do_feeder_impl() + { + my_barrier->destroy(*my_barrier); + } + }; // class parallel_do_feeder_impl + + + //! For internal use only + /** Unpacks a block of iterations. 
+ @ingroup algorithms */ + + template + class do_group_task_forward: public task + { + static const size_t max_arg_size = 4; + + typedef parallel_do_feeder_impl feeder_type; + + feeder_type& my_feeder; + Iterator my_first; + size_t my_size; + + do_group_task_forward( Iterator first, size_t size, feeder_type& feeder ) + : my_feeder(feeder), my_first(first), my_size(size) + {} + + task* execute() __TBB_override + { + typedef do_iteration_task_iter iteration_type; + __TBB_ASSERT( my_size>0, NULL ); + task_list list; + task* t; + size_t k=0; + for(;;) { + t = new( allocate_child() ) iteration_type( my_first, my_feeder ); + ++my_first; + if( ++k==my_size ) break; + list.push_back(*t); + } + set_ref_count(int(k+1)); + spawn(list); + spawn_and_wait_for_all(*t); + return NULL; + } + + template friend class do_task_iter; + }; // class do_group_task_forward + + template + class do_group_task_input: public task + { + static const size_t max_arg_size = 4; + + typedef parallel_do_feeder_impl feeder_type; + + feeder_type& my_feeder; + size_t my_size; + aligned_space my_arg; + + do_group_task_input( feeder_type& feeder ) + : my_feeder(feeder), my_size(0) + {} + + task* execute() __TBB_override + { +#if __TBB_CPP11_RVALUE_REF_PRESENT + typedef std::move_iterator Item_iterator; +#else + typedef Item* Item_iterator; +#endif + typedef do_iteration_task_iter iteration_type; + __TBB_ASSERT( my_size>0, NULL ); + task_list list; + task* t; + size_t k=0; + for(;;) { + t = new( allocate_child() ) iteration_type( Item_iterator(my_arg.begin() + k), my_feeder ); + if( ++k==my_size ) break; + list.push_back(*t); + } + set_ref_count(int(k+1)); + spawn(list); + spawn_and_wait_for_all(*t); + return NULL; + } + + ~do_group_task_input(){ + for( size_t k=0; k~Item(); + } + + template friend class do_task_iter; + }; // class do_group_task_input + + //! For internal use only. + /** Gets block of iterations and packages them into a do_group_task. + @ingroup algorithms */ + template + class do_task_iter: public task + { + typedef parallel_do_feeder_impl feeder_type; + + public: + do_task_iter( Iterator first, Iterator last , feeder_type& feeder ) : + my_first(first), my_last(last), my_feeder(feeder) + {} + + private: + Iterator my_first; + Iterator my_last; + feeder_type& my_feeder; + + /* Do not merge run(xxx) and run_xxx() methods. They are separated in order + to make sure that compilers will eliminate unused argument of type xxx + (that is will not put it on stack). The sole purpose of this argument + is overload resolution. + + An alternative could be using template functions, but explicit specialization + of member function templates is not supported for non specialized class + templates. Besides template functions would always fall back to the least + efficient variant (the one for input iterators) in case of iterators having + custom tags derived from basic ones. */ + task* execute() __TBB_override + { + typedef typename std::iterator_traits::iterator_category iterator_tag; + return run( (iterator_tag*)NULL ); + } + + /** This is the most restricted variant that operates on input iterators or + iterators with unknown tags (tags not derived from the standard ones). 
**/ + inline task* run( void* ) { return run_for_input_iterator(); } + + task* run_for_input_iterator() { + typedef do_group_task_input block_type; + + block_type& t = *new( allocate_additional_child_of(*my_feeder.my_barrier) ) block_type(my_feeder); + size_t k=0; + while( !(my_first == my_last) ) { + // Move semantics are automatically used when supported by the iterator + new (t.my_arg.begin() + k) Item(*my_first); + ++my_first; + if( ++k==block_type::max_arg_size ) { + if ( !(my_first == my_last) ) + recycle_to_reexecute(); + break; + } + } + if( k==0 ) { + destroy(t); + return NULL; + } else { + t.my_size = k; + return &t; + } + } + + inline task* run( std::forward_iterator_tag* ) { return run_for_forward_iterator(); } + + task* run_for_forward_iterator() { + typedef do_group_task_forward block_type; + + Iterator first = my_first; + size_t k=0; + while( !(my_first==my_last) ) { + ++my_first; + if( ++k==block_type::max_arg_size ) { + if ( !(my_first==my_last) ) + recycle_to_reexecute(); + break; + } + } + return k==0 ? NULL : new( allocate_additional_child_of(*my_feeder.my_barrier) ) block_type(first, k, my_feeder); + } + + inline task* run( std::random_access_iterator_tag* ) { return run_for_random_access_iterator(); } + + task* run_for_random_access_iterator() { + typedef do_group_task_forward block_type; + typedef do_iteration_task_iter iteration_type; + + size_t k = static_cast(my_last-my_first); + if( k > block_type::max_arg_size ) { + Iterator middle = my_first + k/2; + + empty_task& c = *new( allocate_continuation() ) empty_task; + do_task_iter& b = *new( c.allocate_child() ) do_task_iter(middle, my_last, my_feeder); + recycle_as_child_of(c); + + my_last = middle; + c.set_ref_count(2); + c.spawn(b); + return this; + }else if( k != 0 ) { + task_list list; + task* t; + size_t k1=0; + for(;;) { + t = new( allocate_child() ) iteration_type(my_first, my_feeder); + ++my_first; + if( ++k1==k ) break; + list.push_back(*t); + } + set_ref_count(int(k+1)); + spawn(list); + spawn_and_wait_for_all(*t); + } + return NULL; + } + }; // class do_task_iter + + //! For internal use only. + /** Implements parallel iteration over a range. + @ingroup algorithms */ + template + void run_parallel_do( Iterator first, Iterator last, const Body& body +#if __TBB_TASK_GROUP_CONTEXT + , task_group_context& context +#endif + ) + { + typedef do_task_iter root_iteration_task; +#if __TBB_TASK_GROUP_CONTEXT + parallel_do_feeder_impl feeder(context); +#else + parallel_do_feeder_impl feeder; +#endif + feeder.my_body = &body; + + root_iteration_task &t = *new( feeder.my_barrier->allocate_child() ) root_iteration_task(first, last, feeder); + + feeder.my_barrier->set_ref_count(2); + feeder.my_barrier->spawn_and_wait_for_all(t); + } + + //! For internal use only. + /** Detects types of Body's operator function arguments. + @ingroup algorithms **/ + template + void select_parallel_do( Iterator first, Iterator last, const Body& body, void (Body::*)(Item) const +#if __TBB_TASK_GROUP_CONTEXT + , task_group_context& context +#endif + ) + { + run_parallel_do::type>( first, last, body +#if __TBB_TASK_GROUP_CONTEXT + , context +#endif + ); + } + + //! For internal use only. + /** Detects types of Body's operator function arguments. 
+ @ingroup algorithms **/ + template + void select_parallel_do( Iterator first, Iterator last, const Body& body, void (Body::*)(Item, parallel_do_feeder<_Item>&) const +#if __TBB_TASK_GROUP_CONTEXT + , task_group_context& context +#endif + ) + { + run_parallel_do::type>( first, last, body +#if __TBB_TASK_GROUP_CONTEXT + , context +#endif + ); + } + +} // namespace internal +} // namespace interface9 +//! @endcond + +/** \page parallel_do_body_req Requirements on parallel_do body + Class \c Body implementing the concept of parallel_do body must define: + - \code + B::operator()( + cv_item_type item, + parallel_do_feeder& feeder + ) const + + OR + + B::operator()( cv_item_type& item ) const + \endcode Process item. + May be invoked concurrently for the same \c this but different \c item. + + - \code item_type( const item_type& ) \endcode + Copy a work item. + - \code ~item_type() \endcode Destroy a work item +**/ + +/** \name parallel_do + See also requirements on \ref parallel_do_body_req "parallel_do Body". **/ +//@{ +//! Parallel iteration over a range, with optional addition of more work. +/** @ingroup algorithms */ +template +void parallel_do( Iterator first, Iterator last, const Body& body ) +{ + if ( first == last ) + return; +#if __TBB_TASK_GROUP_CONTEXT + task_group_context context(internal::PARALLEL_DO); +#endif + interface9::internal::select_parallel_do( first, last, body, &Body::operator() +#if __TBB_TASK_GROUP_CONTEXT + , context +#endif + ); +} + +template +void parallel_do(Range& rng, const Body& body) { + parallel_do(tbb::internal::first(rng), tbb::internal::last(rng), body); +} + +template +void parallel_do(const Range& rng, const Body& body) { + parallel_do(tbb::internal::first(rng), tbb::internal::last(rng), body); +} + +#if __TBB_TASK_GROUP_CONTEXT +//! Parallel iteration over a range, with optional addition of more work and user-supplied context +/** @ingroup algorithms */ +template +void parallel_do( Iterator first, Iterator last, const Body& body, task_group_context& context ) +{ + if ( first == last ) + return; + interface9::internal::select_parallel_do( first, last, body, &Body::operator(), context ); +} + +template +void parallel_do(Range& rng, const Body& body, task_group_context& context) { + parallel_do(tbb::internal::first(rng), tbb::internal::last(rng), body, context); +} + +template +void parallel_do(const Range& rng, const Body& body, task_group_context& context) { + parallel_do(tbb::internal::first(rng), tbb::internal::last(rng), body, context); +} + +#endif // __TBB_TASK_GROUP_CONTEXT + +//@} + +using interface9::parallel_do_feeder; + +} // namespace + +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_parallel_do_H_include_area + +#endif /* __TBB_parallel_do_H */ diff --git a/ohos/arm64-v8a/include/tbb/parallel_for.h b/ohos/arm64-v8a/include/tbb/parallel_for.h new file mode 100644 index 00000000..0b4861f4 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/parallel_for.h @@ -0,0 +1,425 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_parallel_for_H +#define __TBB_parallel_for_H + +#define __TBB_parallel_for_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + +#include +#include "task.h" +#include "partitioner.h" +#include "blocked_range.h" +#include "tbb_exception.h" +#include "internal/_tbb_trace_impl.h" + +namespace tbb { + +namespace interface9 { +//! @cond INTERNAL +namespace internal { + + //! allocate right task with new parent + void* allocate_sibling(task* start_for_task, size_t bytes); + + //! Task type used in parallel_for + /** @ingroup algorithms */ + template + class start_for: public task { + Range my_range; + const Body my_body; + typename Partitioner::task_partition_type my_partition; + task* execute() __TBB_override; + + //! Update affinity info, if any. + void note_affinity( affinity_id id ) __TBB_override { + my_partition.note_affinity( id ); + } + + public: + //! Constructor for root task. + start_for( const Range& range, const Body& body, Partitioner& partitioner ) : + my_range(range), + my_body(body), + my_partition(partitioner) + { + tbb::internal::fgt_algorithm(tbb::internal::PARALLEL_FOR_TASK, this, NULL); + } + //! Splitting constructor used to generate children. + /** parent_ becomes left child. Newly constructed object is right child. */ + start_for( start_for& parent_, typename Partitioner::split_type& split_obj) : + my_range(parent_.my_range, split_obj), + my_body(parent_.my_body), + my_partition(parent_.my_partition, split_obj) + { + my_partition.set_affinity(*this); + tbb::internal::fgt_algorithm(tbb::internal::PARALLEL_FOR_TASK, this, (void *)&parent_); + } + //! Construct right child from the given range as response to the demand. + /** parent_ remains left child. Newly constructed object is right child. */ + start_for( start_for& parent_, const Range& r, depth_t d ) : + my_range(r), + my_body(parent_.my_body), + my_partition(parent_.my_partition, split()) + { + my_partition.set_affinity(*this); + my_partition.align_depth( d ); + tbb::internal::fgt_algorithm(tbb::internal::PARALLEL_FOR_TASK, this, (void *)&parent_); + } + static void run( const Range& range, const Body& body, Partitioner& partitioner ) { + if( !range.empty() ) { +#if !__TBB_TASK_GROUP_CONTEXT || TBB_JOIN_OUTER_TASK_GROUP + start_for& a = *new(task::allocate_root()) start_for(range,body,partitioner); +#else + // Bound context prevents exceptions from body to affect nesting or sibling algorithms, + // and allows users to handle exceptions safely by wrapping parallel_for in the try-block. + task_group_context context(PARALLEL_FOR); + start_for& a = *new(task::allocate_root(context)) start_for(range,body,partitioner); +#endif /* __TBB_TASK_GROUP_CONTEXT && !TBB_JOIN_OUTER_TASK_GROUP */ + // REGION BEGIN + fgt_begin_algorithm( tbb::internal::PARALLEL_FOR_TASK, (void*)&context ); + task::spawn_root_and_wait(a); + fgt_end_algorithm( (void*)&context ); + // REGION END + } + } +#if __TBB_TASK_GROUP_CONTEXT + static void run( const Range& range, const Body& body, Partitioner& partitioner, task_group_context& context ) { + if( !range.empty() ) { + start_for& a = *new(task::allocate_root(context)) start_for(range,body,partitioner); + // REGION BEGIN + fgt_begin_algorithm( tbb::internal::PARALLEL_FOR_TASK, (void*)&context ); + task::spawn_root_and_wait(a); + fgt_end_algorithm( (void*)&context ); + // END REGION + } + } +#endif /* __TBB_TASK_GROUP_CONTEXT */ + //! 
Run body for range, serves as callback for partitioner + void run_body( Range &r ) { + fgt_alg_begin_body( tbb::internal::PARALLEL_FOR_TASK, (void *)const_cast(&(this->my_body)), (void*)this ); + my_body( r ); + fgt_alg_end_body( (void *)const_cast(&(this->my_body)) ); + } + + //! spawn right task, serves as callback for partitioner + void offer_work(typename Partitioner::split_type& split_obj) { + spawn( *new( allocate_sibling(static_cast(this), sizeof(start_for)) ) start_for(*this, split_obj) ); + } + //! spawn right task, serves as callback for partitioner + void offer_work(const Range& r, depth_t d = 0) { + spawn( *new( allocate_sibling(static_cast(this), sizeof(start_for)) ) start_for(*this, r, d) ); + } + }; + + //! allocate right task with new parent + // TODO: 'inline' here is to avoid multiple definition error but for sake of code size this should not be inlined + inline void* allocate_sibling(task* start_for_task, size_t bytes) { + task* parent_ptr = new( start_for_task->allocate_continuation() ) flag_task(); + start_for_task->set_parent(parent_ptr); + parent_ptr->set_ref_count(2); + return &parent_ptr->allocate_child().allocate(bytes); + } + + //! execute task for parallel_for + template + task* start_for::execute() { + my_partition.check_being_stolen( *this ); + my_partition.execute(*this, my_range); + return NULL; + } +} // namespace internal +//! @endcond +} // namespace interfaceX + +//! @cond INTERNAL +namespace internal { + using interface9::internal::start_for; + + //! Calls the function with values from range [begin, end) with a step provided + template + class parallel_for_body : internal::no_assign { + const Function &my_func; + const Index my_begin; + const Index my_step; + public: + parallel_for_body( const Function& _func, Index& _begin, Index& _step ) + : my_func(_func), my_begin(_begin), my_step(_step) {} + + void operator()( const tbb::blocked_range& r ) const { + // A set of local variables to help the compiler with vectorization of the following loop. + Index b = r.begin(); + Index e = r.end(); + Index ms = my_step; + Index k = my_begin + b*ms; + +#if __INTEL_COMPILER +#pragma ivdep +#if __TBB_ASSERT_ON_VECTORIZATION_FAILURE +#pragma vector always assert +#endif +#endif + for ( Index i = b; i < e; ++i, k += ms ) { + my_func( k ); + } + } + }; +} // namespace internal +//! @endcond + +// Requirements on Range concept are documented in blocked_range.h + +/** \page parallel_for_body_req Requirements on parallel_for body + Class \c Body implementing the concept of parallel_for body must define: + - \code Body::Body( const Body& ); \endcode Copy constructor + - \code Body::~Body(); \endcode Destructor + - \code void Body::operator()( Range& r ) const; \endcode Function call operator applying the body to range \c r. +**/ + +/** \name parallel_for + See also requirements on \ref range_req "Range" and \ref parallel_for_body_req "parallel_for Body". **/ +//@{ + +//! Parallel iteration over range with default partitioner. +/** @ingroup algorithms **/ +template +void parallel_for( const Range& range, const Body& body ) { + internal::start_for::run(range,body,__TBB_DEFAULT_PARTITIONER()); +} + +//! Parallel iteration over range with simple partitioner. +/** @ingroup algorithms **/ +template +void parallel_for( const Range& range, const Body& body, const simple_partitioner& partitioner ) { + internal::start_for::run(range,body,partitioner); +} + +//! Parallel iteration over range with auto_partitioner. 
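+// Illustrative usage sketch for the range-based overloads in this header; the
+// functor ScaleBody and helper scale_all are hypothetical names chosen only for
+// the example, and any of the partitioner overloads accepts the same body.
+// \code
+// #include "tbb/parallel_for.h"
+// #include "tbb/blocked_range.h"
+// #include <vector>
+//
+// struct ScaleBody {                       // satisfies the parallel_for Body requirements
+//     std::vector<float>& v;
+//     explicit ScaleBody( std::vector<float>& v_ ) : v(v_) {}
+//     void operator()( const tbb::blocked_range<size_t>& r ) const {
+//         for( size_t i = r.begin(); i != r.end(); ++i )
+//             v[i] *= 2.0f;                // independent per-element work
+//     }
+// };
+//
+// void scale_all( std::vector<float>& v ) {
+//     // Default partitioner; the body is copied and applied to subranges in parallel.
+//     tbb::parallel_for( tbb::blocked_range<size_t>(0, v.size()), ScaleBody(v) );
+// }
+// \endcode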
+/** @ingroup algorithms **/ +template +void parallel_for( const Range& range, const Body& body, const auto_partitioner& partitioner ) { + internal::start_for::run(range,body,partitioner); +} + +//! Parallel iteration over range with static_partitioner. +/** @ingroup algorithms **/ +template +void parallel_for( const Range& range, const Body& body, const static_partitioner& partitioner ) { + internal::start_for::run(range,body,partitioner); +} + +//! Parallel iteration over range with affinity_partitioner. +/** @ingroup algorithms **/ +template +void parallel_for( const Range& range, const Body& body, affinity_partitioner& partitioner ) { + internal::start_for::run(range,body,partitioner); +} + +#if __TBB_TASK_GROUP_CONTEXT +//! Parallel iteration over range with default partitioner and user-supplied context. +/** @ingroup algorithms **/ +template +void parallel_for( const Range& range, const Body& body, task_group_context& context ) { + internal::start_for::run(range, body, __TBB_DEFAULT_PARTITIONER(), context); +} + +//! Parallel iteration over range with simple partitioner and user-supplied context. +/** @ingroup algorithms **/ +template +void parallel_for( const Range& range, const Body& body, const simple_partitioner& partitioner, task_group_context& context ) { + internal::start_for::run(range, body, partitioner, context); +} + +//! Parallel iteration over range with auto_partitioner and user-supplied context. +/** @ingroup algorithms **/ +template +void parallel_for( const Range& range, const Body& body, const auto_partitioner& partitioner, task_group_context& context ) { + internal::start_for::run(range, body, partitioner, context); +} + +//! Parallel iteration over range with static_partitioner and user-supplied context. +/** @ingroup algorithms **/ +template +void parallel_for( const Range& range, const Body& body, const static_partitioner& partitioner, task_group_context& context ) { + internal::start_for::run(range, body, partitioner, context); +} + +//! Parallel iteration over range with affinity_partitioner and user-supplied context. +/** @ingroup algorithms **/ +template +void parallel_for( const Range& range, const Body& body, affinity_partitioner& partitioner, task_group_context& context ) { + internal::start_for::run(range,body,partitioner, context); +} +#endif /* __TBB_TASK_GROUP_CONTEXT */ +//@} + +namespace strict_ppl { + +//@{ +//! Implementation of parallel iteration over stepped range of integers with explicit step and partitioner +template +void parallel_for_impl(Index first, Index last, Index step, const Function& f, Partitioner& partitioner) { + if (step <= 0 ) + internal::throw_exception(internal::eid_nonpositive_step); // throws std::invalid_argument + else if (last > first) { + // Above "else" avoids "potential divide by zero" warning on some platforms + Index end = (last - first - Index(1)) / step + Index(1); + tbb::blocked_range range(static_cast(0), end); + internal::parallel_for_body body(f, first, step); + tbb::parallel_for(range, body, partitioner); + } +} + +//! Parallel iteration over a range of integers with a step provided and default partitioner +template +void parallel_for(Index first, Index last, Index step, const Function& f) { + parallel_for_impl(first, last, step, f, auto_partitioner()); +} +//! 
Parallel iteration over a range of integers with a step provided and simple partitioner +template +void parallel_for(Index first, Index last, Index step, const Function& f, const simple_partitioner& partitioner) { + parallel_for_impl(first, last, step, f, partitioner); +} +//! Parallel iteration over a range of integers with a step provided and auto partitioner +template +void parallel_for(Index first, Index last, Index step, const Function& f, const auto_partitioner& partitioner) { + parallel_for_impl(first, last, step, f, partitioner); +} +//! Parallel iteration over a range of integers with a step provided and static partitioner +template +void parallel_for(Index first, Index last, Index step, const Function& f, const static_partitioner& partitioner) { + parallel_for_impl(first, last, step, f, partitioner); +} +//! Parallel iteration over a range of integers with a step provided and affinity partitioner +template +void parallel_for(Index first, Index last, Index step, const Function& f, affinity_partitioner& partitioner) { + parallel_for_impl(first, last, step, f, partitioner); +} + +//! Parallel iteration over a range of integers with a default step value and default partitioner +template +void parallel_for(Index first, Index last, const Function& f) { + parallel_for_impl(first, last, static_cast(1), f, auto_partitioner()); +} +//! Parallel iteration over a range of integers with a default step value and simple partitioner +template +void parallel_for(Index first, Index last, const Function& f, const simple_partitioner& partitioner) { + parallel_for_impl(first, last, static_cast(1), f, partitioner); +} +//! Parallel iteration over a range of integers with a default step value and auto partitioner +template +void parallel_for(Index first, Index last, const Function& f, const auto_partitioner& partitioner) { + parallel_for_impl(first, last, static_cast(1), f, partitioner); +} +//! Parallel iteration over a range of integers with a default step value and static partitioner +template +void parallel_for(Index first, Index last, const Function& f, const static_partitioner& partitioner) { + parallel_for_impl(first, last, static_cast(1), f, partitioner); +} +//! Parallel iteration over a range of integers with a default step value and affinity partitioner +template +void parallel_for(Index first, Index last, const Function& f, affinity_partitioner& partitioner) { + parallel_for_impl(first, last, static_cast(1), f, partitioner); +} + +#if __TBB_TASK_GROUP_CONTEXT +//! Implementation of parallel iteration over stepped range of integers with explicit step, task group context, and partitioner +template +void parallel_for_impl(Index first, Index last, Index step, const Function& f, Partitioner& partitioner, tbb::task_group_context &context) { + if (step <= 0 ) + internal::throw_exception(internal::eid_nonpositive_step); // throws std::invalid_argument + else if (last > first) { + // Above "else" avoids "potential divide by zero" warning on some platforms + Index end = (last - first - Index(1)) / step + Index(1); + tbb::blocked_range range(static_cast(0), end); + internal::parallel_for_body body(f, first, step); + tbb::parallel_for(range, body, partitioner, context); + } +} + +//! Parallel iteration over a range of integers with explicit step, task group context, and default partitioner +template +void parallel_for(Index first, Index last, Index step, const Function& f, tbb::task_group_context &context) { + parallel_for_impl(first, last, step, f, auto_partitioner(), context); +} +//! 
Parallel iteration over a range of integers with explicit step, task group context, and simple partitioner + template +void parallel_for(Index first, Index last, Index step, const Function& f, const simple_partitioner& partitioner, tbb::task_group_context &context) { + parallel_for_impl(first, last, step, f, partitioner, context); +} +//! Parallel iteration over a range of integers with explicit step, task group context, and auto partitioner + template +void parallel_for(Index first, Index last, Index step, const Function& f, const auto_partitioner& partitioner, tbb::task_group_context &context) { + parallel_for_impl(first, last, step, f, partitioner, context); +} +//! Parallel iteration over a range of integers with explicit step, task group context, and static partitioner +template +void parallel_for(Index first, Index last, Index step, const Function& f, const static_partitioner& partitioner, tbb::task_group_context &context) { + parallel_for_impl(first, last, step, f, partitioner, context); +} +//! Parallel iteration over a range of integers with explicit step, task group context, and affinity partitioner + template +void parallel_for(Index first, Index last, Index step, const Function& f, affinity_partitioner& partitioner, tbb::task_group_context &context) { + parallel_for_impl(first, last, step, f, partitioner, context); +} + + +//! Parallel iteration over a range of integers with a default step value, explicit task group context, and default partitioner +template +void parallel_for(Index first, Index last, const Function& f, tbb::task_group_context &context) { + parallel_for_impl(first, last, static_cast(1), f, auto_partitioner(), context); +} +//! Parallel iteration over a range of integers with a default step value, explicit task group context, and simple partitioner + template +void parallel_for(Index first, Index last, const Function& f, const simple_partitioner& partitioner, tbb::task_group_context &context) { + parallel_for_impl(first, last, static_cast(1), f, partitioner, context); +} +//! Parallel iteration over a range of integers with a default step value, explicit task group context, and auto partitioner + template +void parallel_for(Index first, Index last, const Function& f, const auto_partitioner& partitioner, tbb::task_group_context &context) { + parallel_for_impl(first, last, static_cast(1), f, partitioner, context); +} +//! Parallel iteration over a range of integers with a default step value, explicit task group context, and static partitioner +template +void parallel_for(Index first, Index last, const Function& f, const static_partitioner& partitioner, tbb::task_group_context &context) { + parallel_for_impl(first, last, static_cast(1), f, partitioner, context); +} +//! 
Parallel iteration over a range of integers with a default step value, explicit task group context, and affinity_partitioner + template +void parallel_for(Index first, Index last, const Function& f, affinity_partitioner& partitioner, tbb::task_group_context &context) { + parallel_for_impl(first, last, static_cast(1), f, partitioner, context); +} + +#endif /* __TBB_TASK_GROUP_CONTEXT */ +//@} + +} // namespace strict_ppl + +using strict_ppl::parallel_for; + +} // namespace tbb + +#if TBB_PREVIEW_SERIAL_SUBSET +#define __TBB_NORMAL_EXECUTION +#include "../serial/tbb/parallel_for.h" +#undef __TBB_NORMAL_EXECUTION +#endif + +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_parallel_for_H_include_area + +#endif /* __TBB_parallel_for_H */ diff --git a/ohos/arm64-v8a/include/tbb/parallel_for_each.h b/ohos/arm64-v8a/include/tbb/parallel_for_each.h new file mode 100644 index 00000000..e1da1bbd --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/parallel_for_each.h @@ -0,0 +1,133 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_parallel_for_each_H +#define __TBB_parallel_for_each_H + +#include "parallel_do.h" +#include "parallel_for.h" + +namespace tbb { + +//! @cond INTERNAL +namespace internal { + // The class calls user function in operator() + template + class parallel_for_each_body_do : internal::no_assign { + const Function &my_func; + public: + parallel_for_each_body_do(const Function &_func) : my_func(_func) {} + + void operator()(typename std::iterator_traits::reference value) const { + my_func(value); + } + }; + + // The class calls user function in operator() + template + class parallel_for_each_body_for : internal::no_assign { + const Function &my_func; + public: + parallel_for_each_body_for(const Function &_func) : my_func(_func) {} + + void operator()(tbb::blocked_range range) const { +#if __INTEL_COMPILER +#pragma ivdep +#endif + for(Iterator it = range.begin(), end = range.end(); it != end; ++it) { + my_func(*it); + } + } + }; + + template + struct parallel_for_each_impl { +#if __TBB_TASK_GROUP_CONTEXT + static void doit(Iterator first, Iterator last, const Function& f, task_group_context &context) { + internal::parallel_for_each_body_do body(f); + tbb::parallel_do(first, last, body, context); + } +#endif + static void doit(Iterator first, Iterator last, const Function& f) { + internal::parallel_for_each_body_do body(f); + tbb::parallel_do(first, last, body); + } + }; + template + struct parallel_for_each_impl { +#if __TBB_TASK_GROUP_CONTEXT + static void doit(Iterator first, Iterator last, const Function& f, task_group_context &context) { + internal::parallel_for_each_body_for body(f); + tbb::parallel_for(tbb::blocked_range(first, last), body, context); + } +#endif + static void doit(Iterator first, Iterator last, const Function& f) { + internal::parallel_for_each_body_for body(f); + tbb::parallel_for(tbb::blocked_range(first, last), body); + } + }; +} // namespace internal +//! 
@endcond + +/** \name parallel_for_each + **/ +//@{ +//! Calls function f for all items from [first, last) interval using user-supplied context +/** @ingroup algorithms */ +#if __TBB_TASK_GROUP_CONTEXT +template +void parallel_for_each(Iterator first, Iterator last, const Function& f, task_group_context &context) { + internal::parallel_for_each_impl::iterator_category>::doit(first, last, f, context); +} + +//! Calls function f for all items from rng using user-supplied context +/** @ingroup algorithms */ +template +void parallel_for_each(Range& rng, const Function& f, task_group_context& context) { + parallel_for_each(tbb::internal::first(rng), tbb::internal::last(rng), f, context); +} + +//! Calls function f for all items from const rng user-supplied context +/** @ingroup algorithms */ +template +void parallel_for_each(const Range& rng, const Function& f, task_group_context& context) { + parallel_for_each(tbb::internal::first(rng), tbb::internal::last(rng), f, context); +} +#endif /* __TBB_TASK_GROUP_CONTEXT */ + +//! Uses default context +template +void parallel_for_each(Iterator first, Iterator last, const Function& f) { + internal::parallel_for_each_impl::iterator_category>::doit(first, last, f); +} + +//! Uses default context +template +void parallel_for_each(Range& rng, const Function& f) { + parallel_for_each(tbb::internal::first(rng), tbb::internal::last(rng), f); +} + +//! Uses default context +template +void parallel_for_each(const Range& rng, const Function& f) { + parallel_for_each(tbb::internal::first(rng), tbb::internal::last(rng), f); +} + +//@} + +} // namespace + +#endif /* __TBB_parallel_for_each_H */ diff --git a/ohos/arm64-v8a/include/tbb/parallel_invoke.h b/ohos/arm64-v8a/include/tbb/parallel_invoke.h new file mode 100644 index 00000000..4be4bdb7 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/parallel_invoke.h @@ -0,0 +1,460 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_parallel_invoke_H +#define __TBB_parallel_invoke_H + +#define __TBB_parallel_invoke_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + +#include "task.h" +#include "tbb_profiling.h" + +#if __TBB_VARIADIC_PARALLEL_INVOKE + #include // std::forward +#endif + +namespace tbb { + +#if !__TBB_TASK_GROUP_CONTEXT + /** Dummy to avoid cluttering the bulk of the header with enormous amount of ifdefs. **/ + struct task_group_context { + task_group_context(tbb::internal::string_index){} + }; +#endif /* __TBB_TASK_GROUP_CONTEXT */ + +//! 
@cond INTERNAL +namespace internal { + // Simple task object, executing user method + template + class function_invoker : public task{ + public: + function_invoker(const function& _function) : my_function(_function) {} + private: + const function &my_function; + task* execute() __TBB_override + { + my_function(); + return NULL; + } + }; + + // The class spawns two or three child tasks + template + class spawner : public task { + private: + const function1& my_func1; + const function2& my_func2; + const function3& my_func3; + bool is_recycled; + + task* execute () __TBB_override { + if(is_recycled){ + return NULL; + }else{ + __TBB_ASSERT(N==2 || N==3, "Number of arguments passed to spawner is wrong"); + set_ref_count(N); + recycle_as_safe_continuation(); + internal::function_invoker* invoker2 = new (allocate_child()) internal::function_invoker(my_func2); + __TBB_ASSERT(invoker2, "Child task allocation failed"); + spawn(*invoker2); + size_t n = N; // To prevent compiler warnings + if (n>2) { + internal::function_invoker* invoker3 = new (allocate_child()) internal::function_invoker(my_func3); + __TBB_ASSERT(invoker3, "Child task allocation failed"); + spawn(*invoker3); + } + my_func1(); + is_recycled = true; + return NULL; + } + } // execute + + public: + spawner(const function1& _func1, const function2& _func2, const function3& _func3) : my_func1(_func1), my_func2(_func2), my_func3(_func3), is_recycled(false) {} + }; + + // Creates and spawns child tasks + class parallel_invoke_helper : public empty_task { + public: + // Dummy functor class + class parallel_invoke_noop { + public: + void operator() () const {} + }; + // Creates a helper object with user-defined number of children expected + parallel_invoke_helper(int number_of_children) + { + set_ref_count(number_of_children + 1); + } + +#if __TBB_VARIADIC_PARALLEL_INVOKE + void add_children() {} + void add_children(tbb::task_group_context&) {} + + template + void add_children(function&& _func) + { + internal::function_invoker* invoker = new (allocate_child()) internal::function_invoker(std::forward(_func)); + __TBB_ASSERT(invoker, "Child task allocation failed"); + spawn(*invoker); + } + + template + void add_children(function&& _func, tbb::task_group_context&) + { + add_children(std::forward(_func)); + } + + // Adds child(ren) task(s) and spawns them + template + void add_children(function1&& _func1, function2&& _func2, function&&... _func) + { + // The third argument is dummy, it is ignored actually. + parallel_invoke_noop noop; + typedef internal::spawner<2, function1, function2, parallel_invoke_noop> spawner_type; + spawner_type & sub_root = *new(allocate_child()) spawner_type(std::forward(_func1), std::forward(_func2), noop); + spawn(sub_root); + add_children(std::forward(_func)...); + } +#else + // Adds child task and spawns it + template + void add_children (const function &_func) + { + internal::function_invoker* invoker = new (allocate_child()) internal::function_invoker(_func); + __TBB_ASSERT(invoker, "Child task allocation failed"); + spawn(*invoker); + } + + // Adds a task with multiple child tasks and spawns it + // two arguments + template + void add_children (const function1& _func1, const function2& _func2) + { + // The third argument is dummy, it is ignored actually. 
+ parallel_invoke_noop noop; + internal::spawner<2, function1, function2, parallel_invoke_noop>& sub_root = *new(allocate_child())internal::spawner<2, function1, function2, parallel_invoke_noop>(_func1, _func2, noop); + spawn(sub_root); + } + // three arguments + template + void add_children (const function1& _func1, const function2& _func2, const function3& _func3) + { + internal::spawner<3, function1, function2, function3>& sub_root = *new(allocate_child())internal::spawner<3, function1, function2, function3>(_func1, _func2, _func3); + spawn(sub_root); + } +#endif // __TBB_VARIADIC_PARALLEL_INVOKE + + // Waits for all child tasks + template + void run_and_finish(const F0& f0) + { + internal::function_invoker* invoker = new (allocate_child()) internal::function_invoker(f0); + __TBB_ASSERT(invoker, "Child task allocation failed"); + spawn_and_wait_for_all(*invoker); + } + }; + // The class destroys root if exception occurred as well as in normal case + class parallel_invoke_cleaner: internal::no_copy { + public: +#if __TBB_TASK_GROUP_CONTEXT + parallel_invoke_cleaner(int number_of_children, tbb::task_group_context& context) + : root(*new(task::allocate_root(context)) internal::parallel_invoke_helper(number_of_children)) +#else + parallel_invoke_cleaner(int number_of_children, tbb::task_group_context&) + : root(*new(task::allocate_root()) internal::parallel_invoke_helper(number_of_children)) +#endif /* !__TBB_TASK_GROUP_CONTEXT */ + {} + + ~parallel_invoke_cleaner(){ + root.destroy(root); + } + internal::parallel_invoke_helper& root; + }; + +#if __TBB_VARIADIC_PARALLEL_INVOKE +// Determine whether the last parameter in a pack is task_group_context + template struct impl_selector; // to workaround a GCC bug + + template struct impl_selector { + typedef typename impl_selector::type type; + }; + + template struct impl_selector { + typedef false_type type; + }; + template<> struct impl_selector { + typedef true_type type; + }; + + // Select task_group_context parameter from the back of a pack + inline task_group_context& get_context( task_group_context& tgc ) { return tgc; } + + template + task_group_context& get_context( T1&& /*ignored*/, T&&... t ) + { return get_context( std::forward(t)... ); } + + // task_group_context is known to be at the back of the parameter pack + template + void parallel_invoke_impl(true_type, F0&& f0, F1&& f1, F&&... f) { + __TBB_STATIC_ASSERT(sizeof...(F)>0, "Variadic parallel_invoke implementation broken?"); + // # of child tasks: f0, f1, and a task for each two elements of the pack except the last + const size_t number_of_children = 2 + sizeof...(F)/2; + parallel_invoke_cleaner cleaner(number_of_children, get_context(std::forward(f)...)); + parallel_invoke_helper& root = cleaner.root; + + root.add_children(std::forward(f)...); + root.add_children(std::forward(f1)); + root.run_and_finish(std::forward(f0)); + } + + // task_group_context is not in the pack, needs to be added + template + void parallel_invoke_impl(false_type, F0&& f0, F1&& f1, F&&... f) { + tbb::task_group_context context(PARALLEL_INVOKE); + // Add context to the arguments, and redirect to the other overload + parallel_invoke_impl(true_type(), std::forward(f0), std::forward(f1), std::forward(f)..., context); + } +#endif +} // namespace internal +//! @endcond + +/** \name parallel_invoke + **/ +//@{ +//! Executes a list of tasks in parallel and waits for all tasks to complete. 
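+// Illustrative usage sketch for the public parallel_invoke overloads below; the
+// three work functions are hypothetical placeholders for independent pieces of work.
+// \code
+// #include "tbb/parallel_invoke.h"
+//
+// void build_index()   { /* independent work */ }
+// void compress_log()  { /* independent work */ }
+// void refresh_cache() { /* independent work */ }
+//
+// void do_startup_work() {
+//     // Each callable runs as its own task; the call returns only after all three finish.
+//     tbb::parallel_invoke( build_index, compress_log, refresh_cache );
+// }
+// \endcode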
+/** @ingroup algorithms */ + +#if __TBB_VARIADIC_PARALLEL_INVOKE + +// parallel_invoke for two or more arguments via variadic templates +// presence of task_group_context is defined automatically +template +void parallel_invoke(F0&& f0, F1&& f1, F&&... f) { + typedef typename internal::impl_selector::type selector_type; + internal::parallel_invoke_impl(selector_type(), std::forward(f0), std::forward(f1), std::forward(f)...); +} + +#else + +// parallel_invoke with user-defined context +// two arguments +template +void parallel_invoke(const F0& f0, const F1& f1, tbb::task_group_context& context) { + internal::parallel_invoke_cleaner cleaner(2, context); + internal::parallel_invoke_helper& root = cleaner.root; + + root.add_children(f1); + + root.run_and_finish(f0); +} + +// three arguments +template +void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, tbb::task_group_context& context) { + internal::parallel_invoke_cleaner cleaner(3, context); + internal::parallel_invoke_helper& root = cleaner.root; + + root.add_children(f2); + root.add_children(f1); + + root.run_and_finish(f0); +} + +// four arguments +template +void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, + tbb::task_group_context& context) +{ + internal::parallel_invoke_cleaner cleaner(4, context); + internal::parallel_invoke_helper& root = cleaner.root; + + root.add_children(f3); + root.add_children(f2); + root.add_children(f1); + + root.run_and_finish(f0); +} + +// five arguments +template +void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4, + tbb::task_group_context& context) +{ + internal::parallel_invoke_cleaner cleaner(3, context); + internal::parallel_invoke_helper& root = cleaner.root; + + root.add_children(f4, f3); + root.add_children(f2, f1); + + root.run_and_finish(f0); +} + +// six arguments +template +void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4, const F5& f5, + tbb::task_group_context& context) +{ + internal::parallel_invoke_cleaner cleaner(3, context); + internal::parallel_invoke_helper& root = cleaner.root; + + root.add_children(f5, f4, f3); + root.add_children(f2, f1); + + root.run_and_finish(f0); +} + +// seven arguments +template +void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4, + const F5& f5, const F6& f6, + tbb::task_group_context& context) +{ + internal::parallel_invoke_cleaner cleaner(3, context); + internal::parallel_invoke_helper& root = cleaner.root; + + root.add_children(f6, f5, f4); + root.add_children(f3, f2, f1); + + root.run_and_finish(f0); +} + +// eight arguments +template +void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4, + const F5& f5, const F6& f6, const F7& f7, + tbb::task_group_context& context) +{ + internal::parallel_invoke_cleaner cleaner(4, context); + internal::parallel_invoke_helper& root = cleaner.root; + + root.add_children(f7, f6, f5); + root.add_children(f4, f3); + root.add_children(f2, f1); + + root.run_and_finish(f0); +} + +// nine arguments +template +void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4, + const F5& f5, const F6& f6, const F7& f7, const F8& f8, + tbb::task_group_context& context) +{ + internal::parallel_invoke_cleaner cleaner(4, context); + internal::parallel_invoke_helper& root = cleaner.root; + + root.add_children(f8, f7, f6); + root.add_children(f5, f4, f3); + root.add_children(f2, f1); + + root.run_and_finish(f0); +} + +// 
ten arguments +template +void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4, + const F5& f5, const F6& f6, const F7& f7, const F8& f8, const F9& f9, + tbb::task_group_context& context) +{ + internal::parallel_invoke_cleaner cleaner(4, context); + internal::parallel_invoke_helper& root = cleaner.root; + + root.add_children(f9, f8, f7); + root.add_children(f6, f5, f4); + root.add_children(f3, f2, f1); + + root.run_and_finish(f0); +} + +// two arguments +template +void parallel_invoke(const F0& f0, const F1& f1) { + task_group_context context(internal::PARALLEL_INVOKE); + parallel_invoke(f0, f1, context); +} +// three arguments +template +void parallel_invoke(const F0& f0, const F1& f1, const F2& f2) { + task_group_context context(internal::PARALLEL_INVOKE); + parallel_invoke(f0, f1, f2, context); +} +// four arguments +template +void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3) { + task_group_context context(internal::PARALLEL_INVOKE); + parallel_invoke(f0, f1, f2, f3, context); +} +// five arguments +template +void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4) { + task_group_context context(internal::PARALLEL_INVOKE); + parallel_invoke(f0, f1, f2, f3, f4, context); +} +// six arguments +template +void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4, const F5& f5) { + task_group_context context(internal::PARALLEL_INVOKE); + parallel_invoke(f0, f1, f2, f3, f4, f5, context); +} +// seven arguments +template +void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4, + const F5& f5, const F6& f6) +{ + task_group_context context(internal::PARALLEL_INVOKE); + parallel_invoke(f0, f1, f2, f3, f4, f5, f6, context); +} +// eight arguments +template +void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4, + const F5& f5, const F6& f6, const F7& f7) +{ + task_group_context context(internal::PARALLEL_INVOKE); + parallel_invoke(f0, f1, f2, f3, f4, f5, f6, f7, context); +} +// nine arguments +template +void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4, + const F5& f5, const F6& f6, const F7& f7, const F8& f8) +{ + task_group_context context(internal::PARALLEL_INVOKE); + parallel_invoke(f0, f1, f2, f3, f4, f5, f6, f7, f8, context); +} +// ten arguments +template +void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4, + const F5& f5, const F6& f6, const F7& f7, const F8& f8, const F9& f9) +{ + task_group_context context(internal::PARALLEL_INVOKE); + parallel_invoke(f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, context); +} +#endif // __TBB_VARIADIC_PARALLEL_INVOKE +//@} + +} // namespace + +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_parallel_invoke_H_include_area + +#endif /* __TBB_parallel_invoke_H */ diff --git a/ohos/arm64-v8a/include/tbb/parallel_reduce.h b/ohos/arm64-v8a/include/tbb/parallel_reduce.h new file mode 100644 index 00000000..da2e2f8d --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/parallel_reduce.h @@ -0,0 +1,657 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_parallel_reduce_H +#define __TBB_parallel_reduce_H + +#define __TBB_parallel_reduce_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + +#include +#include "task.h" +#include "aligned_space.h" +#include "partitioner.h" +#include "tbb_profiling.h" + +namespace tbb { + +namespace interface9 { +//! @cond INTERNAL +namespace internal { + + using namespace tbb::internal; + + /** Values for reduction_context. */ + enum { + root_task, left_child, right_child + }; + + /** Represented as a char, not enum, for compactness. */ + typedef char reduction_context; + + //! Task type used to combine the partial results of parallel_reduce. + /** @ingroup algorithms */ + template + class finish_reduce: public flag_task { + //! Pointer to body, or NULL if the left child has not yet finished. + bool has_right_zombie; + const reduction_context my_context; + Body* my_body; + aligned_space zombie_space; + finish_reduce( reduction_context context_ ) : + has_right_zombie(false), // TODO: substitute by flag_task::child_stolen? + my_context(context_), + my_body(NULL) + { + } + ~finish_reduce() { + if( has_right_zombie ) + zombie_space.begin()->~Body(); + } + task* execute() __TBB_override { + if( has_right_zombie ) { + // Right child was stolen. + Body* s = zombie_space.begin(); + my_body->join( *s ); + // Body::join() won't be called if canceled. Defer destruction to destructor + } + if( my_context==left_child ) + itt_store_word_with_release( static_cast(parent())->my_body, my_body ); + return NULL; + } + template + friend class start_reduce; + }; + + //! allocate right task with new parent + void allocate_sibling(task* start_reduce_task, task *tasks[], size_t start_bytes, size_t finish_bytes); + + //! Task type used to split the work of parallel_reduce. + /** @ingroup algorithms */ + template + class start_reduce: public task { + typedef finish_reduce finish_type; + Body* my_body; + Range my_range; + typename Partitioner::task_partition_type my_partition; + reduction_context my_context; + task* execute() __TBB_override; + //! Update affinity info, if any + void note_affinity( affinity_id id ) __TBB_override { + my_partition.note_affinity( id ); + } + template + friend class finish_reduce; + +public: + //! Constructor used for root task + start_reduce( const Range& range, Body* body, Partitioner& partitioner ) : + my_body(body), + my_range(range), + my_partition(partitioner), + my_context(root_task) + { + } + //! Splitting constructor used to generate children. + /** parent_ becomes left child. Newly constructed object is right child. */ + start_reduce( start_reduce& parent_, typename Partitioner::split_type& split_obj ) : + my_body(parent_.my_body), + my_range(parent_.my_range, split_obj), + my_partition(parent_.my_partition, split_obj), + my_context(right_child) + { + my_partition.set_affinity(*this); + parent_.my_context = left_child; + } + //! Construct right child from the given range as response to the demand. + /** parent_ remains left child. Newly constructed object is right child. 
*/ + start_reduce( start_reduce& parent_, const Range& r, depth_t d ) : + my_body(parent_.my_body), + my_range(r), + my_partition(parent_.my_partition, split()), + my_context(right_child) + { + my_partition.set_affinity(*this); + my_partition.align_depth( d ); // TODO: move into constructor of partitioner + parent_.my_context = left_child; + } + static void run( const Range& range, Body& body, Partitioner& partitioner ) { + if( !range.empty() ) { +#if !__TBB_TASK_GROUP_CONTEXT || TBB_JOIN_OUTER_TASK_GROUP + task::spawn_root_and_wait( *new(task::allocate_root()) start_reduce(range,&body,partitioner) ); +#else + // Bound context prevents exceptions from body to affect nesting or sibling algorithms, + // and allows users to handle exceptions safely by wrapping parallel_for in the try-block. + task_group_context context(PARALLEL_REDUCE); + task::spawn_root_and_wait( *new(task::allocate_root(context)) start_reduce(range,&body,partitioner) ); +#endif /* __TBB_TASK_GROUP_CONTEXT && !TBB_JOIN_OUTER_TASK_GROUP */ + } + } +#if __TBB_TASK_GROUP_CONTEXT + static void run( const Range& range, Body& body, Partitioner& partitioner, task_group_context& context ) { + if( !range.empty() ) + task::spawn_root_and_wait( *new(task::allocate_root(context)) start_reduce(range,&body,partitioner) ); + } +#endif /* __TBB_TASK_GROUP_CONTEXT */ + //! Run body for range + void run_body( Range &r ) { (*my_body)( r ); } + + //! spawn right task, serves as callback for partitioner + // TODO: remove code duplication from 'offer_work' methods + void offer_work(typename Partitioner::split_type& split_obj) { + task *tasks[2]; + allocate_sibling(static_cast(this), tasks, sizeof(start_reduce), sizeof(finish_type)); + new((void*)tasks[0]) finish_type(my_context); + new((void*)tasks[1]) start_reduce(*this, split_obj); + spawn(*tasks[1]); + } + //! spawn right task, serves as callback for partitioner + void offer_work(const Range& r, depth_t d = 0) { + task *tasks[2]; + allocate_sibling(static_cast(this), tasks, sizeof(start_reduce), sizeof(finish_type)); + new((void*)tasks[0]) finish_type(my_context); + new((void*)tasks[1]) start_reduce(*this, r, d); + spawn(*tasks[1]); + } + }; + + //! allocate right task with new parent + // TODO: 'inline' here is to avoid multiple definition error but for sake of code size this should not be inlined + inline void allocate_sibling(task* start_reduce_task, task *tasks[], size_t start_bytes, size_t finish_bytes) { + tasks[0] = &start_reduce_task->allocate_continuation().allocate(finish_bytes); + start_reduce_task->set_parent(tasks[0]); + tasks[0]->set_ref_count(2); + tasks[1] = &tasks[0]->allocate_child().allocate(start_bytes); + } + + template + task* start_reduce::execute() { + my_partition.check_being_stolen( *this ); + if( my_context==right_child ) { + finish_type* parent_ptr = static_cast(parent()); + if( !itt_load_word_with_acquire(parent_ptr->my_body) ) { // TODO: replace by is_stolen_task() or by parent_ptr->ref_count() == 2??? + my_body = new( parent_ptr->zombie_space.begin() ) Body(*my_body,split()); + parent_ptr->has_right_zombie = true; + } + } else __TBB_ASSERT(my_context==root_task,NULL);// because left leaf spawns right leafs without recycling + my_partition.execute(*this, my_range); + if( my_context==left_child ) { + finish_type* parent_ptr = static_cast(parent()); + __TBB_ASSERT(my_body!=parent_ptr->zombie_space.begin(),NULL); + itt_store_word_with_release(parent_ptr->my_body, my_body ); + } + return NULL; + } + + //! 
Task type used to combine the partial results of parallel_deterministic_reduce. + /** @ingroup algorithms */ + template + class finish_deterministic_reduce: public task { + Body &my_left_body; + Body my_right_body; + + finish_deterministic_reduce( Body &body ) : + my_left_body( body ), + my_right_body( body, split() ) + { + } + task* execute() __TBB_override { + my_left_body.join( my_right_body ); + return NULL; + } + template + friend class start_deterministic_reduce; + }; + + //! Task type used to split the work of parallel_deterministic_reduce. + /** @ingroup algorithms */ + template + class start_deterministic_reduce: public task { + typedef finish_deterministic_reduce finish_type; + Body &my_body; + Range my_range; + typename Partitioner::task_partition_type my_partition; + task* execute() __TBB_override; + + //! Constructor used for root task + start_deterministic_reduce( const Range& range, Body& body, Partitioner& partitioner ) : + my_body( body ), + my_range( range ), + my_partition( partitioner ) + { + } + //! Splitting constructor used to generate children. + /** parent_ becomes left child. Newly constructed object is right child. */ + start_deterministic_reduce( start_deterministic_reduce& parent_, finish_type& c, typename Partitioner::split_type& split_obj ) : + my_body( c.my_right_body ), + my_range( parent_.my_range, split_obj ), + my_partition( parent_.my_partition, split_obj ) + { + } + +public: + static void run( const Range& range, Body& body, Partitioner& partitioner ) { + if( !range.empty() ) { +#if !__TBB_TASK_GROUP_CONTEXT || TBB_JOIN_OUTER_TASK_GROUP + task::spawn_root_and_wait( *new(task::allocate_root()) start_deterministic_reduce(range,&body,partitioner) ); +#else + // Bound context prevents exceptions from body to affect nesting or sibling algorithms, + // and allows users to handle exceptions safely by wrapping parallel_for in the try-block. + task_group_context context(PARALLEL_REDUCE); + task::spawn_root_and_wait( *new(task::allocate_root(context)) start_deterministic_reduce(range,body,partitioner) ); +#endif /* __TBB_TASK_GROUP_CONTEXT && !TBB_JOIN_OUTER_TASK_GROUP */ + } + } +#if __TBB_TASK_GROUP_CONTEXT + static void run( const Range& range, Body& body, Partitioner& partitioner, task_group_context& context ) { + if( !range.empty() ) + task::spawn_root_and_wait( *new(task::allocate_root(context)) start_deterministic_reduce(range,body,partitioner) ); + } +#endif /* __TBB_TASK_GROUP_CONTEXT */ + + void offer_work( typename Partitioner::split_type& split_obj) { + task* tasks[2]; + allocate_sibling(static_cast(this), tasks, sizeof(start_deterministic_reduce), sizeof(finish_type)); + new((void*)tasks[0]) finish_type(my_body); + new((void*)tasks[1]) start_deterministic_reduce(*this, *static_cast(tasks[0]), split_obj); + spawn(*tasks[1]); + } + + void run_body( Range &r ) { my_body(r); } + }; + + template + task* start_deterministic_reduce::execute() { + my_partition.execute(*this, my_range); + return NULL; + } +} // namespace internal +//! @endcond +} //namespace interfaceX + +//! @cond INTERNAL +namespace internal { + using interface9::internal::start_reduce; + using interface9::internal::start_deterministic_reduce; + //! Auxiliary class for parallel_reduce; for internal use only. + /** The adaptor class that implements \ref parallel_reduce_body_req "parallel_reduce Body" + using given \ref parallel_reduce_lambda_req "anonymous function objects". 
+ **/ + /** @ingroup algorithms */ + template + class lambda_reduce_body { + +//FIXME: decide if my_real_body, my_reduction, and identity_element should be copied or referenced +// (might require some performance measurements) + + const Value& identity_element; + const RealBody& my_real_body; + const Reduction& my_reduction; + Value my_value; + lambda_reduce_body& operator= ( const lambda_reduce_body& other ); + public: + lambda_reduce_body( const Value& identity, const RealBody& body, const Reduction& reduction ) + : identity_element(identity) + , my_real_body(body) + , my_reduction(reduction) + , my_value(identity) + { } + lambda_reduce_body( const lambda_reduce_body& other ) + : identity_element(other.identity_element) + , my_real_body(other.my_real_body) + , my_reduction(other.my_reduction) + , my_value(other.my_value) + { } + lambda_reduce_body( lambda_reduce_body& other, tbb::split ) + : identity_element(other.identity_element) + , my_real_body(other.my_real_body) + , my_reduction(other.my_reduction) + , my_value(other.identity_element) + { } + void operator()(Range& range) { + my_value = my_real_body(range, const_cast(my_value)); + } + void join( lambda_reduce_body& rhs ) { + my_value = my_reduction(const_cast(my_value), const_cast(rhs.my_value)); + } + Value result() const { + return my_value; + } + }; + +} // namespace internal +//! @endcond + +// Requirements on Range concept are documented in blocked_range.h + +/** \page parallel_reduce_body_req Requirements on parallel_reduce body + Class \c Body implementing the concept of parallel_reduce body must define: + - \code Body::Body( Body&, split ); \endcode Splitting constructor. + Must be able to run concurrently with operator() and method \c join + - \code Body::~Body(); \endcode Destructor + - \code void Body::operator()( Range& r ); \endcode Function call operator applying body to range \c r + and accumulating the result + - \code void Body::join( Body& b ); \endcode Join results. + The result in \c b should be merged into the result of \c this +**/ + +/** \page parallel_reduce_lambda_req Requirements on parallel_reduce anonymous function objects (lambda functions) + TO BE DOCUMENTED +**/ + +/** \name parallel_reduce + See also requirements on \ref range_req "Range" and \ref parallel_reduce_body_req "parallel_reduce Body". **/ +//@{ + +//! Parallel iteration with reduction and default partitioner. +/** @ingroup algorithms **/ +template +void parallel_reduce( const Range& range, Body& body ) { + internal::start_reduce::run( range, body, __TBB_DEFAULT_PARTITIONER() ); +} + +//! Parallel iteration with reduction and simple_partitioner +/** @ingroup algorithms **/ +template +void parallel_reduce( const Range& range, Body& body, const simple_partitioner& partitioner ) { + internal::start_reduce::run( range, body, partitioner ); +} + +//! Parallel iteration with reduction and auto_partitioner +/** @ingroup algorithms **/ +template +void parallel_reduce( const Range& range, Body& body, const auto_partitioner& partitioner ) { + internal::start_reduce::run( range, body, partitioner ); +} + +//! Parallel iteration with reduction and static_partitioner +/** @ingroup algorithms **/ +template +void parallel_reduce( const Range& range, Body& body, const static_partitioner& partitioner ) { + internal::start_reduce::run( range, body, partitioner ); +} + +//! 
Parallel iteration with reduction and affinity_partitioner +/** @ingroup algorithms **/ +template +void parallel_reduce( const Range& range, Body& body, affinity_partitioner& partitioner ) { + internal::start_reduce::run( range, body, partitioner ); +} + +#if __TBB_TASK_GROUP_CONTEXT +//! Parallel iteration with reduction, default partitioner and user-supplied context. +/** @ingroup algorithms **/ +template +void parallel_reduce( const Range& range, Body& body, task_group_context& context ) { + internal::start_reduce::run( range, body, __TBB_DEFAULT_PARTITIONER(), context ); +} + +//! Parallel iteration with reduction, simple partitioner and user-supplied context. +/** @ingroup algorithms **/ +template +void parallel_reduce( const Range& range, Body& body, const simple_partitioner& partitioner, task_group_context& context ) { + internal::start_reduce::run( range, body, partitioner, context ); +} + +//! Parallel iteration with reduction, auto_partitioner and user-supplied context +/** @ingroup algorithms **/ +template +void parallel_reduce( const Range& range, Body& body, const auto_partitioner& partitioner, task_group_context& context ) { + internal::start_reduce::run( range, body, partitioner, context ); +} + +//! Parallel iteration with reduction, static_partitioner and user-supplied context +/** @ingroup algorithms **/ +template +void parallel_reduce( const Range& range, Body& body, const static_partitioner& partitioner, task_group_context& context ) { + internal::start_reduce::run( range, body, partitioner, context ); +} + +//! Parallel iteration with reduction, affinity_partitioner and user-supplied context +/** @ingroup algorithms **/ +template +void parallel_reduce( const Range& range, Body& body, affinity_partitioner& partitioner, task_group_context& context ) { + internal::start_reduce::run( range, body, partitioner, context ); +} +#endif /* __TBB_TASK_GROUP_CONTEXT */ + +/** parallel_reduce overloads that work with anonymous function objects + (see also \ref parallel_reduce_lambda_req "requirements on parallel_reduce anonymous function objects"). **/ + +//! Parallel iteration with reduction and default partitioner. +/** @ingroup algorithms **/ +template +Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction ) { + internal::lambda_reduce_body body(identity, real_body, reduction); + internal::start_reduce,const __TBB_DEFAULT_PARTITIONER> + ::run(range, body, __TBB_DEFAULT_PARTITIONER() ); + return body.result(); +} + +//! Parallel iteration with reduction and simple_partitioner. +/** @ingroup algorithms **/ +template +Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, + const simple_partitioner& partitioner ) { + internal::lambda_reduce_body body(identity, real_body, reduction); + internal::start_reduce,const simple_partitioner> + ::run(range, body, partitioner ); + return body.result(); +} + +//! Parallel iteration with reduction and auto_partitioner +/** @ingroup algorithms **/ +template +Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, + const auto_partitioner& partitioner ) { + internal::lambda_reduce_body body(identity, real_body, reduction); + internal::start_reduce,const auto_partitioner> + ::run( range, body, partitioner ); + return body.result(); +} + +//! 
Parallel iteration with reduction and static_partitioner +/** @ingroup algorithms **/ +template +Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, + const static_partitioner& partitioner ) { + internal::lambda_reduce_body body(identity, real_body, reduction); + internal::start_reduce,const static_partitioner> + ::run( range, body, partitioner ); + return body.result(); +} + +//! Parallel iteration with reduction and affinity_partitioner +/** @ingroup algorithms **/ +template +Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, + affinity_partitioner& partitioner ) { + internal::lambda_reduce_body body(identity, real_body, reduction); + internal::start_reduce,affinity_partitioner> + ::run( range, body, partitioner ); + return body.result(); +} + +#if __TBB_TASK_GROUP_CONTEXT +//! Parallel iteration with reduction, default partitioner and user-supplied context. +/** @ingroup algorithms **/ +template +Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, + task_group_context& context ) { + internal::lambda_reduce_body body(identity, real_body, reduction); + internal::start_reduce,const __TBB_DEFAULT_PARTITIONER> + ::run( range, body, __TBB_DEFAULT_PARTITIONER(), context ); + return body.result(); +} + +//! Parallel iteration with reduction, simple partitioner and user-supplied context. +/** @ingroup algorithms **/ +template +Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, + const simple_partitioner& partitioner, task_group_context& context ) { + internal::lambda_reduce_body body(identity, real_body, reduction); + internal::start_reduce,const simple_partitioner> + ::run( range, body, partitioner, context ); + return body.result(); +} + +//! Parallel iteration with reduction, auto_partitioner and user-supplied context +/** @ingroup algorithms **/ +template +Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, + const auto_partitioner& partitioner, task_group_context& context ) { + internal::lambda_reduce_body body(identity, real_body, reduction); + internal::start_reduce,const auto_partitioner> + ::run( range, body, partitioner, context ); + return body.result(); +} + +//! Parallel iteration with reduction, static_partitioner and user-supplied context +/** @ingroup algorithms **/ +template +Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, + const static_partitioner& partitioner, task_group_context& context ) { + internal::lambda_reduce_body body(identity, real_body, reduction); + internal::start_reduce,const static_partitioner> + ::run( range, body, partitioner, context ); + return body.result(); +} + +//! Parallel iteration with reduction, affinity_partitioner and user-supplied context +/** @ingroup algorithms **/ +template +Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, + affinity_partitioner& partitioner, task_group_context& context ) { + internal::lambda_reduce_body body(identity, real_body, reduction); + internal::start_reduce,affinity_partitioner> + ::run( range, body, partitioner, context ); + return body.result(); +} +#endif /* __TBB_TASK_GROUP_CONTEXT */ + +//! 
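For reference, a minimal usage sketch of the overload families above, assuming a plain std::vector<float> named a (the container and sizes are illustrative, not part of the header): an explicit Body type that satisfies the splitting-constructor/join requirements, followed by the equivalent lambda form served by the lambda_reduce_body adaptor.

#include "tbb/parallel_reduce.h"
#include "tbb/blocked_range.h"
#include <vector>

// Body form: splitting constructor, operator(), and join, as required above.
struct SumBody {
    const std::vector<float>& a;
    float sum;
    SumBody( const std::vector<float>& a_ ) : a(a_), sum(0.0f) {}
    SumBody( SumBody& other, tbb::split ) : a(other.a), sum(0.0f) {}
    void operator()( const tbb::blocked_range<size_t>& r ) {
        for( size_t i = r.begin(); i != r.end(); ++i )
            sum += a[i];
    }
    void join( SumBody& rhs ) { sum += rhs.sum; }
};

float sum_with_body( const std::vector<float>& a ) {
    SumBody body( a );
    tbb::parallel_reduce( tbb::blocked_range<size_t>(0, a.size()), body );
    return body.sum;
}

// Lambda form: identity value, range body, and reduction.
float sum_with_lambdas( const std::vector<float>& a ) {
    return tbb::parallel_reduce(
        tbb::blocked_range<size_t>(0, a.size()), 0.0f,
        [&a]( const tbb::blocked_range<size_t>& r, float running ) {
            for( size_t i = r.begin(); i != r.end(); ++i )
                running += a[i];
            return running;
        },
        []( float x, float y ) { return x + y; } );
}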
Parallel iteration with deterministic reduction and default simple partitioner. +/** @ingroup algorithms **/ +template +void parallel_deterministic_reduce( const Range& range, Body& body ) { + internal::start_deterministic_reduce::run(range, body, simple_partitioner()); +} + +//! Parallel iteration with deterministic reduction and simple partitioner. +/** @ingroup algorithms **/ +template +void parallel_deterministic_reduce( const Range& range, Body& body, const simple_partitioner& partitioner ) { + internal::start_deterministic_reduce::run(range, body, partitioner); +} + +//! Parallel iteration with deterministic reduction and static partitioner. +/** @ingroup algorithms **/ +template +void parallel_deterministic_reduce( const Range& range, Body& body, const static_partitioner& partitioner ) { + internal::start_deterministic_reduce::run(range, body, partitioner); +} + +#if __TBB_TASK_GROUP_CONTEXT +//! Parallel iteration with deterministic reduction, default simple partitioner and user-supplied context. +/** @ingroup algorithms **/ +template +void parallel_deterministic_reduce( const Range& range, Body& body, task_group_context& context ) { + internal::start_deterministic_reduce::run( range, body, simple_partitioner(), context ); +} + +//! Parallel iteration with deterministic reduction, simple partitioner and user-supplied context. +/** @ingroup algorithms **/ +template +void parallel_deterministic_reduce( const Range& range, Body& body, const simple_partitioner& partitioner, task_group_context& context ) { + internal::start_deterministic_reduce::run(range, body, partitioner, context); +} + +//! Parallel iteration with deterministic reduction, static partitioner and user-supplied context. +/** @ingroup algorithms **/ +template +void parallel_deterministic_reduce( const Range& range, Body& body, const static_partitioner& partitioner, task_group_context& context ) { + internal::start_deterministic_reduce::run(range, body, partitioner, context); +} +#endif /* __TBB_TASK_GROUP_CONTEXT */ + +/** parallel_reduce overloads that work with anonymous function objects + (see also \ref parallel_reduce_lambda_req "requirements on parallel_reduce anonymous function objects"). **/ + +//! Parallel iteration with deterministic reduction and default simple partitioner. +// TODO: consider making static_partitioner the default +/** @ingroup algorithms **/ +template +Value parallel_deterministic_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction ) { + return parallel_deterministic_reduce(range, identity, real_body, reduction, simple_partitioner()); +} + +//! Parallel iteration with deterministic reduction and simple partitioner. +/** @ingroup algorithms **/ +template +Value parallel_deterministic_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, const simple_partitioner& partitioner ) { + internal::lambda_reduce_body body(identity, real_body, reduction); + internal::start_deterministic_reduce, const simple_partitioner> + ::run(range, body, partitioner); + return body.result(); +} + +//! Parallel iteration with deterministic reduction and static partitioner. 
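A short sketch of the deterministic variants declared here, again over an illustrative vector: the split/join tree does not depend on load balancing, so floating-point results are reproducible from run to run. Since simple_partitioner is the default, an explicit grainsize keeps the task count reasonable.

#include "tbb/parallel_reduce.h"
#include "tbb/blocked_range.h"
#include <vector>

float deterministic_sum( const std::vector<float>& a ) {
    // A partitioner can be passed as an extra trailing argument:
    // tbb::simple_partitioner() (the default) or tbb::static_partitioner().
    return tbb::parallel_deterministic_reduce(
        tbb::blocked_range<size_t>(0, a.size(), /*grainsize=*/1024), 0.0f,
        [&a]( const tbb::blocked_range<size_t>& r, float running ) {
            for( size_t i = r.begin(); i != r.end(); ++i )
                running += a[i];
            return running;
        },
        []( float x, float y ) { return x + y; } );
}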
+/** @ingroup algorithms **/ +template +Value parallel_deterministic_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, const static_partitioner& partitioner ) { + internal::lambda_reduce_body body(identity, real_body, reduction); + internal::start_deterministic_reduce, const static_partitioner> + ::run(range, body, partitioner); + return body.result(); +} +#if __TBB_TASK_GROUP_CONTEXT +//! Parallel iteration with deterministic reduction, default simple partitioner and user-supplied context. +/** @ingroup algorithms **/ +template +Value parallel_deterministic_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, + task_group_context& context ) { + return parallel_deterministic_reduce(range, identity, real_body, reduction, simple_partitioner(), context); +} + +//! Parallel iteration with deterministic reduction, simple partitioner and user-supplied context. +/** @ingroup algorithms **/ +template +Value parallel_deterministic_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, + const simple_partitioner& partitioner, task_group_context& context ) { + internal::lambda_reduce_body body(identity, real_body, reduction); + internal::start_deterministic_reduce, const simple_partitioner> + ::run(range, body, partitioner, context); + return body.result(); +} + +//! Parallel iteration with deterministic reduction, static partitioner and user-supplied context. +/** @ingroup algorithms **/ +template +Value parallel_deterministic_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, + const static_partitioner& partitioner, task_group_context& context ) { + internal::lambda_reduce_body body(identity, real_body, reduction); + internal::start_deterministic_reduce, const static_partitioner> + ::run(range, body, partitioner, context); + return body.result(); +} +#endif /* __TBB_TASK_GROUP_CONTEXT */ +//@} + +} // namespace tbb + +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_parallel_reduce_H_include_area + +#endif /* __TBB_parallel_reduce_H */ diff --git a/ohos/arm64-v8a/include/tbb/parallel_scan.h b/ohos/arm64-v8a/include/tbb/parallel_scan.h new file mode 100644 index 00000000..7930b5c4 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/parallel_scan.h @@ -0,0 +1,416 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_parallel_scan_H +#define __TBB_parallel_scan_H + +#define __TBB_parallel_scan_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + +#include "task.h" +#include "aligned_space.h" +#include +#include "partitioner.h" + +namespace tbb { + +//! Used to indicate that the initial scan is being performed. +/** @ingroup algorithms */ +struct pre_scan_tag { + static bool is_final_scan() {return false;} + operator bool() {return is_final_scan();} +}; + +//! 
Used to indicate that the final scan is being performed. +/** @ingroup algorithms */ +struct final_scan_tag { + static bool is_final_scan() {return true;} + operator bool() {return is_final_scan();} +}; + +//! @cond INTERNAL +namespace internal { + + //! Performs final scan for a leaf + /** @ingroup algorithms */ + template + class final_sum: public task { + public: + Body my_body; + private: + aligned_space my_range; + //! Where to put result of last subrange, or NULL if not last subrange. + Body* my_stuff_last; + public: + final_sum( Body& body_ ) : + my_body(body_,split()) + { + poison_pointer(my_stuff_last); + } + ~final_sum() { + my_range.begin()->~Range(); + } + void finish_construction( const Range& range_, Body* stuff_last_ ) { + new( my_range.begin() ) Range(range_); + my_stuff_last = stuff_last_; + } + private: + task* execute() __TBB_override { + my_body( *my_range.begin(), final_scan_tag() ); + if( my_stuff_last ) + my_stuff_last->assign(my_body); + return NULL; + } + }; + + //! Split work to be done in the scan. + /** @ingroup algorithms */ + template + class sum_node: public task { + typedef final_sum final_sum_type; + public: + final_sum_type *my_incoming; + final_sum_type *my_body; + Body *my_stuff_last; + private: + final_sum_type *my_left_sum; + sum_node *my_left; + sum_node *my_right; + bool my_left_is_final; + Range my_range; + sum_node( const Range range_, bool left_is_final_ ) : + my_stuff_last(NULL), + my_left_sum(NULL), + my_left(NULL), + my_right(NULL), + my_left_is_final(left_is_final_), + my_range(range_) + { + // Poison fields that will be set by second pass. + poison_pointer(my_body); + poison_pointer(my_incoming); + } + task* create_child( const Range& range_, final_sum_type& f, sum_node* n, final_sum_type* incoming_, Body* stuff_last_ ) { + if( !n ) { + f.recycle_as_child_of( *this ); + f.finish_construction( range_, stuff_last_ ); + return &f; + } else { + n->my_body = &f; + n->my_incoming = incoming_; + n->my_stuff_last = stuff_last_; + return n; + } + } + task* execute() __TBB_override { + if( my_body ) { + if( my_incoming ) + my_left_sum->my_body.reverse_join( my_incoming->my_body ); + recycle_as_continuation(); + sum_node& c = *this; + task* b = c.create_child(Range(my_range,split()),*my_left_sum,my_right,my_left_sum,my_stuff_last); + task* a = my_left_is_final ? NULL : c.create_child(my_range,*my_body,my_left,my_incoming,NULL); + set_ref_count( (a!=NULL)+(b!=NULL) ); + my_body = NULL; + if( a ) spawn(*b); + else a = b; + return a; + } else { + return NULL; + } + } + template + friend class start_scan; + + template + friend class finish_scan; + }; + + //! 
Combine partial results + /** @ingroup algorithms */ + template + class finish_scan: public task { + typedef sum_node sum_node_type; + typedef final_sum final_sum_type; + final_sum_type** const my_sum; + sum_node_type*& my_return_slot; + public: + final_sum_type* my_right_zombie; + sum_node_type& my_result; + + task* execute() __TBB_override { + __TBB_ASSERT( my_result.ref_count()==(my_result.my_left!=NULL)+(my_result.my_right!=NULL), NULL ); + if( my_result.my_left ) + my_result.my_left_is_final = false; + if( my_right_zombie && my_sum ) + ((*my_sum)->my_body).reverse_join(my_result.my_left_sum->my_body); + __TBB_ASSERT( !my_return_slot, NULL ); + if( my_right_zombie || my_result.my_right ) { + my_return_slot = &my_result; + } else { + destroy( my_result ); + } + if( my_right_zombie && !my_sum && !my_result.my_right ) { + destroy(*my_right_zombie); + my_right_zombie = NULL; + } + return NULL; + } + + finish_scan( sum_node_type*& return_slot_, final_sum_type** sum_, sum_node_type& result_ ) : + my_sum(sum_), + my_return_slot(return_slot_), + my_right_zombie(NULL), + my_result(result_) + { + __TBB_ASSERT( !my_return_slot, NULL ); + } + }; + + //! Initial task to split the work + /** @ingroup algorithms */ + template + class start_scan: public task { + typedef sum_node sum_node_type; + typedef final_sum final_sum_type; + final_sum_type* my_body; + /** Non-null if caller is requesting total. */ + final_sum_type** my_sum; + sum_node_type** my_return_slot; + /** Null if computing root. */ + sum_node_type* my_parent_sum; + bool my_is_final; + bool my_is_right_child; + Range my_range; + typename Partitioner::partition_type my_partition; + task* execute() __TBB_override ; + public: + start_scan( sum_node_type*& return_slot_, start_scan& parent_, sum_node_type* parent_sum_ ) : + my_body(parent_.my_body), + my_sum(parent_.my_sum), + my_return_slot(&return_slot_), + my_parent_sum(parent_sum_), + my_is_final(parent_.my_is_final), + my_is_right_child(false), + my_range(parent_.my_range,split()), + my_partition(parent_.my_partition,split()) + { + __TBB_ASSERT( !*my_return_slot, NULL ); + } + + start_scan( sum_node_type*& return_slot_, const Range& range_, final_sum_type& body_, const Partitioner& partitioner_) : + my_body(&body_), + my_sum(NULL), + my_return_slot(&return_slot_), + my_parent_sum(NULL), + my_is_final(true), + my_is_right_child(false), + my_range(range_), + my_partition(partitioner_) + { + __TBB_ASSERT( !*my_return_slot, NULL ); + } + + static void run( const Range& range_, Body& body_, const Partitioner& partitioner_ ) { + if( !range_.empty() ) { + typedef internal::start_scan start_pass1_type; + internal::sum_node* root = NULL; + final_sum_type* temp_body = new(task::allocate_root()) final_sum_type( body_ ); + start_pass1_type& pass1 = *new(task::allocate_root()) start_pass1_type( + /*my_return_slot=*/root, + range_, + *temp_body, + partitioner_ ); + temp_body->my_body.reverse_join(body_); + task::spawn_root_and_wait( pass1 ); + if( root ) { + root->my_body = temp_body; + root->my_incoming = NULL; + root->my_stuff_last = &body_; + task::spawn_root_and_wait( *root ); + } else { + body_.assign(temp_body->my_body); + temp_body->finish_construction( range_, NULL ); + temp_body->destroy(*temp_body); + } + } + } + }; + + template + task* start_scan::execute() { + typedef internal::finish_scan finish_pass1_type; + finish_pass1_type* p = my_parent_sum ? static_cast( parent() ) : NULL; + // Inspecting p->result.left_sum would ordinarily be a race condition. 
+ // But we inspect it only if we are not a stolen task, in which case we + // know that task assigning to p->result.left_sum has completed. + bool treat_as_stolen = my_is_right_child && (is_stolen_task() || my_body!=p->my_result.my_left_sum); + if( treat_as_stolen ) { + // Invocation is for right child that has been really stolen or needs to be virtually stolen + p->my_right_zombie = my_body = new( allocate_root() ) final_sum_type(my_body->my_body); + my_is_final = false; + } + task* next_task = NULL; + if( (my_is_right_child && !treat_as_stolen) || !my_range.is_divisible() || my_partition.should_execute_range(*this) ) { + if( my_is_final ) + (my_body->my_body)( my_range, final_scan_tag() ); + else if( my_sum ) + (my_body->my_body)( my_range, pre_scan_tag() ); + if( my_sum ) + *my_sum = my_body; + __TBB_ASSERT( !*my_return_slot, NULL ); + } else { + sum_node_type* result; + if( my_parent_sum ) + result = new(allocate_additional_child_of(*my_parent_sum)) sum_node_type(my_range,/*my_left_is_final=*/my_is_final); + else + result = new(task::allocate_root()) sum_node_type(my_range,/*my_left_is_final=*/my_is_final); + finish_pass1_type& c = *new( allocate_continuation()) finish_pass1_type(*my_return_slot,my_sum,*result); + // Split off right child + start_scan& b = *new( c.allocate_child() ) start_scan( /*my_return_slot=*/result->my_right, *this, result ); + b.my_is_right_child = true; + // Left child is recycling of *this. Must recycle this before spawning b, + // otherwise b might complete and decrement c.ref_count() to zero, which + // would cause c.execute() to run prematurely. + recycle_as_child_of(c); + c.set_ref_count(2); + c.spawn(b); + my_sum = &result->my_left_sum; + my_return_slot = &result->my_left; + my_is_right_child = false; + next_task = this; + my_parent_sum = result; + __TBB_ASSERT( !*my_return_slot, NULL ); + } + return next_task; + } + + template + class lambda_scan_body : no_assign { + Value my_sum; + const Value& identity_element; + const Scan& my_scan; + const ReverseJoin& my_reverse_join; + public: + lambda_scan_body( const Value& identity, const Scan& scan, const ReverseJoin& rev_join) + : my_sum(identity) + , identity_element(identity) + , my_scan(scan) + , my_reverse_join(rev_join) {} + + lambda_scan_body( lambda_scan_body& b, split ) + : my_sum(b.identity_element) + , identity_element(b.identity_element) + , my_scan(b.my_scan) + , my_reverse_join(b.my_reverse_join) {} + + template + void operator()( const Range& r, Tag tag ) { + my_sum = my_scan(r, my_sum, tag); + } + + void reverse_join( lambda_scan_body& a ) { + my_sum = my_reverse_join(a.my_sum, my_sum); + } + + void assign( lambda_scan_body& b ) { + my_sum = b.my_sum; + } + + Value result() const { + return my_sum; + } + }; +} // namespace internal +//! @endcond + +// Requirements on Range concept are documented in blocked_range.h + +/** \page parallel_scan_body_req Requirements on parallel_scan body + Class \c Body implementing the concept of parallel_scan body must define: + - \code Body::Body( Body&, split ); \endcode Splitting constructor. 
+ Split \c b so that \c this and \c b can accumulate separately + - \code Body::~Body(); \endcode Destructor + - \code void Body::operator()( const Range& r, pre_scan_tag ); \endcode + Preprocess iterations for range \c r + - \code void Body::operator()( const Range& r, final_scan_tag ); \endcode + Do final processing for iterations of range \c r + - \code void Body::reverse_join( Body& a ); \endcode + Merge preprocessing state of \c a into \c this, where \c a was + created earlier from \c b by b's splitting constructor +**/ + +/** \name parallel_scan + See also requirements on \ref range_req "Range" and \ref parallel_scan_body_req "parallel_scan Body". **/ +//@{ + +//! Parallel prefix with default partitioner +/** @ingroup algorithms **/ +template +void parallel_scan( const Range& range, Body& body ) { + internal::start_scan::run(range,body,__TBB_DEFAULT_PARTITIONER()); +} + +//! Parallel prefix with simple_partitioner +/** @ingroup algorithms **/ +template +void parallel_scan( const Range& range, Body& body, const simple_partitioner& partitioner ) { + internal::start_scan::run(range,body,partitioner); +} + +//! Parallel prefix with auto_partitioner +/** @ingroup algorithms **/ +template +void parallel_scan( const Range& range, Body& body, const auto_partitioner& partitioner ) { + internal::start_scan::run(range,body,partitioner); +} + +//! Parallel prefix with default partitioner +/** @ingroup algorithms **/ +template +Value parallel_scan( const Range& range, const Value& identity, const Scan& scan, const ReverseJoin& reverse_join ) { + internal::lambda_scan_body body(identity, scan, reverse_join); + tbb::parallel_scan(range,body,__TBB_DEFAULT_PARTITIONER()); + return body.result(); +} + +//! Parallel prefix with simple_partitioner +/** @ingroup algorithms **/ +template +Value parallel_scan( const Range& range, const Value& identity, const Scan& scan, const ReverseJoin& reverse_join, const simple_partitioner& partitioner ) { + internal::lambda_scan_body body(identity, scan, reverse_join); + tbb::parallel_scan(range,body,partitioner); + return body.result(); +} + +//! Parallel prefix with auto_partitioner +/** @ingroup algorithms **/ +template +Value parallel_scan( const Range& range, const Value& identity, const Scan& scan, const ReverseJoin& reverse_join, const auto_partitioner& partitioner ) { + internal::lambda_scan_body body(identity, scan, reverse_join); + tbb::parallel_scan(range,body,partitioner); + return body.result(); +} + +//@} + +} // namespace tbb + +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_parallel_scan_H_include_area + +#endif /* __TBB_parallel_scan_H */ + diff --git a/ohos/arm64-v8a/include/tbb/parallel_sort.h b/ohos/arm64-v8a/include/tbb/parallel_sort.h new file mode 100644 index 00000000..b865b2ee --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/parallel_sort.h @@ -0,0 +1,257 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
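A sketch of the functional parallel_scan form declared above, computing an inclusive prefix sum; it assumes C++14 generic lambdas so a single functor can accept both pre_scan_tag and final_scan_tag (both convert to bool). The names are illustrative.

#include "tbb/parallel_scan.h"
#include "tbb/blocked_range.h"
#include <vector>

// y[i] = x[0] + ... + x[i]. The scan functor may run twice over a subrange:
// a pre-scan pass that only accumulates, and a final pass that also stores.
std::vector<int> prefix_sum( const std::vector<int>& x ) {
    std::vector<int> y( x.size() );
    tbb::parallel_scan(
        tbb::blocked_range<size_t>(0, x.size()), 0,
        [&]( const tbb::blocked_range<size_t>& r, int sum, auto is_final ) {
            for( size_t i = r.begin(); i != r.end(); ++i ) {
                sum += x[i];
                if( is_final ) y[i] = sum;
            }
            return sum;
        },
        []( int left, int right ) { return left + right; } );
    return y;
}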
+*/ + +#ifndef __TBB_parallel_sort_H +#define __TBB_parallel_sort_H + +#define __TBB_parallel_sort_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + +#include "parallel_for.h" +#include "blocked_range.h" +#include "internal/_range_iterator.h" +#include +#include +#include +#if __TBB_TASK_GROUP_CONTEXT + #include "tbb_profiling.h" +#endif + +namespace tbb { + +namespace interface9 { +//! @cond INTERNAL +namespace internal { + +using tbb::internal::no_assign; + +//! Range used in quicksort to split elements into subranges based on a value. +/** The split operation selects a splitter and places all elements less than or equal + to the value in the first range and the remaining elements in the second range. + @ingroup algorithms */ +template +class quick_sort_range: private no_assign { + + inline size_t median_of_three(const RandomAccessIterator &array, size_t l, size_t m, size_t r) const { + return comp(array[l], array[m]) ? ( comp(array[m], array[r]) ? m : ( comp( array[l], array[r]) ? r : l ) ) + : ( comp(array[r], array[m]) ? m : ( comp( array[r], array[l] ) ? r : l ) ); + } + + inline size_t pseudo_median_of_nine( const RandomAccessIterator &array, const quick_sort_range &range ) const { + size_t offset = range.size/8u; + return median_of_three(array, + median_of_three(array, 0, offset, offset*2), + median_of_three(array, offset*3, offset*4, offset*5), + median_of_three(array, offset*6, offset*7, range.size - 1) ); + + } + + size_t split_range( quick_sort_range& range ) { + using std::iter_swap; + RandomAccessIterator array = range.begin; + RandomAccessIterator key0 = range.begin; + size_t m = pseudo_median_of_nine(array, range); + if (m) iter_swap ( array, array+m ); + + size_t i=0; + size_t j=range.size; + // Partition interval [i+1,j-1] with key *key0. + for(;;) { + __TBB_ASSERT( i=grainsize;} + + quick_sort_range( quick_sort_range& range, split ) + : comp(range.comp) + , size(split_range(range)) + // +1 accounts for the pivot element, which is at its correct place + // already and, therefore, is not included into subranges. + , begin(range.begin+range.size+1) {} +}; + +#if __TBB_TASK_GROUP_CONTEXT +//! Body class used to test if elements in a range are presorted +/** @ingroup algorithms */ +template +class quick_sort_pretest_body : no_assign { + const Compare ∁ + +public: + quick_sort_pretest_body(const Compare &_comp) : comp(_comp) {} + + void operator()( const blocked_range& range ) const { + task &my_task = task::self(); + RandomAccessIterator my_end = range.end(); + + int i = 0; + for (RandomAccessIterator k = range.begin(); k != my_end; ++k, ++i) { + if ( i%64 == 0 && my_task.is_cancelled() ) break; + + // The k-1 is never out-of-range because the first chunk starts at begin+serial_cutoff+1 + if ( comp( *(k), *(k-1) ) ) { + my_task.cancel_group_execution(); + break; + } + } + } + +}; +#endif /* __TBB_TASK_GROUP_CONTEXT */ + +//! Body class used to sort elements in a range that is smaller than the grainsize. +/** @ingroup algorithms */ +template +struct quick_sort_body { + void operator()( const quick_sort_range& range ) const { + //SerialQuickSort( range.begin, range.size, range.comp ); + std::sort( range.begin, range.begin + range.size, range.comp ); + } +}; + +//! Wrapper method to initiate the sort by calling parallel_for. 
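The nested conditionals in median_of_three above are easy to misread; here is a standalone restatement checked against one concrete triple (values and comparator are illustrative).

#include <cassert>
#include <cstddef>
#include <functional>

template<typename Array, typename Compare>
std::size_t median_index( const Array& a, std::size_t l, std::size_t m, std::size_t r, const Compare& comp ) {
    return comp(a[l], a[m]) ? ( comp(a[m], a[r]) ? m : ( comp(a[l], a[r]) ? r : l ) )
                            : ( comp(a[r], a[m]) ? m : ( comp(a[r], a[l]) ? r : l ) );
}

int main() {
    int v[] = { 3, 9, 5 };
    // 5 is the median of {3, 9, 5}, so index 2 is selected as the pivot candidate.
    assert( median_index( v, 0, 1, 2, std::less<int>() ) == 2 );
    return 0;
}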
+/** @ingroup algorithms */ +template +void parallel_quick_sort( RandomAccessIterator begin, RandomAccessIterator end, const Compare& comp ) { +#if __TBB_TASK_GROUP_CONTEXT + task_group_context my_context(PARALLEL_SORT); + const int serial_cutoff = 9; + + __TBB_ASSERT( begin + serial_cutoff < end, "min_parallel_size is smaller than serial cutoff?" ); + RandomAccessIterator k = begin; + for ( ; k != begin + serial_cutoff; ++k ) { + if ( comp( *(k+1), *k ) ) { + goto do_parallel_quick_sort; + } + } + + parallel_for( blocked_range(k+1, end), + quick_sort_pretest_body(comp), + auto_partitioner(), + my_context); + + if (my_context.is_group_execution_cancelled()) +do_parallel_quick_sort: +#endif /* __TBB_TASK_GROUP_CONTEXT */ + parallel_for( quick_sort_range(begin, end-begin, comp ), + quick_sort_body(), + auto_partitioner() ); +} + +} // namespace internal +//! @endcond +} // namespace interfaceX + +/** \page parallel_sort_iter_req Requirements on iterators for parallel_sort + Requirements on the iterator type \c It and its value type \c T for \c parallel_sort: + + - \code void iter_swap( It a, It b ) \endcode Swaps the values of the elements the given + iterators \c a and \c b are pointing to. \c It should be a random access iterator. + + - \code bool Compare::operator()( const T& x, const T& y ) \endcode True if x comes before y; +**/ + +/** \name parallel_sort + See also requirements on \ref parallel_sort_iter_req "iterators for parallel_sort". **/ +//@{ + +//! Sorts the data in [begin,end) using the given comparator +/** The compare function object is used for all comparisons between elements during sorting. + The compare object must define a bool operator() function. + @ingroup algorithms **/ +template +void parallel_sort( RandomAccessIterator begin, RandomAccessIterator end, const Compare& comp) { + const int min_parallel_size = 500; + if( end > begin ) { + if (end - begin < min_parallel_size) { + std::sort(begin, end, comp); + } else { + interface9::internal::parallel_quick_sort(begin, end, comp); + } + } +} + +//! Sorts the data in [begin,end) with a default comparator \c std::less +/** @ingroup algorithms **/ +template +inline void parallel_sort( RandomAccessIterator begin, RandomAccessIterator end ) { + parallel_sort( begin, end, std::less< typename std::iterator_traits::value_type >() ); +} + +//! Sorts the data in rng using the given comparator +/** @ingroup algorithms **/ +template +void parallel_sort(Range& rng, const Compare& comp) { + parallel_sort(tbb::internal::first(rng), tbb::internal::last(rng), comp); +} + +//! Sorts the data in rng with a default comparator \c std::less +/** @ingroup algorithms **/ +template +void parallel_sort(Range& rng) { + parallel_sort(tbb::internal::first(rng), tbb::internal::last(rng)); +} + +//! 
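A usage sketch of the public overloads above (whole container, iterator range with comparator, raw pointer range); the names are illustrative. Inputs shorter than the internal min_parallel_size threshold fall back to serial std::sort.

#include "tbb/parallel_sort.h"
#include <cstddef>
#include <functional>
#include <vector>

void sort_examples( std::vector<int>& v, float* buf, std::size_t n ) {
    tbb::parallel_sort( v );                                        // container, std::less
    tbb::parallel_sort( v.begin(), v.end(), std::greater<int>() );  // iterators + comparator
    tbb::parallel_sort( buf, buf + n );                             // raw pointer range
}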
Sorts the data in the range \c [begin,end) with a default comparator \c std::less +/** @ingroup algorithms **/ +template +inline void parallel_sort( T * begin, T * end ) { + parallel_sort( begin, end, std::less< T >() ); +} +//@} + + +} // namespace tbb + +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_parallel_sort_H_include_area + +#endif + diff --git a/ohos/arm64-v8a/include/tbb/parallel_while.h b/ohos/arm64-v8a/include/tbb/parallel_while.h new file mode 100644 index 00000000..65984af5 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/parallel_while.h @@ -0,0 +1,188 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_parallel_while +#define __TBB_parallel_while + +#define __TBB_parallel_while_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + +#include "task.h" +#include + +namespace tbb { + +template +class parallel_while; + +//! @cond INTERNAL +namespace internal { + + template class while_task; + + //! For internal use only. + /** Executes one iteration of a while. + @ingroup algorithms */ + template + class while_iteration_task: public task { + const Body& my_body; + typename Body::argument_type my_value; + task* execute() __TBB_override { + my_body(my_value); + return NULL; + } + while_iteration_task( const typename Body::argument_type& value, const Body& body ) : + my_body(body), my_value(value) + {} + template friend class while_group_task; + friend class tbb::parallel_while; + }; + + //! For internal use only + /** Unpacks a block of iterations. + @ingroup algorithms */ + template + class while_group_task: public task { + static const size_t max_arg_size = 4; + const Body& my_body; + size_t size; + typename Body::argument_type my_arg[max_arg_size]; + while_group_task( const Body& body ) : my_body(body), size(0) {} + task* execute() __TBB_override { + typedef while_iteration_task iteration_type; + __TBB_ASSERT( size>0, NULL ); + task_list list; + task* t; + size_t k=0; + for(;;) { + t = new( allocate_child() ) iteration_type(my_arg[k],my_body); + if( ++k==size ) break; + list.push_back(*t); + } + set_ref_count(int(k+1)); + spawn(list); + spawn_and_wait_for_all(*t); + return NULL; + } + template friend class while_task; + }; + + //! For internal use only. + /** Gets block of iterations from a stream and packages them into a while_group_task. + @ingroup algorithms */ + template + class while_task: public task { + Stream& my_stream; + const Body& my_body; + empty_task& my_barrier; + task* execute() __TBB_override { + typedef while_group_task block_type; + block_type& t = *new( allocate_additional_child_of(my_barrier) ) block_type(my_body); + size_t k=0; + while( my_stream.pop_if_present(t.my_arg[k]) ) { + if( ++k==block_type::max_arg_size ) { + // There might be more iterations. 
+ recycle_to_reexecute(); + break; + } + } + if( k==0 ) { + destroy(t); + return NULL; + } else { + t.size = k; + return &t; + } + } + while_task( Stream& stream, const Body& body, empty_task& barrier ) : + my_stream(stream), + my_body(body), + my_barrier(barrier) + {} + friend class tbb::parallel_while; + }; + +} // namespace internal +//! @endcond + +//! Parallel iteration over a stream, with optional addition of more work. +/** The Body b has the requirement: \n + "b(v)" \n + "b.argument_type" \n + where v is an argument_type + @ingroup algorithms */ +template +class parallel_while: internal::no_copy { +public: + //! Construct empty non-running parallel while. + parallel_while() : my_body(NULL), my_barrier(NULL) {} + + //! Destructor cleans up data members before returning. + ~parallel_while() { + if( my_barrier ) { + my_barrier->destroy(*my_barrier); + my_barrier = NULL; + } + } + + //! Type of items + typedef typename Body::argument_type value_type; + + //! Apply body.apply to each item in the stream. + /** A Stream s has the requirements \n + "S::value_type" \n + "s.pop_if_present(value) is convertible to bool */ + template + void run( Stream& stream, const Body& body ); + + //! Add a work item while running. + /** Should be executed only by body.apply or a thread spawned therefrom. */ + void add( const value_type& item ); + +private: + const Body* my_body; + empty_task* my_barrier; +}; + +template +template +void parallel_while::run( Stream& stream, const Body& body ) { + using namespace internal; + empty_task& barrier = *new( task::allocate_root() ) empty_task(); + my_body = &body; + my_barrier = &barrier; + my_barrier->set_ref_count(2); + while_task& w = *new( my_barrier->allocate_child() ) while_task( stream, body, barrier ); + my_barrier->spawn_and_wait_for_all(w); + my_barrier->destroy(*my_barrier); + my_barrier = NULL; + my_body = NULL; +} + +template +void parallel_while::add( const value_type& item ) { + __TBB_ASSERT(my_barrier,"attempt to add to parallel_while that is not running"); + typedef internal::while_iteration_task iteration_type; + iteration_type& i = *new( task::allocate_additional_child_of(*my_barrier) ) iteration_type(item,*my_body); + task::self().spawn( i ); +} + +} // namespace + +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_parallel_while_H_include_area + +#endif /* __TBB_parallel_while */ diff --git a/ohos/arm64-v8a/include/tbb/partitioner.h b/ohos/arm64-v8a/include/tbb/partitioner.h new file mode 100644 index 00000000..23990868 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/partitioner.h @@ -0,0 +1,681 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
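A sketch of driving parallel_while, assuming an illustrative stream type backed by tbb::concurrent_queue (anything with pop_if_present works); parallel_do is the newer interface that supersedes this class.

#include "tbb/parallel_while.h"
#include "tbb/concurrent_queue.h"
#include <cstdio>

// Stream concept: pop_if_present(item) returning bool (assumed helper type).
struct IdStream {
    tbb::concurrent_queue<int> ids;
    bool pop_if_present( int& id ) { return ids.try_pop( id ); }
};

// Body concept: argument_type typedef plus a const call operator.
struct PrintBody {
    typedef int argument_type;
    void operator()( int id ) const { std::printf( "item %d\n", id ); }
};

void drain( IdStream& stream ) {
    PrintBody body;
    tbb::parallel_while<PrintBody> w;
    w.run( stream, body );   // body may call w.add(item) to inject more work while running
}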
+*/ + +#ifndef __TBB_partitioner_H +#define __TBB_partitioner_H + +#define __TBB_partitioner_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + +#ifndef __TBB_INITIAL_CHUNKS +// initial task divisions per thread +#define __TBB_INITIAL_CHUNKS 2 +#endif +#ifndef __TBB_RANGE_POOL_CAPACITY +// maximum number of elements in range pool +#define __TBB_RANGE_POOL_CAPACITY 8 +#endif +#ifndef __TBB_INIT_DEPTH +// initial value for depth of range pool +#define __TBB_INIT_DEPTH 5 +#endif +#ifndef __TBB_DEMAND_DEPTH_ADD +// when imbalance is found range splits this value times more +#define __TBB_DEMAND_DEPTH_ADD 1 +#endif +#ifndef __TBB_STATIC_THRESHOLD +// necessary number of clocks for the work to be distributed among all tasks +#define __TBB_STATIC_THRESHOLD 40000 +#endif +#if __TBB_DEFINE_MIC +#define __TBB_NONUNIFORM_TASK_CREATION 1 +#ifdef __TBB_time_stamp +#define __TBB_USE_MACHINE_TIME_STAMPS 1 +#define __TBB_task_duration() __TBB_STATIC_THRESHOLD +#endif // __TBB_machine_time_stamp +#endif // __TBB_DEFINE_MIC + +#include "task.h" +#include "task_arena.h" +#include "aligned_space.h" +#include "atomic.h" +#include "internal/_template_helpers.h" + +#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) + // Workaround for overzealous compiler warnings + #pragma warning (push) + #pragma warning (disable: 4244) +#endif + +namespace tbb { + +class auto_partitioner; +class simple_partitioner; +class static_partitioner; +class affinity_partitioner; + +namespace interface9 { + namespace internal { + class affinity_partition_type; + } +} + +namespace internal { //< @cond INTERNAL +size_t __TBB_EXPORTED_FUNC get_initial_auto_partitioner_divisor(); + +//! Defines entry point for affinity partitioner into tbb run-time library. +class affinity_partitioner_base_v3: no_copy { + friend class tbb::affinity_partitioner; + friend class tbb::interface9::internal::affinity_partition_type; + //! Array that remembers affinities of tree positions to affinity_id. + /** NULL if my_size==0. */ + affinity_id* my_array; + //! Number of elements in my_array. + size_t my_size; + //! Zeros the fields. + affinity_partitioner_base_v3() : my_array(NULL), my_size(0) {} + //! Deallocates my_array. + ~affinity_partitioner_base_v3() {resize(0);} + //! Resize my_array. + /** Retains values if resulting size is the same. */ + void __TBB_EXPORTED_METHOD resize( unsigned factor ); +}; + +//! Provides backward-compatible methods for partition objects without affinity. +class partition_type_base { +public: + void set_affinity( task & ) {} + void note_affinity( task::affinity_id ) {} + task* continue_after_execute_range() {return NULL;} + bool decide_whether_to_delay() {return false;} + void spawn_or_delay( bool, task& b ) { + task::spawn(b); + } +}; + +template class start_scan; + +} //< namespace internal @endcond + +namespace serial { +namespace interface9 { +template class start_for; +} +} + +namespace interface9 { +//! @cond INTERNAL +namespace internal { +using namespace tbb::internal; +template class start_for; +template class start_reduce; +template class start_deterministic_reduce; + +//! 
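Because every tuning macro above is guarded by #ifndef, a translation unit can override them before its first TBB include; a minimal sketch (the values are illustrative, not recommendations):

// Must come before any TBB header that pulls in partitioner.h.
#define __TBB_RANGE_POOL_CAPACITY 16   // larger per-task pool of pending subranges
#define __TBB_INIT_DEPTH 6             // deeper initial split of each range
#include "tbb/parallel_for.h"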
Join task node that contains shared flag for stealing feedback +class flag_task: public task { +public: + tbb::atomic my_child_stolen; + flag_task() { my_child_stolen = false; } + task* execute() __TBB_override { return NULL; } + static void mark_task_stolen(task &t) { + tbb::atomic &flag = static_cast(t.parent())->my_child_stolen; +#if TBB_USE_THREADING_TOOLS + // Threading tools respect lock prefix but report false-positive data-race via plain store + flag.fetch_and_store(true); +#else + flag = true; +#endif //TBB_USE_THREADING_TOOLS + } + static bool is_peer_stolen(task &t) { + return static_cast(t.parent())->my_child_stolen; + } +}; + +//! Depth is a relative depth of recursive division inside a range pool. Relative depth allows +//! infinite absolute depth of the recursion for heavily unbalanced workloads with range represented +//! by a number that cannot fit into machine word. +typedef unsigned char depth_t; + +//! Range pool stores ranges of type T in a circular buffer with MaxCapacity +template +class range_vector { + depth_t my_head; + depth_t my_tail; + depth_t my_size; + depth_t my_depth[MaxCapacity]; // relative depths of stored ranges + tbb::aligned_space my_pool; + +public: + //! initialize via first range in pool + range_vector(const T& elem) : my_head(0), my_tail(0), my_size(1) { + my_depth[0] = 0; + new( static_cast(my_pool.begin()) ) T(elem);//TODO: std::move? + } + ~range_vector() { + while( !empty() ) pop_back(); + } + bool empty() const { return my_size == 0; } + depth_t size() const { return my_size; } + //! Populates range pool via ranges up to max depth or while divisible + //! max_depth starts from 0, e.g. value 2 makes 3 ranges in the pool up to two 1/4 pieces + void split_to_fill(depth_t max_depth) { + while( my_size < MaxCapacity && is_divisible(max_depth) ) { + depth_t prev = my_head; + my_head = (my_head + 1) % MaxCapacity; + new(my_pool.begin()+my_head) T(my_pool.begin()[prev]); // copy TODO: std::move? + my_pool.begin()[prev].~T(); // instead of assignment + new(my_pool.begin()+prev) T(my_pool.begin()[my_head], split()); // do 'inverse' split + my_depth[my_head] = ++my_depth[prev]; + my_size++; + } + } + void pop_back() { + __TBB_ASSERT(my_size > 0, "range_vector::pop_back() with empty size"); + my_pool.begin()[my_head].~T(); + my_size--; + my_head = (my_head + MaxCapacity - 1) % MaxCapacity; + } + void pop_front() { + __TBB_ASSERT(my_size > 0, "range_vector::pop_front() with empty size"); + my_pool.begin()[my_tail].~T(); + my_size--; + my_tail = (my_tail + 1) % MaxCapacity; + } + T& back() { + __TBB_ASSERT(my_size > 0, "range_vector::back() with empty size"); + return my_pool.begin()[my_head]; + } + T& front() { + __TBB_ASSERT(my_size > 0, "range_vector::front() with empty size"); + return my_pool.begin()[my_tail]; + } + //! similarly to front(), returns depth of the first range in the pool + depth_t front_depth() { + __TBB_ASSERT(my_size > 0, "range_vector::front_depth() with empty size"); + return my_depth[my_tail]; + } + depth_t back_depth() { + __TBB_ASSERT(my_size > 0, "range_vector::back_depth() with empty size"); + return my_depth[my_head]; + } + bool is_divisible(depth_t max_depth) { + return back_depth() < max_depth && back().is_divisible(); + } +}; + +//! Provides default methods for partition objects and common algorithm blocks. 
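A conceptual illustration of what split_to_fill does (this is not the internal range_vector itself): the deepest stored range keeps being split in two, both halves one level deeper, until the pool is full, the depth limit is hit, or the range is no longer divisible.

#include "tbb/blocked_range.h"
#include <cstddef>
#include <utility>
#include <vector>

typedef std::pair<unsigned, tbb::blocked_range<int> > depth_and_range;

std::vector<depth_and_range> fill_pool( tbb::blocked_range<int> r,
                                        unsigned max_depth, std::size_t capacity ) {
    std::vector<depth_and_range> pool;
    pool.push_back( depth_and_range( 0u, r ) );
    while( pool.size() < capacity
           && pool.back().first < max_depth
           && pool.back().second.is_divisible() ) {
        unsigned d = ++pool.back().first;                            // both halves go one level deeper
        tbb::blocked_range<int> rest( pool.back().second, tbb::split() );
        pool.push_back( depth_and_range( d, rest ) );
    }
    return pool;   // front(): oldest/shallowest piece, back(): newest/deepest piece
}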
+template +struct partition_type_base { + typedef split split_type; + // decision makers + void set_affinity( task & ) {} + void note_affinity( task::affinity_id ) {} + bool check_being_stolen(task &) { return false; } // part of old should_execute_range() + bool check_for_demand(task &) { return false; } + bool is_divisible() { return true; } // part of old should_execute_range() + depth_t max_depth() { return 0; } + void align_depth(depth_t) { } + template split_type get_split() { return split(); } + Partition& self() { return *static_cast(this); } // CRTP helper + + template + void work_balance(StartType &start, Range &range) { + start.run_body( range ); // simple partitioner goes always here + } + + template + void execute(StartType &start, Range &range) { + // The algorithm in a few words ([]-denotes calls to decision methods of partitioner): + // [If this task is stolen, adjust depth and divisions if necessary, set flag]. + // If range is divisible { + // Spread the work while [initial divisions left]; + // Create trap task [if necessary]; + // } + // If not divisible or [max depth is reached], execute, else do the range pool part + if ( range.is_divisible() ) { + if ( self().is_divisible() ) { + do { // split until is divisible + typename Partition::split_type split_obj = self().template get_split(); + start.offer_work( split_obj ); + } while ( range.is_divisible() && self().is_divisible() ); + } + } + self().work_balance(start, range); + } +}; + +//! Provides default splitting strategy for partition objects. +template +struct adaptive_mode : partition_type_base { + typedef Partition my_partition; + size_t my_divisor; + // For affinity_partitioner, my_divisor indicates the number of affinity array indices the task reserves. + // A task which has only one index must produce the right split without reserved index in order to avoid + // it to be overwritten in note_affinity() of the created (right) task. + // I.e. a task created deeper than the affinity array can remember must not save its affinity (LIFO order) + static const unsigned factor = 1; + adaptive_mode() : my_divisor(tbb::internal::get_initial_auto_partitioner_divisor() / 4 * my_partition::factor) {} + adaptive_mode(adaptive_mode &src, split) : my_divisor(do_split(src, split())) {} + /*! Override do_split methods in order to specify splitting strategy */ + size_t do_split(adaptive_mode &src, split) { + return src.my_divisor /= 2u; + } +}; + +//! A helper class to create a proportional_split object for a given type of Range. +/** If the Range has static boolean constant 'is_splittable_in_proportion' set to 'true', + the created object splits a provided value in an implemenation-defined proportion; + otherwise it represents equal-size split. */ +// TODO: check if this helper can be a nested class of proportional_mode. +template +struct proportion_helper { + static proportional_split get_split(size_t) { return proportional_split(1,1); } +}; +template +struct proportion_helper::type> { + static proportional_split get_split(size_t n) { +#if __TBB_NONUNIFORM_TASK_CREATION + size_t right = (n + 2) / 3; +#else + size_t right = n / 2; +#endif + size_t left = n - right; + return proportional_split(left, right); + } +}; + +//! 
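A standalone restatement of the arithmetic in proportion_helper above: with __TBB_NONUNIFORM_TASK_CREATION the right child receives roughly a third of the remaining slots, otherwise half (n = 16 is an arbitrary example).

#include <cstddef>
#include <cstdio>

static void show_split( std::size_t n, bool nonuniform ) {
    std::size_t right = nonuniform ? (n + 2) / 3 : n / 2;
    std::size_t left  = n - right;
    std::printf( "n=%zu -> left=%zu right=%zu\n", n, left, right );
}

int main() {
    show_split( 16, false );   // n=16 -> left=8  right=8
    show_split( 16, true );    // n=16 -> left=10 right=6
    return 0;
}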
Provides proportional splitting strategy for partition objects +template +struct proportional_mode : adaptive_mode { + typedef Partition my_partition; + using partition_type_base::self; // CRTP helper to get access to derived classes + + proportional_mode() : adaptive_mode() {} + proportional_mode(proportional_mode &src, split) : adaptive_mode(src, split()) {} + proportional_mode(proportional_mode &src, const proportional_split& split_obj) { self().my_divisor = do_split(src, split_obj); } + size_t do_split(proportional_mode &src, const proportional_split& split_obj) { +#if __TBB_ENABLE_RANGE_FEEDBACK + size_t portion = size_t(float(src.my_divisor) * float(split_obj.right()) + / float(split_obj.left() + split_obj.right()) + 0.5f); +#else + size_t portion = split_obj.right() * my_partition::factor; +#endif + portion = (portion + my_partition::factor/2) & (0ul - my_partition::factor); +#if __TBB_ENABLE_RANGE_FEEDBACK + /** Corner case handling */ + if (!portion) + portion = my_partition::factor; + else if (portion == src.my_divisor) + portion = src.my_divisor - my_partition::factor; +#endif + src.my_divisor -= portion; + return portion; + } + bool is_divisible() { // part of old should_execute_range() + return self().my_divisor > my_partition::factor; + } + template + proportional_split get_split() { + // Create a proportion for the number of threads expected to handle "this" subrange + return proportion_helper::get_split( self().my_divisor / my_partition::factor ); + } +}; + +static size_t get_initial_partition_head() { + int current_index = tbb::this_task_arena::current_thread_index(); + if (current_index == tbb::task_arena::not_initialized) + current_index = 0; + return size_t(current_index); +} + +//! Provides default linear indexing of partitioner's sequence +template +struct linear_affinity_mode : proportional_mode { + size_t my_head; + size_t my_max_affinity; + using proportional_mode::self; + linear_affinity_mode() : proportional_mode(), my_head(get_initial_partition_head()), + my_max_affinity(self().my_divisor) {} + linear_affinity_mode(linear_affinity_mode &src, split) : proportional_mode(src, split()) + , my_head((src.my_head + src.my_divisor) % src.my_max_affinity), my_max_affinity(src.my_max_affinity) {} + linear_affinity_mode(linear_affinity_mode &src, const proportional_split& split_obj) : proportional_mode(src, split_obj) + , my_head((src.my_head + src.my_divisor) % src.my_max_affinity), my_max_affinity(src.my_max_affinity) {} + void set_affinity( task &t ) { + if( self().my_divisor ) + t.set_affinity( affinity_id(my_head) + 1 ); + } +}; + +/*! 
Determine work-balance phase implementing splitting & stealing actions */ +template +struct dynamic_grainsize_mode : Mode { + using Mode::self; +#ifdef __TBB_USE_MACHINE_TIME_STAMPS + tbb::internal::machine_tsc_t my_dst_tsc; +#endif + enum { + begin = 0, + run, + pass + } my_delay; + depth_t my_max_depth; + static const unsigned range_pool_size = __TBB_RANGE_POOL_CAPACITY; + dynamic_grainsize_mode(): Mode() +#ifdef __TBB_USE_MACHINE_TIME_STAMPS + , my_dst_tsc(0) +#endif + , my_delay(begin) + , my_max_depth(__TBB_INIT_DEPTH) {} + dynamic_grainsize_mode(dynamic_grainsize_mode& p, split) + : Mode(p, split()) +#ifdef __TBB_USE_MACHINE_TIME_STAMPS + , my_dst_tsc(0) +#endif + , my_delay(pass) + , my_max_depth(p.my_max_depth) {} + dynamic_grainsize_mode(dynamic_grainsize_mode& p, const proportional_split& split_obj) + : Mode(p, split_obj) +#ifdef __TBB_USE_MACHINE_TIME_STAMPS + , my_dst_tsc(0) +#endif + , my_delay(begin) + , my_max_depth(p.my_max_depth) {} + bool check_being_stolen(task &t) { // part of old should_execute_range() + if( !(self().my_divisor / Mode::my_partition::factor) ) { // if not from the top P tasks of binary tree + self().my_divisor = 1; // TODO: replace by on-stack flag (partition_state's member)? + if( t.is_stolen_task() && t.parent()->ref_count() >= 2 ) { // runs concurrently with the left task +#if __TBB_USE_OPTIONAL_RTTI + // RTTI is available, check whether the cast is valid + __TBB_ASSERT(dynamic_cast(t.parent()), 0); + // correctness of the cast relies on avoiding the root task for which: + // - initial value of my_divisor != 0 (protected by separate assertion) + // - is_stolen_task() always returns false for the root task. +#endif + flag_task::mark_task_stolen(t); + if( !my_max_depth ) my_max_depth++; + my_max_depth += __TBB_DEMAND_DEPTH_ADD; + return true; + } + } + return false; + } + depth_t max_depth() { return my_max_depth; } + void align_depth(depth_t base) { + __TBB_ASSERT(base <= my_max_depth, 0); + my_max_depth -= base; + } + template + void work_balance(StartType &start, Range &range) { + if( !range.is_divisible() || !self().max_depth() ) { + start.run_body( range ); // simple partitioner goes always here + } + else { // do range pool + internal::range_vector range_pool(range); + do { + range_pool.split_to_fill(self().max_depth()); // fill range pool + if( self().check_for_demand( start ) ) { + if( range_pool.size() > 1 ) { + start.offer_work( range_pool.front(), range_pool.front_depth() ); + range_pool.pop_front(); + continue; + } + if( range_pool.is_divisible(self().max_depth()) ) // was not enough depth to fork a task + continue; // note: next split_to_fill() should split range at least once + } + start.run_body( range_pool.back() ); + range_pool.pop_back(); + } while( !range_pool.empty() && !start.is_cancelled() ); + } + } + bool check_for_demand( task &t ) { + if( pass == my_delay ) { + if( self().my_divisor > 1 ) // produce affinitized tasks while they have slot in array + return true; // do not do my_max_depth++ here, but be sure range_pool is splittable once more + else if( self().my_divisor && my_max_depth ) { // make balancing task + self().my_divisor = 0; // once for each task; depth will be decreased in align_depth() + return true; + } + else if( flag_task::is_peer_stolen(t) ) { + my_max_depth += __TBB_DEMAND_DEPTH_ADD; + return true; + } + } else if( begin == my_delay ) { +#ifndef __TBB_USE_MACHINE_TIME_STAMPS + my_delay = pass; +#else + my_dst_tsc = __TBB_time_stamp() + __TBB_task_duration(); + my_delay = run; + } else if( run == my_delay ) { 
+ if( __TBB_time_stamp() < my_dst_tsc ) { + __TBB_ASSERT(my_max_depth > 0, NULL); + my_max_depth--; // increase granularity since tasks seem having too small work + return false; + } + my_delay = pass; + return true; +#endif // __TBB_USE_MACHINE_TIME_STAMPS + } + return false; + } +}; + +class auto_partition_type: public dynamic_grainsize_mode > { +public: + auto_partition_type( const auto_partitioner& ) + : dynamic_grainsize_mode >() { + my_divisor *= __TBB_INITIAL_CHUNKS; + } + auto_partition_type( auto_partition_type& src, split) + : dynamic_grainsize_mode >(src, split()) {} + bool is_divisible() { // part of old should_execute_range() + if( my_divisor > 1 ) return true; + if( my_divisor && my_max_depth ) { // can split the task. TODO: on-stack flag instead + // keep same fragmentation while splitting for the local task pool + my_max_depth--; + my_divisor = 0; // decrease max_depth once per task + return true; + } else return false; + } + bool check_for_demand(task &t) { + if( flag_task::is_peer_stolen(t) ) { + my_max_depth += __TBB_DEMAND_DEPTH_ADD; + return true; + } else return false; + } +}; + +class simple_partition_type: public partition_type_base { +public: + simple_partition_type( const simple_partitioner& ) {} + simple_partition_type( const simple_partition_type&, split ) {} + //! simplified algorithm + template + void execute(StartType &start, Range &range) { + split_type split_obj = split(); // start.offer_work accepts split_type as reference + while( range.is_divisible() ) + start.offer_work( split_obj ); + start.run_body( range ); + } +}; + +class static_partition_type : public linear_affinity_mode { +public: + typedef proportional_split split_type; + static_partition_type( const static_partitioner& ) + : linear_affinity_mode() {} + static_partition_type( static_partition_type& p, split ) + : linear_affinity_mode(p, split()) {} + static_partition_type( static_partition_type& p, const proportional_split& split_obj ) + : linear_affinity_mode(p, split_obj) {} +}; + +class affinity_partition_type : public dynamic_grainsize_mode > { + static const unsigned factor_power = 4; // TODO: get a unified formula based on number of computing units + tbb::internal::affinity_id* my_array; +public: + static const unsigned factor = 1 << factor_power; // number of slots in affinity array per task + typedef proportional_split split_type; + affinity_partition_type( tbb::internal::affinity_partitioner_base_v3& ap ) + : dynamic_grainsize_mode >() { + __TBB_ASSERT( (factor&(factor-1))==0, "factor must be power of two" ); + ap.resize(factor); + my_array = ap.my_array; + my_max_depth = factor_power + 1; + __TBB_ASSERT( my_max_depth < __TBB_RANGE_POOL_CAPACITY, 0 ); + } + affinity_partition_type(affinity_partition_type& p, split) + : dynamic_grainsize_mode >(p, split()) + , my_array(p.my_array) {} + affinity_partition_type(affinity_partition_type& p, const proportional_split& split_obj) + : dynamic_grainsize_mode >(p, split_obj) + , my_array(p.my_array) {} + void set_affinity( task &t ) { + if( my_divisor ) { + if( !my_array[my_head] ) + // TODO: consider new ideas with my_array for both affinity and static partitioner's, then code reuse + t.set_affinity( affinity_id(my_head / factor + 1) ); + else + t.set_affinity( my_array[my_head] ); + } + } + void note_affinity( task::affinity_id id ) { + if( my_divisor ) + my_array[my_head] = id; + } +}; + +//! Backward-compatible partition for auto and affinity partition objects. 
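+/* Editorial note: a brief usage sketch, added for illustration and not part of
+   the original header. The partition types above implement the public
+   partitioners declared later in this file; a typical call site passes one to
+   a parallel algorithm (here `body` is a callable taking a blocked_range and
+   `n` is the problem size -- both names are illustrative):
+
+   \code
+   tbb::affinity_partitioner ap;   // reusable object that records affinity
+   tbb::parallel_for(tbb::blocked_range<size_t>(0, n), body, ap);
+   tbb::parallel_for(tbb::blocked_range<size_t>(0, n), body,
+                     tbb::auto_partitioner());
+   \endcode */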
+class old_auto_partition_type: public tbb::internal::partition_type_base { + size_t num_chunks; + static const size_t VICTIM_CHUNKS = 4; +public: + bool should_execute_range(const task &t) { + if( num_chunks friend class serial::interface9::start_for; + template friend class interface9::internal::start_for; + template friend class interface9::internal::start_reduce; + template friend class interface9::internal::start_deterministic_reduce; + template friend class internal::start_scan; + // backward compatibility + class partition_type: public internal::partition_type_base { + public: + bool should_execute_range(const task& ) {return false;} + partition_type( const simple_partitioner& ) {} + partition_type( const partition_type&, split ) {} + }; + // new implementation just extends existing interface + typedef interface9::internal::simple_partition_type task_partition_type; + + // TODO: consider to make split_type public + typedef interface9::internal::simple_partition_type::split_type split_type; +}; + +//! An auto partitioner +/** The range is initial divided into several large chunks. + Chunks are further subdivided into smaller pieces if demand detected and they are divisible. + @ingroup algorithms */ +class auto_partitioner { +public: + auto_partitioner() {} + +private: + template friend class serial::interface9::start_for; + template friend class interface9::internal::start_for; + template friend class interface9::internal::start_reduce; + template friend class internal::start_scan; + // backward compatibility + typedef interface9::internal::old_auto_partition_type partition_type; + // new implementation just extends existing interface + typedef interface9::internal::auto_partition_type task_partition_type; + + // TODO: consider to make split_type public + typedef interface9::internal::auto_partition_type::split_type split_type; +}; + +//! A static partitioner +class static_partitioner { +public: + static_partitioner() {} +private: + template friend class serial::interface9::start_for; + template friend class interface9::internal::start_for; + template friend class interface9::internal::start_reduce; + template friend class interface9::internal::start_deterministic_reduce; + template friend class internal::start_scan; + // backward compatibility + typedef interface9::internal::old_auto_partition_type partition_type; + // new implementation just extends existing interface + typedef interface9::internal::static_partition_type task_partition_type; + + // TODO: consider to make split_type public + typedef interface9::internal::static_partition_type::split_type split_type; +}; + +//! 
An affinity partitioner +class affinity_partitioner: internal::affinity_partitioner_base_v3 { +public: + affinity_partitioner() {} + +private: + template friend class serial::interface9::start_for; + template friend class interface9::internal::start_for; + template friend class interface9::internal::start_reduce; + template friend class internal::start_scan; + // backward compatibility - for parallel_scan only + typedef interface9::internal::old_auto_partition_type partition_type; + // new implementation just extends existing interface + typedef interface9::internal::affinity_partition_type task_partition_type; + + // TODO: consider to make split_type public + typedef interface9::internal::affinity_partition_type::split_type split_type; +}; + +} // namespace tbb + +#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) + #pragma warning (pop) +#endif // warning 4244 is back +#undef __TBB_INITIAL_CHUNKS +#undef __TBB_RANGE_POOL_CAPACITY +#undef __TBB_INIT_DEPTH + +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_partitioner_H_include_area + +#endif /* __TBB_partitioner_H */ diff --git a/ohos/arm64-v8a/include/tbb/pipeline.h b/ohos/arm64-v8a/include/tbb/pipeline.h new file mode 100644 index 00000000..13bf4e33 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/pipeline.h @@ -0,0 +1,682 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_pipeline_H +#define __TBB_pipeline_H + +#define __TBB_pipeline_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + +#include "atomic.h" +#include "task.h" +#include "tbb_allocator.h" +#include + +#if __TBB_CPP11_TYPE_PROPERTIES_PRESENT +#include +#endif + +namespace tbb { + +class pipeline; +class filter; + +//! @cond INTERNAL +namespace internal { + +// The argument for PIPELINE_VERSION should be an integer between 2 and 9 +#define __TBB_PIPELINE_VERSION(x) ((unsigned char)(x-2)<<1) + +typedef unsigned long Token; +typedef long tokendiff_t; +class stage_task; +class input_buffer; +class pipeline_root_task; +class pipeline_cleaner; + +} // namespace internal + +namespace interface6 { + template class filter_t; + + namespace internal { + class pipeline_proxy; + } +} + +//! @endcond + +//! A stage in a pipeline. +/** @ingroup algorithms */ +class filter: internal::no_copy { +private: + //! Value used to mark "not in pipeline" + static filter* not_in_pipeline() { return reinterpret_cast(intptr_t(-1)); } +protected: + //! The lowest bit 0 is for parallel vs. serial + static const unsigned char filter_is_serial = 0x1; + + //! 4th bit distinguishes ordered vs unordered filters. + /** The bit was not set for parallel filters in TBB 2.1 and earlier, + but is_ordered() function always treats parallel filters as out of order. */ + static const unsigned char filter_is_out_of_order = 0x1<<4; + + //! 5th bit distinguishes thread-bound and regular filters. + static const unsigned char filter_is_bound = 0x1<<5; + + //! 
6th bit marks input filters emitting small objects + static const unsigned char filter_may_emit_null = 0x1<<6; + + //! 7th bit defines exception propagation mode expected by the application. + static const unsigned char exact_exception_propagation = +#if TBB_USE_CAPTURED_EXCEPTION + 0x0; +#else + 0x1<<7; +#endif /* TBB_USE_CAPTURED_EXCEPTION */ + + static const unsigned char current_version = __TBB_PIPELINE_VERSION(5); + static const unsigned char version_mask = 0x7<<1; // bits 1-3 are for version +public: + enum mode { + //! processes multiple items in parallel and in no particular order + parallel = current_version | filter_is_out_of_order, + //! processes items one at a time; all such filters process items in the same order + serial_in_order = current_version | filter_is_serial, + //! processes items one at a time and in no particular order + serial_out_of_order = current_version | filter_is_serial | filter_is_out_of_order, + //! @deprecated use serial_in_order instead + serial = serial_in_order + }; +protected: + explicit filter( bool is_serial_ ) : + next_filter_in_pipeline(not_in_pipeline()), + my_input_buffer(NULL), + my_filter_mode(static_cast((is_serial_ ? serial : parallel) | exact_exception_propagation)), + prev_filter_in_pipeline(not_in_pipeline()), + my_pipeline(NULL), + next_segment(NULL) + {} + + explicit filter( mode filter_mode ) : + next_filter_in_pipeline(not_in_pipeline()), + my_input_buffer(NULL), + my_filter_mode(static_cast(filter_mode | exact_exception_propagation)), + prev_filter_in_pipeline(not_in_pipeline()), + my_pipeline(NULL), + next_segment(NULL) + {} + + // signal end-of-input for concrete_filters + void __TBB_EXPORTED_METHOD set_end_of_input(); + +public: + //! True if filter is serial. + bool is_serial() const { + return bool( my_filter_mode & filter_is_serial ); + } + + //! True if filter must receive stream in order. + bool is_ordered() const { + return (my_filter_mode & (filter_is_out_of_order|filter_is_serial))==filter_is_serial; + } + + //! True if filter is thread-bound. + bool is_bound() const { + return ( my_filter_mode & filter_is_bound )==filter_is_bound; + } + + //! true if an input filter can emit null + bool object_may_be_null() { + return ( my_filter_mode & filter_may_emit_null ) == filter_may_emit_null; + } + + //! Operate on an item from the input stream, and return item for output stream. + /** Returns NULL if filter is a sink. */ + virtual void* operator()( void* item ) = 0; + + //! Destroy filter. + /** If the filter was added to a pipeline, the pipeline must be destroyed first. */ + virtual __TBB_EXPORTED_METHOD ~filter(); + +#if __TBB_TASK_GROUP_CONTEXT + //! Destroys item if pipeline was cancelled. + /** Required to prevent memory leaks. + Note it can be called concurrently even for serial filters.*/ + virtual void finalize( void* /*item*/ ) {} +#endif + +private: + //! Pointer to next filter in the pipeline. + filter* next_filter_in_pipeline; + + //! has the filter not yet processed all the tokens it will ever see? + // (pipeline has not yet reached end_of_input or this filter has not yet + // seen the last token produced by input_filter) + bool has_more_work(); + + //! Buffer for incoming tokens, or NULL if not required. + /** The buffer is required if the filter is serial or follows a thread-bound one. */ + internal::input_buffer* my_input_buffer; + + friend class internal::stage_task; + friend class internal::pipeline_root_task; + friend class pipeline; + friend class thread_bound_filter; + + //! 
Storage for filter mode and dynamically checked implementation version. + const unsigned char my_filter_mode; + + //! Pointer to previous filter in the pipeline. + filter* prev_filter_in_pipeline; + + //! Pointer to the pipeline. + pipeline* my_pipeline; + + //! Pointer to the next "segment" of filters, or NULL if not required. + /** In each segment, the first filter is not thread-bound but follows a thread-bound one. */ + filter* next_segment; +}; + +//! A stage in a pipeline served by a user thread. +/** @ingroup algorithms */ +class thread_bound_filter: public filter { +public: + enum result_type { + // item was processed + success, + // item is currently not available + item_not_available, + // there are no more items to process + end_of_stream + }; +protected: + explicit thread_bound_filter(mode filter_mode): + filter(static_cast(filter_mode | filter::filter_is_bound)) + { + __TBB_ASSERT(filter_mode & filter::filter_is_serial, "thread-bound filters must be serial"); + } +public: + //! If a data item is available, invoke operator() on that item. + /** This interface is non-blocking. + Returns 'success' if an item was processed. + Returns 'item_not_available' if no item can be processed now + but more may arrive in the future, or if token limit is reached. + Returns 'end_of_stream' if there are no more items to process. */ + result_type __TBB_EXPORTED_METHOD try_process_item(); + + //! Wait until a data item becomes available, and invoke operator() on that item. + /** This interface is blocking. + Returns 'success' if an item was processed. + Returns 'end_of_stream' if there are no more items to process. + Never returns 'item_not_available', as it blocks until another return condition applies. */ + result_type __TBB_EXPORTED_METHOD process_item(); + +private: + //! Internal routine for item processing + result_type internal_process_item(bool is_blocking); +}; + +//! A processing pipeline that applies filters to items. +/** @ingroup algorithms */ +class __TBB_DEPRECATED_MSG("tbb::pipeline is deprecated, use tbb::parallel_pipeline") pipeline { +public: + //! Construct empty pipeline. + __TBB_EXPORTED_METHOD pipeline(); + + /** Though the current implementation declares the destructor virtual, do not rely on this + detail. The virtualness is deprecated and may disappear in future versions of TBB. */ + virtual __TBB_EXPORTED_METHOD ~pipeline(); + + //! Add filter to end of pipeline. + void __TBB_EXPORTED_METHOD add_filter( filter& filter_ ); + + //! Run the pipeline to completion. + void __TBB_EXPORTED_METHOD run( size_t max_number_of_live_tokens ); + +#if __TBB_TASK_GROUP_CONTEXT + //! Run the pipeline to completion with user-supplied context. + void __TBB_EXPORTED_METHOD run( size_t max_number_of_live_tokens, tbb::task_group_context& context ); +#endif + + //! Remove all filters from the pipeline. + void __TBB_EXPORTED_METHOD clear(); + +private: + friend class internal::stage_task; + friend class internal::pipeline_root_task; + friend class filter; + friend class thread_bound_filter; + friend class internal::pipeline_cleaner; + friend class tbb::interface6::internal::pipeline_proxy; + + //! Pointer to first filter in the pipeline. + filter* filter_list; + + //! Pointer to location where address of next filter to be added should be stored. + filter* filter_end; + + //! task who's reference count is used to determine when all stages are done. + task* end_counter; + + //! Number of idle tokens waiting for input stage. + atomic input_tokens; + + //! 
Global counter of tokens + atomic token_counter; + + //! False until fetch_input returns NULL. + bool end_of_input; + + //! True if the pipeline contains a thread-bound filter; false otherwise. + bool has_thread_bound_filters; + + //! Remove filter from pipeline. + void remove_filter( filter& filter_ ); + + //! Not used, but retained to satisfy old export files. + void __TBB_EXPORTED_METHOD inject_token( task& self ); + +#if __TBB_TASK_GROUP_CONTEXT + //! Does clean up if pipeline is cancelled or exception occurred + void clear_filters(); +#endif +}; + +//------------------------------------------------------------------------ +// Support for lambda-friendly parallel_pipeline interface +//------------------------------------------------------------------------ + +namespace interface6 { + +namespace internal { + template class concrete_filter; +} + +//! input_filter control to signal end-of-input for parallel_pipeline +class flow_control { + bool is_pipeline_stopped; + flow_control() { is_pipeline_stopped = false; } + template friend class internal::concrete_filter; +public: + void stop() { is_pipeline_stopped = true; } +}; + +//! @cond INTERNAL +namespace internal { + +// Emulate std::is_trivially_copyable (false positives not allowed, false negatives suboptimal but safe). +#if __TBB_CPP11_TYPE_PROPERTIES_PRESENT +template struct tbb_trivially_copyable { enum { value = std::is_trivially_copyable::value }; }; +#else +template struct tbb_trivially_copyable { enum { value = false }; }; +template struct tbb_trivially_copyable < T* > { enum { value = true }; }; +template<> struct tbb_trivially_copyable < bool > { enum { value = true }; }; +template<> struct tbb_trivially_copyable < char > { enum { value = true }; }; +template<> struct tbb_trivially_copyable < signed char > { enum { value = true }; }; +template<> struct tbb_trivially_copyable { enum { value = true }; }; +template<> struct tbb_trivially_copyable < short > { enum { value = true }; }; +template<> struct tbb_trivially_copyable { enum { value = true }; }; +template<> struct tbb_trivially_copyable < int > { enum { value = true }; }; +template<> struct tbb_trivially_copyable { enum { value = true }; }; +template<> struct tbb_trivially_copyable < long > { enum { value = true }; }; +template<> struct tbb_trivially_copyable { enum { value = true }; }; +template<> struct tbb_trivially_copyable < long long> { enum { value = true }; }; +template<> struct tbb_trivially_copyable { enum { value = true }; }; +template<> struct tbb_trivially_copyable < float > { enum { value = true }; }; +template<> struct tbb_trivially_copyable < double > { enum { value = true }; }; +template<> struct tbb_trivially_copyable < long double > { enum { value = true }; }; +#if !_MSC_VER || defined(_NATIVE_WCHAR_T_DEFINED) +template<> struct tbb_trivially_copyable < wchar_t > { enum { value = true }; }; +#endif /* _MSC_VER||!defined(_NATIVE_WCHAR_T_DEFINED) */ +#endif // tbb_trivially_copyable + +template +struct use_allocator { + enum { value = sizeof(T) > sizeof(void *) || !tbb_trivially_copyable::value }; +}; + +// A helper class to customize how a type is passed between filters. 
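+// (Editorial note, inferred from the specializations below:) when
+// use_allocator<T>::value is true -- T is larger than a pointer or not
+// trivially copyable -- tokens are heap-allocated with tbb::tbb_allocator and
+// passed between filters as T*. Pointer types are passed through unchanged,
+// and small trivially copyable types are punned into the void* slot via a
+// union, so no allocation is needed for them.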
+// Usage: token_helper::value> +template class token_helper; + +// using tbb_allocator +template +class token_helper { +public: + typedef typename tbb::tbb_allocator allocator; + typedef T* pointer; + typedef T value_type; +#if __TBB_CPP11_RVALUE_REF_PRESENT + static pointer create_token(value_type && source) +#else + static pointer create_token(const value_type & source) +#endif + { + pointer output_t = allocator().allocate(1); + return new (output_t) T(tbb::internal::move(source)); + } + static value_type & token(pointer & t) { return *t; } + static void * cast_to_void_ptr(pointer ref) { return (void *) ref; } + static pointer cast_from_void_ptr(void * ref) { return (pointer)ref; } + static void destroy_token(pointer token) { + allocator().destroy(token); + allocator().deallocate(token,1); + } +}; + +// pointer specialization +template +class token_helper { +public: + typedef T* pointer; + typedef T* value_type; + static pointer create_token(const value_type & source) { return source; } + static value_type & token(pointer & t) { return t; } + static void * cast_to_void_ptr(pointer ref) { return (void *)ref; } + static pointer cast_from_void_ptr(void * ref) { return (pointer)ref; } + static void destroy_token( pointer /*token*/) {} +}; + +// converting type to and from void*, passing objects directly +template +class token_helper { + typedef union { + T actual_value; + void * void_overlay; + } type_to_void_ptr_map; +public: + typedef T pointer; // not really a pointer in this case. + typedef T value_type; + static pointer create_token(const value_type & source) { return source; } + static value_type & token(pointer & t) { return t; } + static void * cast_to_void_ptr(pointer ref) { + type_to_void_ptr_map mymap; + mymap.void_overlay = NULL; + mymap.actual_value = ref; + return mymap.void_overlay; + } + static pointer cast_from_void_ptr(void * ref) { + type_to_void_ptr_map mymap; + mymap.void_overlay = ref; + return mymap.actual_value; + } + static void destroy_token( pointer /*token*/) {} +}; + +// intermediate +template +class concrete_filter: public tbb::filter { + const Body& my_body; + typedef token_helper::value> t_helper; + typedef typename t_helper::pointer t_pointer; + typedef token_helper::value> u_helper; + typedef typename u_helper::pointer u_pointer; + + void* operator()(void* input) __TBB_override { + t_pointer temp_input = t_helper::cast_from_void_ptr(input); + u_pointer output_u = u_helper::create_token(my_body(tbb::internal::move(t_helper::token(temp_input)))); + t_helper::destroy_token(temp_input); + return u_helper::cast_to_void_ptr(output_u); + } + + void finalize(void * input) __TBB_override { + t_pointer temp_input = t_helper::cast_from_void_ptr(input); + t_helper::destroy_token(temp_input); + } + +public: + concrete_filter(tbb::filter::mode filter_mode, const Body& body) : filter(filter_mode), my_body(body) {} +}; + +// input +template +class concrete_filter: public filter { + const Body& my_body; + typedef token_helper::value> u_helper; + typedef typename u_helper::pointer u_pointer; + + void* operator()(void*) __TBB_override { + flow_control control; + u_pointer output_u = u_helper::create_token(my_body(control)); + if(control.is_pipeline_stopped) { + u_helper::destroy_token(output_u); + set_end_of_input(); + return NULL; + } + return u_helper::cast_to_void_ptr(output_u); + } + +public: + concrete_filter(tbb::filter::mode filter_mode, const Body& body) : + filter(static_cast(filter_mode | filter_may_emit_null)), + my_body(body) + {} +}; + +// output +template +class 
concrete_filter: public filter { + const Body& my_body; + typedef token_helper::value> t_helper; + typedef typename t_helper::pointer t_pointer; + + void* operator()(void* input) __TBB_override { + t_pointer temp_input = t_helper::cast_from_void_ptr(input); + my_body(tbb::internal::move(t_helper::token(temp_input))); + t_helper::destroy_token(temp_input); + return NULL; + } + void finalize(void* input) __TBB_override { + t_pointer temp_input = t_helper::cast_from_void_ptr(input); + t_helper::destroy_token(temp_input); + } + +public: + concrete_filter(tbb::filter::mode filter_mode, const Body& body) : filter(filter_mode), my_body(body) {} +}; + +template +class concrete_filter: public filter { + const Body& my_body; + + void* operator()(void*) __TBB_override { + flow_control control; + my_body(control); + void* output = control.is_pipeline_stopped ? NULL : (void*)(intptr_t)-1; + return output; + } +public: + concrete_filter(filter::mode filter_mode, const Body& body) : filter(filter_mode), my_body(body) {} +}; + +//! The class that represents an object of the pipeline for parallel_pipeline(). +/** It primarily serves as RAII class that deletes heap-allocated filter instances. */ +class pipeline_proxy { + tbb::pipeline my_pipe; +public: + pipeline_proxy( const filter_t& filter_chain ); + ~pipeline_proxy() { + while( filter* f = my_pipe.filter_list ) + delete f; // filter destructor removes it from the pipeline + } + tbb::pipeline* operator->() { return &my_pipe; } +}; + +//! Abstract base class that represents a node in a parse tree underlying a filter_t. +/** These nodes are always heap-allocated and can be shared by filter_t objects. */ +class filter_node: tbb::internal::no_copy { + /** Count must be atomic because it is hidden state for user, but might be shared by threads. */ + tbb::atomic ref_count; +protected: + filter_node() { + ref_count = 0; +#ifdef __TBB_TEST_FILTER_NODE_COUNT + ++(__TBB_TEST_FILTER_NODE_COUNT); +#endif + } +public: + //! Add concrete_filter to pipeline + virtual void add_to( pipeline& ) = 0; + //! Increment reference count + void add_ref() { ++ref_count; } + //! Decrement reference count and delete if it becomes zero. + void remove_ref() { + __TBB_ASSERT(ref_count>0,"ref_count underflow"); + if( --ref_count==0 ) + delete this; + } + virtual ~filter_node() { +#ifdef __TBB_TEST_FILTER_NODE_COUNT + --(__TBB_TEST_FILTER_NODE_COUNT); +#endif + } +}; + +//! Node in parse tree representing result of make_filter. +template +class filter_node_leaf: public filter_node { + const tbb::filter::mode mode; + const Body body; + void add_to( pipeline& p ) __TBB_override { + concrete_filter* f = new concrete_filter(mode,body); + p.add_filter( *f ); + } +public: + filter_node_leaf( tbb::filter::mode m, const Body& b ) : mode(m), body(b) {} +}; + +//! Node in parse tree representing join of two filters. +class filter_node_join: public filter_node { + friend class filter_node; // to suppress GCC 3.2 warnings + filter_node& left; + filter_node& right; + ~filter_node_join() { + left.remove_ref(); + right.remove_ref(); + } + void add_to( pipeline& p ) __TBB_override { + left.add_to(p); + right.add_to(p); + } +public: + filter_node_join( filter_node& x, filter_node& y ) : left(x), right(y) { + left.add_ref(); + right.add_ref(); + } +}; + +} // namespace internal +//! @endcond + +//! 
Create a filter to participate in parallel_pipeline +template +filter_t make_filter(tbb::filter::mode mode, const Body& body) { + return new internal::filter_node_leaf(mode, body); +} + +template +filter_t operator& (const filter_t& left, const filter_t& right) { + __TBB_ASSERT(left.root,"cannot use default-constructed filter_t as left argument of '&'"); + __TBB_ASSERT(right.root,"cannot use default-constructed filter_t as right argument of '&'"); + return new internal::filter_node_join(*left.root,*right.root); +} + +//! Class representing a chain of type-safe pipeline filters +template +class filter_t { + typedef internal::filter_node filter_node; + filter_node* root; + filter_t( filter_node* root_ ) : root(root_) { + root->add_ref(); + } + friend class internal::pipeline_proxy; + template + friend filter_t make_filter(tbb::filter::mode, const Body& ); + template + friend filter_t operator& (const filter_t& , const filter_t& ); +public: + // TODO: add move-constructors, move-assignment, etc. where C++11 is available. + filter_t() : root(NULL) {} + filter_t( const filter_t& rhs ) : root(rhs.root) { + if( root ) root->add_ref(); + } + template + filter_t( tbb::filter::mode mode, const Body& body ) : + root( new internal::filter_node_leaf(mode, body) ) { + root->add_ref(); + } + + void operator=( const filter_t& rhs ) { + // Order of operations below carefully chosen so that reference counts remain correct + // in unlikely event that remove_ref throws exception. + filter_node* old = root; + root = rhs.root; + if( root ) root->add_ref(); + if( old ) old->remove_ref(); + } + ~filter_t() { + if( root ) root->remove_ref(); + } + void clear() { + // Like operator= with filter_t() on right side. + if( root ) { + filter_node* old = root; + root = NULL; + old->remove_ref(); + } + } +}; + +inline internal::pipeline_proxy::pipeline_proxy( const filter_t& filter_chain ) : my_pipe() { + __TBB_ASSERT( filter_chain.root, "cannot apply parallel_pipeline to default-constructed filter_t" ); + filter_chain.root->add_to(my_pipe); +} + +inline void parallel_pipeline(size_t max_number_of_live_tokens, const filter_t& filter_chain +#if __TBB_TASK_GROUP_CONTEXT + , tbb::task_group_context& context +#endif + ) { + internal::pipeline_proxy pipe(filter_chain); + // tbb::pipeline::run() is called via the proxy + pipe->run(max_number_of_live_tokens +#if __TBB_TASK_GROUP_CONTEXT + , context +#endif + ); +} + +#if __TBB_TASK_GROUP_CONTEXT +inline void parallel_pipeline(size_t max_number_of_live_tokens, const filter_t& filter_chain) { + tbb::task_group_context context; + parallel_pipeline(max_number_of_live_tokens, filter_chain, context); +} +#endif // __TBB_TASK_GROUP_CONTEXT + +} // interface6 + +using interface6::flow_control; +using interface6::filter_t; +using interface6::make_filter; +using interface6::parallel_pipeline; + +} // tbb + +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_pipeline_H_include_area + +#endif /* __TBB_pipeline_H */ diff --git a/ohos/arm64-v8a/include/tbb/queuing_mutex.h b/ohos/arm64-v8a/include/tbb/queuing_mutex.h new file mode 100644 index 00000000..c5c64993 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/queuing_mutex.h @@ -0,0 +1,113 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_queuing_mutex_H +#define __TBB_queuing_mutex_H + +#define __TBB_queuing_mutex_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + +#include +#include "atomic.h" +#include "tbb_profiling.h" + +namespace tbb { + +//! Queuing mutex with local-only spinning. +/** @ingroup synchronization */ +class queuing_mutex : internal::mutex_copy_deprecated_and_disabled { +public: + //! Construct unacquired mutex. + queuing_mutex() { + q_tail = NULL; +#if TBB_USE_THREADING_TOOLS + internal_construct(); +#endif + } + + //! The scoped locking pattern + /** It helps to avoid the common problem of forgetting to release lock. + It also nicely provides the "node" for queuing locks. */ + class scoped_lock: internal::no_copy { + //! Initialize fields to mean "no lock held". + void initialize() { + mutex = NULL; + going = 0; +#if TBB_USE_ASSERT + internal::poison_pointer(next); +#endif /* TBB_USE_ASSERT */ + } + + public: + //! Construct lock that has not acquired a mutex. + /** Equivalent to zero-initialization of *this. */ + scoped_lock() {initialize();} + + //! Acquire lock on given mutex. + scoped_lock( queuing_mutex& m ) { + initialize(); + acquire(m); + } + + //! Release lock (if lock is held). + ~scoped_lock() { + if( mutex ) release(); + } + + //! Acquire lock on given mutex. + void __TBB_EXPORTED_METHOD acquire( queuing_mutex& m ); + + //! Acquire lock on given mutex if free (i.e. non-blocking) + bool __TBB_EXPORTED_METHOD try_acquire( queuing_mutex& m ); + + //! Release lock. + void __TBB_EXPORTED_METHOD release(); + + private: + //! The pointer to the mutex owned, or NULL if not holding a mutex. + queuing_mutex* mutex; + + //! The pointer to the next competitor for a mutex + scoped_lock *next; + + //! The local spin-wait variable + /** Inverted (0 - blocked, 1 - acquired the mutex) for the sake of + zero-initialization. Defining it as an entire word instead of + a byte seems to help performance slightly. */ + uintptr_t going; + }; + + void __TBB_EXPORTED_METHOD internal_construct(); + + // Mutex traits + static const bool is_rw_mutex = false; + static const bool is_recursive_mutex = false; + static const bool is_fair_mutex = true; + +private: + //! The last competitor requesting the lock + atomic q_tail; + +}; + +__TBB_DEFINE_PROFILING_SET_NAME(queuing_mutex) + +} // namespace tbb + +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_queuing_mutex_H_include_area + +#endif /* __TBB_queuing_mutex_H */ diff --git a/ohos/arm64-v8a/include/tbb/queuing_rw_mutex.h b/ohos/arm64-v8a/include/tbb/queuing_rw_mutex.h new file mode 100644 index 00000000..b264141c --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/queuing_rw_mutex.h @@ -0,0 +1,154 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_queuing_rw_mutex_H +#define __TBB_queuing_rw_mutex_H + +#define __TBB_queuing_rw_mutex_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + +#include +#include "atomic.h" +#include "tbb_profiling.h" + +namespace tbb { + +//! Queuing reader-writer mutex with local-only spinning. +/** Adapted from Krieger, Stumm, et al. pseudocode at + http://www.eecg.toronto.edu/parallel/pubs_abs.html#Krieger_etal_ICPP93 + @ingroup synchronization */ +class queuing_rw_mutex : internal::mutex_copy_deprecated_and_disabled { +public: + //! Construct unacquired mutex. + queuing_rw_mutex() { + q_tail = NULL; +#if TBB_USE_THREADING_TOOLS + internal_construct(); +#endif + } + + //! Destructor asserts if the mutex is acquired, i.e. q_tail is non-NULL + ~queuing_rw_mutex() { +#if TBB_USE_ASSERT + __TBB_ASSERT( !q_tail, "destruction of an acquired mutex"); +#endif + } + + //! The scoped locking pattern + /** It helps to avoid the common problem of forgetting to release lock. + It also nicely provides the "node" for queuing locks. */ + class scoped_lock: internal::no_copy { + //! Initialize fields to mean "no lock held". + void initialize() { + my_mutex = NULL; + my_internal_lock = 0; + my_going = 0; +#if TBB_USE_ASSERT + my_state = 0xFF; // Set to invalid state + internal::poison_pointer(my_next); + internal::poison_pointer(my_prev); +#endif /* TBB_USE_ASSERT */ + } + + public: + //! Construct lock that has not acquired a mutex. + /** Equivalent to zero-initialization of *this. */ + scoped_lock() {initialize();} + + //! Acquire lock on given mutex. + scoped_lock( queuing_rw_mutex& m, bool write=true ) { + initialize(); + acquire(m,write); + } + + //! Release lock (if lock is held). + ~scoped_lock() { + if( my_mutex ) release(); + } + + //! Acquire lock on given mutex. + void acquire( queuing_rw_mutex& m, bool write=true ); + + //! Acquire lock on given mutex if free (i.e. non-blocking) + bool try_acquire( queuing_rw_mutex& m, bool write=true ); + + //! Release lock. + void release(); + + //! Upgrade reader to become a writer. + /** Returns whether the upgrade happened without releasing and re-acquiring the lock */ + bool upgrade_to_writer(); + + //! Downgrade writer to become a reader. + bool downgrade_to_reader(); + + private: + //! The pointer to the mutex owned, or NULL if not holding a mutex. + queuing_rw_mutex* my_mutex; + + //! The pointer to the previous and next competitors for a mutex + scoped_lock *__TBB_atomic my_prev, *__TBB_atomic my_next; + + typedef unsigned char state_t; + + //! State of the request: reader, writer, active reader, other service states + atomic my_state; + + //! The local spin-wait variable + /** Corresponds to "spin" in the pseudocode but inverted for the sake of zero-initialization */ + unsigned char __TBB_atomic my_going; + + //! A tiny internal lock + unsigned char my_internal_lock; + + //! Acquire the internal lock + void acquire_internal_lock(); + + //! Try to acquire the internal lock + /** Returns true if lock was successfully acquired. */ + bool try_acquire_internal_lock(); + + //! 
Release the internal lock + void release_internal_lock(); + + //! Wait for internal lock to be released + void wait_for_release_of_internal_lock(); + + //! A helper function + void unblock_or_wait_on_internal_lock( uintptr_t ); + }; + + void __TBB_EXPORTED_METHOD internal_construct(); + + // Mutex traits + static const bool is_rw_mutex = true; + static const bool is_recursive_mutex = false; + static const bool is_fair_mutex = true; + +private: + //! The last competitor requesting the lock + atomic q_tail; + +}; + +__TBB_DEFINE_PROFILING_SET_NAME(queuing_rw_mutex) + +} // namespace tbb + +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_queuing_rw_mutex_H_include_area + +#endif /* __TBB_queuing_rw_mutex_H */ diff --git a/ohos/arm64-v8a/include/tbb/reader_writer_lock.h b/ohos/arm64-v8a/include/tbb/reader_writer_lock.h new file mode 100644 index 00000000..509ff7b7 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/reader_writer_lock.h @@ -0,0 +1,246 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#include "internal/_deprecated_header_message_guard.h" + +#if !defined(__TBB_show_deprecation_message_reader_writer_lock_H) && defined(__TBB_show_deprecated_header_message) +#define __TBB_show_deprecation_message_reader_writer_lock_H +#pragma message("TBB Warning: tbb/reader_writer_lock.h is deprecated. For details, please see Deprecated Features appendix in the TBB reference manual.") +#endif + +#if defined(__TBB_show_deprecated_header_message) +#undef __TBB_show_deprecated_header_message +#endif + +#ifndef __TBB_reader_writer_lock_H +#define __TBB_reader_writer_lock_H + +#define __TBB_reader_writer_lock_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + +#include "tbb_thread.h" +#include "tbb_allocator.h" +#include "atomic.h" + +namespace tbb { +namespace interface5 { +//! Writer-preference reader-writer lock with local-only spinning on readers. +/** Loosely adapted from Mellor-Crummey and Scott pseudocode at + http://www.cs.rochester.edu/research/synchronization/pseudocode/rw.html#s_wp + @ingroup synchronization */ + class __TBB_DEPRECATED_IN_VERBOSE_MODE_MSG("tbb::reader_writer_lock is deprecated, use std::shared_mutex") + reader_writer_lock : tbb::internal::no_copy { + public: + friend class scoped_lock; + friend class scoped_lock_read; + //! Status type for nodes associated with lock instances + /** waiting_nonblocking: the wait state for nonblocking lock + instances; for writes, these transition straight to active + states; for reads, these are unused. + + waiting: the start and spin state for all lock instances; these will + transition to active state when appropriate. Non-blocking write locks + transition from this state to waiting_nonblocking immediately. 
+ + active: the active state means that the lock instance holds + the lock; it will transition to invalid state during node deletion + + invalid: the end state for all nodes; this is set in the + destructor so if we encounter this state, we are looking at + memory that has already been freed + + The state diagrams below describe the status transitions. + Single arrows indicate that the thread that owns the node is + responsible for the transition; double arrows indicate that + any thread could make the transition. + + State diagram for scoped_lock status: + + waiting ----------> waiting_nonblocking + | _____________/ | + V V V + active -----------------> invalid + + State diagram for scoped_lock_read status: + + waiting + | + V + active ----------------->invalid + + */ + enum status_t { waiting_nonblocking, waiting, active, invalid }; + + //! Constructs a new reader_writer_lock + reader_writer_lock() { + internal_construct(); + } + + //! Destructs a reader_writer_lock object + ~reader_writer_lock() { + internal_destroy(); + } + + //! The scoped lock pattern for write locks + /** Scoped locks help avoid the common problem of forgetting to release the lock. + This type also serves as the node for queuing locks. */ + class scoped_lock : tbb::internal::no_copy { + public: + friend class reader_writer_lock; + + //! Construct with blocking attempt to acquire write lock on the passed-in lock + scoped_lock(reader_writer_lock& lock) { + internal_construct(lock); + } + + //! Destructor, releases the write lock + ~scoped_lock() { + internal_destroy(); + } + + void* operator new(size_t s) { + return tbb::internal::allocate_via_handler_v3(s); + } + void operator delete(void* p) { + tbb::internal::deallocate_via_handler_v3(p); + } + + private: + //! The pointer to the mutex to lock + reader_writer_lock *mutex; + //! The next queued competitor for the mutex + scoped_lock* next; + //! Status flag of the thread associated with this node + atomic status; + + //! Construct scoped_lock that is not holding lock + scoped_lock(); + + void __TBB_EXPORTED_METHOD internal_construct(reader_writer_lock&); + void __TBB_EXPORTED_METHOD internal_destroy(); + }; + + //! The scoped lock pattern for read locks + class scoped_lock_read : tbb::internal::no_copy { + public: + friend class reader_writer_lock; + + //! Construct with blocking attempt to acquire read lock on the passed-in lock + scoped_lock_read(reader_writer_lock& lock) { + internal_construct(lock); + } + + //! Destructor, releases the read lock + ~scoped_lock_read() { + internal_destroy(); + } + + void* operator new(size_t s) { + return tbb::internal::allocate_via_handler_v3(s); + } + void operator delete(void* p) { + tbb::internal::deallocate_via_handler_v3(p); + } + + private: + //! The pointer to the mutex to lock + reader_writer_lock *mutex; + //! The next queued competitor for the mutex + scoped_lock_read *next; + //! Status flag of the thread associated with this node + atomic status; + + //! Construct scoped_lock_read that is not holding lock + scoped_lock_read(); + + void __TBB_EXPORTED_METHOD internal_construct(reader_writer_lock&); + void __TBB_EXPORTED_METHOD internal_destroy(); + }; + + //! Acquires the reader_writer_lock for write. + /** If the lock is currently held in write mode by another + context, the writer will block by spinning on a local + variable. Exceptions thrown: improper_lock The context tries + to acquire a reader_writer_lock that it already has write + ownership of.*/ + void __TBB_EXPORTED_METHOD lock(); + + //! 
Tries to acquire the reader_writer_lock for write. + /** This function does not block. Return Value: True or false, + depending on whether the lock is acquired or not. If the lock + is already held by this acquiring context, try_lock() returns + false. */ + bool __TBB_EXPORTED_METHOD try_lock(); + + //! Acquires the reader_writer_lock for read. + /** If the lock is currently held by a writer, this reader will + block and wait until the writers are done. Exceptions thrown: + improper_lock The context tries to acquire a + reader_writer_lock that it already has write ownership of. */ + void __TBB_EXPORTED_METHOD lock_read(); + + //! Tries to acquire the reader_writer_lock for read. + /** This function does not block. Return Value: True or false, + depending on whether the lock is acquired or not. */ + bool __TBB_EXPORTED_METHOD try_lock_read(); + + //! Releases the reader_writer_lock + void __TBB_EXPORTED_METHOD unlock(); + + private: + void __TBB_EXPORTED_METHOD internal_construct(); + void __TBB_EXPORTED_METHOD internal_destroy(); + + //! Attempts to acquire write lock + /** If unavailable, spins in blocking case, returns false in non-blocking case. */ + bool start_write(scoped_lock *); + //! Sets writer_head to w and attempts to unblock + void set_next_writer(scoped_lock *w); + //! Relinquishes write lock to next waiting writer or group of readers + void end_write(scoped_lock *); + //! Checks if current thread holds write lock + bool is_current_writer(); + + //! Attempts to acquire read lock + /** If unavailable, spins in blocking case, returns false in non-blocking case. */ + void start_read(scoped_lock_read *); + //! Unblocks pending readers + void unblock_readers(); + //! Relinquishes read lock by decrementing counter; last reader wakes pending writer + void end_read(); + + //! The list of pending readers + atomic reader_head; + //! The list of pending writers + atomic writer_head; + //! The last node in the list of pending writers + atomic writer_tail; + //! Writer that owns the mutex; tbb_thread::id() otherwise. + tbb_thread::id my_current_writer; + //! Status of mutex + atomic rdr_count_and_flags; // used with __TBB_AtomicOR, which assumes uintptr_t +}; + +} // namespace interface5 + +using interface5::reader_writer_lock; + +} // namespace tbb + +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_reader_writer_lock_H_include_area + +#endif /* __TBB_reader_writer_lock_H */ diff --git a/ohos/arm64-v8a/include/tbb/recursive_mutex.h b/ohos/arm64-v8a/include/tbb/recursive_mutex.h new file mode 100644 index 00000000..d5be2c4f --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/recursive_mutex.h @@ -0,0 +1,248 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +#include "internal/_deprecated_header_message_guard.h" + +#if !defined(__TBB_show_deprecation_message_recursive_mutex_H) && defined(__TBB_show_deprecated_header_message) +#define __TBB_show_deprecation_message_recursive_mutex_H +#pragma message("TBB Warning: tbb/recursive_mutex.h is deprecated. For details, please see Deprecated Features appendix in the TBB reference manual.") +#endif + +#if defined(__TBB_show_deprecated_header_message) +#undef __TBB_show_deprecated_header_message +#endif + +#ifndef __TBB_recursive_mutex_H +#define __TBB_recursive_mutex_H + +#define __TBB_recursive_mutex_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + +#if _WIN32||_WIN64 +#include "machine/windows_api.h" +#else +#include +#endif /* _WIN32||_WIN64 */ + +#include +#include "aligned_space.h" +#include "tbb_stddef.h" +#include "tbb_profiling.h" + +namespace tbb { +//! Mutex that allows recursive mutex acquisition. +/** Mutex that allows recursive mutex acquisition. + @ingroup synchronization */ +class __TBB_DEPRECATED_IN_VERBOSE_MODE_MSG("tbb::recursive_mutex is deprecated, use std::recursive_mutex") +recursive_mutex : internal::mutex_copy_deprecated_and_disabled { +public: + //! Construct unacquired recursive_mutex. + recursive_mutex() { +#if TBB_USE_ASSERT || TBB_USE_THREADING_TOOLS + internal_construct(); +#else + #if _WIN32||_WIN64 + InitializeCriticalSectionEx(&impl, 4000, 0); + #else + pthread_mutexattr_t mtx_attr; + int error_code = pthread_mutexattr_init( &mtx_attr ); + if( error_code ) + tbb::internal::handle_perror(error_code,"recursive_mutex: pthread_mutexattr_init failed"); + + pthread_mutexattr_settype( &mtx_attr, PTHREAD_MUTEX_RECURSIVE ); + error_code = pthread_mutex_init( &impl, &mtx_attr ); + if( error_code ) + tbb::internal::handle_perror(error_code,"recursive_mutex: pthread_mutex_init failed"); + + pthread_mutexattr_destroy( &mtx_attr ); + #endif /* _WIN32||_WIN64*/ +#endif /* TBB_USE_ASSERT */ + }; + + ~recursive_mutex() { +#if TBB_USE_ASSERT + internal_destroy(); +#else + #if _WIN32||_WIN64 + DeleteCriticalSection(&impl); + #else + pthread_mutex_destroy(&impl); + + #endif /* _WIN32||_WIN64 */ +#endif /* TBB_USE_ASSERT */ + }; + + class scoped_lock; + friend class scoped_lock; + + //! The scoped locking pattern + /** It helps to avoid the common problem of forgetting to release lock. + It also nicely provides the "node" for queuing locks. */ + class scoped_lock: internal::no_copy { + public: + //! Construct lock that has not acquired a recursive_mutex. + scoped_lock() : my_mutex(NULL) {}; + + //! Acquire lock on given mutex. + scoped_lock( recursive_mutex& mutex ) { +#if TBB_USE_ASSERT + my_mutex = &mutex; +#endif /* TBB_USE_ASSERT */ + acquire( mutex ); + } + + //! Release lock (if lock is held). + ~scoped_lock() { + if( my_mutex ) + release(); + } + + //! Acquire lock on given mutex. + void acquire( recursive_mutex& mutex ) { +#if TBB_USE_ASSERT + internal_acquire( mutex ); +#else + my_mutex = &mutex; + mutex.lock(); +#endif /* TBB_USE_ASSERT */ + } + + //! Try acquire lock on given recursive_mutex. + bool try_acquire( recursive_mutex& mutex ) { +#if TBB_USE_ASSERT + return internal_try_acquire( mutex ); +#else + bool result = mutex.try_lock(); + if( result ) + my_mutex = &mutex; + return result; +#endif /* TBB_USE_ASSERT */ + } + + //! Release lock + void release() { +#if TBB_USE_ASSERT + internal_release(); +#else + my_mutex->unlock(); + my_mutex = NULL; +#endif /* TBB_USE_ASSERT */ + } + + private: + //! 
The pointer to the current recursive_mutex to work + recursive_mutex* my_mutex; + + //! All checks from acquire using mutex.state were moved here + void __TBB_EXPORTED_METHOD internal_acquire( recursive_mutex& m ); + + //! All checks from try_acquire using mutex.state were moved here + bool __TBB_EXPORTED_METHOD internal_try_acquire( recursive_mutex& m ); + + //! All checks from release using mutex.state were moved here + void __TBB_EXPORTED_METHOD internal_release(); + + friend class recursive_mutex; + }; + + // Mutex traits + static const bool is_rw_mutex = false; + static const bool is_recursive_mutex = true; + static const bool is_fair_mutex = false; + + // C++0x compatibility interface + + //! Acquire lock + void lock() { +#if TBB_USE_ASSERT + aligned_space tmp; + new(tmp.begin()) scoped_lock(*this); +#else + #if _WIN32||_WIN64 + EnterCriticalSection(&impl); + #else + int error_code = pthread_mutex_lock(&impl); + if( error_code ) + tbb::internal::handle_perror(error_code,"recursive_mutex: pthread_mutex_lock failed"); + #endif /* _WIN32||_WIN64 */ +#endif /* TBB_USE_ASSERT */ + } + + //! Try acquiring lock (non-blocking) + /** Return true if lock acquired; false otherwise. */ + bool try_lock() { +#if TBB_USE_ASSERT + aligned_space tmp; + return (new(tmp.begin()) scoped_lock)->internal_try_acquire(*this); +#else + #if _WIN32||_WIN64 + return TryEnterCriticalSection(&impl)!=0; + #else + return pthread_mutex_trylock(&impl)==0; + #endif /* _WIN32||_WIN64 */ +#endif /* TBB_USE_ASSERT */ + } + + //! Release lock + void unlock() { +#if TBB_USE_ASSERT + aligned_space tmp; + scoped_lock& s = *tmp.begin(); + s.my_mutex = this; + s.internal_release(); +#else + #if _WIN32||_WIN64 + LeaveCriticalSection(&impl); + #else + pthread_mutex_unlock(&impl); + #endif /* _WIN32||_WIN64 */ +#endif /* TBB_USE_ASSERT */ + } + + //! Return native_handle + #if _WIN32||_WIN64 + typedef LPCRITICAL_SECTION native_handle_type; + #else + typedef pthread_mutex_t* native_handle_type; + #endif + native_handle_type native_handle() { return (native_handle_type) &impl; } + +private: +#if _WIN32||_WIN64 + CRITICAL_SECTION impl; + enum state_t { + INITIALIZED=0x1234, + DESTROYED=0x789A, + } state; +#else + pthread_mutex_t impl; +#endif /* _WIN32||_WIN64 */ + + //! All checks from mutex constructor using mutex.state were moved here + void __TBB_EXPORTED_METHOD internal_construct(); + + //! All checks from mutex destructor using mutex.state were moved here + void __TBB_EXPORTED_METHOD internal_destroy(); +}; + +__TBB_DEFINE_PROFILING_SET_NAME(recursive_mutex) + +} // namespace tbb + +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_recursive_mutex_H_include_area + +#endif /* __TBB_recursive_mutex_H */ diff --git a/ohos/arm64-v8a/include/tbb/runtime_loader.h b/ohos/arm64-v8a/include/tbb/runtime_loader.h new file mode 100644 index 00000000..9aaeea5b --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/runtime_loader.h @@ -0,0 +1,193 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +#include "internal/_deprecated_header_message_guard.h" + +#if !defined(__TBB_show_deprecation_message_runtime_loader_H) && defined(__TBB_show_deprecated_header_message) +#define __TBB_show_deprecation_message_runtime_loader_H +#pragma message("TBB Warning: tbb/runtime_loader.h is deprecated. For details, please see Deprecated Features appendix in the TBB reference manual.") +#endif + +#if defined(__TBB_show_deprecated_header_message) +#undef __TBB_show_deprecated_header_message +#endif + +#ifndef __TBB_runtime_loader_H +#define __TBB_runtime_loader_H + +#define __TBB_runtime_loader_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + +#if ! TBB_PREVIEW_RUNTIME_LOADER + #error Set TBB_PREVIEW_RUNTIME_LOADER to include runtime_loader.h +#endif + +#include "tbb_stddef.h" +#include + +#if _MSC_VER + #if ! __TBB_NO_IMPLICIT_LINKAGE + #ifdef _DEBUG + #pragma comment( linker, "/nodefaultlib:tbb_debug.lib" ) + #pragma comment( linker, "/defaultlib:tbbproxy_debug.lib" ) + #else + #pragma comment( linker, "/nodefaultlib:tbb.lib" ) + #pragma comment( linker, "/defaultlib:tbbproxy.lib" ) + #endif + #endif +#endif + +namespace tbb { + +namespace interface6 { + +//! Load TBB at runtime. +/*! + +\b Usage: + +In source code: + +\code +#include "tbb/runtime_loader.h" + +char const * path[] = { "/lib/ia32", NULL }; +tbb::runtime_loader loader( path ); + +// Now use TBB. +\endcode + +Link with \c tbbproxy.lib (or \c libtbbproxy.a) instead of \c tbb.lib (\c libtbb.dylib, +\c libtbb.so). + +TBB library will be loaded at runtime from \c /lib/ia32 directory. + +\b Attention: + +All \c runtime_loader objects (in the same module, i.e. exe or dll) share some global state. +The most noticeable piece of global state is loaded TBB library. +There are some implications: + + - Only one TBB library can be loaded per module. + + - If one object has already loaded TBB library, another object will not load TBB. + If the loaded TBB library is suitable for the second object, both will use TBB + cooperatively, otherwise the second object will report an error. + + - \c runtime_loader objects will not work (correctly) in parallel due to absence of + synchronization. + +*/ + +class __TBB_DEPRECATED_IN_VERBOSE_MODE runtime_loader : tbb::internal::no_copy { + + public: + + //! Error mode constants. + enum error_mode { + em_status, //!< Save status of operation and continue. + em_throw, //!< Throw an exception of tbb::runtime_loader::error_code type. + em_abort //!< Print message to \c stderr and call \c abort(). + }; // error_mode + + //! Error codes. + enum error_code { + ec_ok, //!< No errors. + ec_bad_call, //!< Invalid function call (e. g. load() called when TBB is already loaded). + ec_bad_arg, //!< Invalid argument passed. + ec_bad_lib, //!< Invalid library found (e. g. \c TBB_runtime_version symbol not found). + ec_bad_ver, //!< TBB found but version is not suitable. + ec_no_lib //!< No suitable TBB library found. + }; // error_code + + //! Initialize object but do not load TBB. + runtime_loader( error_mode mode = em_abort ); + + //! Initialize object and load TBB. + /*! + See load() for details. + + If error mode is \c em_status, call status() to check whether TBB was loaded or not. + */ + runtime_loader( + char const * path[], //!< List of directories to search TBB in. + int min_ver = TBB_INTERFACE_VERSION, //!< Minimal suitable version of TBB. + int max_ver = INT_MAX, //!< Maximal suitable version of TBB. + error_mode mode = em_abort //!< Error mode for this object. + ); + + //! Destroy object. 
+ ~runtime_loader(); + + //! Load TBB. + /*! + The method searches the directories specified in \c path[] array for the TBB library. + When the library is found, it is loaded and its version is checked. If the version is + not suitable, the library is unloaded, and the search continues. + + \b Note: + + For security reasons, avoid using relative directory names. For example, never load + TBB from current (\c "."), parent (\c "..") or any other relative directory (like + \c "lib" ). Use only absolute directory names (e. g. "/usr/local/lib"). + + For the same security reasons, avoid using system default directories (\c "") on + Windows. (See http://www.microsoft.com/technet/security/advisory/2269637.mspx for + details.) + + Neglecting these rules may cause your program to execute 3-rd party malicious code. + + \b Errors: + - \c ec_bad_call - TBB already loaded by this object. + - \c ec_bad_arg - \p min_ver and/or \p max_ver negative or zero, + or \p min_ver > \p max_ver. + - \c ec_bad_ver - TBB of unsuitable version already loaded by another object. + - \c ec_no_lib - No suitable library found. + */ + error_code + load( + char const * path[], //!< List of directories to search TBB in. + int min_ver = TBB_INTERFACE_VERSION, //!< Minimal suitable version of TBB. + int max_ver = INT_MAX //!< Maximal suitable version of TBB. + + ); + + + //! Report status. + /*! + If error mode is \c em_status, the function returns status of the last operation. + */ + error_code status(); + + private: + + error_mode const my_mode; + error_code my_status; + bool my_loaded; + +}; // class runtime_loader + +} // namespace interface6 + +using interface6::runtime_loader; + +} // namespace tbb + +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_runtime_loader_H_include_area + +#endif /* __TBB_runtime_loader_H */ + diff --git a/ohos/arm64-v8a/include/tbb/scalable_allocator.h b/ohos/arm64-v8a/include/tbb/scalable_allocator.h new file mode 100644 index 00000000..f1fc98ed --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/scalable_allocator.h @@ -0,0 +1,388 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_scalable_allocator_H +#define __TBB_scalable_allocator_H +/** @file */ + +#include /* Need ptrdiff_t and size_t from here. */ +#if !_MSC_VER +#include /* Need intptr_t from here. */ +#endif + +#if !defined(__cplusplus) && __ICC==1100 + #pragma warning (push) + #pragma warning (disable: 991) +#endif + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#if _MSC_VER >= 1400 +#define __TBB_EXPORTED_FUNC __cdecl +#else +#define __TBB_EXPORTED_FUNC +#endif + +/** The "malloc" analogue to allocate block of memory of size bytes. + * @ingroup memory_allocation */ +void * __TBB_EXPORTED_FUNC scalable_malloc (size_t size); + +/** The "free" analogue to discard a previously allocated piece of memory. + @ingroup memory_allocation */ +void __TBB_EXPORTED_FUNC scalable_free (void* ptr); + +/** The "realloc" analogue complementing scalable_malloc. 
+ @ingroup memory_allocation */ +void * __TBB_EXPORTED_FUNC scalable_realloc (void* ptr, size_t size); + +/** The "calloc" analogue complementing scalable_malloc. + @ingroup memory_allocation */ +void * __TBB_EXPORTED_FUNC scalable_calloc (size_t nobj, size_t size); + +/** The "posix_memalign" analogue. + @ingroup memory_allocation */ +int __TBB_EXPORTED_FUNC scalable_posix_memalign (void** memptr, size_t alignment, size_t size); + +/** The "_aligned_malloc" analogue. + @ingroup memory_allocation */ +void * __TBB_EXPORTED_FUNC scalable_aligned_malloc (size_t size, size_t alignment); + +/** The "_aligned_realloc" analogue. + @ingroup memory_allocation */ +void * __TBB_EXPORTED_FUNC scalable_aligned_realloc (void* ptr, size_t size, size_t alignment); + +/** The "_aligned_free" analogue. + @ingroup memory_allocation */ +void __TBB_EXPORTED_FUNC scalable_aligned_free (void* ptr); + +/** The analogue of _msize/malloc_size/malloc_usable_size. + Returns the usable size of a memory block previously allocated by scalable_*, + or 0 (zero) if ptr does not point to such a block. + @ingroup memory_allocation */ +size_t __TBB_EXPORTED_FUNC scalable_msize (void* ptr); + +/* Results for scalable_allocation_* functions */ +typedef enum { + TBBMALLOC_OK, + TBBMALLOC_INVALID_PARAM, + TBBMALLOC_UNSUPPORTED, + TBBMALLOC_NO_MEMORY, + TBBMALLOC_NO_EFFECT +} ScalableAllocationResult; + +/* Setting TBB_MALLOC_USE_HUGE_PAGES environment variable to 1 enables huge pages. + scalable_allocation_mode call has priority over environment variable. */ +typedef enum { + TBBMALLOC_USE_HUGE_PAGES, /* value turns using huge pages on and off */ + /* deprecated, kept for backward compatibility only */ + USE_HUGE_PAGES = TBBMALLOC_USE_HUGE_PAGES, + /* try to limit memory consumption value (Bytes), clean internal buffers + if limit is exceeded, but not prevents from requesting memory from OS */ + TBBMALLOC_SET_SOFT_HEAP_LIMIT, + /* Lower bound for the size (Bytes), that is interpreted as huge + * and not released during regular cleanup operations. */ + TBBMALLOC_SET_HUGE_SIZE_THRESHOLD +} AllocationModeParam; + +/** Set TBB allocator-specific allocation modes. + @ingroup memory_allocation */ +int __TBB_EXPORTED_FUNC scalable_allocation_mode(int param, intptr_t value); + +typedef enum { + /* Clean internal allocator buffers for all threads. + Returns TBBMALLOC_NO_EFFECT if no buffers cleaned, + TBBMALLOC_OK if some memory released from buffers. */ + TBBMALLOC_CLEAN_ALL_BUFFERS, + /* Clean internal allocator buffer for current thread only. + Return values same as for TBBMALLOC_CLEAN_ALL_BUFFERS. */ + TBBMALLOC_CLEAN_THREAD_BUFFERS +} ScalableAllocationCmd; + +/** Call TBB allocator-specific commands. + @ingroup memory_allocation */ +int __TBB_EXPORTED_FUNC scalable_allocation_command(int cmd, void *param); + +#ifdef __cplusplus +} /* extern "C" */ +#endif /* __cplusplus */ + +#ifdef __cplusplus + +//! The namespace rml contains components of low-level memory pool interface. 
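+/** A minimal usage sketch for the low-level pool interface declared below
+    (illustrative only; the raw allocation callbacks and the pool_id value are
+    placeholders, not part of the library):
+\code
+#include "tbb/scalable_allocator.h"
+#include <cstdlib>
+
+// Callbacks that hand raw memory to the pool and take it back.
+static void* raw_alloc( intptr_t pool_id, size_t& bytes ) { return std::malloc( bytes ); }
+static int   raw_free ( intptr_t pool_id, void* ptr, size_t bytes ) { std::free( ptr ); return 0; }
+
+void pool_example() {
+    rml::MemPoolPolicy policy( &raw_alloc, &raw_free );
+    rml::MemoryPool* pool = NULL;
+    if( rml::pool_create_v1( 0, &policy, &pool ) == rml::POOL_OK ) {
+        void* p = rml::pool_malloc( pool, 64 );   // allocate from the pool
+        rml::pool_free( pool, p );                // return the block to the pool
+        rml::pool_destroy( pool );                // releases memory obtained via raw_alloc
+    }
+}
+\endcode */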
+namespace rml { +class MemoryPool; + +typedef void *(*rawAllocType)(intptr_t pool_id, size_t &bytes); +// returns non-zero in case of error +typedef int (*rawFreeType)(intptr_t pool_id, void* raw_ptr, size_t raw_bytes); + +/* +MemPoolPolicy extension must be compatible with such structure fields layout + +struct MemPoolPolicy { + rawAllocType pAlloc; + rawFreeType pFree; + size_t granularity; // granularity of pAlloc allocations +}; +*/ + +struct MemPoolPolicy { + enum { + TBBMALLOC_POOL_VERSION = 1 + }; + + rawAllocType pAlloc; + rawFreeType pFree; + // granularity of pAlloc allocations. 0 means default used. + size_t granularity; + int version; + // all memory consumed at 1st pAlloc call and never returned, + // no more pAlloc calls after 1st + unsigned fixedPool : 1, + // memory consumed but returned only at pool termination + keepAllMemory : 1, + reserved : 30; + + MemPoolPolicy(rawAllocType pAlloc_, rawFreeType pFree_, + size_t granularity_ = 0, bool fixedPool_ = false, + bool keepAllMemory_ = false) : + pAlloc(pAlloc_), pFree(pFree_), granularity(granularity_), version(TBBMALLOC_POOL_VERSION), + fixedPool(fixedPool_), keepAllMemory(keepAllMemory_), + reserved(0) {} +}; + +// enums have same values as appropriate enums from ScalableAllocationResult +// TODO: use ScalableAllocationResult in pool_create directly +enum MemPoolError { + // pool created successfully + POOL_OK = TBBMALLOC_OK, + // invalid policy parameters found + INVALID_POLICY = TBBMALLOC_INVALID_PARAM, + // requested pool policy is not supported by allocator library + UNSUPPORTED_POLICY = TBBMALLOC_UNSUPPORTED, + // lack of memory during pool creation + NO_MEMORY = TBBMALLOC_NO_MEMORY, + // action takes no effect + NO_EFFECT = TBBMALLOC_NO_EFFECT +}; + +MemPoolError pool_create_v1(intptr_t pool_id, const MemPoolPolicy *policy, + rml::MemoryPool **pool); + +bool pool_destroy(MemoryPool* memPool); +void *pool_malloc(MemoryPool* memPool, size_t size); +void *pool_realloc(MemoryPool* memPool, void *object, size_t size); +void *pool_aligned_malloc(MemoryPool* mPool, size_t size, size_t alignment); +void *pool_aligned_realloc(MemoryPool* mPool, void *ptr, size_t size, size_t alignment); +bool pool_reset(MemoryPool* memPool); +bool pool_free(MemoryPool *memPool, void *object); +MemoryPool *pool_identify(void *object); +size_t pool_msize(MemoryPool *memPool, void *object); + +} // namespace rml + +#include /* To use new with the placement argument */ + +/* Ensure that including this header does not cause implicit linkage with TBB */ +#ifndef __TBB_NO_IMPLICIT_LINKAGE + #define __TBB_NO_IMPLICIT_LINKAGE 1 + #include "tbb_stddef.h" + #undef __TBB_NO_IMPLICIT_LINKAGE +#else + #include "tbb_stddef.h" +#endif + +#if __TBB_ALLOCATOR_CONSTRUCT_VARIADIC +#include // std::forward +#endif + +#if __TBB_CPP17_MEMORY_RESOURCE_PRESENT +#include +#endif + +namespace tbb { + +#if _MSC_VER && !defined(__INTEL_COMPILER) + // Workaround for erroneous "unreferenced parameter" warning in method destroy. + #pragma warning (push) + #pragma warning (disable: 4100) +#endif + +//! @cond INTERNAL +namespace internal { + +#if TBB_USE_EXCEPTIONS +// forward declaration is for inlining prevention +template __TBB_NOINLINE( void throw_exception(const E &e) ); +#endif + +// keep throw in a separate function to prevent code bloat +template +void throw_exception(const E &e) { + __TBB_THROW(e); +} + +} // namespace internal +//! @endcond + +//! 
Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5 +/** The members are ordered the same way they are in section 20.4.1 + of the ISO C++ standard. + @ingroup memory_allocation */ +template +class scalable_allocator { +public: + typedef typename internal::allocator_type::value_type value_type; + typedef value_type* pointer; + typedef const value_type* const_pointer; + typedef value_type& reference; + typedef const value_type& const_reference; + typedef size_t size_type; + typedef ptrdiff_t difference_type; + template struct rebind { + typedef scalable_allocator other; + }; + + scalable_allocator() throw() {} + scalable_allocator( const scalable_allocator& ) throw() {} + template scalable_allocator(const scalable_allocator&) throw() {} + + pointer address(reference x) const {return &x;} + const_pointer address(const_reference x) const {return &x;} + + //! Allocate space for n objects. + pointer allocate( size_type n, const void* /*hint*/ =0 ) { + pointer p = static_cast( scalable_malloc( n * sizeof(value_type) ) ); + if (!p) + internal::throw_exception(std::bad_alloc()); + return p; + } + + //! Free previously allocated block of memory + void deallocate( pointer p, size_type ) { + scalable_free( p ); + } + + //! Largest value for which method allocate might succeed. + size_type max_size() const throw() { + size_type absolutemax = static_cast(-1) / sizeof (value_type); + return (absolutemax > 0 ? absolutemax : 1); + } +#if __TBB_ALLOCATOR_CONSTRUCT_VARIADIC + template + void construct(U *p, Args&&... args) + { ::new((void *)p) U(std::forward(args)...); } +#else /* __TBB_ALLOCATOR_CONSTRUCT_VARIADIC */ +#if __TBB_CPP11_RVALUE_REF_PRESENT + void construct( pointer p, value_type&& value ) { ::new((void*)(p)) value_type( std::move( value ) ); } +#endif + void construct( pointer p, const value_type& value ) {::new((void*)(p)) value_type(value);} +#endif /* __TBB_ALLOCATOR_CONSTRUCT_VARIADIC */ + void destroy( pointer p ) {p->~value_type();} +}; + +#if _MSC_VER && !defined(__INTEL_COMPILER) + #pragma warning (pop) +#endif /* warning 4100 is back */ + +//! Analogous to std::allocator, as defined in ISO C++ Standard, Section 20.4.1 +/** @ingroup memory_allocation */ +template<> +class scalable_allocator { +public: + typedef void* pointer; + typedef const void* const_pointer; + typedef void value_type; + template struct rebind { + typedef scalable_allocator other; + }; +}; + +template +inline bool operator==( const scalable_allocator&, const scalable_allocator& ) {return true;} + +template +inline bool operator!=( const scalable_allocator&, const scalable_allocator& ) {return false;} + +#if __TBB_CPP17_MEMORY_RESOURCE_PRESENT + +namespace internal { + +//! C++17 memory resource implementation for scalable allocator +//! ISO C++ Section 23.12.2 +class scalable_resource_impl : public std::pmr::memory_resource { +private: + void* do_allocate(size_t bytes, size_t alignment) override { + void* ptr = scalable_aligned_malloc( bytes, alignment ); + if (!ptr) { + throw_exception(std::bad_alloc()); + } + return ptr; + } + + void do_deallocate(void* ptr, size_t /*bytes*/, size_t /*alignment*/) override { + scalable_free(ptr); + } + + //! Memory allocated by one instance of scalable_resource_impl could be deallocated by any + //! other instance of this class + bool do_is_equal(const std::pmr::memory_resource& other) const noexcept override { + return this == &other || +#if __TBB_USE_OPTIONAL_RTTI + dynamic_cast(&other) != NULL; +#else + false; +#endif + } +}; + +} // namespace internal + +//! 
Global scalable allocator memory resource provider +inline std::pmr::memory_resource* scalable_memory_resource() noexcept { + static tbb::internal::scalable_resource_impl scalable_res; + return &scalable_res; +} + +#endif /* __TBB_CPP17_MEMORY_RESOURCE_PRESENT */ + +} // namespace tbb + +#if _MSC_VER + #if (__TBB_BUILD || __TBBMALLOC_BUILD) && !defined(__TBBMALLOC_NO_IMPLICIT_LINKAGE) + #define __TBBMALLOC_NO_IMPLICIT_LINKAGE 1 + #endif + + #if !__TBBMALLOC_NO_IMPLICIT_LINKAGE + #ifdef _DEBUG + #pragma comment(lib, "tbbmalloc_debug.lib") + #else + #pragma comment(lib, "tbbmalloc.lib") + #endif + #endif + + +#endif + +#endif /* __cplusplus */ + +#if !defined(__cplusplus) && __ICC==1100 + #pragma warning (pop) +#endif /* ICC 11.0 warning 991 is back */ + +#endif /* __TBB_scalable_allocator_H */ diff --git a/ohos/arm64-v8a/include/tbb/spin_mutex.h b/ohos/arm64-v8a/include/tbb/spin_mutex.h new file mode 100644 index 00000000..56348c9d --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/spin_mutex.h @@ -0,0 +1,214 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_spin_mutex_H +#define __TBB_spin_mutex_H + +#define __TBB_spin_mutex_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + +#include +#include +#include "aligned_space.h" +#include "tbb_stddef.h" +#include "tbb_machine.h" +#include "tbb_profiling.h" +#include "internal/_mutex_padding.h" + +namespace tbb { + +//! A lock that occupies a single byte. +/** A spin_mutex is a spin mutex that fits in a single byte. + It should be used only for locking short critical sections + (typically less than 20 instructions) when fairness is not an issue. + If zero-initialized, the mutex is considered unheld. + @ingroup synchronization */ +class spin_mutex : internal::mutex_copy_deprecated_and_disabled { + //! 0 if lock is released, 1 if lock is acquired. + __TBB_atomic_flag flag; + +public: + //! Construct unacquired lock. + /** Equivalent to zero-initialization of *this. */ + spin_mutex() : flag(0) { +#if TBB_USE_THREADING_TOOLS + internal_construct(); +#endif + } + + //! Represents acquisition of a mutex. + class scoped_lock : internal::no_copy { + private: + //! Points to currently held mutex, or NULL if no lock is held. + spin_mutex* my_mutex; + + //! Value to store into spin_mutex::flag to unlock the mutex. + /** This variable is no longer used. Instead, 0 and 1 are used to + represent that the lock is free and acquired, respectively. + We keep the member variable here to ensure backward compatibility */ + __TBB_Flag my_unlock_value; + + //! Like acquire, but with ITT instrumentation. + void __TBB_EXPORTED_METHOD internal_acquire( spin_mutex& m ); + + //! Like try_acquire, but with ITT instrumentation. + bool __TBB_EXPORTED_METHOD internal_try_acquire( spin_mutex& m ); + + //! Like release, but with ITT instrumentation. + void __TBB_EXPORTED_METHOD internal_release(); + + friend class spin_mutex; + + public: + //! Construct without acquiring a mutex. 
+ scoped_lock() : my_mutex(NULL), my_unlock_value(0) {} + + //! Construct and acquire lock on a mutex. + scoped_lock( spin_mutex& m ) : my_unlock_value(0) { + internal::suppress_unused_warning(my_unlock_value); +#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT + my_mutex=NULL; + internal_acquire(m); +#else + my_mutex=&m; + __TBB_LockByte(m.flag); +#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT*/ + } + + //! Acquire lock. + void acquire( spin_mutex& m ) { +#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT + internal_acquire(m); +#else + my_mutex = &m; + __TBB_LockByte(m.flag); +#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT*/ + } + + //! Try acquiring lock (non-blocking) + /** Return true if lock acquired; false otherwise. */ + bool try_acquire( spin_mutex& m ) { +#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT + return internal_try_acquire(m); +#else + bool result = __TBB_TryLockByte(m.flag); + if( result ) + my_mutex = &m; + return result; +#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT*/ + } + + //! Release lock + void release() { +#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT + internal_release(); +#else + __TBB_UnlockByte(my_mutex->flag); + my_mutex = NULL; +#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */ + } + + //! Destroy lock. If holding a lock, releases the lock first. + ~scoped_lock() { + if( my_mutex ) { +#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT + internal_release(); +#else + __TBB_UnlockByte(my_mutex->flag); +#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */ + } + } + }; + + //! Internal constructor with ITT instrumentation. + void __TBB_EXPORTED_METHOD internal_construct(); + + // Mutex traits + static const bool is_rw_mutex = false; + static const bool is_recursive_mutex = false; + static const bool is_fair_mutex = false; + + // ISO C++0x compatibility methods + + //! Acquire lock + void lock() { +#if TBB_USE_THREADING_TOOLS + aligned_space tmp; + new(tmp.begin()) scoped_lock(*this); +#else + __TBB_LockByte(flag); +#endif /* TBB_USE_THREADING_TOOLS*/ + } + + //! Try acquiring lock (non-blocking) + /** Return true if lock acquired; false otherwise. */ + bool try_lock() { +#if TBB_USE_THREADING_TOOLS + aligned_space tmp; + return (new(tmp.begin()) scoped_lock)->internal_try_acquire(*this); +#else + return __TBB_TryLockByte(flag); +#endif /* TBB_USE_THREADING_TOOLS*/ + } + + //! Release lock + void unlock() { +#if TBB_USE_THREADING_TOOLS + aligned_space tmp; + scoped_lock& s = *tmp.begin(); + s.my_mutex = this; + s.internal_release(); +#else + __TBB_UnlockByte(flag); +#endif /* TBB_USE_THREADING_TOOLS */ + } + + friend class scoped_lock; +}; // end of spin_mutex + +__TBB_DEFINE_PROFILING_SET_NAME(spin_mutex) + +} // namespace tbb + +#if ( __TBB_x86_32 || __TBB_x86_64 ) +#include "internal/_x86_eliding_mutex_impl.h" +#endif + +namespace tbb { +//! A cross-platform spin mutex with speculative lock acquisition. +/** On platforms with proper HW support, this lock may speculatively execute + its critical sections, using HW mechanisms to detect real data races and + ensure atomicity of the critical sections. In particular, it uses + Intel(R) Transactional Synchronization Extensions (Intel(R) TSX). + Without such HW support, it behaves like a spin_mutex. + It should be used for locking short critical sections where the lock is + contended but the data it protects are not. If zero-initialized, the + mutex is considered unheld. 
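+
+    A minimal usage sketch (the counter protected here is illustrative only;
+    speculative_spin_mutex can be used wherever spin_mutex fits):
+    \code
+    tbb::speculative_spin_mutex counter_mutex;
+    long counter = 0;
+
+    void increment() {
+        // scoped_lock acquires in the constructor and releases in the destructor
+        tbb::speculative_spin_mutex::scoped_lock lock( counter_mutex );
+        ++counter;
+    }
+    \endcode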
+ @ingroup synchronization */ + +#if ( __TBB_x86_32 || __TBB_x86_64 ) +typedef interface7::internal::padded_mutex speculative_spin_mutex; +#else +typedef interface7::internal::padded_mutex speculative_spin_mutex; +#endif +__TBB_DEFINE_PROFILING_SET_NAME(speculative_spin_mutex) + +} // namespace tbb + +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_spin_mutex_H_include_area + +#endif /* __TBB_spin_mutex_H */ diff --git a/ohos/arm64-v8a/include/tbb/spin_rw_mutex.h b/ohos/arm64-v8a/include/tbb/spin_rw_mutex.h new file mode 100644 index 00000000..57a4ce2f --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/spin_rw_mutex.h @@ -0,0 +1,252 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_spin_rw_mutex_H +#define __TBB_spin_rw_mutex_H + +#include "tbb_stddef.h" +#include "tbb_machine.h" +#include "tbb_profiling.h" +#include "internal/_mutex_padding.h" + +namespace tbb { + +#if __TBB_TSX_AVAILABLE +namespace interface8 { namespace internal { + class x86_rtm_rw_mutex; +}} +#endif + +class spin_rw_mutex_v3; +typedef spin_rw_mutex_v3 spin_rw_mutex; + +//! Fast, unfair, spinning reader-writer lock with backoff and writer-preference +/** @ingroup synchronization */ +class spin_rw_mutex_v3 : internal::mutex_copy_deprecated_and_disabled { + //! @cond INTERNAL + + //! Internal acquire write lock. + bool __TBB_EXPORTED_METHOD internal_acquire_writer(); + + //! Out of line code for releasing a write lock. + /** This code has debug checking and instrumentation for Intel(R) Thread Checker and Intel(R) Thread Profiler. */ + void __TBB_EXPORTED_METHOD internal_release_writer(); + + //! Internal acquire read lock. + void __TBB_EXPORTED_METHOD internal_acquire_reader(); + + //! Internal upgrade reader to become a writer. + bool __TBB_EXPORTED_METHOD internal_upgrade(); + + //! Out of line code for downgrading a writer to a reader. + /** This code has debug checking and instrumentation for Intel(R) Thread Checker and Intel(R) Thread Profiler. */ + void __TBB_EXPORTED_METHOD internal_downgrade(); + + //! Internal release read lock. + void __TBB_EXPORTED_METHOD internal_release_reader(); + + //! Internal try_acquire write lock. + bool __TBB_EXPORTED_METHOD internal_try_acquire_writer(); + + //! Internal try_acquire read lock. + bool __TBB_EXPORTED_METHOD internal_try_acquire_reader(); + + //! @endcond +public: + //! Construct unacquired mutex. + spin_rw_mutex_v3() : state(0) { +#if TBB_USE_THREADING_TOOLS + internal_construct(); +#endif + } + +#if TBB_USE_ASSERT + //! Destructor asserts if the mutex is acquired, i.e. state is zero. + ~spin_rw_mutex_v3() { + __TBB_ASSERT( !state, "destruction of an acquired mutex"); + }; +#endif /* TBB_USE_ASSERT */ + + //! The scoped locking pattern + /** It helps to avoid the common problem of forgetting to release lock. + It also nicely provides the "node" for queuing locks. 
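+
+        A minimal sketch of the read-then-upgrade pattern (shared_value and the
+        update condition are illustrative only):
+        \code
+        tbb::spin_rw_mutex rw;
+        int shared_value = 0;
+
+        void set_value( int new_value ) {
+            tbb::spin_rw_mutex::scoped_lock lock( rw, false );  // acquire as reader
+            if( shared_value != new_value ) {
+                // upgrade_to_writer() may have to release and re-acquire the lock,
+                // so the condition is re-checked when it returns false
+                if( !lock.upgrade_to_writer() && shared_value == new_value )
+                    return;
+                shared_value = new_value;
+            }
+        }   // lock released by the destructor
+        \endcode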
*/ + class scoped_lock : internal::no_copy { +#if __TBB_TSX_AVAILABLE + friend class tbb::interface8::internal::x86_rtm_rw_mutex; +#endif + public: + //! Construct lock that has not acquired a mutex. + /** Equivalent to zero-initialization of *this. */ + scoped_lock() : mutex(NULL), is_writer(false) {} + + //! Acquire lock on given mutex. + scoped_lock( spin_rw_mutex& m, bool write = true ) : mutex(NULL) { + acquire(m, write); + } + + //! Release lock (if lock is held). + ~scoped_lock() { + if( mutex ) release(); + } + + //! Acquire lock on given mutex. + void acquire( spin_rw_mutex& m, bool write = true ) { + __TBB_ASSERT( !mutex, "holding mutex already" ); + is_writer = write; + mutex = &m; + if( write ) mutex->internal_acquire_writer(); + else mutex->internal_acquire_reader(); + } + + //! Upgrade reader to become a writer. + /** Returns whether the upgrade happened without releasing and re-acquiring the lock */ + bool upgrade_to_writer() { + __TBB_ASSERT( mutex, "mutex is not acquired" ); + if (is_writer) return true; // Already a writer + is_writer = true; + return mutex->internal_upgrade(); + } + + //! Release lock. + void release() { + __TBB_ASSERT( mutex, "mutex is not acquired" ); + spin_rw_mutex *m = mutex; + mutex = NULL; +#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT + if( is_writer ) m->internal_release_writer(); + else m->internal_release_reader(); +#else + if( is_writer ) __TBB_AtomicAND( &m->state, READERS ); + else __TBB_FetchAndAddWrelease( &m->state, -(intptr_t)ONE_READER); +#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */ + } + + //! Downgrade writer to become a reader. + bool downgrade_to_reader() { + __TBB_ASSERT( mutex, "mutex is not acquired" ); + if (!is_writer) return true; // Already a reader +#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT + mutex->internal_downgrade(); +#else + __TBB_FetchAndAddW( &mutex->state, ((intptr_t)ONE_READER-WRITER)); +#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */ + is_writer = false; + return true; + } + + //! Try acquire lock on given mutex. + bool try_acquire( spin_rw_mutex& m, bool write = true ) { + __TBB_ASSERT( !mutex, "holding mutex already" ); + bool result; + is_writer = write; + result = write? m.internal_try_acquire_writer() + : m.internal_try_acquire_reader(); + if( result ) + mutex = &m; + return result; + } + + protected: + + //! The pointer to the current mutex that is held, or NULL if no mutex is held. + spin_rw_mutex* mutex; + + //! If mutex!=NULL, then is_writer is true if holding a writer lock, false if holding a reader lock. + /** Not defined if not holding a lock. */ + bool is_writer; + }; + + // Mutex traits + static const bool is_rw_mutex = true; + static const bool is_recursive_mutex = false; + static const bool is_fair_mutex = false; + + // ISO C++0x compatibility methods + + //! Acquire writer lock + void lock() {internal_acquire_writer();} + + //! Try acquiring writer lock (non-blocking) + /** Return true if lock acquired; false otherwise. */ + bool try_lock() {return internal_try_acquire_writer();} + + //! Release lock + void unlock() { +#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT + if( state&WRITER ) internal_release_writer(); + else internal_release_reader(); +#else + if( state&WRITER ) __TBB_AtomicAND( &state, READERS ); + else __TBB_FetchAndAddWrelease( &state, -(intptr_t)ONE_READER); +#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */ + } + + // Methods for reader locks that resemble ISO C++0x compatibility methods. + + //! 
Acquire reader lock + void lock_read() {internal_acquire_reader();} + + //! Try acquiring reader lock (non-blocking) + /** Return true if reader lock acquired; false otherwise. */ + bool try_lock_read() {return internal_try_acquire_reader();} + +protected: + typedef intptr_t state_t; + static const state_t WRITER = 1; + static const state_t WRITER_PENDING = 2; + static const state_t READERS = ~(WRITER | WRITER_PENDING); + static const state_t ONE_READER = 4; + static const state_t BUSY = WRITER | READERS; + //! State of lock + /** Bit 0 = writer is holding lock + Bit 1 = request by a writer to acquire lock (hint to readers to wait) + Bit 2..N = number of readers holding lock */ + state_t state; + +private: + void __TBB_EXPORTED_METHOD internal_construct(); +}; + +__TBB_DEFINE_PROFILING_SET_NAME(spin_rw_mutex) + +} // namespace tbb + +#if __TBB_TSX_AVAILABLE +#include "internal/_x86_rtm_rw_mutex_impl.h" +#endif + +namespace tbb { +namespace interface8 { +//! A cross-platform spin reader/writer mutex with speculative lock acquisition. +/** On platforms with proper HW support, this lock may speculatively execute + its critical sections, using HW mechanisms to detect real data races and + ensure atomicity of the critical sections. In particular, it uses + Intel(R) Transactional Synchronization Extensions (Intel(R) TSX). + Without such HW support, it behaves like a spin_rw_mutex. + It should be used for locking short critical sections where the lock is + contended but the data it protects are not. + @ingroup synchronization */ +#if __TBB_TSX_AVAILABLE +typedef interface7::internal::padded_mutex speculative_spin_rw_mutex; +#else +typedef interface7::internal::padded_mutex speculative_spin_rw_mutex; +#endif +} // namespace interface8 + +using interface8::speculative_spin_rw_mutex; +__TBB_DEFINE_PROFILING_SET_NAME(speculative_spin_rw_mutex) +} // namespace tbb +#endif /* __TBB_spin_rw_mutex_H */ diff --git a/ohos/arm64-v8a/include/tbb/task.h b/ohos/arm64-v8a/include/tbb/task.h new file mode 100644 index 00000000..d58fb361 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/task.h @@ -0,0 +1,1189 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#include "internal/_deprecated_header_message_guard.h" + +#if !defined(__TBB_show_deprecation_message_task_H) && defined(__TBB_show_deprecated_header_message) +#define __TBB_show_deprecation_message_task_H +#pragma message("TBB Warning: tbb/task.h is deprecated. 
For details, please see Deprecated Features appendix in the TBB reference manual.") +#endif + +#if defined(__TBB_show_deprecated_header_message) +#undef __TBB_show_deprecated_header_message +#endif + +#ifndef __TBB_task_H +#define __TBB_task_H + +#define __TBB_task_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + +#include "tbb_stddef.h" +#include "tbb_machine.h" +#include "tbb_profiling.h" +#include + +typedef struct ___itt_caller *__itt_caller; + +namespace tbb { + +class task; +class task_list; +class task_group_context; + +// MSVC does not allow taking the address of a member that was defined +// privately in task_base and made public in class task via a using declaration. +#if _MSC_VER || (__GNUC__==3 && __GNUC_MINOR__<3) +#define __TBB_TASK_BASE_ACCESS public +#else +#define __TBB_TASK_BASE_ACCESS private +#endif + +namespace internal { //< @cond INTERNAL + + class allocate_additional_child_of_proxy: no_assign { + //! No longer used, but retained for binary layout compatibility. Always NULL. + task* self; + task& parent; + public: + explicit allocate_additional_child_of_proxy( task& parent_ ) : self(NULL), parent(parent_) { + suppress_unused_warning( self ); + } + task& __TBB_EXPORTED_METHOD allocate( size_t size ) const; + void __TBB_EXPORTED_METHOD free( task& ) const; + }; + + struct cpu_ctl_env_space { int space[sizeof(internal::uint64_t)/sizeof(int)]; }; +} //< namespace internal @endcond + +namespace interface5 { + namespace internal { + //! Base class for methods that became static in TBB 3.0. + /** TBB's evolution caused the "this" argument for several methods to become obsolete. + However, for backwards binary compatibility, the new methods need distinct names, + otherwise the One Definition Rule would be broken. Hence the new methods are + defined in this private base class, and then exposed in class task via + using declarations. */ + class task_base: tbb::internal::no_copy { + __TBB_TASK_BASE_ACCESS: + friend class tbb::task; + + //! Schedule task for execution when a worker becomes available. + static void spawn( task& t ); + + //! Spawn multiple tasks and clear list. + static void spawn( task_list& list ); + + //! Like allocate_child, except that task's parent becomes "t", not this. + /** Typically used in conjunction with schedule_to_reexecute to implement while loops. + Atomically increments the reference count of t.parent() */ + static tbb::internal::allocate_additional_child_of_proxy allocate_additional_child_of( task& t ) { + return tbb::internal::allocate_additional_child_of_proxy(t); + } + + //! Destroy a task. + /** Usually, calling this method is unnecessary, because a task is + implicitly deleted after its execute() method runs. However, + sometimes a task needs to be explicitly deallocated, such as + when a root task is used as the parent in spawn_and_wait_for_all. */ + static void __TBB_EXPORTED_FUNC destroy( task& victim ); + }; + } // internal +} // interface5 + +//! @cond INTERNAL +namespace internal { + + class scheduler: no_copy { + public: + //! For internal use only + virtual void spawn( task& first, task*& next ) = 0; + + //! For internal use only + virtual void wait_for_all( task& parent, task* child ) = 0; + + //! For internal use only + virtual void spawn_root_and_wait( task& first, task*& next ) = 0; + + //! Pure virtual destructor; + // Have to have it just to shut up overzealous compilation warnings + virtual ~scheduler() = 0; + + //! 
For internal use only + virtual void enqueue( task& t, void* reserved ) = 0; + }; + + //! A reference count + /** Should always be non-negative. A signed type is used so that underflow can be detected. */ + typedef intptr_t reference_count; + +#if __TBB_PREVIEW_RESUMABLE_TASKS + //! The flag to indicate that the wait task has been abandoned. + static const reference_count abandon_flag = reference_count(1) << (sizeof(reference_count)*CHAR_BIT - 2); +#endif + + //! An id as used for specifying affinity. + typedef unsigned short affinity_id; + +#if __TBB_TASK_ISOLATION + //! A tag for task isolation. + typedef intptr_t isolation_tag; + const isolation_tag no_isolation = 0; +#endif /* __TBB_TASK_ISOLATION */ + +#if __TBB_TASK_GROUP_CONTEXT + class generic_scheduler; + + struct context_list_node_t { + context_list_node_t *my_prev, + *my_next; + }; + + class allocate_root_with_context_proxy: no_assign { + task_group_context& my_context; + public: + allocate_root_with_context_proxy ( task_group_context& ctx ) : my_context(ctx) {} + task& __TBB_EXPORTED_METHOD allocate( size_t size ) const; + void __TBB_EXPORTED_METHOD free( task& ) const; + }; +#endif /* __TBB_TASK_GROUP_CONTEXT */ + + class allocate_root_proxy: no_assign { + public: + static task& __TBB_EXPORTED_FUNC allocate( size_t size ); + static void __TBB_EXPORTED_FUNC free( task& ); + }; + + class allocate_continuation_proxy: no_assign { + public: + task& __TBB_EXPORTED_METHOD allocate( size_t size ) const; + void __TBB_EXPORTED_METHOD free( task& ) const; + }; + + class allocate_child_proxy: no_assign { + public: + task& __TBB_EXPORTED_METHOD allocate( size_t size ) const; + void __TBB_EXPORTED_METHOD free( task& ) const; + }; + +#if __TBB_PREVIEW_CRITICAL_TASKS + // TODO: move to class methods when critical task API becomes public + void make_critical( task& t ); + bool is_critical( task& t ); +#endif + + //! Memory prefix to a task object. + /** This class is internal to the library. + Do not reference it directly, except within the library itself. + Fields are ordered in way that preserves backwards compatibility and yields good packing on + typical 32-bit and 64-bit platforms. New fields should be added at the beginning for + backward compatibility with accesses to the task prefix inlined into application code. To + prevent ODR violation, the class shall have the same layout in all application translation + units. If some fields are conditional (e.g. enabled by preview macros) and might get + skipped, use reserved fields to adjust the layout. + + In case task prefix size exceeds 32 or 64 bytes on IA32 and Intel64 architectures + correspondingly, consider dynamic setting of task_alignment and task_prefix_reservation_size + based on the maximal operand size supported by the current CPU. + + @ingroup task_scheduling */ + class task_prefix { + private: + friend class tbb::task; + friend class tbb::interface5::internal::task_base; + friend class tbb::task_list; + friend class internal::scheduler; + friend class internal::allocate_root_proxy; + friend class internal::allocate_child_proxy; + friend class internal::allocate_continuation_proxy; + friend class internal::allocate_additional_child_of_proxy; +#if __TBB_PREVIEW_CRITICAL_TASKS + friend void make_critical( task& ); + friend bool is_critical( task& ); +#endif + +#if __TBB_TASK_ISOLATION + //! The tag used for task isolation. + isolation_tag isolation; +#else + intptr_t reserved_space_for_task_isolation_tag; +#endif /* __TBB_TASK_ISOLATION */ + +#if __TBB_TASK_GROUP_CONTEXT + //! 
Shared context that is used to communicate asynchronous state changes + /** Currently it is used to broadcast cancellation requests generated both + by users and as the result of unhandled exceptions in the task::execute() + methods. */ + task_group_context *context; +#endif /* __TBB_TASK_GROUP_CONTEXT */ + + //! The scheduler that allocated the task, or NULL if the task is big. + /** Small tasks are pooled by the scheduler that allocated the task. + If a scheduler needs to free a small task allocated by another scheduler, + it returns the task to that other scheduler. This policy avoids + memory space blowup issues for memory allocators that allocate from + thread-specific pools. */ + scheduler* origin; + +#if __TBB_TASK_PRIORITY || __TBB_PREVIEW_RESUMABLE_TASKS + union { +#endif /* __TBB_TASK_PRIORITY */ + //! Obsolete. The scheduler that owns the task. + /** Retained only for the sake of backward binary compatibility. + Still used by inline methods in the task.h header. **/ + scheduler* owner; + +#if __TBB_TASK_PRIORITY + //! Pointer to the next offloaded lower priority task. + /** Used to maintain a list of offloaded tasks inside the scheduler. **/ + task* next_offloaded; +#endif + +#if __TBB_PREVIEW_RESUMABLE_TASKS + //! Pointer to the abandoned scheduler where the current task is waited for. + scheduler* abandoned_scheduler; +#endif +#if __TBB_TASK_PRIORITY || __TBB_PREVIEW_RESUMABLE_TASKS + }; +#endif /* __TBB_TASK_PRIORITY || __TBB_PREVIEW_RESUMABLE_TASKS */ + + //! The task whose reference count includes me. + /** In the "blocking style" of programming, this field points to the parent task. + In the "continuation-passing style" of programming, this field points to the + continuation of the parent. */ + tbb::task* parent; + + //! Reference count used for synchronization. + /** In the "continuation-passing style" of programming, this field is + the difference of the number of allocated children minus the + number of children that have completed. + In the "blocking style" of programming, this field is one more than the difference. */ + __TBB_atomic reference_count ref_count; + + //! Obsolete. Used to be scheduling depth before TBB 2.2 + /** Retained only for the sake of backward binary compatibility. + Not used by TBB anymore. **/ + int depth; + + //! A task::state_type, stored as a byte for compactness. + /** This state is exposed to users via method task::state(). */ + unsigned char state; + + //! Miscellaneous state that is not directly visible to users, stored as a byte for compactness. + /** 0x0 -> version 1.0 task + 0x1 -> version >=2.1 task + 0x10 -> task was enqueued + 0x20 -> task_proxy + 0x40 -> task has live ref_count + 0x80 -> a stolen task */ + unsigned char extra_state; + + affinity_id affinity; + + //! "next" field for list of task + tbb::task* next; + + //! The task corresponding to this task_prefix. + tbb::task& task() {return *reinterpret_cast(this+1);} + }; + +} // namespace internal +//! 
@endcond + +#if __TBB_TASK_GROUP_CONTEXT + +#if __TBB_TASK_PRIORITY +namespace internal { + static const int priority_stride_v4 = INT_MAX / 4; +#if __TBB_PREVIEW_CRITICAL_TASKS + // TODO: move into priority_t enum when critical tasks become public feature + static const int priority_critical = priority_stride_v4 * 3 + priority_stride_v4 / 3 * 2; +#endif +} + +enum priority_t { + priority_normal = internal::priority_stride_v4 * 2, + priority_low = priority_normal - internal::priority_stride_v4, + priority_high = priority_normal + internal::priority_stride_v4 +}; + +#endif /* __TBB_TASK_PRIORITY */ + +#if TBB_USE_CAPTURED_EXCEPTION + class tbb_exception; +#else + namespace internal { + class tbb_exception_ptr; + } +#endif /* !TBB_USE_CAPTURED_EXCEPTION */ + +class task_scheduler_init; +namespace interface7 { class task_arena; } +using interface7::task_arena; + +//! Used to form groups of tasks +/** @ingroup task_scheduling + The context services explicit cancellation requests from user code, and unhandled + exceptions intercepted during tasks execution. Intercepting an exception results + in generating internal cancellation requests (which is processed in exactly the + same way as external ones). + + The context is associated with one or more root tasks and defines the cancellation + group that includes all the descendants of the corresponding root task(s). Association + is established when a context object is passed as an argument to the task::allocate_root() + method. See task_group_context::task_group_context for more details. + + The context can be bound to another one, and other contexts can be bound to it, + forming a tree-like structure: parent -> this -> children. Arrows here designate + cancellation propagation direction. If a task in a cancellation group is cancelled + all the other tasks in this group and groups bound to it (as children) get cancelled too. + + IMPLEMENTATION NOTE: + When adding new members to task_group_context or changing types of existing ones, + update the size of both padding buffers (_leading_padding and _trailing_padding) + appropriately. See also VERSIONING NOTE at the constructor definition below. **/ +class task_group_context : internal::no_copy { +private: + friend class internal::generic_scheduler; + friend class task_scheduler_init; + friend class task_arena; + +#if TBB_USE_CAPTURED_EXCEPTION + typedef tbb_exception exception_container_type; +#else + typedef internal::tbb_exception_ptr exception_container_type; +#endif + + enum version_traits_word_layout { + traits_offset = 16, + version_mask = 0xFFFF, + traits_mask = 0xFFFFul << traits_offset + }; + +public: + enum kind_type { + isolated, + bound + }; + + enum traits_type { + exact_exception = 0x0001ul << traits_offset, +#if __TBB_FP_CONTEXT + fp_settings = 0x0002ul << traits_offset, +#endif + concurrent_wait = 0x0004ul << traits_offset, +#if TBB_USE_CAPTURED_EXCEPTION + default_traits = 0 +#else + default_traits = exact_exception +#endif /* !TBB_USE_CAPTURED_EXCEPTION */ + }; + +private: + enum state { + may_have_children = 1, + // the following enumerations must be the last, new 2^x values must go above + next_state_value, low_unused_state_bit = (next_state_value-1)*2 + }; + + union { + //! Flavor of this context: bound or isolated. + // TODO: describe asynchronous use, and whether any memory semantics are needed + __TBB_atomic kind_type my_kind; + uintptr_t _my_kind_aligner; + }; + + //! Pointer to the context of the parent cancellation group. NULL for isolated contexts. 
+ task_group_context *my_parent; + + //! Used to form the thread specific list of contexts without additional memory allocation. + /** A context is included into the list of the current thread when its binding to + its parent happens. Any context can be present in the list of one thread only. **/ + internal::context_list_node_t my_node; + + //! Used to set and maintain stack stitching point for Intel Performance Tools. + __itt_caller itt_caller; + + //! Leading padding protecting accesses to frequently used members from false sharing. + /** Read accesses to the field my_cancellation_requested are on the hot path inside + the scheduler. This padding ensures that this field never shares the same cache + line with a local variable that is frequently written to. **/ + char _leading_padding[internal::NFS_MaxLineSize + - 2 * sizeof(uintptr_t)- sizeof(void*) - sizeof(internal::context_list_node_t) + - sizeof(__itt_caller) +#if __TBB_FP_CONTEXT + - sizeof(internal::cpu_ctl_env_space) +#endif + ]; + +#if __TBB_FP_CONTEXT + //! Space for platform-specific FPU settings. + /** Must only be accessed inside TBB binaries, and never directly in user + code or inline methods. */ + internal::cpu_ctl_env_space my_cpu_ctl_env; +#endif + + //! Specifies whether cancellation was requested for this task group. + uintptr_t my_cancellation_requested; + + //! Version for run-time checks and behavioral traits of the context. + /** Version occupies low 16 bits, and traits (zero or more ORed enumerators + from the traits_type enumerations) take the next 16 bits. + Original (zeroth) version of the context did not support any traits. **/ + uintptr_t my_version_and_traits; + + //! Pointer to the container storing exception being propagated across this task group. + exception_container_type *my_exception; + + //! Scheduler instance that registered this context in its thread specific list. + internal::generic_scheduler *my_owner; + + //! Internal state (combination of state flags, currently only may_have_children). + uintptr_t my_state; + +#if __TBB_TASK_PRIORITY + //! Priority level of the task group (in normalized representation) + intptr_t my_priority; +#endif /* __TBB_TASK_PRIORITY */ + + //! Description of algorithm for scheduler based instrumentation. + internal::string_index my_name; + + //! Trailing padding protecting accesses to frequently used members from false sharing + /** \sa _leading_padding **/ + char _trailing_padding[internal::NFS_MaxLineSize - 2 * sizeof(uintptr_t) - 2 * sizeof(void*) +#if __TBB_TASK_PRIORITY + - sizeof(intptr_t) +#endif /* __TBB_TASK_PRIORITY */ + - sizeof(internal::string_index) + ]; + +public: + //! Default & binding constructor. + /** By default a bound context is created. That is this context will be bound + (as child) to the context of the task calling task::allocate_root(this_context) + method. Cancellation requests passed to the parent context are propagated + to all the contexts bound to it. Similarly priority change is propagated + from the parent context to its children. + + If task_group_context::isolated is used as the argument, then the tasks associated + with this context will never be affected by events in any other context. + + Creating isolated contexts involve much less overhead, but they have limited + utility. Normally when an exception occurs in an algorithm that has nested + ones running, it is desirably to have all the nested algorithms cancelled + as well. Such a behavior requires nested algorithms to use bound contexts. 
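+
+        For example (a sketch only; LoopBody is a placeholder for real work), an
+        explicitly created bound context lets one thread cancel an algorithm,
+        together with everything nested inside it, from another thread:
+        \code
+        tbb::task_group_context ctx;                       // bound by default
+
+        struct LoopBody {
+            void operator()( int i ) const { }             // process item i
+        };
+
+        void run_work() {
+            tbb::parallel_for( 0, 1000, LoopBody(), ctx ); // returns early once ctx is cancelled
+        }
+
+        void cancel_work() {
+            ctx.cancel_group_execution();                  // also cancels contexts bound to ctx
+        }
+        \endcode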
+ + There is one good place where using isolated algorithms is beneficial. It is + a master thread. That is if a particular algorithm is invoked directly from + the master thread (not from a TBB task), supplying it with explicitly + created isolated context will result in a faster algorithm startup. + + VERSIONING NOTE: + Implementation(s) of task_group_context constructor(s) cannot be made + entirely out-of-line because the run-time version must be set by the user + code. This will become critically important for binary compatibility, if + we ever have to change the size of the context object. + + Boosting the runtime version will also be necessary if new data fields are + introduced in the currently unused padding areas and these fields are updated + by inline methods. **/ + task_group_context ( kind_type relation_with_parent = bound, + uintptr_t t = default_traits ) + : my_kind(relation_with_parent) + , my_version_and_traits(3 | t) + , my_name(internal::CUSTOM_CTX) + { + init(); + } + + // Custom constructor for instrumentation of tbb algorithm + task_group_context ( internal::string_index name ) + : my_kind(bound) + , my_version_and_traits(3 | default_traits) + , my_name(name) + { + init(); + } + + // Do not introduce standalone unbind method since it will break state propagation assumptions + __TBB_EXPORTED_METHOD ~task_group_context (); + + //! Forcefully reinitializes the context after the task tree it was associated with is completed. + /** Because the method assumes that all the tasks that used to be associated with + this context have already finished, calling it while the context is still + in use somewhere in the task hierarchy leads to undefined behavior. + + IMPORTANT: This method is not thread safe! + + The method does not change the context's parent if it is set. **/ + void __TBB_EXPORTED_METHOD reset (); + + //! Initiates cancellation of all tasks in this cancellation group and its subordinate groups. + /** \return false if cancellation has already been requested, true otherwise. + + Note that canceling never fails. When false is returned, it just means that + another thread (or this one) has already sent cancellation request to this + context or to one of its ancestors (if this context is bound). It is guaranteed + that when this method is concurrently called on the same not yet cancelled + context, true will be returned by one and only one invocation. **/ + bool __TBB_EXPORTED_METHOD cancel_group_execution (); + + //! Returns true if the context received cancellation request. + bool __TBB_EXPORTED_METHOD is_group_execution_cancelled () const; + + //! Records the pending exception, and cancels the task group. + /** May be called only from inside a catch-block. If the context is already + cancelled, does nothing. + The method brings the task group associated with this context exactly into + the state it would be in, if one of its tasks threw the currently pending + exception during its execution. In other words, it emulates the actions + of the scheduler's dispatch loop exception handler. **/ + void __TBB_EXPORTED_METHOD register_pending_exception (); + +#if __TBB_FP_CONTEXT + //! Captures the current FPU control settings to the context. + /** Because the method assumes that all the tasks that used to be associated with + this context have already finished, calling it while the context is still + in use somewhere in the task hierarchy leads to undefined behavior. + + IMPORTANT: This method is not thread safe! 
+ + The method does not change the FPU control settings of the context's parent. **/ + void __TBB_EXPORTED_METHOD capture_fp_settings (); +#endif + +#if __TBB_TASK_PRIORITY + //! Changes priority of the task group + __TBB_DEPRECATED_IN_VERBOSE_MODE void set_priority ( priority_t ); + + //! Retrieves current priority of the current task group + __TBB_DEPRECATED_IN_VERBOSE_MODE priority_t priority () const; +#endif /* __TBB_TASK_PRIORITY */ + + //! Returns the context's trait + uintptr_t traits() const { return my_version_and_traits & traits_mask; } + +protected: + //! Out-of-line part of the constructor. + /** Singled out to ensure backward binary compatibility of the future versions. **/ + void __TBB_EXPORTED_METHOD init (); + +private: + friend class task; + friend class internal::allocate_root_with_context_proxy; + + static const kind_type binding_required = bound; + static const kind_type binding_completed = kind_type(bound+1); + static const kind_type detached = kind_type(binding_completed+1); + static const kind_type dying = kind_type(detached+1); + + //! Propagates any state change detected to *this, and as an optimisation possibly also upward along the heritage line. + template + void propagate_task_group_state ( T task_group_context::*mptr_state, task_group_context& src, T new_state ); + + //! Registers this context with the local scheduler and binds it to its parent context + void bind_to ( internal::generic_scheduler *local_sched ); + + //! Registers this context with the local scheduler + void register_with ( internal::generic_scheduler *local_sched ); + +#if __TBB_FP_CONTEXT + //! Copies FPU control setting from another context + // TODO: Consider adding #else stub in order to omit #if sections in other code + void copy_fp_settings( const task_group_context &src ); +#endif /* __TBB_FP_CONTEXT */ +}; // class task_group_context + +#endif /* __TBB_TASK_GROUP_CONTEXT */ + +//! Base class for user-defined tasks. +/** @ingroup task_scheduling */ +class __TBB_DEPRECATED_IN_VERBOSE_MODE task: __TBB_TASK_BASE_ACCESS interface5::internal::task_base { + + //! Set reference count + void __TBB_EXPORTED_METHOD internal_set_ref_count( int count ); + + //! Decrement reference count and return its new value. + internal::reference_count __TBB_EXPORTED_METHOD internal_decrement_ref_count(); + +protected: + //! Default constructor. + task() {prefix().extra_state=1;} + +public: + //! Destructor. + virtual ~task() {} + + //! Should be overridden by derived classes. + virtual task* execute() = 0; + + //! Enumeration of task states that the scheduler considers. + enum state_type { + //! task is running, and will be destroyed after method execute() completes. + executing, + //! task to be rescheduled. + reexecute, + //! task is in ready pool, or is going to be put there, or was just taken off. + ready, + //! task object is freshly allocated or recycled. + allocated, + //! task object is on free list, or is going to be put there, or was just taken off. + freed, + //! task to be recycled as continuation + recycle +#if __TBB_RECYCLE_TO_ENQUEUE + //! task to be scheduled for starvation-resistant execution + ,to_enqueue +#endif +#if __TBB_PREVIEW_RESUMABLE_TASKS + //! a special task used to resume a scheduler. + ,to_resume +#endif + }; + + //------------------------------------------------------------------------ + // Allocating tasks + //------------------------------------------------------------------------ + + //! Returns proxy for overloaded new that allocates a root task. 
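+    /** Typical blocking-style use (a sketch only; MyRootTask is a hypothetical
+        class derived from tbb::task that overrides execute()):
+        \code
+        tbb::task* root = new( tbb::task::allocate_root() ) MyRootTask;
+        tbb::task::spawn_root_and_wait( *root );  // runs the task tree and frees the root
+        \endcode */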
+ static internal::allocate_root_proxy allocate_root() { + return internal::allocate_root_proxy(); + } + +#if __TBB_TASK_GROUP_CONTEXT + //! Returns proxy for overloaded new that allocates a root task associated with user supplied context. + static internal::allocate_root_with_context_proxy allocate_root( task_group_context& ctx ) { + return internal::allocate_root_with_context_proxy(ctx); + } +#endif /* __TBB_TASK_GROUP_CONTEXT */ + + //! Returns proxy for overloaded new that allocates a continuation task of *this. + /** The continuation's parent becomes the parent of *this. */ + internal::allocate_continuation_proxy& allocate_continuation() { + return *reinterpret_cast(this); + } + + //! Returns proxy for overloaded new that allocates a child task of *this. + internal::allocate_child_proxy& allocate_child() { + return *reinterpret_cast(this); + } + + //! Define recommended static form via import from base class. + using task_base::allocate_additional_child_of; + +#if __TBB_DEPRECATED_TASK_INTERFACE + //! Destroy a task. + /** Usually, calling this method is unnecessary, because a task is + implicitly deleted after its execute() method runs. However, + sometimes a task needs to be explicitly deallocated, such as + when a root task is used as the parent in spawn_and_wait_for_all. */ + void __TBB_EXPORTED_METHOD destroy( task& t ); +#else /* !__TBB_DEPRECATED_TASK_INTERFACE */ + //! Define recommended static form via import from base class. + using task_base::destroy; +#endif /* !__TBB_DEPRECATED_TASK_INTERFACE */ + + //------------------------------------------------------------------------ + // Recycling of tasks + //------------------------------------------------------------------------ + + //! Change this to be a continuation of its former self. + /** The caller must guarantee that the task's refcount does not become zero until + after the method execute() returns. Typically, this is done by having + method execute() return a pointer to a child of the task. If the guarantee + cannot be made, use method recycle_as_safe_continuation instead. + + Because of the hazard, this method may be deprecated in the future. */ + void recycle_as_continuation() { + __TBB_ASSERT( prefix().state==executing, "execute not running?" ); + prefix().state = allocated; + } + + //! Recommended to use, safe variant of recycle_as_continuation + /** For safety, it requires additional increment of ref_count. + With no descendants and ref_count of 1, it has the semantics of recycle_to_reexecute. */ + void recycle_as_safe_continuation() { + __TBB_ASSERT( prefix().state==executing, "execute not running?" ); + prefix().state = recycle; + } + + //! Change this to be a child of new_parent. + void recycle_as_child_of( task& new_parent ) { + internal::task_prefix& p = prefix(); + __TBB_ASSERT( prefix().state==executing||prefix().state==allocated, "execute not running, or already recycled" ); + __TBB_ASSERT( prefix().ref_count==0, "no child tasks allowed when recycled as a child" ); + __TBB_ASSERT( p.parent==NULL, "parent must be null" ); + __TBB_ASSERT( new_parent.prefix().state<=recycle, "corrupt parent's state" ); + __TBB_ASSERT( new_parent.prefix().state!=freed, "parent already freed" ); + p.state = allocated; + p.parent = &new_parent; +#if __TBB_TASK_GROUP_CONTEXT + p.context = new_parent.prefix().context; +#endif /* __TBB_TASK_GROUP_CONTEXT */ + } + + //! Schedule this for reexecution after current execute() returns. + /** Made obsolete by recycle_as_safe_continuation; may become deprecated. 
*/ + void recycle_to_reexecute() { + __TBB_ASSERT( prefix().state==executing, "execute not running, or already recycled" ); + __TBB_ASSERT( prefix().ref_count==0, "no child tasks allowed when recycled for reexecution" ); + prefix().state = reexecute; + } + +#if __TBB_RECYCLE_TO_ENQUEUE + //! Schedule this to enqueue after descendant tasks complete. + /** Save enqueue/spawn difference, it has the semantics of recycle_as_safe_continuation. */ + void recycle_to_enqueue() { + __TBB_ASSERT( prefix().state==executing, "execute not running, or already recycled" ); + prefix().state = to_enqueue; + } +#endif /* __TBB_RECYCLE_TO_ENQUEUE */ + + //------------------------------------------------------------------------ + // Spawning and blocking + //------------------------------------------------------------------------ + + //! Set reference count + void set_ref_count( int count ) { +#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT + internal_set_ref_count(count); +#else + prefix().ref_count = count; +#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */ + } + + //! Atomically increment reference count. + /** Has acquire semantics */ + void increment_ref_count() { + __TBB_FetchAndIncrementWacquire( &prefix().ref_count ); + } + + //! Atomically adds to reference count and returns its new value. + /** Has release-acquire semantics */ + int add_ref_count( int count ) { + internal::call_itt_notify( internal::releasing, &prefix().ref_count ); + internal::reference_count k = count+__TBB_FetchAndAddW( &prefix().ref_count, count ); + __TBB_ASSERT( k>=0, "task's reference count underflowed" ); + if( k==0 ) + internal::call_itt_notify( internal::acquired, &prefix().ref_count ); + return int(k); + } + + //! Atomically decrement reference count and returns its new value. + /** Has release semantics. */ + int decrement_ref_count() { +#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT + return int(internal_decrement_ref_count()); +#else + return int(__TBB_FetchAndDecrementWrelease( &prefix().ref_count ))-1; +#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */ + } + + //! Define recommended static forms via import from base class. + using task_base::spawn; + + //! Similar to spawn followed by wait_for_all, but more efficient. + void spawn_and_wait_for_all( task& child ) { + prefix().owner->wait_for_all( *this, &child ); + } + + //! Similar to spawn followed by wait_for_all, but more efficient. + void __TBB_EXPORTED_METHOD spawn_and_wait_for_all( task_list& list ); + + //! Spawn task allocated by allocate_root, wait for it to complete, and deallocate it. + static void spawn_root_and_wait( task& root ) { + root.prefix().owner->spawn_root_and_wait( root, root.prefix().next ); + } + + //! Spawn root tasks on list and wait for all of them to finish. + /** If there are more tasks than worker threads, the tasks are spawned in + order of front to back. */ + static void spawn_root_and_wait( task_list& root_list ); + + //! Wait for reference count to become one, and set reference count to zero. + /** Works on tasks while waiting. */ + void wait_for_all() { + prefix().owner->wait_for_all( *this, NULL ); + } + + //! Enqueue task for starvation-resistant execution. +#if __TBB_TASK_PRIORITY + /** The task will be enqueued on the normal priority level disregarding the + priority of its task group. + + The rationale of such semantics is that priority of an enqueued task is + statically fixed at the moment of its enqueuing, while task group priority + is dynamic. 
Thus automatic priority inheritance would be generally a subject + to the race, which may result in unexpected behavior. + + Use enqueue() overload with explicit priority value and task::group_priority() + method to implement such priority inheritance when it is really necessary. **/ +#endif /* __TBB_TASK_PRIORITY */ + static void enqueue( task& t ) { + t.prefix().owner->enqueue( t, NULL ); + } + +#if __TBB_TASK_PRIORITY + //! Enqueue task for starvation-resistant execution on the specified priority level. + static void enqueue( task& t, priority_t p ) { +#if __TBB_PREVIEW_CRITICAL_TASKS + __TBB_ASSERT(p == priority_low || p == priority_normal || p == priority_high + || p == internal::priority_critical, "Invalid priority level value"); +#else + __TBB_ASSERT(p == priority_low || p == priority_normal || p == priority_high, "Invalid priority level value"); +#endif + t.prefix().owner->enqueue( t, (void*)p ); + } +#endif /* __TBB_TASK_PRIORITY */ + + //! Enqueue task in task_arena + //! The implementation is in task_arena.h +#if __TBB_TASK_PRIORITY + inline static void enqueue( task& t, task_arena& arena, priority_t p = priority_t(0) ); +#else + inline static void enqueue( task& t, task_arena& arena); +#endif + + //! The innermost task being executed or destroyed by the current thread at the moment. + static task& __TBB_EXPORTED_FUNC self(); + + //! task on whose behalf this task is working, or NULL if this is a root. + task* parent() const {return prefix().parent;} + + //! sets parent task pointer to specified value + void set_parent(task* p) { +#if __TBB_TASK_GROUP_CONTEXT + __TBB_ASSERT(!p || prefix().context == p->prefix().context, "The tasks must be in the same context"); +#endif + prefix().parent = p; + } + +#if __TBB_TASK_GROUP_CONTEXT + //! This method is deprecated and will be removed in the future. + /** Use method group() instead. **/ + task_group_context* context() {return prefix().context;} + + //! Pointer to the task group descriptor. + task_group_context* group () { return prefix().context; } +#endif /* __TBB_TASK_GROUP_CONTEXT */ + + //! True if task was stolen from the task pool of another thread. + bool is_stolen_task() const { + return (prefix().extra_state & 0x80)!=0; + } + + //! True if the task was enqueued + bool is_enqueued_task() const { + // es_task_enqueued = 0x10 + return (prefix().extra_state & 0x10)!=0; + } + +#if __TBB_PREVIEW_RESUMABLE_TASKS + //! Type that defines suspension point + typedef void* suspend_point; + + //! Suspend current task execution + template + static void suspend(F f); + + //! Resume specific suspend point + static void resume(suspend_point tag); +#endif + + //------------------------------------------------------------------------ + // Debugging + //------------------------------------------------------------------------ + + //! Current execution state + state_type state() const {return state_type(prefix().state);} + + //! The internal reference count. + int ref_count() const { +#if TBB_USE_ASSERT +#if __TBB_PREVIEW_RESUMABLE_TASKS + internal::reference_count ref_count_ = prefix().ref_count & ~internal::abandon_flag; +#else + internal::reference_count ref_count_ = prefix().ref_count; +#endif + __TBB_ASSERT( ref_count_==int(ref_count_), "integer overflow error"); +#endif +#if __TBB_PREVIEW_RESUMABLE_TASKS + return int(prefix().ref_count & ~internal::abandon_flag); +#else + return int(prefix().ref_count); +#endif + } + + //! Obsolete, and only retained for the sake of backward compatibility. Always returns true. 
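// Illustrative sketch only (not part of this patch): the classic blocking style of the
// deprecated tbb::task interface declared above -- allocate a root, allocate children,
// set the reference count, spawn, and wait. FibTask, parallel_fib, n, x, y and sum are
// hypothetical names introduced for this example.
#include "tbb/task.h"

class FibTask : public tbb::task {
    const long n;
    long* const sum;
public:
    FibTask(long n_, long* sum_) : n(n_), sum(sum_) {}
    tbb::task* execute() __TBB_override {
        if (n < 2) {
            *sum = n;
        } else {
            long x = 0, y = 0;
            FibTask& a = *new(allocate_child()) FibTask(n - 1, &x);
            FibTask& b = *new(allocate_child()) FibTask(n - 2, &y);
            set_ref_count(3);              // two children + one for the wait below
            spawn(b);                      // make b available to the scheduler
            spawn_and_wait_for_all(a);     // run a, then wait for both children
            *sum = x + y;
        }
        return NULL;
    }
};

long parallel_fib(long n) {
    long sum = 0;
    FibTask& root = *new(tbb::task::allocate_root()) FibTask(n, &sum);
    tbb::task::spawn_root_and_wait(root); // spawns the root, waits, and frees it
    return sum;
}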
+ bool __TBB_EXPORTED_METHOD is_owned_by_current_thread() const; + + //------------------------------------------------------------------------ + // Affinity + //------------------------------------------------------------------------ + + //! An id as used for specifying affinity. + /** Guaranteed to be integral type. Value of 0 means no affinity. */ + typedef internal::affinity_id affinity_id; + + //! Set affinity for this task. + void set_affinity( affinity_id id ) {prefix().affinity = id;} + + //! Current affinity of this task + affinity_id affinity() const {return prefix().affinity;} + + //! Invoked by scheduler to notify task that it ran on unexpected thread. + /** Invoked before method execute() runs, if task is stolen, or task has + affinity but will be executed on another thread. + + The default action does nothing. */ + virtual void __TBB_EXPORTED_METHOD note_affinity( affinity_id id ); + +#if __TBB_TASK_GROUP_CONTEXT + //! Moves this task from its current group into another one. + /** Argument ctx specifies the new group. + + The primary purpose of this method is to associate unique task group context + with a task allocated for subsequent enqueuing. In contrast to spawned tasks + enqueued ones normally outlive the scope where they were created. This makes + traditional usage model where task group context are allocated locally on + the stack inapplicable. Dynamic allocation of context objects is performance + inefficient. Method change_group() allows to make task group context object + a member of the task class, and then associate it with its containing task + object in the latter's constructor. **/ + void __TBB_EXPORTED_METHOD change_group ( task_group_context& ctx ); + + //! Initiates cancellation of all tasks in this cancellation group and its subordinate groups. + /** \return false if cancellation has already been requested, true otherwise. **/ + bool cancel_group_execution () { return prefix().context->cancel_group_execution(); } + + //! Returns true if the context has received cancellation request. + bool is_cancelled () const { return prefix().context->is_group_execution_cancelled(); } +#else + bool is_cancelled () const { return false; } +#endif /* __TBB_TASK_GROUP_CONTEXT */ + +#if __TBB_TASK_PRIORITY + //! Changes priority of the task group this task belongs to. + __TBB_DEPRECATED void set_group_priority ( priority_t p ) { prefix().context->set_priority(p); } + + //! Retrieves current priority of the task group this task belongs to. + __TBB_DEPRECATED priority_t group_priority () const { return prefix().context->priority(); } + +#endif /* __TBB_TASK_PRIORITY */ + +private: + friend class interface5::internal::task_base; + friend class task_list; + friend class internal::scheduler; + friend class internal::allocate_root_proxy; +#if __TBB_TASK_GROUP_CONTEXT + friend class internal::allocate_root_with_context_proxy; +#endif /* __TBB_TASK_GROUP_CONTEXT */ + friend class internal::allocate_continuation_proxy; + friend class internal::allocate_child_proxy; + friend class internal::allocate_additional_child_of_proxy; + + //! Get reference to corresponding task_prefix. + /** Version tag prevents loader on Linux from using the wrong symbol in debug builds. 
**/ + internal::task_prefix& prefix( internal::version_tag* = NULL ) const { + return reinterpret_cast(const_cast(this))[-1]; + } +#if __TBB_PREVIEW_CRITICAL_TASKS + friend void internal::make_critical( task& ); + friend bool internal::is_critical( task& ); +#endif +}; // class task + +#if __TBB_PREVIEW_CRITICAL_TASKS +namespace internal { +inline void make_critical( task& t ) { t.prefix().extra_state |= 0x8; } +inline bool is_critical( task& t ) { return bool((t.prefix().extra_state & 0x8) != 0); } +} // namespace internal +#endif /* __TBB_PREVIEW_CRITICAL_TASKS */ + +#if __TBB_PREVIEW_RESUMABLE_TASKS +namespace internal { + template + static void suspend_callback(void* user_callback, task::suspend_point tag) { + // Copy user function to a new stack to avoid a race when the previous scheduler is resumed. + F user_callback_copy = *static_cast(user_callback); + user_callback_copy(tag); + } + void __TBB_EXPORTED_FUNC internal_suspend(void* suspend_callback, void* user_callback); + void __TBB_EXPORTED_FUNC internal_resume(task::suspend_point); + task::suspend_point __TBB_EXPORTED_FUNC internal_current_suspend_point(); +} + +template +inline void task::suspend(F f) { + internal::internal_suspend((void*)internal::suspend_callback, &f); +} +inline void task::resume(suspend_point tag) { + internal::internal_resume(tag); +} +#endif + +//! task that does nothing. Useful for synchronization. +/** @ingroup task_scheduling */ +class __TBB_DEPRECATED_IN_VERBOSE_MODE empty_task: public task { + task* execute() __TBB_override { + return NULL; + } +}; + +//! @cond INTERNAL +namespace internal { + template + class function_task : public task { +#if __TBB_ALLOW_MUTABLE_FUNCTORS + // TODO: deprecated behavior, remove + F my_func; +#else + const F my_func; +#endif + task* execute() __TBB_override { + my_func(); + return NULL; + } + public: + function_task( const F& f ) : my_func(f) {} +#if __TBB_CPP11_RVALUE_REF_PRESENT + function_task( F&& f ) : my_func( std::move(f) ) {} +#endif + }; +} // namespace internal +//! @endcond + +//! A list of children. +/** Used for method task::spawn_children + @ingroup task_scheduling */ +class __TBB_DEPRECATED_IN_VERBOSE_MODE task_list: internal::no_copy { +private: + task* first; + task** next_ptr; + friend class task; + friend class interface5::internal::task_base; +public: + //! Construct empty list + task_list() : first(NULL), next_ptr(&first) {} + + //! Destroys the list, but does not destroy the task objects. + ~task_list() {} + + //! True if list is empty; false otherwise. + bool empty() const {return !first;} + + //! Push task onto back of list. + void push_back( task& task ) { + task.prefix().next = NULL; + *next_ptr = &task; + next_ptr = &task.prefix().next; + } +#if __TBB_TODO + // TODO: add this method and implement&document the local execution ordering. See more in generic_scheduler::local_spawn + //! Push task onto front of list (FIFO local execution, like individual spawning in the same order). + void push_front( task& task ) { + if( empty() ) { + push_back(task); + } else { + task.prefix().next = first; + first = &task; + } + } +#endif + //! Pop the front task from the list. + task& pop_front() { + __TBB_ASSERT( !empty(), "attempt to pop item from empty task_list" ); + task* result = first; + first = result->prefix().next; + if( !first ) next_ptr = &first; + return *result; + } + + //! 
Clear the list + void clear() { + first=NULL; + next_ptr=&first; + } +}; + +inline void interface5::internal::task_base::spawn( task& t ) { + t.prefix().owner->spawn( t, t.prefix().next ); +} + +inline void interface5::internal::task_base::spawn( task_list& list ) { + if( task* t = list.first ) { + t->prefix().owner->spawn( *t, *list.next_ptr ); + list.clear(); + } +} + +inline void task::spawn_root_and_wait( task_list& root_list ) { + if( task* t = root_list.first ) { + t->prefix().owner->spawn_root_and_wait( *t, *root_list.next_ptr ); + root_list.clear(); + } +} + +} // namespace tbb + +inline void *operator new( size_t bytes, const tbb::internal::allocate_root_proxy& ) { + return &tbb::internal::allocate_root_proxy::allocate(bytes); +} + +inline void operator delete( void* task, const tbb::internal::allocate_root_proxy& ) { + tbb::internal::allocate_root_proxy::free( *static_cast(task) ); +} + +#if __TBB_TASK_GROUP_CONTEXT +inline void *operator new( size_t bytes, const tbb::internal::allocate_root_with_context_proxy& p ) { + return &p.allocate(bytes); +} + +inline void operator delete( void* task, const tbb::internal::allocate_root_with_context_proxy& p ) { + p.free( *static_cast(task) ); +} +#endif /* __TBB_TASK_GROUP_CONTEXT */ + +inline void *operator new( size_t bytes, const tbb::internal::allocate_continuation_proxy& p ) { + return &p.allocate(bytes); +} + +inline void operator delete( void* task, const tbb::internal::allocate_continuation_proxy& p ) { + p.free( *static_cast(task) ); +} + +inline void *operator new( size_t bytes, const tbb::internal::allocate_child_proxy& p ) { + return &p.allocate(bytes); +} + +inline void operator delete( void* task, const tbb::internal::allocate_child_proxy& p ) { + p.free( *static_cast(task) ); +} + +inline void *operator new( size_t bytes, const tbb::internal::allocate_additional_child_of_proxy& p ) { + return &p.allocate(bytes); +} + +inline void operator delete( void* task, const tbb::internal::allocate_additional_child_of_proxy& p ) { + p.free( *static_cast(task) ); +} + +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_task_H_include_area + +#endif /* __TBB_task_H */ diff --git a/ohos/arm64-v8a/include/tbb/task_arena.h b/ohos/arm64-v8a/include/tbb/task_arena.h new file mode 100644 index 00000000..46eb4959 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/task_arena.h @@ -0,0 +1,511 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_task_arena_H +#define __TBB_task_arena_H + +#define __TBB_task_arena_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + +#include "task.h" +#include "tbb_exception.h" +#include "internal/_template_helpers.h" +#if __TBB_NUMA_SUPPORT +#include "info.h" +#endif /*__TBB_NUMA_SUPPORT*/ +#if TBB_USE_THREADING_TOOLS +#include "atomic.h" // for as_atomic +#endif +#include "aligned_space.h" + +namespace tbb { + +namespace this_task_arena { + int max_concurrency(); +} // namespace this_task_arena + +//! 
@cond INTERNAL +namespace internal { + //! Internal to library. Should not be used by clients. + /** @ingroup task_scheduling */ + class arena; + class task_scheduler_observer_v3; +} // namespace internal +//! @endcond + +namespace interface7 { +class task_arena; + +//! @cond INTERNAL +namespace internal { +using namespace tbb::internal; //e.g. function_task from task.h + +class delegate_base : no_assign { +public: + virtual void operator()() const = 0; + virtual ~delegate_base() {} +}; + +// If decltype is available, the helper detects the return type of functor of specified type, +// otherwise it defines the void type. +template +struct return_type_or_void { +#if __TBB_CPP11_DECLTYPE_PRESENT && !__TBB_CPP11_DECLTYPE_OF_FUNCTION_RETURN_TYPE_BROKEN + typedef decltype(declval()()) type; +#else + typedef void type; +#endif +}; + +template +class delegated_function : public delegate_base { + F &my_func; + tbb::aligned_space my_return_storage; + // The function should be called only once. + void operator()() const __TBB_override { + new (my_return_storage.begin()) R(my_func()); + } +public: + delegated_function(F& f) : my_func(f) {} + // The function can be called only after operator() and only once. + R consume_result() const { + return tbb::internal::move(*(my_return_storage.begin())); + } + ~delegated_function() { + my_return_storage.begin()->~R(); + } +}; + +template +class delegated_function : public delegate_base { + F &my_func; + void operator()() const __TBB_override { + my_func(); + } +public: + delegated_function(F& f) : my_func(f) {} + void consume_result() const {} + + friend class task_arena_base; +}; + +class task_arena_base { +#if __TBB_NUMA_SUPPORT +public: + // TODO: consider version approach to resolve backward compatibility potential issues. + struct constraints { + constraints(numa_node_id id = automatic, int maximal_concurrency = automatic) + : numa_id(id) + , max_concurrency(maximal_concurrency) + {} + numa_node_id numa_id; + int max_concurrency; + }; +#endif /*__TBB_NUMA_SUPPORT*/ +protected: + //! NULL if not currently initialized. + internal::arena* my_arena; + +#if __TBB_TASK_GROUP_CONTEXT + //! default context of the arena + task_group_context *my_context; +#endif + + //! Concurrency level for deferred initialization + int my_max_concurrency; + + //! Reserved master slots + unsigned my_master_slots; + + //! Special settings + intptr_t my_version_and_traits; + + bool my_initialized; + +#if __TBB_NUMA_SUPPORT + //! The NUMA node index to which the arena will be attached + numa_node_id my_numa_id; + + // Do not access my_numa_id without the following runtime check. + // Despite my_numa_id is accesible, it does not exist in task_arena_base on user side + // if TBB_PREVIEW_NUMA_SUPPORT macro is not defined by the user. To be sure that + // my_numa_id exists in task_arena_base layout we check the traits. + // TODO: Consider increasing interface version for task_arena_base instead of this runtime check. + numa_node_id numa_id() { + return (my_version_and_traits & numa_support_flag) == numa_support_flag ? 
my_numa_id : automatic; + } +#endif + + enum { + default_flags = 0 +#if __TBB_TASK_GROUP_CONTEXT + | (task_group_context::default_traits & task_group_context::exact_exception) // 0 or 1 << 16 + , exact_exception_flag = task_group_context::exact_exception // used to specify flag for context directly +#endif +#if __TBB_NUMA_SUPPORT + , numa_support_flag = 1 +#endif + }; + + task_arena_base(int max_concurrency, unsigned reserved_for_masters) + : my_arena(0) +#if __TBB_TASK_GROUP_CONTEXT + , my_context(0) +#endif + , my_max_concurrency(max_concurrency) + , my_master_slots(reserved_for_masters) +#if __TBB_NUMA_SUPPORT + , my_version_and_traits(default_flags | numa_support_flag) +#else + , my_version_and_traits(default_flags) +#endif + , my_initialized(false) +#if __TBB_NUMA_SUPPORT + , my_numa_id(automatic) +#endif + {} + +#if __TBB_NUMA_SUPPORT + task_arena_base(const constraints& constraints_, unsigned reserved_for_masters) + : my_arena(0) +#if __TBB_TASK_GROUP_CONTEXT + , my_context(0) +#endif + , my_max_concurrency(constraints_.max_concurrency) + , my_master_slots(reserved_for_masters) + , my_version_and_traits(default_flags | numa_support_flag) + , my_initialized(false) + , my_numa_id(constraints_.numa_id ) + {} +#endif /*__TBB_NUMA_SUPPORT*/ + + void __TBB_EXPORTED_METHOD internal_initialize(); + void __TBB_EXPORTED_METHOD internal_terminate(); + void __TBB_EXPORTED_METHOD internal_attach(); + void __TBB_EXPORTED_METHOD internal_enqueue( task&, intptr_t ) const; + void __TBB_EXPORTED_METHOD internal_execute( delegate_base& ) const; + void __TBB_EXPORTED_METHOD internal_wait() const; + static int __TBB_EXPORTED_FUNC internal_current_slot(); + static int __TBB_EXPORTED_FUNC internal_max_concurrency( const task_arena * ); +public: + //! Typedef for number of threads that is automatic. + static const int automatic = -1; + static const int not_initialized = -2; + +}; + +#if __TBB_TASK_ISOLATION +void __TBB_EXPORTED_FUNC isolate_within_arena( delegate_base& d, intptr_t isolation = 0 ); + +template +R isolate_impl(F& f) { + delegated_function d(f); + isolate_within_arena(d); + return d.consume_result(); +} +#endif /* __TBB_TASK_ISOLATION */ +} // namespace internal +//! @endcond + +/** 1-to-1 proxy representation class of scheduler's arena + * Constructors set up settings only, real construction is deferred till the first method invocation + * Destructor only removes one of the references to the inner arena representation. + * Final destruction happens when all the references (and the work) are gone. + */ +class task_arena : public internal::task_arena_base { + friend class tbb::internal::task_scheduler_observer_v3; + friend void task::enqueue(task&, task_arena& +#if __TBB_TASK_PRIORITY + , priority_t +#endif + ); + friend int tbb::this_task_arena::max_concurrency(); + void mark_initialized() { + __TBB_ASSERT( my_arena, "task_arena initialization is incomplete" ); +#if __TBB_TASK_GROUP_CONTEXT + __TBB_ASSERT( my_context, "task_arena initialization is incomplete" ); +#endif +#if TBB_USE_THREADING_TOOLS + // Actual synchronization happens in internal_initialize & internal_attach. 
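// Illustrative sketch only (not part of this patch), assuming a C++11 compiler: typical
// use of the task_arena class this header goes on to define -- create an arena with a
// bounded concurrency level and run a parallel loop inside it via execute(). The names
// run_in_small_arena, arena and results are hypothetical.
#include "tbb/task_arena.h"
#include "tbb/parallel_for.h"
#include <cstddef>
#include <vector>

void run_in_small_arena(std::vector<double>& results) {
    tbb::task_arena arena(2);            // at most 2 threads; 1 slot reserved for the calling thread
    arena.execute([&] {                  // join the arena and run the functor there
        tbb::parallel_for(std::size_t(0), results.size(), [&](std::size_t i) {
            results[i] = 0.5 * static_cast<double>(i);
        });
    });
}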
+ // The race on setting my_initialized is benign, but should be hidden from Intel(R) Inspector + internal::as_atomic(my_initialized).fetch_and_store(true); +#else + my_initialized = true; +#endif + } + + template + void enqueue_impl( __TBB_FORWARDING_REF(F) f +#if __TBB_TASK_PRIORITY + , priority_t p = priority_t(0) +#endif + ) { +#if !__TBB_TASK_PRIORITY + intptr_t p = 0; +#endif + initialize(); +#if __TBB_TASK_GROUP_CONTEXT + internal_enqueue(*new(task::allocate_root(*my_context)) internal::function_task< typename internal::strip::type >(internal::forward(f)), p); +#else + internal_enqueue(*new(task::allocate_root()) internal::function_task< typename internal::strip::type >(internal::forward(f)), p); +#endif /* __TBB_TASK_GROUP_CONTEXT */ + } + + template + R execute_impl(F& f) { + initialize(); + internal::delegated_function d(f); + internal_execute(d); + return d.consume_result(); + } + +public: + //! Creates task_arena with certain concurrency limits + /** Sets up settings only, real construction is deferred till the first method invocation + * @arg max_concurrency specifies total number of slots in arena where threads work + * @arg reserved_for_masters specifies number of slots to be used by master threads only. + * Value of 1 is default and reflects behavior of implicit arenas. + **/ + task_arena(int max_concurrency_ = automatic, unsigned reserved_for_masters = 1) + : task_arena_base(max_concurrency_, reserved_for_masters) + {} + +#if __TBB_NUMA_SUPPORT + //! Creates task arena pinned to certain NUMA node + task_arena(const constraints& constraints_, unsigned reserved_for_masters = 1) + : task_arena_base(constraints_, reserved_for_masters) + {} + + //! Copies settings from another task_arena + task_arena(const task_arena &s) // copy settings but not the reference or instance + : task_arena_base(constraints(s.my_numa_id, s.my_max_concurrency), s.my_master_slots) + {} +#else + //! Copies settings from another task_arena + task_arena(const task_arena &s) // copy settings but not the reference or instance + : task_arena_base(s.my_max_concurrency, s.my_master_slots) + {} +#endif /*__TBB_NUMA_SUPPORT*/ + + //! Tag class used to indicate the "attaching" constructor + struct attach {}; + + //! Creates an instance of task_arena attached to the current arena of the thread + explicit task_arena( attach ) + : task_arena_base(automatic, 1) // use default settings if attach fails + { + internal_attach(); + if( my_arena ) my_initialized = true; + } + + //! Forces allocation of the resources for the task_arena as specified in constructor arguments + inline void initialize() { + if( !my_initialized ) { + internal_initialize(); + mark_initialized(); + } + } + + //! 
Overrides concurrency level and forces initialization of internal representation + inline void initialize(int max_concurrency_, unsigned reserved_for_masters = 1) { + // TODO: decide if this call must be thread-safe + __TBB_ASSERT(!my_arena, "Impossible to modify settings of an already initialized task_arena"); + if( !my_initialized ) { + my_max_concurrency = max_concurrency_; + my_master_slots = reserved_for_masters; + initialize(); + } + } + +#if __TBB_NUMA_SUPPORT + inline void initialize(constraints constraints_, unsigned reserved_for_masters = 1) { + // TODO: decide if this call must be thread-safe + __TBB_ASSERT(!my_arena, "Impossible to modify settings of an already initialized task_arena"); + if( !my_initialized ) { + my_numa_id = constraints_.numa_id; + my_max_concurrency = constraints_.max_concurrency; + my_master_slots = reserved_for_masters; + initialize(); + } + } +#endif /*__TBB_NUMA_SUPPORT*/ + + //! Attaches this instance to the current arena of the thread + inline void initialize(attach) { + // TODO: decide if this call must be thread-safe + __TBB_ASSERT(!my_arena, "Impossible to modify settings of an already initialized task_arena"); + if( !my_initialized ) { + internal_attach(); + if ( !my_arena ) internal_initialize(); + mark_initialized(); + } + } + + //! Removes the reference to the internal arena representation. + //! Not thread safe wrt concurrent invocations of other methods. + inline void terminate() { + if( my_initialized ) { + internal_terminate(); + my_initialized = false; + } + } + + //! Removes the reference to the internal arena representation, and destroys the external object. + //! Not thread safe wrt concurrent invocations of other methods. + ~task_arena() { + terminate(); + } + + //! Returns true if the arena is active (initialized); false otherwise. + //! The name was chosen to match a task_scheduler_init method with the same semantics. + bool is_active() const { return my_initialized; } + + //! Enqueues a task into the arena to process a functor, and immediately returns. + //! Does not require the calling thread to join the arena + +#if __TBB_CPP11_RVALUE_REF_PRESENT + template + void enqueue( F&& f ) { + enqueue_impl(std::forward(f)); + } +#else + template + void enqueue( const F& f ) { + enqueue_impl(f); + } +#endif + +#if __TBB_TASK_PRIORITY + //! Enqueues a task with priority p into the arena to process a functor f, and immediately returns. + //! Does not require the calling thread to join the arena + template +#if __TBB_CPP11_RVALUE_REF_PRESENT + __TBB_DEPRECATED void enqueue( F&& f, priority_t p ) { +#if __TBB_PREVIEW_CRITICAL_TASKS + __TBB_ASSERT(p == priority_low || p == priority_normal || p == priority_high + || p == internal::priority_critical, "Invalid priority level value"); +#else + __TBB_ASSERT(p == priority_low || p == priority_normal || p == priority_high, "Invalid priority level value"); +#endif + enqueue_impl(std::forward(f), p); + } +#else + __TBB_DEPRECATED void enqueue( const F& f, priority_t p ) { +#if __TBB_PREVIEW_CRITICAL_TASKS + __TBB_ASSERT(p == priority_low || p == priority_normal || p == priority_high + || p == internal::priority_critical, "Invalid priority level value"); +#else + __TBB_ASSERT(p == priority_low || p == priority_normal || p == priority_high, "Invalid priority level value"); +#endif + enqueue_impl(f,p); + } +#endif +#endif// __TBB_TASK_PRIORITY + + //! Joins the arena and executes a mutable functor, then returns + //! 
If not possible to join, wraps the functor into a task, enqueues it and waits for task completion + //! Can decrement the arena demand for workers, causing a worker to leave and free a slot to the calling thread + //! Since C++11, the method returns the value returned by functor (prior to C++11 it returns void). + template + typename internal::return_type_or_void::type execute(F& f) { + return execute_impl::type>(f); + } + + //! Joins the arena and executes a constant functor, then returns + //! If not possible to join, wraps the functor into a task, enqueues it and waits for task completion + //! Can decrement the arena demand for workers, causing a worker to leave and free a slot to the calling thread + //! Since C++11, the method returns the value returned by functor (prior to C++11 it returns void). + template + typename internal::return_type_or_void::type execute(const F& f) { + return execute_impl::type>(f); + } + +#if __TBB_EXTRA_DEBUG + //! Wait for all work in the arena to be completed + //! Even submitted by other application threads + //! Joins arena if/when possible (in the same way as execute()) + void debug_wait_until_empty() { + initialize(); + internal_wait(); + } +#endif //__TBB_EXTRA_DEBUG + + //! Returns the index, aka slot number, of the calling thread in its current arena + //! This method is deprecated and replaced with this_task_arena::current_thread_index() + inline static int current_thread_index() { + return internal_current_slot(); + } + + //! Returns the maximal number of threads that can work inside the arena + inline int max_concurrency() const { + // Handle special cases inside the library + return (my_max_concurrency>1) ? my_max_concurrency : internal_max_concurrency(this); + } +}; + +namespace this_task_arena { +#if __TBB_TASK_ISOLATION + //! Executes a mutable functor in isolation within the current task arena. + //! Since C++11, the method returns the value returned by functor (prior to C++11 it returns void). + template + typename internal::return_type_or_void::type isolate(F& f) { + return internal::isolate_impl::type>(f); + } + + //! Executes a constant functor in isolation within the current task arena. + //! Since C++11, the method returns the value returned by functor (prior to C++11 it returns void). + template + typename internal::return_type_or_void::type isolate(const F& f) { + return internal::isolate_impl::type>(f); + } +#endif /* __TBB_TASK_ISOLATION */ +} // namespace this_task_arena +} // namespace interfaceX + +using interface7::task_arena; + +namespace this_task_arena { + using namespace interface7::this_task_arena; + + //! Returns the index, aka slot number, of the calling thread in its current arena + inline int current_thread_index() { + int idx = tbb::task_arena::current_thread_index(); + return idx == -1 ? tbb::task_arena::not_initialized : idx; + } + + //! Returns the maximal number of threads that can work inside the arena + inline int max_concurrency() { + return tbb::task_arena::internal_max_concurrency(NULL); + } +} // namespace this_task_arena + +//! Enqueue task in task_arena +#if __TBB_TASK_PRIORITY +void task::enqueue( task& t, task_arena& arena, priority_t p ) { +#else +void task::enqueue( task& t, task_arena& arena ) { + intptr_t p = 0; +#endif + arena.initialize(); + //! 
Note: the context of the task may differ from the context instantiated by task_arena + arena.internal_enqueue(t, p); +} +} // namespace tbb + +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_task_arena_H_include_area + +#endif /* __TBB_task_arena_H */ diff --git a/ohos/arm64-v8a/include/tbb/task_group.h b/ohos/arm64-v8a/include/tbb/task_group.h new file mode 100644 index 00000000..f090d3a2 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/task_group.h @@ -0,0 +1,366 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_task_group_H +#define __TBB_task_group_H + +#define __TBB_task_group_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + +#include "task.h" +#include "tbb_exception.h" +#include "internal/_template_helpers.h" +#if TBB_PREVIEW_ISOLATED_TASK_GROUP && __TBB_TASK_ISOLATION +#include "task_arena.h" +#endif + +#if __TBB_TASK_GROUP_CONTEXT + +namespace tbb { + +namespace internal { + template class task_handle_task; +} + +class task_group; +class structured_task_group; +#if TBB_PREVIEW_ISOLATED_TASK_GROUP && __TBB_TASK_ISOLATION +class isolated_task_group; +#endif + +template +class task_handle : internal::no_assign { + template friend class internal::task_handle_task; + friend class task_group; + friend class structured_task_group; +#if TBB_PREVIEW_ISOLATED_TASK_GROUP && __TBB_TASK_ISOLATION + friend class isolated_task_group; +#endif + + static const intptr_t scheduled = 0x1; + + F my_func; + intptr_t my_state; + + void mark_scheduled () { + // The check here is intentionally lax to avoid the impact of interlocked operation + if ( my_state & scheduled ) + internal::throw_exception( internal::eid_invalid_multiple_scheduling ); + my_state |= scheduled; + } +public: + task_handle( const F& f ) : my_func(f), my_state(0) {} +#if __TBB_CPP11_RVALUE_REF_PRESENT + task_handle( F&& f ) : my_func( std::move(f)), my_state(0) {} +#endif + + void operator() () const { my_func(); } +}; + +enum task_group_status { + not_complete, + complete, + canceled +}; + +namespace internal { + +template +class task_handle_task : public task { + task_handle& my_handle; + task* execute() __TBB_override { + my_handle(); + return NULL; + } +public: + task_handle_task( task_handle& h ) : my_handle(h) { h.mark_scheduled(); } +}; + +class task_group_base : internal::no_copy { + class ref_count_guard : internal::no_copy { + task& my_task; + public: + ref_count_guard(task& t) : my_task(t) { + my_task.increment_ref_count(); + } + ~ref_count_guard() { + my_task.decrement_ref_count(); + } + }; +protected: + empty_task* my_root; + task_group_context my_context; + + template + task_group_status internal_run_and_wait( F& f ) { + __TBB_TRY { + if ( !my_context.is_group_execution_cancelled() ) { + // We need to increase the reference count of the root task to notify waiters that + // this task group has some work in progress. + ref_count_guard guard(*my_root); + f(); + } + } __TBB_CATCH( ... 
) { + my_context.register_pending_exception(); + } + return wait(); + } + + template + task* prepare_task( __TBB_FORWARDING_REF(F) f ) { + return new( task::allocate_additional_child_of(*my_root) ) Task( internal::forward(f) ); + } + +public: + task_group_base( uintptr_t traits = 0 ) + : my_context(task_group_context::bound, task_group_context::default_traits | traits) + { + my_root = new( task::allocate_root(my_context) ) empty_task; + my_root->set_ref_count(1); + } + + ~task_group_base() __TBB_NOEXCEPT(false) { + if( my_root->ref_count() > 1 ) { +#if __TBB_CPP17_UNCAUGHT_EXCEPTIONS_PRESENT + bool stack_unwinding_in_progress = std::uncaught_exceptions() > 0; +#else + bool stack_unwinding_in_progress = std::uncaught_exception(); +#endif + // Always attempt to do proper cleanup to avoid inevitable memory corruption + // in case of missing wait (for the sake of better testability & debuggability) + if ( !is_canceling() ) + cancel(); + __TBB_TRY { + my_root->wait_for_all(); + } __TBB_CATCH (...) { + task::destroy(*my_root); + __TBB_RETHROW(); + } + task::destroy(*my_root); + if ( !stack_unwinding_in_progress ) + internal::throw_exception( internal::eid_missing_wait ); + } + else { + task::destroy(*my_root); + } + } + + template + void run( task_handle& h ) { + task::spawn( *prepare_task< internal::task_handle_task >(h) ); + } + + task_group_status wait() { + __TBB_TRY { + my_root->wait_for_all(); + } __TBB_CATCH( ... ) { + my_context.reset(); + __TBB_RETHROW(); + } + if ( my_context.is_group_execution_cancelled() ) { + // TODO: the reset method is not thread-safe. Ensure the correct behavior. + my_context.reset(); + return canceled; + } + return complete; + } + + bool is_canceling() { + return my_context.is_group_execution_cancelled(); + } + + void cancel() { + my_context.cancel_group_execution(); + } +}; // class task_group_base + +} // namespace internal + +class task_group : public internal::task_group_base { +public: + task_group () : task_group_base( task_group_context::concurrent_wait ) {} + +#if __SUNPRO_CC + template + void run( task_handle& h ) { + internal_run< internal::task_handle_task >( h ); + } +#else + using task_group_base::run; +#endif + +#if __TBB_CPP11_RVALUE_REF_PRESENT + template + void run( F&& f ) { + task::spawn( *prepare_task< internal::function_task< typename internal::strip::type > >(std::forward(f)) ); + } +#else + template + void run(const F& f) { + task::spawn( *prepare_task< internal::function_task >(f) ); + } +#endif + + template + task_group_status run_and_wait( const F& f ) { + return internal_run_and_wait( f ); + } + + // TODO: add task_handle rvalues support + template + task_group_status run_and_wait( task_handle& h ) { + h.mark_scheduled(); + return internal_run_and_wait< task_handle >( h ); + } +}; // class task_group + +class __TBB_DEPRECATED structured_task_group : public internal::task_group_base { +public: + // TODO: add task_handle rvalues support + template + task_group_status run_and_wait ( task_handle& h ) { + h.mark_scheduled(); + return internal_run_and_wait< task_handle >( h ); + } + + task_group_status wait() { + task_group_status res = task_group_base::wait(); + my_root->set_ref_count(1); + return res; + } +}; // class structured_task_group + +#if TBB_PREVIEW_ISOLATED_TASK_GROUP && __TBB_TASK_ISOLATION +namespace internal { + using interface7::internal::delegate_base; + using interface7::internal::isolate_within_arena; + + class spawn_delegate : public delegate_base { + task* task_to_spawn; + void operator()() const __TBB_override { + 
task::spawn(*task_to_spawn); + } + public: + spawn_delegate(task* a_task) : task_to_spawn(a_task) {} + }; + + class wait_delegate : public delegate_base { + void operator()() const __TBB_override { + status = tg.wait(); + } + protected: + task_group& tg; + task_group_status& status; + public: + wait_delegate(task_group& a_group, task_group_status& tgs) + : tg(a_group), status(tgs) {} + }; + + template + class run_wait_delegate : public wait_delegate { + F& func; + void operator()() const __TBB_override { + status = tg.run_and_wait( func ); + } + public: + run_wait_delegate(task_group& a_group, F& a_func, task_group_status& tgs) + : wait_delegate(a_group, tgs), func(a_func) {} + }; +} // namespace internal + +class isolated_task_group : public task_group { + intptr_t this_isolation() { + return reinterpret_cast(this); + } +public: + isolated_task_group () : task_group() {} + +#if __TBB_CPP11_RVALUE_REF_PRESENT + template + void run( F&& f ) { + internal::spawn_delegate sd( + prepare_task< internal::function_task< typename internal::strip::type > >(std::forward(f)) + ); + internal::isolate_within_arena( sd, this_isolation() ); + } +#else + template + void run(const F& f) { + internal::spawn_delegate sd( prepare_task< internal::function_task >(f) ); + internal::isolate_within_arena( sd, this_isolation() ); + } +#endif + + template + task_group_status run_and_wait( const F& f ) { + task_group_status result = not_complete; + internal::run_wait_delegate< const F > rwd( *this, f, result ); + internal::isolate_within_arena( rwd, this_isolation() ); + __TBB_ASSERT( result!=not_complete, "premature exit from wait?" ); + return result; + } + + // TODO: add task_handle rvalues support + template + void run( task_handle& h ) { + internal::spawn_delegate sd( prepare_task< internal::task_handle_task >(h) ); + internal::isolate_within_arena( sd, this_isolation() ); + } + + template + task_group_status run_and_wait ( task_handle& h ) { + task_group_status result = not_complete; + internal::run_wait_delegate< task_handle > rwd( *this, h, result ); + internal::isolate_within_arena( rwd, this_isolation() ); + __TBB_ASSERT( result!=not_complete, "premature exit from wait?" ); + return result; + } + + task_group_status wait() { + task_group_status result = not_complete; + internal::wait_delegate wd( *this, result ); + internal::isolate_within_arena( wd, this_isolation() ); + __TBB_ASSERT( result!=not_complete, "premature exit from wait?" 
); + return result; + } +}; // class isolated_task_group +#endif // TBB_PREVIEW_ISOLATED_TASK_GROUP && __TBB_TASK_ISOLATION + +inline +bool is_current_task_group_canceling() { + return task::self().is_cancelled(); +} + +#if __TBB_CPP11_RVALUE_REF_PRESENT +template +task_handle< typename internal::strip::type > make_task( F&& f ) { + return task_handle< typename internal::strip::type >( std::forward(f) ); +} +#else +template +task_handle make_task( const F& f ) { + return task_handle( f ); +} +#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */ + +} // namespace tbb + +#endif /* __TBB_TASK_GROUP_CONTEXT */ + +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_task_group_H_include_area + +#endif /* __TBB_task_group_H */ diff --git a/ohos/arm64-v8a/include/tbb/task_scheduler_init.h b/ohos/arm64-v8a/include/tbb/task_scheduler_init.h new file mode 100644 index 00000000..c025454b --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/task_scheduler_init.h @@ -0,0 +1,174 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#include "internal/_deprecated_header_message_guard.h" + +#if !defined(__TBB_show_deprecation_message_task_scheduler_init_H) && defined(__TBB_show_deprecated_header_message) +#define __TBB_show_deprecation_message_task_scheduler_init_H +#pragma message("TBB Warning: tbb/task_scheduler_init.h is deprecated. For details, please see Deprecated Features appendix in the TBB reference manual.") +#endif + +#if defined(__TBB_show_deprecated_header_message) +#undef __TBB_show_deprecated_header_message +#endif + +#ifndef __TBB_task_scheduler_init_H +#define __TBB_task_scheduler_init_H + +#define __TBB_task_scheduler_init_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + +#include "tbb_stddef.h" +#include "limits.h" +#if __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE +#include // nothrow_t +#endif + +namespace tbb { + +typedef std::size_t stack_size_type; + +//! @cond INTERNAL +namespace internal { + //! Internal to library. Should not be used by clients. + /** @ingroup task_scheduling */ + class scheduler; +} // namespace internal +//! @endcond + +//! Class delimiting the scope of task scheduler activity. +/** A thread can construct a task_scheduler_init object and keep it alive + while it uses TBB's tasking subsystem (including parallel algorithms). + + This class allows to customize properties of the TBB task pool to some extent. + For example it can limit concurrency level of parallel work initiated by the + given thread. It also can be used to specify stack size of the TBB worker threads, + though this setting is not effective if the thread pool has already been created. + + If a parallel construct is used without task_scheduler_init object previously + created, the scheduler will be initialized automatically with default settings, + and will persist until this thread exits. Default concurrency level is defined + as described in task_scheduler_init::initialize(). 
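// Illustrative sketch only (not part of this patch), assuming a C++11 compiler: typical
// use of the task_group interface declared above in task_group.h -- run two functors
// concurrently and wait for both. sum_two_halves, g, left and right are hypothetical names.
#include "tbb/task_group.h"

int sum_two_halves() {
    int left = 0, right = 0;
    tbb::task_group g;
    g.run([&] { for (int i = 0;  i < 50;  ++i) left  += i; });  // spawned into the group
    g.run([&] { for (int i = 50; i < 100; ++i) right += i; });  // spawned into the group
    if (g.wait() == tbb::canceled)       // blocks until both functors finish or the group is cancelled
        return -1;
    return left + right;
}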
+ @ingroup task_scheduling */ +class __TBB_DEPRECATED_IN_VERBOSE_MODE task_scheduler_init: internal::no_copy { + enum ExceptionPropagationMode { + propagation_mode_exact = 1u, + propagation_mode_captured = 2u, + propagation_mode_mask = propagation_mode_exact | propagation_mode_captured + }; + + /** NULL if not currently initialized. */ + internal::scheduler* my_scheduler; + + bool internal_terminate( bool blocking ); +#if __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE + bool __TBB_EXPORTED_METHOD internal_blocking_terminate( bool throwing ); +#endif +public: + + //! Typedef for number of threads that is automatic. + static const int automatic = -1; + + //! Argument to initialize() or constructor that causes initialization to be deferred. + static const int deferred = -2; + + //! Ensure that scheduler exists for this thread + /** A value of -1 lets TBB decide on the number of threads, which is usually + maximal hardware concurrency for this process, that is the number of logical + CPUs on the machine (possibly limited by the processor affinity mask of this + process (Windows) or of this thread (Linux, FreeBSD). It is preferable option + for production code because it helps to avoid nasty surprises when several + TBB based components run side-by-side or in a nested fashion inside the same + process. + + The number_of_threads is ignored if any other task_scheduler_inits + currently exist. A thread may construct multiple task_scheduler_inits. + Doing so does no harm because the underlying scheduler is reference counted. */ + void __TBB_EXPORTED_METHOD initialize( int number_of_threads=automatic ); + + //! The overloaded method with stack size parameter + /** Overloading is necessary to preserve ABI compatibility */ + void __TBB_EXPORTED_METHOD initialize( int number_of_threads, stack_size_type thread_stack_size ); + + //! Inverse of method initialize. + void __TBB_EXPORTED_METHOD terminate(); + +#if __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE +#if TBB_USE_EXCEPTIONS + //! terminate() that waits for worker threads termination. Throws exception on error. + void blocking_terminate() { + internal_blocking_terminate( /*throwing=*/true ); + } +#endif + //! terminate() that waits for worker threads termination. Returns false on error. + bool blocking_terminate(const std::nothrow_t&) __TBB_NOEXCEPT(true) { + return internal_blocking_terminate( /*throwing=*/false ); + } +#endif // __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE + + //! Shorthand for default constructor followed by call to initialize(number_of_threads). + task_scheduler_init( int number_of_threads=automatic, stack_size_type thread_stack_size=0 ) : my_scheduler(NULL) + { + // Two lowest order bits of the stack size argument may be taken to communicate + // default exception propagation mode of the client to be used when the + // client manually creates tasks in the master thread and does not use + // explicit task group context object. This is necessary because newer + // TBB binaries with exact propagation enabled by default may be used + // by older clients that expect tbb::captured_exception wrapper. + // All zeros mean old client - no preference. + __TBB_ASSERT( !(thread_stack_size & propagation_mode_mask), "Requested stack size is not aligned" ); +#if TBB_USE_EXCEPTIONS + thread_stack_size |= TBB_USE_CAPTURED_EXCEPTION ? propagation_mode_captured : propagation_mode_exact; +#endif /* TBB_USE_EXCEPTIONS */ + initialize( number_of_threads, thread_stack_size ); + } + + //! 
Destroy scheduler for this thread if thread has no other live task_scheduler_inits. + ~task_scheduler_init() { + if( my_scheduler ) + terminate(); + internal::poison_pointer( my_scheduler ); + } + //! Returns the number of threads TBB scheduler would create if initialized by default. + /** Result returned by this method does not depend on whether the scheduler + has already been initialized. + + Because tbb 2.0 does not support blocking tasks yet, you may use this method + to boost the number of threads in the tbb's internal pool, if your tasks are + doing I/O operations. The optimal number of additional threads depends on how + much time your tasks spend in the blocked state. + + Before TBB 3.0 U4 this method returned the number of logical CPU in the + system. Currently on Windows, Linux and FreeBSD it returns the number of + logical CPUs available to the current process in accordance with its affinity + mask. + + NOTE: The return value of this method never changes after its first invocation. + This means that changes in the process affinity mask that took place after + this method was first invoked will not affect the number of worker threads + in the TBB worker threads pool. */ + static int __TBB_EXPORTED_FUNC default_num_threads (); + + //! Returns true if scheduler is active (initialized); false otherwise + bool is_active() const { return my_scheduler != NULL; } +}; + +} // namespace tbb + +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_task_scheduler_init_H_include_area + +#endif /* __TBB_task_scheduler_init_H */ diff --git a/ohos/arm64-v8a/include/tbb/task_scheduler_observer.h b/ohos/arm64-v8a/include/tbb/task_scheduler_observer.h new file mode 100644 index 00000000..1bb93636 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/task_scheduler_observer.h @@ -0,0 +1,166 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_task_scheduler_observer_H +#define __TBB_task_scheduler_observer_H + +#define __TBB_task_scheduler_observer_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + +#include "atomic.h" +#if __TBB_ARENA_OBSERVER +#include "task_arena.h" +#endif + +#if __TBB_SCHEDULER_OBSERVER + +namespace tbb { +namespace interface6 { +class task_scheduler_observer; +} +namespace internal { + +class observer_proxy; +class observer_list; + +class task_scheduler_observer_v3 { + friend class observer_proxy; + friend class observer_list; + friend class interface6::task_scheduler_observer; + + //! Pointer to the proxy holding this observer. + /** Observers are proxied by the scheduler to maintain persistent lists of them. **/ + observer_proxy* my_proxy; + + //! Counter preventing the observer from being destroyed while in use by the scheduler. + /** Valid only when observation is on. **/ + atomic my_busy_count; + +public: + //! Enable or disable observation + /** For local observers the method can be used only when the current thread + has the task scheduler initialized or is attached to an arena. 
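// Illustrative sketch only (not part of this patch): typical use of the deprecated
// task_scheduler_init class declared just above -- pinning the concurrency level and
// worker stack size for parallel work launched from this thread. limit_worker_threads
// is a hypothetical name; 4 threads and 16 MB are example values.
#include "tbb/task_scheduler_init.h"

void limit_worker_threads() {
    tbb::task_scheduler_init init(4, 16 * 1024 * 1024); // 4 threads, 16 MB worker stacks
    // ... parallel algorithms invoked from this thread now use at most 4 threads ...
}   // destructor releases this thread's reference to the scheduler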
+ + Repeated calls with the same state are no-ops. **/ + void __TBB_EXPORTED_METHOD observe( bool state=true ); + + //! Returns true if observation is enabled, false otherwise. + bool is_observing() const {return my_proxy!=NULL;} + + //! Construct observer with observation disabled. + task_scheduler_observer_v3() : my_proxy(NULL) { my_busy_count.store(0); } + + //! Entry notification + /** Invoked from inside observe(true) call and whenever a worker enters the arena + this observer is associated with. If a thread is already in the arena when + the observer is activated, the entry notification is called before it + executes the first stolen task. + + Obsolete semantics. For global observers it is called by a thread before + the first steal since observation became enabled. **/ + virtual void on_scheduler_entry( bool /*is_worker*/ ) {} + + //! Exit notification + /** Invoked from inside observe(false) call and whenever a worker leaves the + arena this observer is associated with. + + Obsolete semantics. For global observers it is called by a thread before + the first steal since observation became enabled. **/ + virtual void on_scheduler_exit( bool /*is_worker*/ ) {} + + //! Destructor automatically switches observation off if it is enabled. + virtual ~task_scheduler_observer_v3() { if(my_proxy) observe(false);} +}; + +} // namespace internal + +#if __TBB_ARENA_OBSERVER +namespace interface6 { +class task_scheduler_observer : public internal::task_scheduler_observer_v3 { + friend class internal::task_scheduler_observer_v3; + friend class internal::observer_proxy; + friend class internal::observer_list; + + /** Negative numbers with the largest absolute value to minimize probability + of coincidence in case of a bug in busy count usage. **/ + // TODO: take more high bits for version number + static const intptr_t v6_trait = (intptr_t)((~(uintptr_t)0 >> 1) + 1); + + //! contains task_arena pointer or tag indicating local or global semantics of the observer + intptr_t my_context_tag; + enum { global_tag = 0, implicit_tag = 1 }; + +public: + //! Construct local or global observer in inactive state (observation disabled). + /** For a local observer entry/exit notifications are invoked whenever a worker + thread joins/leaves the arena of the observer's owner thread. If a thread is + already in the arena when the observer is activated, the entry notification is + called before it executes the first stolen task. **/ + /** TODO: Obsolete. + Global observer semantics is obsolete as it violates master thread isolation + guarantees and is not composable. Thus the current default behavior of the + constructor is obsolete too and will be changed in one of the future versions + of the library. **/ + explicit task_scheduler_observer( bool local = false ) { +#if __TBB_ARENA_OBSERVER + my_context_tag = local? implicit_tag : global_tag; +#else + __TBB_ASSERT_EX( !local, NULL ); + my_context_tag = global_tag; +#endif + } + +#if __TBB_ARENA_OBSERVER + //! Construct local observer for a given arena in inactive state (observation disabled). + /** entry/exit notifications are invoked whenever a thread joins/leaves arena. + If a thread is already in the arena when the observer is activated, the entry notification + is called before it executes the first stolen task. **/ + explicit task_scheduler_observer( task_arena & a) { + my_context_tag = (intptr_t)&a; + } +#endif /* __TBB_ARENA_OBSERVER */ + + /** Destructor protects instance of the observer from concurrent notification. 
+ It is recommended to disable observation before destructor of a derived class starts, + otherwise it can lead to concurrent notification callback on partly destroyed object **/ + virtual ~task_scheduler_observer() { if(my_proxy) observe(false); } + + //! Enable or disable observation + /** Warning: concurrent invocations of this method are not safe. + Repeated calls with the same state are no-ops. **/ + void observe( bool state=true ) { + if( state && !my_proxy ) { + __TBB_ASSERT( !my_busy_count, "Inconsistent state of task_scheduler_observer instance"); + my_busy_count.store(v6_trait); + } + internal::task_scheduler_observer_v3::observe(state); + } +}; + +} //namespace interface6 +using interface6::task_scheduler_observer; +#else /*__TBB_ARENA_OBSERVER*/ +typedef tbb::internal::task_scheduler_observer_v3 task_scheduler_observer; +#endif /*__TBB_ARENA_OBSERVER*/ + +} // namespace tbb + +#endif /* __TBB_SCHEDULER_OBSERVER */ + +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_task_scheduler_observer_H_include_area + +#endif /* __TBB_task_scheduler_observer_H */ diff --git a/ohos/arm64-v8a/include/tbb/tbb.h b/ohos/arm64-v8a/include/tbb/tbb.h new file mode 100644 index 00000000..f06ec5a3 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/tbb.h @@ -0,0 +1,97 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_tbb_H +#define __TBB_tbb_H + +#if (!defined(TBB_SUPPRESS_DEPRECATED_MESSAGES) || (TBB_SUPPRESS_DEPRECATED_MESSAGES == 0)) && !defined(__TBB_INTERNAL_INCLUDES_DEPRECATION_MESSAGE) +#pragma message("TBB Warning: tbb.h contains deprecated functionality. For details, please see Deprecated Features appendix in the TBB reference manual.") +#endif + +#define __TBB_tbb_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + +/** + This header bulk-includes declarations or definitions of all the functionality + provided by TBB (save for tbbmalloc and 3rd party dependent headers). + + If you use only a few TBB constructs, consider including specific headers only. + Any header listed below can be included independently of others. 
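// Illustrative sketch only (not part of this patch): a task_scheduler_observer subclass,
// as declared earlier in task_scheduler_observer.h, that counts the threads currently
// participating in TBB work. thread_counter and num_threads are hypothetical names; the
// default constructor gives the (obsolete) global-observer semantics described above.
#include "tbb/task_scheduler_observer.h"
#include "tbb/atomic.h"

class thread_counter : public tbb::task_scheduler_observer {
    tbb::atomic<int> num_threads;
public:
    thread_counter() { num_threads = 0; observe(true); }          // start receiving notifications
    ~thread_counter() { observe(false); }                         // disable before members are destroyed
    void on_scheduler_entry(bool /*is_worker*/) __TBB_override { ++num_threads; }
    void on_scheduler_exit(bool /*is_worker*/) __TBB_override { --num_threads; }
    int count() const { return num_threads; }
};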
+**/ + +#if TBB_PREVIEW_AGGREGATOR +#include "aggregator.h" +#endif +#include "aligned_space.h" +#include "atomic.h" +#include "blocked_range.h" +#include "blocked_range2d.h" +#include "blocked_range3d.h" +#if TBB_PREVIEW_BLOCKED_RANGE_ND +#include "blocked_rangeNd.h" +#endif +#include "cache_aligned_allocator.h" +#include "combinable.h" +#include "concurrent_hash_map.h" +#if TBB_PREVIEW_CONCURRENT_LRU_CACHE +#include "concurrent_lru_cache.h" +#endif +#include "concurrent_priority_queue.h" +#include "concurrent_queue.h" +#include "concurrent_unordered_map.h" +#include "concurrent_unordered_set.h" +#if TBB_PREVIEW_CONCURRENT_ORDERED_CONTAINERS +#include "concurrent_map.h" +#include "concurrent_set.h" +#endif +#include "concurrent_vector.h" +#include "critical_section.h" +#include "enumerable_thread_specific.h" +#include "flow_graph.h" +#include "global_control.h" +#include "iterators.h" +#include "mutex.h" +#include "null_mutex.h" +#include "null_rw_mutex.h" +#include "parallel_do.h" +#include "parallel_for.h" +#include "parallel_for_each.h" +#include "parallel_invoke.h" +#include "parallel_reduce.h" +#include "parallel_scan.h" +#include "parallel_sort.h" +#include "partitioner.h" +#include "pipeline.h" +#include "queuing_mutex.h" +#include "queuing_rw_mutex.h" +#include "reader_writer_lock.h" +#include "recursive_mutex.h" +#include "spin_mutex.h" +#include "spin_rw_mutex.h" +#include "task.h" +#include "task_arena.h" +#include "task_group.h" +#include "task_scheduler_init.h" +#include "task_scheduler_observer.h" +#include "tbb_allocator.h" +#include "tbb_exception.h" +#include "tbb_thread.h" +#include "tick_count.h" + +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_tbb_H_include_area + +#endif /* __TBB_tbb_H */ diff --git a/ohos/arm64-v8a/include/tbb/tbb_allocator.h b/ohos/arm64-v8a/include/tbb/tbb_allocator.h new file mode 100644 index 00000000..ac0d1a6b --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/tbb_allocator.h @@ -0,0 +1,203 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_tbb_allocator_H +#define __TBB_tbb_allocator_H + +#include "tbb_stddef.h" +#include +#if __TBB_ALLOCATOR_CONSTRUCT_VARIADIC + #include // std::forward +#endif +#include + +namespace tbb { + +//! @cond INTERNAL +namespace internal { + + //! Deallocates memory using FreeHandler + /** The function uses scalable_free if scalable allocator is available and free if not*/ + void __TBB_EXPORTED_FUNC deallocate_via_handler_v3( void *p ); + + //! Allocates memory using MallocHandler + /** The function uses scalable_malloc if scalable allocator is available and malloc if not*/ + void* __TBB_EXPORTED_FUNC allocate_via_handler_v3( size_t n ); + + //! Returns true if standard malloc/free are used to work with memory. + bool __TBB_EXPORTED_FUNC is_malloc_used_v3(); +} +//! @endcond + +#if _MSC_VER && !defined(__INTEL_COMPILER) + // Workaround for erroneous "unreferenced parameter" warning in method destroy. 
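[Editorial note, not part of the patch: tbb.h above recommends including specific headers instead of the bulk header, and tbb_allocator.h below declares tbb::tbb_allocator. A minimal usage sketch combining the two, assuming a C++11 lambda and the compact parallel_for overload; the container size and the doubling loop are arbitrary illustration.]

#include <cstddef>
#include <vector>
#include "tbb/parallel_for.h"    // included individually, as tbb.h itself suggests
#include "tbb/tbb_allocator.h"

int main() {
    // Vector storage comes from tbb_allocator: scalable_malloc when the TBB
    // malloc runtime is available, plain malloc otherwise.
    std::vector<int, tbb::tbb_allocator<int> > data(1000);

    // Compact parallel_for overload declared in parallel_for.h.
    tbb::parallel_for(std::size_t(0), data.size(), [&](std::size_t i) {
        data[i] = static_cast<int>(2 * i);
    });

    // allocator_type() reports which backend actually served the requests.
    return tbb::tbb_allocator<int>::allocator_type()
               == tbb::tbb_allocator<int>::scalable ? 0 : 1;
}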
+ #pragma warning (push) + #pragma warning (disable: 4100) +#endif + +//! Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5 +/** The class selects the best memory allocation mechanism available + from scalable_malloc and standard malloc. + The members are ordered the same way they are in section 20.4.1 + of the ISO C++ standard. + @ingroup memory_allocation */ +template +class tbb_allocator { +public: + typedef typename internal::allocator_type::value_type value_type; + typedef value_type* pointer; + typedef const value_type* const_pointer; + typedef value_type& reference; + typedef const value_type& const_reference; + typedef size_t size_type; + typedef ptrdiff_t difference_type; + template struct rebind { + typedef tbb_allocator other; + }; + + //! Specifies current allocator + enum malloc_type { + scalable, + standard + }; + + tbb_allocator() throw() {} + tbb_allocator( const tbb_allocator& ) throw() {} + template tbb_allocator(const tbb_allocator&) throw() {} + + pointer address(reference x) const {return &x;} + const_pointer address(const_reference x) const {return &x;} + + //! Allocate space for n objects. + pointer allocate( size_type n, const void* /*hint*/ = 0) { + return pointer(internal::allocate_via_handler_v3( n * sizeof(value_type) )); + } + + //! Free previously allocated block of memory. + void deallocate( pointer p, size_type ) { + internal::deallocate_via_handler_v3(p); + } + + //! Largest value for which method allocate might succeed. + size_type max_size() const throw() { + size_type max = static_cast(-1) / sizeof (value_type); + return (max > 0 ? max : 1); + } + + //! Copy-construct value at location pointed to by p. +#if __TBB_ALLOCATOR_CONSTRUCT_VARIADIC + template + void construct(U *p, Args&&... args) + { ::new((void *)p) U(std::forward(args)...); } +#else // __TBB_ALLOCATOR_CONSTRUCT_VARIADIC +#if __TBB_CPP11_RVALUE_REF_PRESENT + void construct( pointer p, value_type&& value ) {::new((void*)(p)) value_type(std::move(value));} +#endif + void construct( pointer p, const value_type& value ) {::new((void*)(p)) value_type(value);} +#endif // __TBB_ALLOCATOR_CONSTRUCT_VARIADIC + + //! Destroy value at location pointed to by p. + void destroy( pointer p ) {p->~value_type();} + + //! Returns current allocator + static malloc_type allocator_type() { + return internal::is_malloc_used_v3() ? standard : scalable; + } +}; + +#if _MSC_VER && !defined(__INTEL_COMPILER) + #pragma warning (pop) +#endif // warning 4100 is back + +//! Analogous to std::allocator, as defined in ISO C++ Standard, Section 20.4.1 +/** @ingroup memory_allocation */ +template<> +class tbb_allocator { +public: + typedef void* pointer; + typedef const void* const_pointer; + typedef void value_type; + template struct rebind { + typedef tbb_allocator other; + }; +}; + +template +inline bool operator==( const tbb_allocator&, const tbb_allocator& ) {return true;} + +template +inline bool operator!=( const tbb_allocator&, const tbb_allocator& ) {return false;} + +//! Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5 +/** The class is an adapter over an actual allocator that fills the allocation + using memset function with template argument C as the value. + The members are ordered the same way they are in section 20.4.1 + of the ISO C++ standard. 
+ @ingroup memory_allocation */ +template class Allocator = tbb_allocator> +class zero_allocator : public Allocator +{ +public: + typedef Allocator base_allocator_type; + typedef typename base_allocator_type::value_type value_type; + typedef typename base_allocator_type::pointer pointer; + typedef typename base_allocator_type::const_pointer const_pointer; + typedef typename base_allocator_type::reference reference; + typedef typename base_allocator_type::const_reference const_reference; + typedef typename base_allocator_type::size_type size_type; + typedef typename base_allocator_type::difference_type difference_type; + template struct rebind { + typedef zero_allocator other; + }; + + zero_allocator() throw() { } + zero_allocator(const zero_allocator &a) throw() : base_allocator_type( a ) { } + template + zero_allocator(const zero_allocator &a) throw() : base_allocator_type( Allocator( a ) ) { } + + pointer allocate(const size_type n, const void *hint = 0 ) { + pointer ptr = base_allocator_type::allocate( n, hint ); + std::memset( static_cast(ptr), 0, n * sizeof(value_type) ); + return ptr; + } +}; + +//! Analogous to std::allocator, as defined in ISO C++ Standard, Section 20.4.1 +/** @ingroup memory_allocation */ +template class Allocator> +class zero_allocator : public Allocator { +public: + typedef Allocator base_allocator_type; + typedef typename base_allocator_type::value_type value_type; + typedef typename base_allocator_type::pointer pointer; + typedef typename base_allocator_type::const_pointer const_pointer; + template struct rebind { + typedef zero_allocator other; + }; +}; + +template class B1, typename T2, template class B2> +inline bool operator==( const zero_allocator &a, const zero_allocator &b) { + return static_cast< B1 >(a) == static_cast< B2 >(b); +} +template class B1, typename T2, template class B2> +inline bool operator!=( const zero_allocator &a, const zero_allocator &b) { + return static_cast< B1 >(a) != static_cast< B2 >(b); +} + +} // namespace tbb + +#endif /* __TBB_tbb_allocator_H */ diff --git a/ohos/arm64-v8a/include/tbb/tbb_config.h b/ohos/arm64-v8a/include/tbb/tbb_config.h new file mode 100644 index 00000000..13d19f49 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/tbb_config.h @@ -0,0 +1,873 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_tbb_config_H +#define __TBB_tbb_config_H + +/** This header is supposed to contain macro definitions and C style comments only. + The macros defined here are intended to control such aspects of TBB build as + - presence of compiler features + - compilation modes + - feature sets + - known compiler/platform issues +**/ + +/* This macro marks incomplete code or comments describing ideas which are considered for the future. + * See also for plain comment with TODO and FIXME marks for small improvement opportunities. + */ +#define __TBB_TODO 0 + +/* Check which standard library we use. 
*/ +/* __TBB_SYMBOL is defined only while processing exported symbols list where C++ is not allowed. */ +#if !defined(__TBB_SYMBOL) && !__TBB_CONFIG_PREPROC_ONLY + #include +#endif + +// Note that when ICC or Clang is in use, __TBB_GCC_VERSION might not fully match +// the actual GCC version on the system. +#define __TBB_GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) + +// Prior to GCC 7, GNU libstdc++ did not have a convenient version macro. +// Therefore we use different ways to detect its version. +#if defined(TBB_USE_GLIBCXX_VERSION) && !defined(_GLIBCXX_RELEASE) +// The version is explicitly specified in our public TBB_USE_GLIBCXX_VERSION macro. +// Its format should match the __TBB_GCC_VERSION above, e.g. 70301 for libstdc++ coming with GCC 7.3.1. +#define __TBB_GLIBCXX_VERSION TBB_USE_GLIBCXX_VERSION +#elif _GLIBCXX_RELEASE && _GLIBCXX_RELEASE != __GNUC__ +// Reported versions of GCC and libstdc++ do not match; trust the latter +#define __TBB_GLIBCXX_VERSION (_GLIBCXX_RELEASE*10000) +#elif __GLIBCPP__ || __GLIBCXX__ +// The version macro is not defined or matches the GCC version; use __TBB_GCC_VERSION +#define __TBB_GLIBCXX_VERSION __TBB_GCC_VERSION +#endif + +#if __clang__ + // according to clang documentation, version can be vendor specific + #define __TBB_CLANG_VERSION (__clang_major__ * 10000 + __clang_minor__ * 100 + __clang_patchlevel__) +#endif + +/** Target OS is either iOS* or iOS* simulator **/ +#if __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ + #define __TBB_IOS 1 +#endif + +#if __APPLE__ + #if __INTEL_COMPILER && __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ > 1099 \ + && __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101000 + // ICC does not correctly set the macro if -mmacosx-min-version is not specified + #define __TBB_MACOS_TARGET_VERSION (100000 + 10*(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ - 1000)) + #else + #define __TBB_MACOS_TARGET_VERSION __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ + #endif +#endif + +/** Preprocessor symbols to determine HW architecture **/ + +#if _WIN32||_WIN64 +# if defined(_M_X64)||defined(__x86_64__) // the latter for MinGW support +# define __TBB_x86_64 1 +# elif defined(_M_IA64) +# define __TBB_ipf 1 +# elif defined(_M_IX86)||defined(__i386__) // the latter for MinGW support +# define __TBB_x86_32 1 +# else +# define __TBB_generic_arch 1 +# endif +#else /* Assume generic Unix */ +# if !__linux__ && !__APPLE__ +# define __TBB_generic_os 1 +# endif +# if __TBB_IOS +# define __TBB_generic_arch 1 +# elif __x86_64__ +# define __TBB_x86_64 1 +# elif __ia64__ +# define __TBB_ipf 1 +# elif __i386__||__i386 // __i386 is for Sun OS +# define __TBB_x86_32 1 +# else +# define __TBB_generic_arch 1 +# endif +#endif + +#if __MIC__ || __MIC2__ +#define __TBB_DEFINE_MIC 1 +#endif + +#define __TBB_TSX_AVAILABLE ((__TBB_x86_32 || __TBB_x86_64) && !__TBB_DEFINE_MIC) + +/** Presence of compiler features **/ + +#if __INTEL_COMPILER == 9999 && __INTEL_COMPILER_BUILD_DATE == 20110811 +/* Intel(R) Composer XE 2011 Update 6 incorrectly sets __INTEL_COMPILER. Fix it. 
*/ + #undef __INTEL_COMPILER + #define __INTEL_COMPILER 1210 +#endif + +#if __clang__ && !__INTEL_COMPILER +#define __TBB_USE_OPTIONAL_RTTI __has_feature(cxx_rtti) +#elif defined(_CPPRTTI) +#define __TBB_USE_OPTIONAL_RTTI 1 +#else +#define __TBB_USE_OPTIONAL_RTTI (__GXX_RTTI || __RTTI || __INTEL_RTTI__) +#endif + +#if __TBB_GCC_VERSION >= 40400 && !defined(__INTEL_COMPILER) + /** warning suppression pragmas available in GCC since 4.4 **/ + #define __TBB_GCC_WARNING_SUPPRESSION_PRESENT 1 +#endif + +/* Select particular features of C++11 based on compiler version. + ICC 12.1 (Linux*), GCC 4.3 and higher, clang 2.9 and higher + set __GXX_EXPERIMENTAL_CXX0X__ in c++11 mode. + + Compilers that mimics other compilers (ICC, clang) must be processed before + compilers they mimic (GCC, MSVC). + + TODO: The following conditions should be extended when new compilers/runtimes + support added. + */ + +/** + __TBB_CPP11_PRESENT macro indicates that the compiler supports vast majority of C++11 features. + Depending on the compiler, some features might still be unsupported or work incorrectly. + Use it when enabling C++11 features individually is not practical, and be aware that + some "good enough" compilers might be excluded. **/ +#define __TBB_CPP11_PRESENT (__cplusplus >= 201103L || _MSC_VER >= 1900) + +#define __TBB_CPP17_FALLTHROUGH_PRESENT (__cplusplus >= 201703L) +#define __TBB_FALLTHROUGH_PRESENT (__TBB_GCC_VERSION >= 70000 && !__INTEL_COMPILER) + +/** C++11 mode detection macros for Intel(R) C++ Compiler (enabled by -std=c++XY option): + __INTEL_CXX11_MODE__ for version >=13.0 (not available for ICC 15.0 if -std=c++14 is used), + __STDC_HOSTED__ for version >=12.0 (useful only on Windows), + __GXX_EXPERIMENTAL_CXX0X__ for version >=12.0 on Linux and macOS. **/ +#if __INTEL_COMPILER && !__INTEL_CXX11_MODE__ + // __INTEL_CXX11_MODE__ is not set, try to deduce it + #define __INTEL_CXX11_MODE__ (__GXX_EXPERIMENTAL_CXX0X__ || (_MSC_VER && __STDC_HOSTED__)) +#endif + +#if __INTEL_COMPILER && (!_MSC_VER || __INTEL_CXX11_MODE__) + // On Windows, C++11 features supported by Visual Studio 2010 and higher are enabled by default, + // so in absence of /Qstd= use MSVC branch for feature detection. + // On other platforms, no -std= means C++03. + + #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT (__INTEL_CXX11_MODE__ && __VARIADIC_TEMPLATES) + // Both r-value reference support in compiler and std::move/std::forward + // presence in C++ standard library is checked. + #define __TBB_CPP11_RVALUE_REF_PRESENT ((_MSC_VER >= 1700 || __GXX_EXPERIMENTAL_CXX0X__ && (__TBB_GLIBCXX_VERSION >= 40500 || _LIBCPP_VERSION)) && __INTEL_COMPILER >= 1400) + #define __TBB_IMPLICIT_MOVE_PRESENT (__INTEL_CXX11_MODE__ && __INTEL_COMPILER >= 1400 && (_MSC_VER >= 1900 || __TBB_GCC_VERSION >= 40600 || __clang__)) + #if _MSC_VER >= 1600 + #define __TBB_EXCEPTION_PTR_PRESENT ( __INTEL_COMPILER > 1300 \ + /*ICC 12.1 Upd 10 and 13 beta Upd 2 fixed exception_ptr linking issue*/ \ + || (__INTEL_COMPILER == 1300 && __INTEL_COMPILER_BUILD_DATE >= 20120530) \ + || (__INTEL_COMPILER == 1210 && __INTEL_COMPILER_BUILD_DATE >= 20120410) ) + /** libstdc++ that comes with GCC 4.6 use C++11 features not supported by ICC 12.1. 
+ * Because of that ICC 12.1 does not support C++11 mode with gcc 4.6 (or higher), + * and therefore does not define __GXX_EXPERIMENTAL_CXX0X__ macro **/ + #elif __TBB_GLIBCXX_VERSION >= 40404 && __TBB_GLIBCXX_VERSION < 40600 + #define __TBB_EXCEPTION_PTR_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __INTEL_COMPILER >= 1200) + #elif __TBB_GLIBCXX_VERSION >= 40600 + #define __TBB_EXCEPTION_PTR_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __INTEL_COMPILER >= 1300) + #elif _LIBCPP_VERSION + #define __TBB_EXCEPTION_PTR_PRESENT __GXX_EXPERIMENTAL_CXX0X__ + #else + #define __TBB_EXCEPTION_PTR_PRESENT 0 + #endif + #define __TBB_STATIC_ASSERT_PRESENT (__INTEL_CXX11_MODE__ || _MSC_VER >= 1600) + #define __TBB_CPP11_TUPLE_PRESENT (_MSC_VER >= 1600 || __GXX_EXPERIMENTAL_CXX0X__ && (__TBB_GLIBCXX_VERSION >= 40300 || _LIBCPP_VERSION)) + #define __TBB_INITIALIZER_LISTS_PRESENT (__INTEL_CXX11_MODE__ && __INTEL_COMPILER >= 1400 && (_MSC_VER >= 1800 || __TBB_GLIBCXX_VERSION >= 40400 || _LIBCPP_VERSION)) + #define __TBB_CONSTEXPR_PRESENT (__INTEL_CXX11_MODE__ && __INTEL_COMPILER >= 1400) + #define __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT (__INTEL_CXX11_MODE__ && __INTEL_COMPILER >= 1200) + /** ICC seems to disable support of noexcept event in c++11 when compiling in compatibility mode for gcc <4.6 **/ + #define __TBB_NOEXCEPT_PRESENT (__INTEL_CXX11_MODE__ && __INTEL_COMPILER >= 1300 && (__TBB_GLIBCXX_VERSION >= 40600 || _LIBCPP_VERSION || _MSC_VER)) + #define __TBB_CPP11_STD_BEGIN_END_PRESENT (_MSC_VER >= 1700 || __GXX_EXPERIMENTAL_CXX0X__ && __INTEL_COMPILER >= 1310 && (__TBB_GLIBCXX_VERSION >= 40600 || _LIBCPP_VERSION)) + #define __TBB_CPP11_AUTO_PRESENT (_MSC_VER >= 1600 || __GXX_EXPERIMENTAL_CXX0X__ && __INTEL_COMPILER >= 1210) + #define __TBB_CPP11_DECLTYPE_PRESENT (_MSC_VER >= 1600 || __GXX_EXPERIMENTAL_CXX0X__ && __INTEL_COMPILER >= 1210) + #define __TBB_CPP11_LAMBDAS_PRESENT (__INTEL_CXX11_MODE__ && __INTEL_COMPILER >= 1200) + #define __TBB_CPP11_DEFAULT_FUNC_TEMPLATE_ARGS_PRESENT (_MSC_VER >= 1800 || __GXX_EXPERIMENTAL_CXX0X__ && __INTEL_COMPILER >= 1210) + #define __TBB_OVERRIDE_PRESENT (__INTEL_CXX11_MODE__ && __INTEL_COMPILER >= 1400) + #define __TBB_ALIGNAS_PRESENT (__INTEL_CXX11_MODE__ && __INTEL_COMPILER >= 1500) + #define __TBB_CPP11_TEMPLATE_ALIASES_PRESENT (__INTEL_CXX11_MODE__ && __INTEL_COMPILER >= 1210) + #define __TBB_CPP14_INTEGER_SEQUENCE_PRESENT (__cplusplus >= 201402L) + #define __TBB_CPP14_VARIABLE_TEMPLATES_PRESENT (__cplusplus >= 201402L) + #define __TBB_CPP17_DEDUCTION_GUIDES_PRESENT (__INTEL_COMPILER > 1910) // a future version + #define __TBB_CPP17_INVOKE_RESULT_PRESENT (__cplusplus >= 201703L) +#elif __clang__ +/** TODO: these options need to be rechecked **/ + #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT __has_feature(__cxx_variadic_templates__) + #define __TBB_CPP11_RVALUE_REF_PRESENT (__has_feature(__cxx_rvalue_references__) && (_LIBCPP_VERSION || __TBB_GLIBCXX_VERSION >= 40500)) + #define __TBB_IMPLICIT_MOVE_PRESENT __has_feature(cxx_implicit_moves) +/** TODO: extend exception_ptr related conditions to cover libstdc++ **/ + #define __TBB_EXCEPTION_PTR_PRESENT (__cplusplus >= 201103L && (_LIBCPP_VERSION || __TBB_GLIBCXX_VERSION >= 40600)) + #define __TBB_STATIC_ASSERT_PRESENT __has_feature(__cxx_static_assert__) + #if (__cplusplus >= 201103L && __has_include()) + #define __TBB_CPP11_TUPLE_PRESENT 1 + #endif + #if (__has_feature(__cxx_generalized_initializers__) && __has_include()) + #define __TBB_INITIALIZER_LISTS_PRESENT 1 + #endif + #define __TBB_CONSTEXPR_PRESENT 
__has_feature(__cxx_constexpr__) + #define __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT (__has_feature(__cxx_defaulted_functions__) && __has_feature(__cxx_deleted_functions__)) + /**For some unknown reason __has_feature(__cxx_noexcept) does not yield true for all cases. Compiler bug ? **/ + #define __TBB_NOEXCEPT_PRESENT (__cplusplus >= 201103L) + #define __TBB_CPP11_STD_BEGIN_END_PRESENT (__has_feature(__cxx_range_for__) && (_LIBCPP_VERSION || __TBB_GLIBCXX_VERSION >= 40600)) + #define __TBB_CPP11_AUTO_PRESENT __has_feature(__cxx_auto_type__) + #define __TBB_CPP11_DECLTYPE_PRESENT __has_feature(__cxx_decltype__) + #define __TBB_CPP11_LAMBDAS_PRESENT __has_feature(cxx_lambdas) + #define __TBB_CPP11_DEFAULT_FUNC_TEMPLATE_ARGS_PRESENT __has_feature(cxx_default_function_template_args) + #define __TBB_OVERRIDE_PRESENT __has_feature(cxx_override_control) + #define __TBB_ALIGNAS_PRESENT __has_feature(cxx_alignas) + #define __TBB_CPP11_TEMPLATE_ALIASES_PRESENT __has_feature(cxx_alias_templates) + #define __TBB_CPP14_INTEGER_SEQUENCE_PRESENT (__cplusplus >= 201402L) + #define __TBB_CPP14_VARIABLE_TEMPLATES_PRESENT (__has_feature(cxx_variable_templates)) + #define __TBB_CPP17_DEDUCTION_GUIDES_PRESENT (__has_feature(__cpp_deduction_guides)) + #define __TBB_CPP17_INVOKE_RESULT_PRESENT (__has_feature(__cpp_lib_is_invocable)) +#elif __GNUC__ + #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT __GXX_EXPERIMENTAL_CXX0X__ + #define __TBB_CPP11_VARIADIC_FIXED_LENGTH_EXP_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40700) + #define __TBB_CPP11_RVALUE_REF_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40500) + #define __TBB_IMPLICIT_MOVE_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40600) + /** __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 here is a substitution for _GLIBCXX_ATOMIC_BUILTINS_4, which is a prerequisite + for exception_ptr but cannot be used in this file because it is defined in a header, not by the compiler. + If the compiler has no atomic intrinsics, the C++ library should not expect those as well. 
**/ + #define __TBB_EXCEPTION_PTR_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40404 && __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) + #define __TBB_STATIC_ASSERT_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40300) + #define __TBB_CPP11_TUPLE_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40300) + #define __TBB_INITIALIZER_LISTS_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40400) + /** gcc seems have to support constexpr from 4.4 but tests in (test_atomic) seeming reasonable fail to compile prior 4.6**/ + #define __TBB_CONSTEXPR_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40400) + #define __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40400) + #define __TBB_NOEXCEPT_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40600) + #define __TBB_CPP11_STD_BEGIN_END_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40600) + #define __TBB_CPP11_AUTO_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40400) + #define __TBB_CPP11_DECLTYPE_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40400) + #define __TBB_CPP11_LAMBDAS_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40500) + #define __TBB_CPP11_DEFAULT_FUNC_TEMPLATE_ARGS_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40300) + #define __TBB_OVERRIDE_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40700) + #define __TBB_ALIGNAS_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40800) + #define __TBB_CPP11_TEMPLATE_ALIASES_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GCC_VERSION >= 40700) + #define __TBB_CPP14_INTEGER_SEQUENCE_PRESENT (__cplusplus >= 201402L && __TBB_GCC_VERSION >= 50000) + #define __TBB_CPP14_VARIABLE_TEMPLATES_PRESENT (__cplusplus >= 201402L && __TBB_GCC_VERSION >= 50000) + #define __TBB_CPP17_DEDUCTION_GUIDES_PRESENT (__cpp_deduction_guides >= 201606L) + #define __TBB_CPP17_INVOKE_RESULT_PRESENT (__cplusplus >= 201703L && __TBB_GCC_VERSION >= 70000) +#elif _MSC_VER + // These definitions are also used with Intel C++ Compiler in "default" mode (__INTEL_CXX11_MODE__ == 0); + // see a comment in "__INTEL_COMPILER" section above. 
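[Editorial note, not part of the patch: once tbb_config.h is included, the __TBB_..._PRESENT macros in this section are ordinary constant expressions, so user code can branch on them as well. A small hypothetical helper in a separate translation unit, assuming nothing beyond the macro itself.]

#include <utility>   // std::move (used only when the macro below is non-zero)
#include <vector>
#include "tbb/tbb_config.h"

// Append an element, moving it only when tbb_config.h has detected usable
// rvalue-reference support for this toolchain; otherwise fall back to a copy.
template <typename T>
void append(std::vector<T>& out, T value) {
#if __TBB_CPP11_RVALUE_REF_PRESENT
    out.push_back(std::move(value));
#else
    out.push_back(value);
#endif
}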
+ + #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT (_MSC_VER >= 1800) + // Contains a workaround for ICC 13 + #define __TBB_CPP11_RVALUE_REF_PRESENT (_MSC_VER >= 1700 && (!__INTEL_COMPILER || __INTEL_COMPILER >= 1400)) + #define __TBB_IMPLICIT_MOVE_PRESENT (_MSC_VER >= 1900) + #define __TBB_EXCEPTION_PTR_PRESENT (_MSC_VER >= 1600) + #define __TBB_STATIC_ASSERT_PRESENT (_MSC_VER >= 1600) + #define __TBB_CPP11_TUPLE_PRESENT (_MSC_VER >= 1600) + #define __TBB_INITIALIZER_LISTS_PRESENT (_MSC_VER >= 1800) + #define __TBB_CONSTEXPR_PRESENT (_MSC_VER >= 1900) + #define __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT (_MSC_VER >= 1800) + #define __TBB_NOEXCEPT_PRESENT (_MSC_VER >= 1900) + #define __TBB_CPP11_STD_BEGIN_END_PRESENT (_MSC_VER >= 1700) + #define __TBB_CPP11_AUTO_PRESENT (_MSC_VER >= 1600) + #define __TBB_CPP11_DECLTYPE_PRESENT (_MSC_VER >= 1600) + #define __TBB_CPP11_LAMBDAS_PRESENT (_MSC_VER >= 1600) + #define __TBB_CPP11_DEFAULT_FUNC_TEMPLATE_ARGS_PRESENT (_MSC_VER >= 1800) + #define __TBB_OVERRIDE_PRESENT (_MSC_VER >= 1700) + #define __TBB_ALIGNAS_PRESENT (_MSC_VER >= 1900) + #define __TBB_CPP11_TEMPLATE_ALIASES_PRESENT (_MSC_VER >= 1800) + #define __TBB_CPP14_INTEGER_SEQUENCE_PRESENT (_MSC_VER >= 1900) + /* Variable templates are supported in VS2015 Update 2 or later */ + #define __TBB_CPP14_VARIABLE_TEMPLATES_PRESENT (_MSC_FULL_VER >= 190023918 && (!__INTEL_COMPILER || __INTEL_COMPILER >= 1700)) + #define __TBB_CPP17_DEDUCTION_GUIDES_PRESENT (_MSVC_LANG >= 201703L && _MSC_VER >= 1914) + #define __TBB_CPP17_INVOKE_RESULT_PRESENT (_MSVC_LANG >= 201703L && _MSC_VER >= 1911) +#else + #define __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT __TBB_CPP11_PRESENT + #define __TBB_CPP11_RVALUE_REF_PRESENT __TBB_CPP11_PRESENT + #define __TBB_IMPLICIT_MOVE_PRESENT __TBB_CPP11_PRESENT + #define __TBB_EXCEPTION_PTR_PRESENT __TBB_CPP11_PRESENT + #define __TBB_STATIC_ASSERT_PRESENT __TBB_CPP11_PRESENT + #define __TBB_CPP11_TUPLE_PRESENT __TBB_CPP11_PRESENT + #define __TBB_INITIALIZER_LISTS_PRESENT __TBB_CPP11_PRESENT + #define __TBB_CONSTEXPR_PRESENT __TBB_CPP11_PRESENT + #define __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT __TBB_CPP11_PRESENT + #define __TBB_NOEXCEPT_PRESENT __TBB_CPP11_PRESENT + #define __TBB_CPP11_STD_BEGIN_END_PRESENT __TBB_CPP11_PRESENT + #define __TBB_CPP11_AUTO_PRESENT __TBB_CPP11_PRESENT + #define __TBB_CPP11_DECLTYPE_PRESENT __TBB_CPP11_PRESENT + #define __TBB_CPP11_LAMBDAS_PRESENT __TBB_CPP11_PRESENT + #define __TBB_CPP11_DEFAULT_FUNC_TEMPLATE_ARGS_PRESENT __TBB_CPP11_PRESENT + #define __TBB_OVERRIDE_PRESENT __TBB_CPP11_PRESENT + #define __TBB_ALIGNAS_PRESENT __TBB_CPP11_PRESENT + #define __TBB_CPP11_TEMPLATE_ALIASES_PRESENT __TBB_CPP11_PRESENT + #define __TBB_CPP14_INTEGER_SEQUENCE_PRESENT (__cplusplus >= 201402L) + #define __TBB_CPP14_VARIABLE_TEMPLATES_PRESENT (__cplusplus >= 201402L) + #define __TBB_CPP17_DEDUCTION_GUIDES_PRESENT (__cplusplus >= 201703L) + #define __TBB_CPP17_INVOKE_RESULT_PRESENT (__cplusplus >= 201703L) +#endif + +// C++11 standard library features + +#define __TBB_CPP11_ARRAY_PRESENT (_MSC_VER >= 1700 || _LIBCPP_VERSION || __GXX_EXPERIMENTAL_CXX0X__ && __TBB_GLIBCXX_VERSION >= 40300) + +#ifndef __TBB_CPP11_VARIADIC_FIXED_LENGTH_EXP_PRESENT +#define __TBB_CPP11_VARIADIC_FIXED_LENGTH_EXP_PRESENT __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT +#endif +#define __TBB_CPP11_VARIADIC_TUPLE_PRESENT (!_MSC_VER || _MSC_VER >= 1800) + +#define __TBB_CPP11_TYPE_PROPERTIES_PRESENT (_LIBCPP_VERSION || _MSC_VER >= 1700 || (__TBB_GLIBCXX_VERSION >= 50000 && __GXX_EXPERIMENTAL_CXX0X__)) +// 
GCC supported some of type properties since 4.7 +#define __TBB_CPP11_IS_COPY_CONSTRUCTIBLE_PRESENT (__GXX_EXPERIMENTAL_CXX0X__ && __TBB_GLIBCXX_VERSION >= 40700 || __TBB_CPP11_TYPE_PROPERTIES_PRESENT) + +// In GCC, std::move_if_noexcept appeared later than noexcept +#define __TBB_MOVE_IF_NOEXCEPT_PRESENT (__TBB_NOEXCEPT_PRESENT && (__TBB_GLIBCXX_VERSION >= 40700 || _MSC_VER >= 1900 || _LIBCPP_VERSION)) +#define __TBB_ALLOCATOR_TRAITS_PRESENT (__cplusplus >= 201103L && _LIBCPP_VERSION || _MSC_VER >= 1800 || \ + __GXX_EXPERIMENTAL_CXX0X__ && __TBB_GLIBCXX_VERSION >= 40700 && !(__TBB_GLIBCXX_VERSION == 40700 && __TBB_DEFINE_MIC)) +#define __TBB_MAKE_EXCEPTION_PTR_PRESENT (__TBB_EXCEPTION_PTR_PRESENT && (_MSC_VER >= 1700 || __TBB_GLIBCXX_VERSION >= 40600 || _LIBCPP_VERSION || __SUNPRO_CC)) + +// Due to libc++ limitations in C++03 mode, do not pass rvalues to std::make_shared() +#define __TBB_CPP11_SMART_POINTERS_PRESENT ( _MSC_VER >= 1600 || _LIBCPP_VERSION \ + || ((__cplusplus >= 201103L || __GXX_EXPERIMENTAL_CXX0X__) \ + && (__TBB_GLIBCXX_VERSION >= 40500 || __TBB_GLIBCXX_VERSION >= 40400 && __TBB_USE_OPTIONAL_RTTI)) ) + +#define __TBB_CPP11_FUTURE_PRESENT (_MSC_VER >= 1700 || __TBB_GLIBCXX_VERSION >= 40600 && __GXX_EXPERIMENTAL_CXX0X__ || _LIBCPP_VERSION) + +#define __TBB_CPP11_GET_NEW_HANDLER_PRESENT (_MSC_VER >= 1900 || __TBB_GLIBCXX_VERSION >= 40900 && __GXX_EXPERIMENTAL_CXX0X__ || _LIBCPP_VERSION) + +#define __TBB_CPP17_UNCAUGHT_EXCEPTIONS_PRESENT (_MSC_VER >= 1900 || __GLIBCXX__ && __cpp_lib_uncaught_exceptions \ + || _LIBCPP_VERSION >= 3700 && (!__TBB_MACOS_TARGET_VERSION || __TBB_MACOS_TARGET_VERSION >= 101200)) +// TODO: wait when memory_resource will be fully supported in clang and define the right macro +// Currently it is in experimental stage since 6 version. +#define __TBB_CPP17_MEMORY_RESOURCE_PRESENT (_MSC_VER >= 1913 && (_MSVC_LANG > 201402L || __cplusplus > 201402L) || \ + __GLIBCXX__ && __cpp_lib_memory_resource >= 201603) +#define __TBB_CPP17_HW_INTERFERENCE_SIZE_PRESENT (_MSC_VER >= 1911) +// std::swap is in only since C++11, though MSVC had it at least since VS2005 +#if _MSC_VER>=1400 || _LIBCPP_VERSION || __GXX_EXPERIMENTAL_CXX0X__ +#define __TBB_STD_SWAP_HEADER +#else +#define __TBB_STD_SWAP_HEADER +#endif + +//TODO: not clear how exactly this macro affects exception_ptr - investigate +// On linux ICC fails to find existing std::exception_ptr in libstdc++ without this define +#if __INTEL_COMPILER && __GNUC__ && __TBB_EXCEPTION_PTR_PRESENT && !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) + #define __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 1 +#endif + +// Work around a bug in MinGW32 +#if __MINGW32__ && __TBB_EXCEPTION_PTR_PRESENT && !defined(_GLIBCXX_ATOMIC_BUILTINS_4) + #define _GLIBCXX_ATOMIC_BUILTINS_4 +#endif + +#if __GNUC__ || __SUNPRO_CC || __IBMCPP__ + /* ICC defines __GNUC__ and so is covered */ + #define __TBB_ATTRIBUTE_ALIGNED_PRESENT 1 +#elif _MSC_VER && (_MSC_VER >= 1300 || __INTEL_COMPILER) + #define __TBB_DECLSPEC_ALIGN_PRESENT 1 +#endif + +/* Actually ICC supports gcc __sync_* intrinsics starting 11.1, + * but 64 bit support for 32 bit target comes in later ones*/ +/* TODO: change the version back to 4.1.2 once macro __TBB_WORD_SIZE become optional */ +/* Assumed that all clang versions have these gcc compatible intrinsics. 
*/ +#if __TBB_GCC_VERSION >= 40306 || __INTEL_COMPILER >= 1200 || __clang__ + /** built-in atomics available in GCC since 4.1.2 **/ + #define __TBB_GCC_BUILTIN_ATOMICS_PRESENT 1 +#endif + +#if __TBB_GCC_VERSION >= 70000 && !__INTEL_COMPILER && !__clang__ + // After GCC7 there was possible reordering problem in generic atomic load/store operations. + // So always using builtins. + #define TBB_USE_GCC_BUILTINS 1 +#endif + +#if __INTEL_COMPILER >= 1200 + /** built-in C++11 style atomics available in ICC since 12.0 **/ + #define __TBB_ICC_BUILTIN_ATOMICS_PRESENT 1 +#endif + +#if _MSC_VER>=1600 && (!__INTEL_COMPILER || __INTEL_COMPILER>=1310) + #define __TBB_MSVC_PART_WORD_INTERLOCKED_INTRINSICS_PRESENT 1 +#endif + +#define __TBB_TSX_INTRINSICS_PRESENT ((__RTM__ || _MSC_VER>=1700 || __INTEL_COMPILER>=1300) && !__TBB_DEFINE_MIC && !__ANDROID__ && !__OHOS__) + +/** Macro helpers **/ +#define __TBB_CONCAT_AUX(A,B) A##B +// The additional level of indirection is needed to expand macros A and B (not to get the AB macro). +// See [cpp.subst] and [cpp.concat] for more details. +#define __TBB_CONCAT(A,B) __TBB_CONCAT_AUX(A,B) +// The IGNORED argument and comma are needed to always have 2 arguments (even when A is empty). +#define __TBB_IS_MACRO_EMPTY(A,IGNORED) __TBB_CONCAT_AUX(__TBB_MACRO_EMPTY,A) +#define __TBB_MACRO_EMPTY 1 + +/** User controlled TBB features & modes **/ +#ifndef TBB_USE_DEBUG +/* +There are four cases that are supported: + 1. "_DEBUG is undefined" means "no debug"; + 2. "_DEBUG defined to something that is evaluated to 0" (including "garbage", as per [cpp.cond]) means "no debug"; + 3. "_DEBUG defined to something that is evaluated to a non-zero value" means "debug"; + 4. "_DEBUG defined to nothing (empty)" means "debug". +*/ +#ifdef _DEBUG +// Check if _DEBUG is empty. +#define __TBB_IS__DEBUG_EMPTY (__TBB_IS_MACRO_EMPTY(_DEBUG,IGNORED)==__TBB_MACRO_EMPTY) +#if __TBB_IS__DEBUG_EMPTY +#define TBB_USE_DEBUG 1 +#else +#define TBB_USE_DEBUG _DEBUG +#endif /* __TBB_IS__DEBUG_EMPTY */ +#else +#define TBB_USE_DEBUG 0 +#endif +#endif /* TBB_USE_DEBUG */ + +#ifndef TBB_USE_ASSERT +#define TBB_USE_ASSERT TBB_USE_DEBUG +#endif /* TBB_USE_ASSERT */ + +#ifndef TBB_USE_THREADING_TOOLS +#define TBB_USE_THREADING_TOOLS TBB_USE_DEBUG +#endif /* TBB_USE_THREADING_TOOLS */ + +#ifndef TBB_USE_PERFORMANCE_WARNINGS +#ifdef TBB_PERFORMANCE_WARNINGS +#define TBB_USE_PERFORMANCE_WARNINGS TBB_PERFORMANCE_WARNINGS +#else +#define TBB_USE_PERFORMANCE_WARNINGS TBB_USE_DEBUG +#endif /* TBB_PERFORMANCE_WARNINGS */ +#endif /* TBB_USE_PERFORMANCE_WARNINGS */ + +#if __TBB_DEFINE_MIC + #if TBB_USE_EXCEPTIONS + #error The platform does not properly support exception handling. Please do not set TBB_USE_EXCEPTIONS macro or set it to 0. + #elif !defined(TBB_USE_EXCEPTIONS) + #define TBB_USE_EXCEPTIONS 0 + #endif +#elif !(__EXCEPTIONS || defined(_CPPUNWIND) || __SUNPRO_CC) + #if TBB_USE_EXCEPTIONS + #error Compilation settings do not support exception handling. Please do not set TBB_USE_EXCEPTIONS macro or set it to 0. 
+ #elif !defined(TBB_USE_EXCEPTIONS) + #define TBB_USE_EXCEPTIONS 0 + #endif +#elif !defined(TBB_USE_EXCEPTIONS) + #define TBB_USE_EXCEPTIONS 1 +#endif + +#ifndef TBB_IMPLEMENT_CPP0X +/** By default, use C++11 classes if available **/ + #if __clang__ + /* Old versions of Intel C++ Compiler do not have __has_include or cannot use it in #define */ + #if (__INTEL_COMPILER && (__INTEL_COMPILER < 1500 || __INTEL_COMPILER == 1500 && __INTEL_COMPILER_UPDATE <= 1)) + #define TBB_IMPLEMENT_CPP0X (__cplusplus < 201103L || !_LIBCPP_VERSION) + #else + #define TBB_IMPLEMENT_CPP0X (__cplusplus < 201103L || (!__has_include() && !__has_include())) + #endif + #elif __GNUC__ + #define TBB_IMPLEMENT_CPP0X (__TBB_GCC_VERSION < 40400 || !__GXX_EXPERIMENTAL_CXX0X__) + #elif _MSC_VER + #define TBB_IMPLEMENT_CPP0X (_MSC_VER < 1700) + #else + // TODO: Reconsider general approach to be more reliable, e.g. (!(__cplusplus >= 201103L && __ STDC_HOSTED__)) + #define TBB_IMPLEMENT_CPP0X (!__STDCPP_THREADS__) + #endif +#endif /* TBB_IMPLEMENT_CPP0X */ + +/* TBB_USE_CAPTURED_EXCEPTION should be explicitly set to either 0 or 1, as it is used as C++ const */ +#ifndef TBB_USE_CAPTURED_EXCEPTION + /** IA-64 architecture pre-built TBB binaries do not support exception_ptr. **/ + #if __TBB_EXCEPTION_PTR_PRESENT && !defined(__ia64__) + #define TBB_USE_CAPTURED_EXCEPTION 0 + #else + #define TBB_USE_CAPTURED_EXCEPTION 1 + #endif +#else /* defined TBB_USE_CAPTURED_EXCEPTION */ + #if !TBB_USE_CAPTURED_EXCEPTION && !__TBB_EXCEPTION_PTR_PRESENT + #error Current runtime does not support std::exception_ptr. Set TBB_USE_CAPTURED_EXCEPTION and make sure that your code is ready to catch tbb::captured_exception. + #endif +#endif /* defined TBB_USE_CAPTURED_EXCEPTION */ + +/** Check whether the request to use GCC atomics can be satisfied **/ +#if TBB_USE_GCC_BUILTINS && !__TBB_GCC_BUILTIN_ATOMICS_PRESENT + #error "GCC atomic built-ins are not supported." +#endif + +/** Internal TBB features & modes **/ + +/** __TBB_CONCURRENT_ORDERED_CONTAINERS indicates that all conditions of use + * concurrent_map and concurrent_set are met. 
**/ +// TODO: Add cpp11 random generation macro +#ifndef __TBB_CONCURRENT_ORDERED_CONTAINERS_PRESENT + #define __TBB_CONCURRENT_ORDERED_CONTAINERS_PRESENT ( __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT \ + && __TBB_IMPLICIT_MOVE_PRESENT && __TBB_CPP11_AUTO_PRESENT && __TBB_CPP11_LAMBDAS_PRESENT && __TBB_CPP11_ARRAY_PRESENT \ + && __TBB_INITIALIZER_LISTS_PRESENT ) +#endif + +/** __TBB_WEAK_SYMBOLS_PRESENT denotes that the system supports the weak symbol mechanism **/ +#ifndef __TBB_WEAK_SYMBOLS_PRESENT +#define __TBB_WEAK_SYMBOLS_PRESENT ( !_WIN32 && !__APPLE__ && !__sun && (__TBB_GCC_VERSION >= 40000 || __INTEL_COMPILER ) ) +#endif + +/** __TBB_DYNAMIC_LOAD_ENABLED describes the system possibility to load shared libraries at run time **/ +#ifndef __TBB_DYNAMIC_LOAD_ENABLED + #define __TBB_DYNAMIC_LOAD_ENABLED 1 +#endif + +/** __TBB_SOURCE_DIRECTLY_INCLUDED is a mode used in whitebox testing when + it's necessary to test internal functions not exported from TBB DLLs +**/ +#if (_WIN32||_WIN64) && (__TBB_SOURCE_DIRECTLY_INCLUDED || TBB_USE_PREVIEW_BINARY) + #define __TBB_NO_IMPLICIT_LINKAGE 1 + #define __TBBMALLOC_NO_IMPLICIT_LINKAGE 1 +#endif + +#ifndef __TBB_COUNT_TASK_NODES + #define __TBB_COUNT_TASK_NODES TBB_USE_ASSERT +#endif + +#ifndef __TBB_TASK_GROUP_CONTEXT + #define __TBB_TASK_GROUP_CONTEXT 1 +#endif /* __TBB_TASK_GROUP_CONTEXT */ + +#ifndef __TBB_SCHEDULER_OBSERVER + #define __TBB_SCHEDULER_OBSERVER 1 +#endif /* __TBB_SCHEDULER_OBSERVER */ + +#ifndef __TBB_FP_CONTEXT + #define __TBB_FP_CONTEXT __TBB_TASK_GROUP_CONTEXT +#endif /* __TBB_FP_CONTEXT */ + +#if __TBB_FP_CONTEXT && !__TBB_TASK_GROUP_CONTEXT + #error __TBB_FP_CONTEXT requires __TBB_TASK_GROUP_CONTEXT to be enabled +#endif + +#define __TBB_RECYCLE_TO_ENQUEUE __TBB_BUILD // keep non-official + +#ifndef __TBB_ARENA_OBSERVER + #define __TBB_ARENA_OBSERVER __TBB_SCHEDULER_OBSERVER +#endif /* __TBB_ARENA_OBSERVER */ + +#ifndef __TBB_TASK_ISOLATION + #define __TBB_TASK_ISOLATION 1 +#endif /* __TBB_TASK_ISOLATION */ + +#if TBB_USE_EXCEPTIONS && !__TBB_TASK_GROUP_CONTEXT + #error TBB_USE_EXCEPTIONS requires __TBB_TASK_GROUP_CONTEXT to be enabled +#endif + +#ifndef __TBB_TASK_PRIORITY + #define __TBB_TASK_PRIORITY (__TBB_TASK_GROUP_CONTEXT) +#endif /* __TBB_TASK_PRIORITY */ + +#if __TBB_TASK_PRIORITY && !__TBB_TASK_GROUP_CONTEXT + #error __TBB_TASK_PRIORITY requires __TBB_TASK_GROUP_CONTEXT to be enabled +#endif + +#if TBB_PREVIEW_NUMA_SUPPORT || __TBB_BUILD + #define __TBB_NUMA_SUPPORT 1 +#endif + +#if TBB_PREVIEW_WAITING_FOR_WORKERS || __TBB_BUILD + #define __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE 1 +#endif + +#ifndef __TBB_ENQUEUE_ENFORCED_CONCURRENCY + #define __TBB_ENQUEUE_ENFORCED_CONCURRENCY 1 +#endif + +#if !defined(__TBB_SURVIVE_THREAD_SWITCH) && \ + (_WIN32 || _WIN64 || __APPLE__ || (__linux__ && !__ANDROID__ && !__OHOS__)) + #define __TBB_SURVIVE_THREAD_SWITCH 1 +#endif /* __TBB_SURVIVE_THREAD_SWITCH */ + +#ifndef __TBB_DEFAULT_PARTITIONER +#define __TBB_DEFAULT_PARTITIONER tbb::auto_partitioner +#endif + +#ifndef __TBB_USE_PROPORTIONAL_SPLIT_IN_BLOCKED_RANGES +#define __TBB_USE_PROPORTIONAL_SPLIT_IN_BLOCKED_RANGES 1 +#endif + +#ifndef __TBB_ENABLE_RANGE_FEEDBACK +#define __TBB_ENABLE_RANGE_FEEDBACK 0 +#endif + +#ifdef _VARIADIC_MAX + #define __TBB_VARIADIC_MAX _VARIADIC_MAX +#else + #if _MSC_VER == 1700 + #define __TBB_VARIADIC_MAX 5 // VS11 setting, issue resolved in VS12 + #elif _MSC_VER == 1600 + #define __TBB_VARIADIC_MAX 10 // VS10 setting + #else + #define __TBB_VARIADIC_MAX 15 + 
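[Editorial note, not part of the patch: the deprecation controls just below honor TBB_SUPPRESS_DEPRECATED_MESSAGES, and tbb.h checks the same macro before emitting its #pragma message. A sketch of how a user translation unit opts out of both warnings; the macro can equally be passed as -DTBB_SUPPRESS_DEPRECATED_MESSAGES=1 on the compiler command line.]

// Define the suppression macro before any TBB header is included.
#define TBB_SUPPRESS_DEPRECATED_MESSAGES 1
#include "tbb/tbb.h"

int main() { return 0; }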
#endif +#endif + +// Intel C++ Compiler starts analyzing usages of the deprecated content at the template +// instantiation site, which is too late for suppression of the corresponding messages for internal +// stuff. +#if !defined(__INTEL_COMPILER) && (!defined(TBB_SUPPRESS_DEPRECATED_MESSAGES) || (TBB_SUPPRESS_DEPRECATED_MESSAGES == 0)) + #if (__cplusplus >= 201402L) + #define __TBB_DEPRECATED [[deprecated]] + #define __TBB_DEPRECATED_MSG(msg) [[deprecated(msg)]] + #elif _MSC_VER + #define __TBB_DEPRECATED __declspec(deprecated) + #define __TBB_DEPRECATED_MSG(msg) __declspec(deprecated(msg)) + #elif (__GNUC__ && __TBB_GCC_VERSION >= 40805) || __clang__ + #define __TBB_DEPRECATED __attribute__((deprecated)) + #define __TBB_DEPRECATED_MSG(msg) __attribute__((deprecated(msg))) + #endif +#endif // !defined(TBB_SUPPRESS_DEPRECATED_MESSAGES) || (TBB_SUPPRESS_DEPRECATED_MESSAGES == 0) + +#if !defined(__TBB_DEPRECATED) + #define __TBB_DEPRECATED + #define __TBB_DEPRECATED_MSG(msg) +#elif !defined(__TBB_SUPPRESS_INTERNAL_DEPRECATED_MESSAGES) + // Suppress deprecated messages from self + #define __TBB_SUPPRESS_INTERNAL_DEPRECATED_MESSAGES 1 +#endif + +#if defined(TBB_SUPPRESS_DEPRECATED_MESSAGES) && (TBB_SUPPRESS_DEPRECATED_MESSAGES == 0) + #define __TBB_DEPRECATED_IN_VERBOSE_MODE __TBB_DEPRECATED + #define __TBB_DEPRECATED_IN_VERBOSE_MODE_MSG(msg) __TBB_DEPRECATED_MSG(msg) +#else + #define __TBB_DEPRECATED_IN_VERBOSE_MODE + #define __TBB_DEPRECATED_IN_VERBOSE_MODE_MSG(msg) +#endif // (TBB_SUPPRESS_DEPRECATED_MESSAGES == 0) + +#if (!defined(TBB_SUPPRESS_DEPRECATED_MESSAGES) || (TBB_SUPPRESS_DEPRECATED_MESSAGES == 0)) && !__TBB_CPP11_PRESENT + #pragma message("TBB Warning: Support for C++98/03 is deprecated. Please use the compiler that supports C++11 features at least.") +#endif + +/** __TBB_WIN8UI_SUPPORT enables support of Windows* Store Apps and limit a possibility to load + shared libraries at run time only from application container **/ +// TODO: Separate this single macro into two for Windows 8 Store* (win8ui mode) and UWP/UWD modes. +#if defined(WINAPI_FAMILY) && WINAPI_FAMILY == WINAPI_FAMILY_APP + #define __TBB_WIN8UI_SUPPORT 1 +#else + #define __TBB_WIN8UI_SUPPORT 0 +#endif + +/** Macros of the form __TBB_XXX_BROKEN denote known issues that are caused by + the bugs in compilers, standard or OS specific libraries. They should be + removed as soon as the corresponding bugs are fixed or the buggy OS/compiler + versions go out of the support list. +**/ + +#if __SIZEOF_POINTER__ < 8 && (__ANDROID__ || __OHOS__) && __TBB_GCC_VERSION <= 40403 && !__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 + /** Necessary because on Android 8-byte CAS and F&A are not available for some processor architectures, + but no mandatory warning message appears from GCC 4.4.3. Instead, only a linkage error occurs when + these atomic operations are used (such as in unit test test_atomic.exe). **/ + #define __TBB_GCC_64BIT_ATOMIC_BUILTINS_BROKEN 1 +#elif __TBB_x86_32 && __TBB_GCC_VERSION == 40102 && ! __GNUC_RH_RELEASE__ + /** GCC 4.1.2 erroneously emit call to external function for 64 bit sync_ intrinsics. + However these functions are not defined anywhere. It seems that this problem was fixed later on + and RHEL got an updated version of gcc 4.1.2. 
**/ + #define __TBB_GCC_64BIT_ATOMIC_BUILTINS_BROKEN 1 +#endif + +#if __GNUC__ && __TBB_x86_64 && __INTEL_COMPILER == 1200 + #define __TBB_ICC_12_0_INL_ASM_FSTCW_BROKEN 1 +#endif + +#if _MSC_VER && __INTEL_COMPILER && (__INTEL_COMPILER<1110 || __INTEL_COMPILER==1110 && __INTEL_COMPILER_BUILD_DATE < 20091012) + /** Necessary to avoid ICL error (or warning in non-strict mode): + "exception specification for implicitly declared virtual destructor is + incompatible with that of overridden one". **/ + #define __TBB_DEFAULT_DTOR_THROW_SPEC_BROKEN 1 +#endif + +#if !__INTEL_COMPILER && (_MSC_VER && _MSC_VER < 1500 || __GNUC__ && __TBB_GCC_VERSION < 40102) + /** gcc 3.4.6 (and earlier) and VS2005 (and earlier) do not allow declaring template class as a friend + of classes defined in other namespaces. **/ + #define __TBB_TEMPLATE_FRIENDS_BROKEN 1 +#endif + +#if __GLIBC__==2 && __GLIBC_MINOR__==3 || (__APPLE__ && ( __INTEL_COMPILER==1200 && !TBB_USE_DEBUG)) + /** Macro controlling EH usages in TBB tests. + Some older versions of glibc crash when exception handling happens concurrently. **/ + #define __TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN 1 +#endif + +#if (_WIN32||_WIN64) && __INTEL_COMPILER == 1110 + /** That's a bug in Intel C++ Compiler 11.1.044/IA-32 architecture/Windows* OS, that leads to a worker thread crash on the thread's startup. **/ + #define __TBB_ICL_11_1_CODE_GEN_BROKEN 1 +#endif + +#if __clang__ || (__GNUC__==3 && __GNUC_MINOR__==3 && !defined(__INTEL_COMPILER)) + /** Bugs with access to nested classes declared in protected area */ + #define __TBB_PROTECTED_NESTED_CLASS_BROKEN 1 +#endif + +#if __MINGW32__ && __TBB_GCC_VERSION < 40200 + /** MinGW has a bug with stack alignment for routines invoked from MS RTLs. + Since GCC 4.2, the bug can be worked around via a special attribute. **/ + #define __TBB_SSE_STACK_ALIGNMENT_BROKEN 1 +#endif + +#if __TBB_GCC_VERSION==40300 && !__INTEL_COMPILER && !__clang__ + /* GCC of this version may rashly ignore control dependencies */ + #define __TBB_GCC_OPTIMIZER_ORDERING_BROKEN 1 +#endif + +#if __FreeBSD__ + /** A bug in FreeBSD 8.0 results in kernel panic when there is contention + on a mutex created with this attribute. **/ + #define __TBB_PRIO_INHERIT_BROKEN 1 + + /** A bug in FreeBSD 8.0 results in test hanging when an exception occurs + during (concurrent?) object construction by means of placement new operator. **/ + #define __TBB_PLACEMENT_NEW_EXCEPTION_SAFETY_BROKEN 1 +#endif /* __FreeBSD__ */ + +#if (__linux__ || __APPLE__) && __i386__ && defined(__INTEL_COMPILER) + /** The Intel C++ Compiler for IA-32 architecture (Linux* OS|macOS) crashes or generates + incorrect code when __asm__ arguments have a cast to volatile. **/ + #define __TBB_ICC_ASM_VOLATILE_BROKEN 1 +#endif + +#if !__INTEL_COMPILER && (_MSC_VER && _MSC_VER < 1700 || __GNUC__==3 && __GNUC_MINOR__<=2) + /** Bug in GCC 3.2 and MSVC compilers that sometimes return 0 for __alignof(T) + when T has not yet been instantiated. **/ + #define __TBB_ALIGNOF_NOT_INSTANTIATED_TYPES_BROKEN 1 +#endif + +#if __TBB_DEFINE_MIC + /** Main thread and user's thread have different default thread affinity masks. 
**/ + #define __TBB_MAIN_THREAD_AFFINITY_BROKEN 1 +#endif + +#if __GXX_EXPERIMENTAL_CXX0X__ && !defined(__EXCEPTIONS) && \ + ((!__INTEL_COMPILER && !__clang__ && (__TBB_GCC_VERSION>=40400 && __TBB_GCC_VERSION<40600)) || \ + (__INTEL_COMPILER<=1400 && (__TBB_GLIBCXX_VERSION>=40400 && __TBB_GLIBCXX_VERSION<=40801))) +/* There is an issue for specific GCC toolchain when C++11 is enabled + and exceptions are disabled: + exceprion_ptr.h/nested_exception.h use throw unconditionally. + GCC can ignore 'throw' since 4.6; but with ICC the issue still exists. + */ + #define __TBB_LIBSTDCPP_EXCEPTION_HEADERS_BROKEN 1 +#endif + +#if __INTEL_COMPILER==1300 && __TBB_GLIBCXX_VERSION>=40700 && defined(__GXX_EXPERIMENTAL_CXX0X__) +/* Some C++11 features used inside libstdc++ are not supported by Intel C++ Compiler. */ + #define __TBB_ICC_13_0_CPP11_STDLIB_SUPPORT_BROKEN 1 +#endif + +#if (__GNUC__==4 && __GNUC_MINOR__==4 ) && !defined(__INTEL_COMPILER) && !defined(__clang__) + /** excessive warnings related to strict aliasing rules in GCC 4.4 **/ + #define __TBB_GCC_STRICT_ALIASING_BROKEN 1 + /* topical remedy: #pragma GCC diagnostic ignored "-Wstrict-aliasing" */ + #if !__TBB_GCC_WARNING_SUPPRESSION_PRESENT + #error Warning suppression is not supported, while should. + #endif +#endif + +/* In a PIC mode some versions of GCC 4.1.2 generate incorrect inlined code for 8 byte __sync_val_compare_and_swap intrinsic */ +#if __TBB_GCC_VERSION == 40102 && __PIC__ && !defined(__INTEL_COMPILER) && !defined(__clang__) + #define __TBB_GCC_CAS8_BUILTIN_INLINING_BROKEN 1 +#endif + +#if __TBB_x86_32 && ( __INTEL_COMPILER || (__GNUC__==5 && __GNUC_MINOR__>=2 && __GXX_EXPERIMENTAL_CXX0X__) \ + || (__GNUC__==3 && __GNUC_MINOR__==3) || (__MINGW32__ && __GNUC__==4 && __GNUC_MINOR__==5) || __SUNPRO_CC ) + // Some compilers for IA-32 architecture fail to provide 8-byte alignment of objects on the stack, + // even if the object specifies 8-byte alignment. On such platforms, the implementation + // of 64 bit atomics for IA-32 architecture (e.g. atomic) use different tactics + // depending upon whether the object is properly aligned or not. + #define __TBB_FORCE_64BIT_ALIGNMENT_BROKEN 1 +#else + // Define to 0 explicitly because the macro is used in a compiled code of test_atomic + #define __TBB_FORCE_64BIT_ALIGNMENT_BROKEN 0 +#endif + +#if __GNUC__ && !__INTEL_COMPILER && !__clang__ && __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT && __TBB_GCC_VERSION < 40700 + #define __TBB_ZERO_INIT_WITH_DEFAULTED_CTOR_BROKEN 1 +#endif + +#if _MSC_VER && _MSC_VER <= 1800 && !__INTEL_COMPILER + // With MSVC, when an array is passed by const reference to a template function, + // constness from the function parameter may get propagated to the template parameter. 
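[Editorial note, not part of the patch: alongside the workaround macros in this section, tbb_config.h also leaves TBB_USE_EXCEPTIONS and __TBB_CPP11_PRESENT defined for the client. A sketch, in the same spirit as the #error checks earlier in this header, of a project that turns those soft fallbacks into hard build errors; the wording of the messages is arbitrary.]

#include "tbb/tbb_config.h"

#if !TBB_USE_EXCEPTIONS
#error "This project relies on TBB exception propagation; enable C++ exception handling."
#endif
#if !__TBB_CPP11_PRESENT
#error "A C++11-capable compiler is required to build this project."
#endif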
+ #define __TBB_CONST_REF_TO_ARRAY_TEMPLATE_PARAM_BROKEN 1 +#endif + +// A compiler bug: a disabled copy constructor prevents use of the moving constructor +#define __TBB_IF_NO_COPY_CTOR_MOVE_SEMANTICS_BROKEN (_MSC_VER && (__INTEL_COMPILER >= 1300 && __INTEL_COMPILER <= 1310) && !__INTEL_CXX11_MODE__) + +#define __TBB_CPP11_DECLVAL_BROKEN (_MSC_VER == 1600 || (__GNUC__ && __TBB_GCC_VERSION < 40500) ) +// Intel C++ Compiler has difficulties with copying std::pair with VC11 std::reference_wrapper being a const member +#define __TBB_COPY_FROM_NON_CONST_REF_BROKEN (_MSC_VER == 1700 && __INTEL_COMPILER && __INTEL_COMPILER < 1600) + +// The implicit upcasting of the tuple of a reference of a derived class to a base class fails on icc 13.X if the system's gcc environment is 4.8 +// Also in gcc 4.4 standard library the implementation of the tuple<&> conversion (tuple a = tuple, B is inherited from A) is broken. +#if __GXX_EXPERIMENTAL_CXX0X__ && __GLIBCXX__ && ((__INTEL_COMPILER >=1300 && __INTEL_COMPILER <=1310 && __TBB_GLIBCXX_VERSION>=40700) || (__TBB_GLIBCXX_VERSION < 40500)) +#define __TBB_UPCAST_OF_TUPLE_OF_REF_BROKEN 1 +#endif + +// In some cases decltype of a function adds a reference to a return type. +#define __TBB_CPP11_DECLTYPE_OF_FUNCTION_RETURN_TYPE_BROKEN (_MSC_VER == 1600 && !__INTEL_COMPILER) + +// Visual Studio 2013 does not delete the copy constructor when a user-defined move constructor is provided +#if _MSC_VER && _MSC_VER <= 1800 + #define __TBB_IMPLICIT_COPY_DELETION_BROKEN 1 +#endif + +/** End of __TBB_XXX_BROKEN macro section **/ + +#if defined(_MSC_VER) && _MSC_VER>=1500 && !defined(__INTEL_COMPILER) + // A macro to suppress erroneous or benign "unreachable code" MSVC warning (4702) + #define __TBB_MSVC_UNREACHABLE_CODE_IGNORED 1 +#endif + +#define __TBB_ATOMIC_CTORS (__TBB_CONSTEXPR_PRESENT && __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT && (!__TBB_ZERO_INIT_WITH_DEFAULTED_CTOR_BROKEN)) + +// Many OS versions (Android 4.0.[0-3] for example) need workaround for dlopen to avoid non-recursive loader lock hang +// Setting the workaround for all compile targets ($APP_PLATFORM) below Android 4.4 (android-19) +#if __ANDROID__ +#include +#define __TBB_USE_DLOPEN_REENTRANCY_WORKAROUND (__ANDROID_API__ < 19) +#endif + +#define __TBB_ALLOCATOR_CONSTRUCT_VARIADIC (__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_RVALUE_REF_PRESENT) + +#define __TBB_VARIADIC_PARALLEL_INVOKE (TBB_PREVIEW_VARIADIC_PARALLEL_INVOKE && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_RVALUE_REF_PRESENT) +#define __TBB_FLOW_GRAPH_CPP11_FEATURES (__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT \ + && __TBB_CPP11_SMART_POINTERS_PRESENT && __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_AUTO_PRESENT) \ + && __TBB_CPP11_VARIADIC_TUPLE_PRESENT && __TBB_CPP11_DEFAULT_FUNC_TEMPLATE_ARGS_PRESENT \ + && !__TBB_UPCAST_OF_TUPLE_OF_REF_BROKEN +#define __TBB_PREVIEW_STREAMING_NODE (__TBB_CPP11_VARIADIC_FIXED_LENGTH_EXP_PRESENT && __TBB_FLOW_GRAPH_CPP11_FEATURES \ + && TBB_PREVIEW_FLOW_GRAPH_NODES && !TBB_IMPLEMENT_CPP0X && !__TBB_UPCAST_OF_TUPLE_OF_REF_BROKEN) +#define __TBB_PREVIEW_OPENCL_NODE (__TBB_PREVIEW_STREAMING_NODE && __TBB_CPP11_TEMPLATE_ALIASES_PRESENT) +#define __TBB_PREVIEW_MESSAGE_BASED_KEY_MATCHING (TBB_PREVIEW_FLOW_GRAPH_FEATURES || __TBB_PREVIEW_OPENCL_NODE) +#define __TBB_PREVIEW_ASYNC_MSG (TBB_PREVIEW_FLOW_GRAPH_FEATURES && __TBB_FLOW_GRAPH_CPP11_FEATURES) + + +#ifndef __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES +#define __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES TBB_PREVIEW_FLOW_GRAPH_FEATURES +#endif + +// This 
feature works only in combination with critical tasks (__TBB_PREVIEW_CRITICAL_TASKS) +#ifndef __TBB_PREVIEW_RESUMABLE_TASKS +#define __TBB_PREVIEW_RESUMABLE_TASKS ((__TBB_CPF_BUILD || TBB_PREVIEW_RESUMABLE_TASKS) && !__TBB_WIN8UI_SUPPORT && !__ANDROID__ && !__OHOS__ && !__TBB_ipf) +#endif + +#ifndef __TBB_PREVIEW_CRITICAL_TASKS +#define __TBB_PREVIEW_CRITICAL_TASKS (__TBB_CPF_BUILD || __TBB_PREVIEW_FLOW_GRAPH_PRIORITIES || __TBB_PREVIEW_RESUMABLE_TASKS) +#endif + +#ifndef __TBB_PREVIEW_FLOW_GRAPH_NODE_SET +#define __TBB_PREVIEW_FLOW_GRAPH_NODE_SET (TBB_PREVIEW_FLOW_GRAPH_FEATURES && __TBB_CPP11_PRESENT && __TBB_FLOW_GRAPH_CPP11_FEATURES) +#endif + +#endif /* __TBB_tbb_config_H */ diff --git a/ohos/arm64-v8a/include/tbb/tbb_disable_exceptions.h b/ohos/arm64-v8a/include/tbb/tbb_disable_exceptions.h new file mode 100644 index 00000000..69ef5c57 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/tbb_disable_exceptions.h @@ -0,0 +1,31 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +//! To disable use of exceptions, include this header before any other header file from the library. + +//! The macro that prevents use of exceptions in the library files +#undef TBB_USE_EXCEPTIONS +#define TBB_USE_EXCEPTIONS 0 + +//! Prevent compilers from issuing exception related warnings. +/** Note that the warnings are suppressed for all the code after this header is included. */ +#if _MSC_VER +#if __INTEL_COMPILER + #pragma warning (disable: 583) +#else + #pragma warning (disable: 4530 4577) +#endif +#endif diff --git a/ohos/arm64-v8a/include/tbb/tbb_exception.h b/ohos/arm64-v8a/include/tbb/tbb_exception.h new file mode 100644 index 00000000..3c5fb7dd --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/tbb_exception.h @@ -0,0 +1,362 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_exception_H +#define __TBB_exception_H + +#define __TBB_tbb_exception_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + +#include "tbb_stddef.h" +#include +#include // required for bad_alloc definition, operators new +#include // required to construct std exception classes + +namespace tbb { + +//! Exception for concurrent containers +class bad_last_alloc : public std::bad_alloc { +public: + const char* what() const throw() __TBB_override; +#if __TBB_DEFAULT_DTOR_THROW_SPEC_BROKEN + ~bad_last_alloc() throw() __TBB_override {} +#endif +}; + +//! 
Exception for PPL locks +class __TBB_DEPRECATED improper_lock : public std::exception { +public: + const char* what() const throw() __TBB_override; +}; + +//! Exception for user-initiated abort +class user_abort : public std::exception { +public: + const char* what() const throw() __TBB_override; +}; + +//! Exception for missing wait on structured_task_group +class missing_wait : public std::exception { +public: + const char* what() const throw() __TBB_override; +}; + +//! Exception for repeated scheduling of the same task_handle +class invalid_multiple_scheduling : public std::exception { +public: + const char* what() const throw() __TBB_override; +}; + +namespace internal { +//! Obsolete +void __TBB_EXPORTED_FUNC throw_bad_last_alloc_exception_v4(); + +enum exception_id { + eid_bad_alloc = 1, + eid_bad_last_alloc, + eid_nonpositive_step, + eid_out_of_range, + eid_segment_range_error, + eid_index_range_error, + eid_missing_wait, + eid_invalid_multiple_scheduling, + eid_improper_lock, + eid_possible_deadlock, + eid_operation_not_permitted, + eid_condvar_wait_failed, + eid_invalid_load_factor, + eid_reserved, // free slot for backward compatibility, can be reused. + eid_invalid_swap, + eid_reservation_length_error, + eid_invalid_key, + eid_user_abort, + eid_reserved1, +#if __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE + // This id is used only from inside the library and only for support of CPF functionality. + // So, if we drop the functionality, eid_reserved1 can be safely renamed and reused. + eid_blocking_thread_join_impossible = eid_reserved1, +#endif + eid_bad_tagged_msg_cast, + //! The last enumerator tracks the number of defined IDs. It must remain the last one. + /** When adding new IDs, place them immediately _before_ this comment (that is + _after_ all the existing IDs. NEVER insert new IDs between the existing ones. **/ + eid_max +}; + +//! Gathers all throw operators in one place. +/** Its purpose is to minimize code bloat that can be caused by throw operators + scattered in multiple places, especially in templates. **/ +void __TBB_EXPORTED_FUNC throw_exception_v4 ( exception_id ); + +//! Versionless convenience wrapper for throw_exception_v4() +inline void throw_exception ( exception_id eid ) { throw_exception_v4(eid); } + +} // namespace internal +} // namespace tbb + +#if __TBB_TASK_GROUP_CONTEXT +#include "tbb_allocator.h" +#include //for typeid + +namespace tbb { + +//! Interface to be implemented by all exceptions TBB recognizes and propagates across the threads. +/** If an unhandled exception of the type derived from tbb::tbb_exception is intercepted + by the TBB scheduler in one of the worker threads, it is delivered to and re-thrown in + the root thread. The root thread is the thread that has started the outermost algorithm + or root task sharing the same task_group_context with the guilty algorithm/task (the one + that threw the exception first). + + Note: when documentation mentions workers with respect to exception handling, + masters are implied as well, because they are completely equivalent in this context. + Consequently a root thread can be master or worker thread. + + NOTE: In case of nested algorithms or complex task hierarchies when the nested + levels share (explicitly or by means of implicit inheritance) the task group + context of the outermost level, the exception may be (re-)thrown multiple times + (ultimately - in each worker on each nesting level) before reaching the root + thread at the outermost level. 
IMPORTANT: if you intercept an exception derived + from this class on a nested level, you must re-throw it in the catch block by means + of the "throw;" operator. + + TBB provides two implementations of this interface: tbb::captured_exception and + template class tbb::movable_exception. See their declarations for more info. **/ +class __TBB_DEPRECATED tbb_exception : public std::exception +{ + /** No operator new is provided because the TBB usage model assumes dynamic + creation of the TBB exception objects only by means of applying move() + operation on an exception thrown out of TBB scheduler. **/ + void* operator new ( size_t ); + +public: +#if __clang__ + // At -O3 or even -O2 optimization level, Clang may fully throw away an empty destructor + // of tbb_exception from destructors of derived classes. As a result, it does not create + // vtable for tbb_exception, which is a required part of TBB binary interface. + // Making the destructor non-empty (with just a semicolon) prevents that optimization. + ~tbb_exception() throw() { /* keep the semicolon! */ ; } +#endif + + //! Creates and returns pointer to the deep copy of this exception object. + /** Move semantics is allowed. **/ + virtual tbb_exception* move() throw() = 0; + + //! Destroys objects created by the move() method. + /** Frees memory and calls destructor for this exception object. + Can and must be used only on objects created by the move method. **/ + virtual void destroy() throw() = 0; + + //! Throws this exception object. + /** Make sure that if you have several levels of derivation from this interface + you implement or override this method on the most derived level. The implementation + is as simple as "throw *this;". Failure to do this will result in exception + of a base class type being thrown. **/ + virtual void throw_self() = 0; + + //! Returns RTTI name of the originally intercepted exception + virtual const char* name() const throw() = 0; + + //! Returns the result of originally intercepted exception's what() method. + virtual const char* what() const throw() __TBB_override = 0; + + /** Operator delete is provided only to allow using existing smart pointers + with TBB exception objects obtained as the result of applying move() + operation on an exception thrown out of TBB scheduler. + + When overriding method move() make sure to override operator delete as well + if memory is allocated not by TBB's scalable allocator. **/ + void operator delete ( void* p ) { + internal::deallocate_via_handler_v3(p); + } +}; + +//! This class is used by TBB to propagate information about unhandled exceptions into the root thread. +/** Exception of this type is thrown by TBB in the root thread (thread that started a parallel + algorithm ) if an unhandled exception was intercepted during the algorithm execution in one + of the workers. 
+ \sa tbb::tbb_exception **/ +class __TBB_DEPRECATED_IN_VERBOSE_MODE captured_exception : public tbb_exception +{ +public: + captured_exception( const captured_exception& src ) + : tbb_exception(src), my_dynamic(false) + { + set(src.my_exception_name, src.my_exception_info); + } + + captured_exception( const char* name_, const char* info ) + : my_dynamic(false) + { + set(name_, info); + } + + __TBB_EXPORTED_METHOD ~captured_exception() throw(); + + captured_exception& operator= ( const captured_exception& src ) { + if ( this != &src ) { + clear(); + set(src.my_exception_name, src.my_exception_info); + } + return *this; + } + + captured_exception* __TBB_EXPORTED_METHOD move() throw() __TBB_override; + + void __TBB_EXPORTED_METHOD destroy() throw() __TBB_override; + + void throw_self() __TBB_override { __TBB_THROW(*this); } + + const char* __TBB_EXPORTED_METHOD name() const throw() __TBB_override; + + const char* __TBB_EXPORTED_METHOD what() const throw() __TBB_override; + + void __TBB_EXPORTED_METHOD set( const char* name, const char* info ) throw(); + void __TBB_EXPORTED_METHOD clear() throw(); + +private: + //! Used only by method move(). + captured_exception() : my_dynamic(), my_exception_name(), my_exception_info() {} + + //! Functionally equivalent to {captured_exception e(name,info); return e.move();} + static captured_exception* allocate( const char* name, const char* info ); + + bool my_dynamic; + const char* my_exception_name; + const char* my_exception_info; +}; + +//! Template that can be used to implement exception that transfers arbitrary ExceptionData to the root thread +/** Code using TBB can instantiate this template with an arbitrary ExceptionData type + and throw this exception object. Such exceptions are intercepted by the TBB scheduler + and delivered to the root thread (). 
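As a sketch of the propagation described above (illustrative only, assuming a C++11 compiler and the algorithm headers from this same patch): an exception thrown inside a worker is intercepted and re-thrown in the thread that started the algorithm, either with its exact type preserved or summarized as a captured_exception when exact propagation is not available.

    #include <iostream>
    #include <stdexcept>
    #include "tbb/blocked_range.h"
    #include "tbb/parallel_for.h"
    #include "tbb/tbb_exception.h"

    int main() {
        try {
            tbb::parallel_for(tbb::blocked_range<int>(0, 1000),
                [](const tbb::blocked_range<int>& r) {
                    for (int i = r.begin(); i != r.end(); ++i)
                        if (i == 500)
                            throw std::runtime_error("failure in a worker");
                });
        } catch (const tbb::captured_exception& e) {
            // Summary form: the RTTI name and what() string of the original exception.
            std::cout << e.name() << ": " << e.what() << '\n';
        } catch (const std::runtime_error& e) {
            // Exact propagation via std::exception_ptr preserves the original type.
            std::cout << "runtime_error: " << e.what() << '\n';
        }
        return 0;
    }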
+ \sa tbb::tbb_exception **/ +template +class __TBB_DEPRECATED movable_exception : public tbb_exception +{ + typedef movable_exception self_type; + +public: + movable_exception( const ExceptionData& data_ ) + : my_exception_data(data_) + , my_dynamic(false) + , my_exception_name( +#if TBB_USE_EXCEPTIONS + typeid(self_type).name() +#else /* !TBB_USE_EXCEPTIONS */ + "movable_exception" +#endif /* !TBB_USE_EXCEPTIONS */ + ) + {} + + movable_exception( const movable_exception& src ) throw () + : tbb_exception(src) + , my_exception_data(src.my_exception_data) + , my_dynamic(false) + , my_exception_name(src.my_exception_name) + {} + + ~movable_exception() throw() {} + + const movable_exception& operator= ( const movable_exception& src ) { + if ( this != &src ) { + my_exception_data = src.my_exception_data; + my_exception_name = src.my_exception_name; + } + return *this; + } + + ExceptionData& data() throw() { return my_exception_data; } + + const ExceptionData& data() const throw() { return my_exception_data; } + + const char* name() const throw() __TBB_override { return my_exception_name; } + + const char* what() const throw() __TBB_override { return "tbb::movable_exception"; } + + movable_exception* move() throw() __TBB_override { + void* e = internal::allocate_via_handler_v3(sizeof(movable_exception)); + if ( e ) { + ::new (e) movable_exception(*this); + ((movable_exception*)e)->my_dynamic = true; + } + return (movable_exception*)e; + } + void destroy() throw() __TBB_override { + __TBB_ASSERT ( my_dynamic, "Method destroy can be called only on dynamically allocated movable_exceptions" ); + if ( my_dynamic ) { + this->~movable_exception(); + internal::deallocate_via_handler_v3(this); + } + } + void throw_self() __TBB_override { __TBB_THROW( *this ); } + +protected: + //! User data + ExceptionData my_exception_data; + +private: + //! Flag specifying whether this object has been dynamically allocated (by the move method) + bool my_dynamic; + + //! RTTI name of this class + /** We rely on the fact that RTTI names are static string constants. **/ + const char* my_exception_name; +}; + +#if !TBB_USE_CAPTURED_EXCEPTION +namespace internal { + +//! Exception container that preserves the exact copy of the original exception +/** This class can be used only when the appropriate runtime support (mandated + by C++11) is present **/ +class tbb_exception_ptr { + std::exception_ptr my_ptr; + +public: + static tbb_exception_ptr* allocate(); + static tbb_exception_ptr* allocate( const tbb_exception& tag ); + //! This overload uses move semantics (i.e. it empties src) + static tbb_exception_ptr* allocate( captured_exception& src ); + + //! Destroys this objects + /** Note that objects of this type can be created only by the allocate() method. **/ + void destroy() throw(); + + //! Throws the contained exception . 
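The movable_exception template described above is the legacy way to carry user-defined error data from a worker to the root thread. A minimal sketch (bad_item is an invented payload type used purely for illustration; the class is deprecated, so this reflects old-style usage):

    #include <iostream>
    #include "tbb/blocked_range.h"
    #include "tbb/parallel_for.h"
    #include "tbb/tbb_exception.h"

    struct bad_item { int index; };   // hypothetical payload copied to the root thread

    int main() {
        try {
            tbb::parallel_for(tbb::blocked_range<int>(0, 100),
                [](const tbb::blocked_range<int>& r) {
                    for (int i = r.begin(); i != r.end(); ++i)
                        if (i == 42) {
                            bad_item item = { i };
                            throw tbb::movable_exception<bad_item>(item);
                        }
                });
        } catch (tbb::movable_exception<bad_item>& e) {
            std::cout << "failed at index " << e.data().index << '\n';
        }
        return 0;
    }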
+ void throw_self() { std::rethrow_exception(my_ptr); } + +private: + tbb_exception_ptr( const std::exception_ptr& src ) : my_ptr(src) {} + tbb_exception_ptr( const captured_exception& src ) : + #if __TBB_MAKE_EXCEPTION_PTR_PRESENT + my_ptr(std::make_exception_ptr(src)) // the final function name in C++11 + #else + my_ptr(std::copy_exception(src)) // early C++0x drafts name + #endif + {} +}; // class tbb::internal::tbb_exception_ptr + +} // namespace internal +#endif /* !TBB_USE_CAPTURED_EXCEPTION */ + +} // namespace tbb + +#endif /* __TBB_TASK_GROUP_CONTEXT */ + +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_tbb_exception_H_include_area + +#endif /* __TBB_exception_H */ diff --git a/ohos/arm64-v8a/include/tbb/tbb_machine.h b/ohos/arm64-v8a/include/tbb/tbb_machine.h new file mode 100644 index 00000000..9752be58 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/tbb_machine.h @@ -0,0 +1,978 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_machine_H +#define __TBB_machine_H + +/** This header provides basic platform abstraction layer by hooking up appropriate + architecture/OS/compiler specific headers from the /include/tbb/machine directory. + If a plug-in header does not implement all the required APIs, it must specify + the missing ones by setting one or more of the following macros: + + __TBB_USE_GENERIC_PART_WORD_CAS + __TBB_USE_GENERIC_PART_WORD_FETCH_ADD + __TBB_USE_GENERIC_PART_WORD_FETCH_STORE + __TBB_USE_GENERIC_FETCH_ADD + __TBB_USE_GENERIC_FETCH_STORE + __TBB_USE_GENERIC_DWORD_FETCH_ADD + __TBB_USE_GENERIC_DWORD_FETCH_STORE + __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE + __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE + __TBB_USE_GENERIC_RELAXED_LOAD_STORE + __TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE + + In this case tbb_machine.h will add missing functionality based on a minimal set + of APIs that are required to be implemented by all plug-n headers as described + further. + Note that these generic implementations may be sub-optimal for a particular + architecture, and thus should be relied upon only after careful evaluation + or as the last resort. + + Additionally __TBB_64BIT_ATOMICS can be set to 0 on a 32-bit architecture to + indicate that the port is not going to support double word atomics. It may also + be set to 1 explicitly, though normally this is not necessary as tbb_machine.h + will set it automatically. + + __TBB_ENDIANNESS macro can be defined by the implementation as well. + It is used only if __TBB_USE_GENERIC_PART_WORD_CAS is set (or for testing), + and must specify the layout of aligned 16-bit and 32-bit data anywhere within a process + (while the details of unaligned 16-bit or 32-bit data or of 64-bit data are irrelevant). + The layout must be the same at all relevant memory locations within the current process; + in case of page-specific endianness, one endianness must be kept "out of sight". + Possible settings, reflecting hardware and possibly O.S. 
convention, are: + - __TBB_ENDIAN_BIG for big-endian data, + - __TBB_ENDIAN_LITTLE for little-endian data, + - __TBB_ENDIAN_DETECT for run-time detection iff exactly one of the above, + - __TBB_ENDIAN_UNSUPPORTED to prevent undefined behavior if none of the above. + + Prerequisites for each architecture port + ---------------------------------------- + The following functions and macros have no generic implementation. Therefore they must be + implemented in each machine architecture specific header either as a conventional + function or as a functional macro. + + __TBB_WORDSIZE + This is the size of machine word in bytes, i.e. for 32 bit systems it + should be defined to 4. + + __TBB_Yield() + Signals OS that the current thread is willing to relinquish the remainder + of its time quantum. + + __TBB_full_memory_fence() + Must prevent all memory operations from being reordered across it (both + by hardware and compiler). All such fences must be totally ordered (or + sequentially consistent). + + __TBB_machine_cmpswp4( volatile void *ptr, int32_t value, int32_t comparand ) + Must be provided if __TBB_USE_FENCED_ATOMICS is not set. + + __TBB_machine_cmpswp8( volatile void *ptr, int32_t value, int64_t comparand ) + Must be provided for 64-bit architectures if __TBB_USE_FENCED_ATOMICS is not set, + and for 32-bit architectures if __TBB_64BIT_ATOMICS is set + + __TBB_machine_(...), where + = {cmpswp, fetchadd, fetchstore} + = {1, 2, 4, 8} + = {full_fence, acquire, release, relaxed} + Must be provided if __TBB_USE_FENCED_ATOMICS is set. + + __TBB_control_consistency_helper() + Bridges the memory-semantics gap between architectures providing only + implicit C++0x "consume" semantics (like Power Architecture) and those + also implicitly obeying control dependencies (like IA-64 architecture). + It must be used only in conditional code where the condition is itself + data-dependent, and will then make subsequent code behave as if the + original data dependency were acquired. + It needs only a compiler fence where implied by the architecture + either specifically (like IA-64 architecture) or because generally stronger + "acquire" semantics are enforced (like x86). + It is always valid, though potentially suboptimal, to replace + control with acquire on the load and then remove the helper. + + __TBB_acquire_consistency_helper(), __TBB_release_consistency_helper() + Must be provided if __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE is set. + Enforce acquire and release semantics in generic implementations of fenced + store and load operations. Depending on the particular architecture/compiler + combination they may be a hardware fence, a compiler fence, both or nothing. + **/ + +#include "tbb_stddef.h" + +namespace tbb { +namespace internal { //< @cond INTERNAL + +//////////////////////////////////////////////////////////////////////////////// +// Overridable helpers declarations +// +// A machine/*.h file may choose to define these templates, otherwise it must +// request default implementation by setting appropriate __TBB_USE_GENERIC_XXX macro(s). 
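Putting the prerequisites and the generic-fallback macros together: a hypothetical minimal port header (every name and choice below is illustrative, loosely modeled on the existing GCC-based ports, and is not an actual file in this patch) could supply only the mandatory primitives via GCC builtins and request everything else from the generic layer.

    // Hypothetical 64-bit little-endian port sketch built on GCC __sync builtins.
    #define __TBB_WORDSIZE   8
    #define __TBB_ENDIANNESS __TBB_ENDIAN_LITTLE

    // Full fence for every helper; stronger than strictly needed, but safe.
    #define __TBB_full_memory_fence()          __sync_synchronize()
    #define __TBB_control_consistency_helper() __sync_synchronize()
    #define __TBB_acquire_consistency_helper() __sync_synchronize()
    #define __TBB_release_consistency_helper() __sync_synchronize()

    inline int32_t __TBB_machine_cmpswp4(volatile void* ptr, int32_t value, int32_t comparand) {
        return __sync_val_compare_and_swap((volatile int32_t*)ptr, comparand, value);
    }
    inline int64_t __TBB_machine_cmpswp8(volatile void* ptr, int64_t value, int64_t comparand) {
        return __sync_val_compare_and_swap((volatile int64_t*)ptr, comparand, value);
    }

    #include <sched.h>
    #define __TBB_Yield() sched_yield()

    // Everything not provided above is synthesized by tbb_machine.h:
    #define __TBB_USE_GENERIC_PART_WORD_CAS                      1
    #define __TBB_USE_GENERIC_FETCH_ADD                          1
    #define __TBB_USE_GENERIC_FETCH_STORE                        1
    #define __TBB_USE_GENERIC_DWORD_FETCH_ADD                    1
    #define __TBB_USE_GENERIC_DWORD_FETCH_STORE                  1
    #define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE             1
    #define __TBB_USE_GENERIC_RELAXED_LOAD_STORE                 1
    #define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE  1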
+// +template +struct machine_load_store; + +template +struct machine_load_store_relaxed; + +template +struct machine_load_store_seq_cst; +// +// End of overridable helpers declarations +//////////////////////////////////////////////////////////////////////////////// + +template struct atomic_selector; + +template<> struct atomic_selector<1> { + typedef int8_t word; + inline static word fetch_store ( volatile void* location, word value ); +}; + +template<> struct atomic_selector<2> { + typedef int16_t word; + inline static word fetch_store ( volatile void* location, word value ); +}; + +template<> struct atomic_selector<4> { +#if _MSC_VER && !_WIN64 + // Work-around that avoids spurious /Wp64 warnings + typedef intptr_t word; +#else + typedef int32_t word; +#endif + inline static word fetch_store ( volatile void* location, word value ); +}; + +template<> struct atomic_selector<8> { + typedef int64_t word; + inline static word fetch_store ( volatile void* location, word value ); +}; + +}} //< namespaces internal @endcond, tbb + +#define __TBB_MACHINE_DEFINE_STORE8_GENERIC_FENCED(M) \ + inline void __TBB_machine_generic_store8##M(volatile void *ptr, int64_t value) { \ + for(;;) { \ + int64_t result = *(volatile int64_t *)ptr; \ + if( __TBB_machine_cmpswp8##M(ptr,value,result)==result ) break; \ + } \ + } \ + +#define __TBB_MACHINE_DEFINE_LOAD8_GENERIC_FENCED(M) \ + inline int64_t __TBB_machine_generic_load8##M(const volatile void *ptr) { \ + /* Comparand and new value may be anything, they only must be equal, and */ \ + /* the value should have a low probability to be actually found in 'location'.*/ \ + const int64_t anyvalue = 2305843009213693951LL; \ + return __TBB_machine_cmpswp8##M(const_cast(ptr),anyvalue,anyvalue); \ + } \ + +// The set of allowed values for __TBB_ENDIANNESS (see above for details) +#define __TBB_ENDIAN_UNSUPPORTED -1 +#define __TBB_ENDIAN_LITTLE 0 +#define __TBB_ENDIAN_BIG 1 +#define __TBB_ENDIAN_DETECT 2 + +#if _WIN32||_WIN64 + +#ifdef _MANAGED +#pragma managed(push, off) +#endif + + #if __MINGW64__ || __MINGW32__ + extern "C" __declspec(dllimport) int __stdcall SwitchToThread( void ); + #define __TBB_Yield() SwitchToThread() + #if (TBB_USE_GCC_BUILTINS && __TBB_GCC_BUILTIN_ATOMICS_PRESENT) + #include "machine/gcc_generic.h" + #elif __MINGW64__ + #include "machine/linux_intel64.h" + #elif __MINGW32__ + #include "machine/linux_ia32.h" + #endif + #elif (TBB_USE_ICC_BUILTINS && __TBB_ICC_BUILTIN_ATOMICS_PRESENT) + #include "machine/icc_generic.h" + #elif defined(_M_IX86) && !defined(__TBB_WIN32_USE_CL_BUILTINS) + #include "machine/windows_ia32.h" + #elif defined(_M_X64) + #include "machine/windows_intel64.h" + #elif defined(_M_ARM) || defined(__TBB_WIN32_USE_CL_BUILTINS) + #include "machine/msvc_armv7.h" + #endif + +#ifdef _MANAGED +#pragma managed(pop) +#endif + +#elif __TBB_DEFINE_MIC + + #include "machine/mic_common.h" + #if (TBB_USE_ICC_BUILTINS && __TBB_ICC_BUILTIN_ATOMICS_PRESENT) + #include "machine/icc_generic.h" + #else + #include "machine/linux_intel64.h" + #endif + +#elif __linux__ || __FreeBSD__ || __NetBSD__ || __OpenBSD__ + + #if (TBB_USE_GCC_BUILTINS && __TBB_GCC_BUILTIN_ATOMICS_PRESENT) + #include "machine/gcc_generic.h" + #elif (TBB_USE_ICC_BUILTINS && __TBB_ICC_BUILTIN_ATOMICS_PRESENT) + #include "machine/icc_generic.h" + #elif __i386__ + #include "machine/linux_ia32.h" + #elif __x86_64__ + #include "machine/linux_intel64.h" + #elif __ia64__ + #include "machine/linux_ia64.h" + #elif __powerpc__ + #include "machine/mac_ppc.h" + #elif __ARM_ARCH_7A__ || 
__aarch64__ + #include "machine/gcc_arm.h" + #elif __TBB_GCC_BUILTIN_ATOMICS_PRESENT + #include "machine/gcc_generic.h" + #endif + #include "machine/linux_common.h" + +#elif __APPLE__ + //TODO: TBB_USE_GCC_BUILTINS is not used for Mac, Sun, Aix + #if (TBB_USE_ICC_BUILTINS && __TBB_ICC_BUILTIN_ATOMICS_PRESENT) + #include "machine/icc_generic.h" + #elif __TBB_x86_32 + #include "machine/linux_ia32.h" + #elif __TBB_x86_64 + #include "machine/linux_intel64.h" + #elif __POWERPC__ + #include "machine/mac_ppc.h" + #endif + #include "machine/macos_common.h" + +#elif _AIX + + #include "machine/ibm_aix51.h" + +#elif __sun || __SUNPRO_CC + + #define __asm__ asm + #define __volatile__ volatile + + #if __i386 || __i386__ + #include "machine/linux_ia32.h" + #elif __x86_64__ + #include "machine/linux_intel64.h" + #elif __sparc + #include "machine/sunos_sparc.h" + #endif + #include + + #define __TBB_Yield() sched_yield() + +#endif /* OS selection */ + +#ifndef __TBB_64BIT_ATOMICS + #define __TBB_64BIT_ATOMICS 1 +#endif + +//TODO: replace usage of these functions with usage of tbb::atomic, and then remove them +//TODO: map functions with W suffix to use cast to tbb::atomic and according op, i.e. as_atomic().op() +// Special atomic functions +#if __TBB_USE_FENCED_ATOMICS + #define __TBB_machine_cmpswp1 __TBB_machine_cmpswp1full_fence + #define __TBB_machine_cmpswp2 __TBB_machine_cmpswp2full_fence + #define __TBB_machine_cmpswp4 __TBB_machine_cmpswp4full_fence + #define __TBB_machine_cmpswp8 __TBB_machine_cmpswp8full_fence + + #if __TBB_WORDSIZE==8 + #define __TBB_machine_fetchadd8 __TBB_machine_fetchadd8full_fence + #define __TBB_machine_fetchstore8 __TBB_machine_fetchstore8full_fence + #define __TBB_FetchAndAddWrelease(P,V) __TBB_machine_fetchadd8release(P,V) + #define __TBB_FetchAndIncrementWacquire(P) __TBB_machine_fetchadd8acquire(P,1) + #define __TBB_FetchAndDecrementWrelease(P) __TBB_machine_fetchadd8release(P,(-1)) + #else + #define __TBB_machine_fetchadd4 __TBB_machine_fetchadd4full_fence + #define __TBB_machine_fetchstore4 __TBB_machine_fetchstore4full_fence + #define __TBB_FetchAndAddWrelease(P,V) __TBB_machine_fetchadd4release(P,V) + #define __TBB_FetchAndIncrementWacquire(P) __TBB_machine_fetchadd4acquire(P,1) + #define __TBB_FetchAndDecrementWrelease(P) __TBB_machine_fetchadd4release(P,(-1)) + #endif /* __TBB_WORDSIZE==4 */ +#else /* !__TBB_USE_FENCED_ATOMICS */ + #define __TBB_FetchAndAddWrelease(P,V) __TBB_FetchAndAddW(P,V) + #define __TBB_FetchAndIncrementWacquire(P) __TBB_FetchAndAddW(P,1) + #define __TBB_FetchAndDecrementWrelease(P) __TBB_FetchAndAddW(P,(-1)) +#endif /* !__TBB_USE_FENCED_ATOMICS */ + +#if __TBB_WORDSIZE==4 + #define __TBB_CompareAndSwapW(P,V,C) __TBB_machine_cmpswp4(P,V,C) + #define __TBB_FetchAndAddW(P,V) __TBB_machine_fetchadd4(P,V) + #define __TBB_FetchAndStoreW(P,V) __TBB_machine_fetchstore4(P,V) +#elif __TBB_WORDSIZE==8 + #if __TBB_USE_GENERIC_DWORD_LOAD_STORE || __TBB_USE_GENERIC_DWORD_FETCH_ADD || __TBB_USE_GENERIC_DWORD_FETCH_STORE + #error These macros should only be used on 32-bit platforms. + #endif + + #define __TBB_CompareAndSwapW(P,V,C) __TBB_machine_cmpswp8(P,V,C) + #define __TBB_FetchAndAddW(P,V) __TBB_machine_fetchadd8(P,V) + #define __TBB_FetchAndStoreW(P,V) __TBB_machine_fetchstore8(P,V) +#else /* __TBB_WORDSIZE != 8 */ + #error Unsupported machine word size. +#endif /* __TBB_WORDSIZE */ + +#ifndef __TBB_Pause + inline void __TBB_Pause(int32_t) { + __TBB_Yield(); + } +#endif + +namespace tbb { + +//! Sequentially consistent full memory fence. 
+inline void atomic_fence () { __TBB_full_memory_fence(); } + +namespace internal { //< @cond INTERNAL + +//! Class that implements exponential backoff. +/** See implementation of spin_wait_while_eq for an example. */ +class atomic_backoff : no_copy { + //! Time delay, in units of "pause" instructions. + /** Should be equal to approximately the number of "pause" instructions + that take the same time as an context switch. Must be a power of two.*/ + static const int32_t LOOPS_BEFORE_YIELD = 16; + int32_t count; +public: + // In many cases, an object of this type is initialized eagerly on hot path, + // as in for(atomic_backoff b; ; b.pause()) { /*loop body*/ } + // For this reason, the construction cost must be very small! + atomic_backoff() : count(1) {} + // This constructor pauses immediately; do not use on hot paths! + atomic_backoff( bool ) : count(1) { pause(); } + + //! Pause for a while. + void pause() { + if( count<=LOOPS_BEFORE_YIELD ) { + __TBB_Pause(count); + // Pause twice as long the next time. + count*=2; + } else { + // Pause is so long that we might as well yield CPU to scheduler. + __TBB_Yield(); + } + } + + //! Pause for a few times and return false if saturated. + bool bounded_pause() { + __TBB_Pause(count); + if( count +void spin_wait_while_eq( const volatile T& location, U value ) { + atomic_backoff backoff; + while( location==value ) backoff.pause(); +} + +//! Spin UNTIL the value of the variable is equal to a given value +/** T and U should be comparable types. */ +template +void spin_wait_until_eq( const volatile T& location, const U value ) { + atomic_backoff backoff; + while( location!=value ) backoff.pause(); +} + +template +void spin_wait_while(predicate_type condition){ + atomic_backoff backoff; + while( condition() ) backoff.pause(); +} + +//////////////////////////////////////////////////////////////////////////////// +// Generic compare-and-swap applied to only a part of a machine word. +// +#ifndef __TBB_ENDIANNESS +#define __TBB_ENDIANNESS __TBB_ENDIAN_DETECT +#endif + +#if __TBB_USE_GENERIC_PART_WORD_CAS && __TBB_ENDIANNESS==__TBB_ENDIAN_UNSUPPORTED +#error Generic implementation of part-word CAS may not be used with __TBB_ENDIAN_UNSUPPORTED +#endif + +#if __TBB_ENDIANNESS!=__TBB_ENDIAN_UNSUPPORTED +// +// This function is the only use of __TBB_ENDIANNESS. 
+// The following restrictions/limitations apply for this operation: +// - T must be an integer type of at most 4 bytes for the casts and calculations to work +// - T must also be less than 4 bytes to avoid compiler warnings when computing mask +// (and for the operation to be useful at all, so no workaround is applied) +// - the architecture must consistently use either little-endian or big-endian (same for all locations) +// +// TODO: static_assert for the type requirements stated above +template +inline T __TBB_MaskedCompareAndSwap (volatile T * const ptr, const T value, const T comparand ) { + struct endianness{ static bool is_big_endian(){ + #if __TBB_ENDIANNESS==__TBB_ENDIAN_DETECT + const uint32_t probe = 0x03020100; + return (((const char*)(&probe))[0]==0x03); + #elif __TBB_ENDIANNESS==__TBB_ENDIAN_BIG || __TBB_ENDIANNESS==__TBB_ENDIAN_LITTLE + return __TBB_ENDIANNESS==__TBB_ENDIAN_BIG; + #else + #error Unexpected value of __TBB_ENDIANNESS + #endif + }}; + + const uint32_t byte_offset = (uint32_t) ((uintptr_t)ptr & 0x3); + volatile uint32_t * const aligned_ptr = (uint32_t*)((uintptr_t)ptr - byte_offset ); + + // location of T within uint32_t for a C++ shift operation + const uint32_t bits_to_shift = 8*(endianness::is_big_endian() ? (4 - sizeof(T) - (byte_offset)) : byte_offset); + const uint32_t mask = (((uint32_t)1<<(sizeof(T)*8)) - 1 )<> bits_to_shift); + } + else continue; // CAS failed but the bits of interest were not changed + } +} +#endif // __TBB_ENDIANNESS!=__TBB_ENDIAN_UNSUPPORTED +//////////////////////////////////////////////////////////////////////////////// + +template +inline T __TBB_CompareAndSwapGeneric (volatile void *ptr, T value, T comparand ); + +template<> +inline int8_t __TBB_CompareAndSwapGeneric <1,int8_t> (volatile void *ptr, int8_t value, int8_t comparand ) { +#if __TBB_USE_GENERIC_PART_WORD_CAS + return __TBB_MaskedCompareAndSwap((volatile int8_t *)ptr,value,comparand); +#else + return __TBB_machine_cmpswp1(ptr,value,comparand); +#endif +} + +template<> +inline int16_t __TBB_CompareAndSwapGeneric <2,int16_t> (volatile void *ptr, int16_t value, int16_t comparand ) { +#if __TBB_USE_GENERIC_PART_WORD_CAS + return __TBB_MaskedCompareAndSwap((volatile int16_t *)ptr,value,comparand); +#else + return __TBB_machine_cmpswp2(ptr,value,comparand); +#endif +} + +template<> +inline int32_t __TBB_CompareAndSwapGeneric <4,int32_t> (volatile void *ptr, int32_t value, int32_t comparand ) { + // Cast shuts up /Wp64 warning + return (int32_t)__TBB_machine_cmpswp4(ptr,value,comparand); +} + +#if __TBB_64BIT_ATOMICS +template<> +inline int64_t __TBB_CompareAndSwapGeneric <8,int64_t> (volatile void *ptr, int64_t value, int64_t comparand ) { + return __TBB_machine_cmpswp8(ptr,value,comparand); +} +#endif + +template +inline T __TBB_FetchAndAddGeneric (volatile void *ptr, T addend) { + T result; + for( atomic_backoff b;;b.pause() ) { + result = *reinterpret_cast(ptr); + // __TBB_CompareAndSwapGeneric presumed to have full fence. + if( __TBB_CompareAndSwapGeneric ( ptr, result+addend, result )==result ) + break; + } + return result; +} + +template +inline T __TBB_FetchAndStoreGeneric (volatile void *ptr, T value) { + T result; + for( atomic_backoff b;;b.pause() ) { + result = *reinterpret_cast(ptr); + // __TBB_CompareAndSwapGeneric presumed to have full fence. 
+ if( __TBB_CompareAndSwapGeneric ( ptr, value, result )==result ) + break; + } + return result; +} + +#if __TBB_USE_GENERIC_PART_WORD_CAS +#define __TBB_machine_cmpswp1 tbb::internal::__TBB_CompareAndSwapGeneric<1,int8_t> +#define __TBB_machine_cmpswp2 tbb::internal::__TBB_CompareAndSwapGeneric<2,int16_t> +#endif + +#if __TBB_USE_GENERIC_FETCH_ADD || __TBB_USE_GENERIC_PART_WORD_FETCH_ADD +#define __TBB_machine_fetchadd1 tbb::internal::__TBB_FetchAndAddGeneric<1,int8_t> +#define __TBB_machine_fetchadd2 tbb::internal::__TBB_FetchAndAddGeneric<2,int16_t> +#endif + +#if __TBB_USE_GENERIC_FETCH_ADD +#define __TBB_machine_fetchadd4 tbb::internal::__TBB_FetchAndAddGeneric<4,int32_t> +#endif + +#if __TBB_USE_GENERIC_FETCH_ADD || __TBB_USE_GENERIC_DWORD_FETCH_ADD +#define __TBB_machine_fetchadd8 tbb::internal::__TBB_FetchAndAddGeneric<8,int64_t> +#endif + +#if __TBB_USE_GENERIC_FETCH_STORE || __TBB_USE_GENERIC_PART_WORD_FETCH_STORE +#define __TBB_machine_fetchstore1 tbb::internal::__TBB_FetchAndStoreGeneric<1,int8_t> +#define __TBB_machine_fetchstore2 tbb::internal::__TBB_FetchAndStoreGeneric<2,int16_t> +#endif + +#if __TBB_USE_GENERIC_FETCH_STORE +#define __TBB_machine_fetchstore4 tbb::internal::__TBB_FetchAndStoreGeneric<4,int32_t> +#endif + +#if __TBB_USE_GENERIC_FETCH_STORE || __TBB_USE_GENERIC_DWORD_FETCH_STORE +#define __TBB_machine_fetchstore8 tbb::internal::__TBB_FetchAndStoreGeneric<8,int64_t> +#endif + +#if __TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE +#define __TBB_MACHINE_DEFINE_ATOMIC_SELECTOR_FETCH_STORE(S) \ + atomic_selector::word atomic_selector::fetch_store ( volatile void* location, word value ) { \ + return __TBB_machine_fetchstore##S( location, value ); \ + } + +__TBB_MACHINE_DEFINE_ATOMIC_SELECTOR_FETCH_STORE(1) +__TBB_MACHINE_DEFINE_ATOMIC_SELECTOR_FETCH_STORE(2) +__TBB_MACHINE_DEFINE_ATOMIC_SELECTOR_FETCH_STORE(4) +__TBB_MACHINE_DEFINE_ATOMIC_SELECTOR_FETCH_STORE(8) + +#undef __TBB_MACHINE_DEFINE_ATOMIC_SELECTOR_FETCH_STORE +#endif /* __TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE */ + +#if __TBB_USE_GENERIC_DWORD_LOAD_STORE +/*TODO: find a more elegant way to handle function names difference*/ +#if ! __TBB_USE_FENCED_ATOMICS + /* This name forwarding is needed for generic implementation of + * load8/store8 defined below (via macro) to pick the right CAS function*/ + #define __TBB_machine_cmpswp8full_fence __TBB_machine_cmpswp8 +#endif +__TBB_MACHINE_DEFINE_LOAD8_GENERIC_FENCED(full_fence) +__TBB_MACHINE_DEFINE_STORE8_GENERIC_FENCED(full_fence) + +#if ! __TBB_USE_FENCED_ATOMICS + #undef __TBB_machine_cmpswp8full_fence +#endif + +#define __TBB_machine_store8 tbb::internal::__TBB_machine_generic_store8full_fence +#define __TBB_machine_load8 tbb::internal::__TBB_machine_generic_load8full_fence +#endif /* __TBB_USE_GENERIC_DWORD_LOAD_STORE */ + +#if __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE +/** Fenced operations use volatile qualifier to prevent compiler from optimizing + them out, and on architectures with weak memory ordering to induce compiler + to generate code with appropriate acquire/release semantics. + On architectures like IA32, Intel64 (and likely Sparc TSO) volatile has + no effect on code gen, and consistency helpers serve as a compiler fence (the + latter being true for IA64/gcc as well to fix a bug in some gcc versions). 
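The generic fetch-and-add and fetch-and-store templates above are ordinary compare-and-swap retry loops. For comparison only, here is the same shape written against C++11 std::atomic; the library itself performs it on raw memory through the port's full-fence CAS, with atomic_backoff between attempts:

    #include <atomic>

    template <typename T>
    T fetch_and_add_via_cas(std::atomic<T>& location, T addend) {
        T observed = location.load();
        // On failure, compare_exchange_weak refreshes 'observed' with the current value,
        // so the next attempt adds to what another thread just stored.
        while (!location.compare_exchange_weak(observed, observed + addend)) {
            /* retry */
        }
        return observed;   // value seen immediately before the successful update
    }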
+ This code assumes that the generated instructions will operate atomically, + which typically requires a type that can be moved in a single instruction, + cooperation from the compiler for effective use of such an instruction, + and appropriate alignment of the data. **/ +template +struct machine_load_store { + static T load_with_acquire ( const volatile T& location ) { + T to_return = location; + __TBB_acquire_consistency_helper(); + return to_return; + } + static void store_with_release ( volatile T &location, T value ) { + __TBB_release_consistency_helper(); + location = value; + } +}; + +//in general, plain load and store of 32bit compiler is not atomic for 64bit types +#if __TBB_WORDSIZE==4 && __TBB_64BIT_ATOMICS +template +struct machine_load_store { + static T load_with_acquire ( const volatile T& location ) { + return (T)__TBB_machine_load8( (const volatile void*)&location ); + } + static void store_with_release ( volatile T& location, T value ) { + __TBB_machine_store8( (volatile void*)&location, (int64_t)value ); + } +}; +#endif /* __TBB_WORDSIZE==4 && __TBB_64BIT_ATOMICS */ +#endif /* __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE */ + +#if __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE +template +struct machine_load_store_seq_cst { + static T load ( const volatile T& location ) { + __TBB_full_memory_fence(); + return machine_load_store::load_with_acquire( location ); + } +#if __TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE + static void store ( volatile T &location, T value ) { + atomic_selector::fetch_store( (volatile void*)&location, (typename atomic_selector::word)value ); + } +#else /* !__TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE */ + static void store ( volatile T &location, T value ) { + machine_load_store::store_with_release( location, value ); + __TBB_full_memory_fence(); + } +#endif /* !__TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE */ +}; + +#if __TBB_WORDSIZE==4 && __TBB_64BIT_ATOMICS +/** The implementation does not use functions __TBB_machine_load8/store8 as they + are not required to be sequentially consistent. **/ +template +struct machine_load_store_seq_cst { + static T load ( const volatile T& location ) { + // Comparand and new value may be anything, they only must be equal, and + // the value should have a low probability to be actually found in 'location'. + const int64_t anyvalue = 2305843009213693951LL; + return __TBB_machine_cmpswp8( (volatile void*)const_cast(&location), anyvalue, anyvalue ); + } + static void store ( volatile T &location, T value ) { +#if __TBB_GCC_VERSION >= 40702 +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wmaybe-uninitialized" +#endif + // An atomic initialization leads to reading of uninitialized memory + int64_t result = (volatile int64_t&)location; +#if __TBB_GCC_VERSION >= 40702 +#pragma GCC diagnostic pop +#endif + while ( __TBB_machine_cmpswp8((volatile void*)&location, (int64_t)value, result) != result ) + result = (volatile int64_t&)location; + } +}; +#endif /* __TBB_WORDSIZE==4 && __TBB_64BIT_ATOMICS */ +#endif /*__TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE */ + +#if __TBB_USE_GENERIC_RELAXED_LOAD_STORE +// Relaxed operations add volatile qualifier to prevent compiler from optimizing them out. +/** Volatile should not incur any additional cost on IA32, Intel64, and Sparc TSO + architectures. However on architectures with weak memory ordering compiler may + generate code with acquire/release semantics for operations on volatile data. 
**/ +template +struct machine_load_store_relaxed { + static inline T load ( const volatile T& location ) { + return location; + } + static inline void store ( volatile T& location, T value ) { + location = value; + } +}; + +#if __TBB_WORDSIZE==4 && __TBB_64BIT_ATOMICS +template +struct machine_load_store_relaxed { + static inline T load ( const volatile T& location ) { + return (T)__TBB_machine_load8( (const volatile void*)&location ); + } + static inline void store ( volatile T& location, T value ) { + __TBB_machine_store8( (volatile void*)&location, (int64_t)value ); + } +}; +#endif /* __TBB_WORDSIZE==4 && __TBB_64BIT_ATOMICS */ +#endif /* __TBB_USE_GENERIC_RELAXED_LOAD_STORE */ + +#undef __TBB_WORDSIZE //this macro is forbidden to use outside of atomic machinery + +template +inline T __TBB_load_with_acquire(const volatile T &location) { + return machine_load_store::load_with_acquire( location ); +} +template +inline void __TBB_store_with_release(volatile T& location, V value) { + machine_load_store::store_with_release( location, T(value) ); +} +//! Overload that exists solely to avoid /Wp64 warnings. +inline void __TBB_store_with_release(volatile size_t& location, size_t value) { + machine_load_store::store_with_release( location, value ); +} + +template +inline T __TBB_load_full_fence(const volatile T &location) { + return machine_load_store_seq_cst::load( location ); +} +template +inline void __TBB_store_full_fence(volatile T& location, V value) { + machine_load_store_seq_cst::store( location, T(value) ); +} +//! Overload that exists solely to avoid /Wp64 warnings. +inline void __TBB_store_full_fence(volatile size_t& location, size_t value) { + machine_load_store_seq_cst::store( location, value ); +} + +template +inline T __TBB_load_relaxed (const volatile T& location) { + return machine_load_store_relaxed::load( const_cast(location) ); +} +template +inline void __TBB_store_relaxed ( volatile T& location, V value ) { + machine_load_store_relaxed::store( const_cast(location), T(value) ); +} +//! Overload that exists solely to avoid /Wp64 warnings. +inline void __TBB_store_relaxed ( volatile size_t& location, size_t value ) { + machine_load_store_relaxed::store( const_cast(location), value ); +} + +// Macro __TBB_TypeWithAlignmentAtLeastAsStrict(T) should be a type with alignment at least as +// strict as type T. The type should have a trivial default constructor and destructor, so that +// arrays of that type can be declared without initializers. +// It is correct (but perhaps a waste of space) if __TBB_TypeWithAlignmentAtLeastAsStrict(T) expands +// to a type bigger than T. +// The default definition here works on machines where integers are naturally aligned and the +// strictest alignment is 64. 
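The load_with_acquire/store_with_release wrappers defined above pair up in the usual publication pattern. A minimal sketch follows (message is an invented type; real client code would normally prefer tbb::atomic or std::atomic over these internal helpers):

    #include "tbb/tbb_machine.h"

    struct message {
        int payload;
        int ready;   // 0 = not yet published, 1 = published
    };

    inline void publish(message& m, int value) {
        m.payload = value;                                     // ordinary store
        tbb::internal::__TBB_store_with_release(m.ready, 1);   // makes payload visible first
    }

    inline bool try_consume(const message& m, int& out) {
        if (tbb::internal::__TBB_load_with_acquire(m.ready) == 1) {
            out = m.payload;   // safe: this acquire pairs with the release in publish()
            return true;
        }
        return false;
    }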
+#ifndef __TBB_TypeWithAlignmentAtLeastAsStrict + +#if __TBB_ALIGNAS_PRESENT + +// Use C++11 keywords alignas and alignof +#define __TBB_DefineTypeWithAlignment(PowerOf2) \ +struct alignas(PowerOf2) __TBB_machine_type_with_alignment_##PowerOf2 { \ + uint32_t member[PowerOf2/sizeof(uint32_t)]; \ +}; +#define __TBB_alignof(T) alignof(T) + +#elif __TBB_ATTRIBUTE_ALIGNED_PRESENT + +#define __TBB_DefineTypeWithAlignment(PowerOf2) \ +struct __TBB_machine_type_with_alignment_##PowerOf2 { \ + uint32_t member[PowerOf2/sizeof(uint32_t)]; \ +} __attribute__((aligned(PowerOf2))); +#define __TBB_alignof(T) __alignof__(T) + +#elif __TBB_DECLSPEC_ALIGN_PRESENT + +#define __TBB_DefineTypeWithAlignment(PowerOf2) \ +__declspec(align(PowerOf2)) \ +struct __TBB_machine_type_with_alignment_##PowerOf2 { \ + uint32_t member[PowerOf2/sizeof(uint32_t)]; \ +}; +#define __TBB_alignof(T) __alignof(T) + +#else /* A compiler with unknown syntax for data alignment */ +#error Must define __TBB_TypeWithAlignmentAtLeastAsStrict(T) +#endif + +/* Now declare types aligned to useful powers of two */ +__TBB_DefineTypeWithAlignment(8) // i386 ABI says that uint64_t is aligned on 4 bytes +__TBB_DefineTypeWithAlignment(16) +__TBB_DefineTypeWithAlignment(32) +__TBB_DefineTypeWithAlignment(64) + +typedef __TBB_machine_type_with_alignment_64 __TBB_machine_type_with_strictest_alignment; + +// Primary template is a declaration of incomplete type so that it fails with unknown alignments +template struct type_with_alignment; + +// Specializations for allowed alignments +template<> struct type_with_alignment<1> { char member; }; +template<> struct type_with_alignment<2> { uint16_t member; }; +template<> struct type_with_alignment<4> { uint32_t member; }; +template<> struct type_with_alignment<8> { __TBB_machine_type_with_alignment_8 member; }; +template<> struct type_with_alignment<16> {__TBB_machine_type_with_alignment_16 member; }; +template<> struct type_with_alignment<32> {__TBB_machine_type_with_alignment_32 member; }; +template<> struct type_with_alignment<64> {__TBB_machine_type_with_alignment_64 member; }; + +#if __TBB_ALIGNOF_NOT_INSTANTIATED_TYPES_BROKEN +//! Work around for bug in GNU 3.2 and MSVC compilers. +/** Bug is that compiler sometimes returns 0 for __alignof(T) when T has not yet been instantiated. + The work-around forces instantiation by forcing computation of sizeof(T) before __alignof(T). */ +template +struct work_around_alignment_bug { + static const size_t alignment = __TBB_alignof(T); +}; +#define __TBB_TypeWithAlignmentAtLeastAsStrict(T) tbb::internal::type_with_alignment::alignment> +#else +#define __TBB_TypeWithAlignmentAtLeastAsStrict(T) tbb::internal::type_with_alignment<__TBB_alignof(T)> +#endif /* __TBB_ALIGNOF_NOT_INSTANTIATED_TYPES_BROKEN */ + +#endif /* __TBB_TypeWithAlignmentAtLeastAsStrict */ + +// Template class here is to avoid instantiation of the static data for modules that don't use it +template +struct reverse { + static const T byte_table[256]; +}; +// An efficient implementation of the reverse function utilizes a 2^8 lookup table holding the bit-reversed +// values of [0..2^8 - 1]. Those values can also be computed on the fly at a slightly higher cost. 
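The on-the-fly alternative mentioned in the comment above can be written with three shift-and-mask steps; this illustrative function returns the same result as a byte_table lookup, only a little slower per call:

    // Reverse the bits of one byte without a lookup table (illustrative only).
    inline unsigned char reverse_byte_on_the_fly(unsigned char b) {
        b = (unsigned char)(((b & 0xF0) >> 4) | ((b & 0x0F) << 4));  // swap nibbles
        b = (unsigned char)(((b & 0xCC) >> 2) | ((b & 0x33) << 2));  // swap bit pairs
        b = (unsigned char)(((b & 0xAA) >> 1) | ((b & 0x55) << 1));  // swap adjacent bits
        return b;
    }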
+template +const T reverse::byte_table[256] = { + 0x00, 0x80, 0x40, 0xC0, 0x20, 0xA0, 0x60, 0xE0, 0x10, 0x90, 0x50, 0xD0, 0x30, 0xB0, 0x70, 0xF0, + 0x08, 0x88, 0x48, 0xC8, 0x28, 0xA8, 0x68, 0xE8, 0x18, 0x98, 0x58, 0xD8, 0x38, 0xB8, 0x78, 0xF8, + 0x04, 0x84, 0x44, 0xC4, 0x24, 0xA4, 0x64, 0xE4, 0x14, 0x94, 0x54, 0xD4, 0x34, 0xB4, 0x74, 0xF4, + 0x0C, 0x8C, 0x4C, 0xCC, 0x2C, 0xAC, 0x6C, 0xEC, 0x1C, 0x9C, 0x5C, 0xDC, 0x3C, 0xBC, 0x7C, 0xFC, + 0x02, 0x82, 0x42, 0xC2, 0x22, 0xA2, 0x62, 0xE2, 0x12, 0x92, 0x52, 0xD2, 0x32, 0xB2, 0x72, 0xF2, + 0x0A, 0x8A, 0x4A, 0xCA, 0x2A, 0xAA, 0x6A, 0xEA, 0x1A, 0x9A, 0x5A, 0xDA, 0x3A, 0xBA, 0x7A, 0xFA, + 0x06, 0x86, 0x46, 0xC6, 0x26, 0xA6, 0x66, 0xE6, 0x16, 0x96, 0x56, 0xD6, 0x36, 0xB6, 0x76, 0xF6, + 0x0E, 0x8E, 0x4E, 0xCE, 0x2E, 0xAE, 0x6E, 0xEE, 0x1E, 0x9E, 0x5E, 0xDE, 0x3E, 0xBE, 0x7E, 0xFE, + 0x01, 0x81, 0x41, 0xC1, 0x21, 0xA1, 0x61, 0xE1, 0x11, 0x91, 0x51, 0xD1, 0x31, 0xB1, 0x71, 0xF1, + 0x09, 0x89, 0x49, 0xC9, 0x29, 0xA9, 0x69, 0xE9, 0x19, 0x99, 0x59, 0xD9, 0x39, 0xB9, 0x79, 0xF9, + 0x05, 0x85, 0x45, 0xC5, 0x25, 0xA5, 0x65, 0xE5, 0x15, 0x95, 0x55, 0xD5, 0x35, 0xB5, 0x75, 0xF5, + 0x0D, 0x8D, 0x4D, 0xCD, 0x2D, 0xAD, 0x6D, 0xED, 0x1D, 0x9D, 0x5D, 0xDD, 0x3D, 0xBD, 0x7D, 0xFD, + 0x03, 0x83, 0x43, 0xC3, 0x23, 0xA3, 0x63, 0xE3, 0x13, 0x93, 0x53, 0xD3, 0x33, 0xB3, 0x73, 0xF3, + 0x0B, 0x8B, 0x4B, 0xCB, 0x2B, 0xAB, 0x6B, 0xEB, 0x1B, 0x9B, 0x5B, 0xDB, 0x3B, 0xBB, 0x7B, 0xFB, + 0x07, 0x87, 0x47, 0xC7, 0x27, 0xA7, 0x67, 0xE7, 0x17, 0x97, 0x57, 0xD7, 0x37, 0xB7, 0x77, 0xF7, + 0x0F, 0x8F, 0x4F, 0xCF, 0x2F, 0xAF, 0x6F, 0xEF, 0x1F, 0x9F, 0x5F, 0xDF, 0x3F, 0xBF, 0x7F, 0xFF +}; + +} // namespace internal @endcond +} // namespace tbb + +// Preserving access to legacy APIs +using tbb::internal::__TBB_load_with_acquire; +using tbb::internal::__TBB_store_with_release; + +// Mapping historically used names to the ones expected by atomic_load_store_traits +#define __TBB_load_acquire __TBB_load_with_acquire +#define __TBB_store_release __TBB_store_with_release + +#ifndef __TBB_Log2 +inline intptr_t __TBB_Log2( uintptr_t x ) { + if( x==0 ) return -1; + intptr_t result = 0; + +#if !defined(_M_ARM) + uintptr_t tmp_; + if( sizeof(x)>4 && (tmp_ = ((uint64_t)x)>>32) ) { x=tmp_; result += 32; } +#endif + if( uintptr_t tmp = x>>16 ) { x=tmp; result += 16; } + if( uintptr_t tmp = x>>8 ) { x=tmp; result += 8; } + if( uintptr_t tmp = x>>4 ) { x=tmp; result += 4; } + if( uintptr_t tmp = x>>2 ) { x=tmp; result += 2; } + + return (x&2)? 
result+1: result; +} +#endif + +#ifndef __TBB_AtomicOR +inline void __TBB_AtomicOR( volatile void *operand, uintptr_t addend ) { + for( tbb::internal::atomic_backoff b;;b.pause() ) { + uintptr_t tmp = *(volatile uintptr_t *)operand; + uintptr_t result = __TBB_CompareAndSwapW(operand, tmp|addend, tmp); + if( result==tmp ) break; + } +} +#endif + +#ifndef __TBB_AtomicAND +inline void __TBB_AtomicAND( volatile void *operand, uintptr_t addend ) { + for( tbb::internal::atomic_backoff b;;b.pause() ) { + uintptr_t tmp = *(volatile uintptr_t *)operand; + uintptr_t result = __TBB_CompareAndSwapW(operand, tmp&addend, tmp); + if( result==tmp ) break; + } +} +#endif + +#if __TBB_PREFETCHING +#ifndef __TBB_cl_prefetch +#error This platform does not define cache management primitives required for __TBB_PREFETCHING +#endif + +#ifndef __TBB_cl_evict +#define __TBB_cl_evict(p) +#endif +#endif + +#ifndef __TBB_Flag +typedef unsigned char __TBB_Flag; +#endif +typedef __TBB_atomic __TBB_Flag __TBB_atomic_flag; + +#ifndef __TBB_TryLockByte +inline bool __TBB_TryLockByte( __TBB_atomic_flag &flag ) { + return __TBB_machine_cmpswp1(&flag,1,0)==0; +} +#endif + +#ifndef __TBB_LockByte +inline __TBB_Flag __TBB_LockByte( __TBB_atomic_flag& flag ) { + tbb::internal::atomic_backoff backoff; + while( !__TBB_TryLockByte(flag) ) backoff.pause(); + return 0; +} +#endif + +#ifndef __TBB_UnlockByte +#define __TBB_UnlockByte(addr) __TBB_store_with_release((addr),0) +#endif + +// lock primitives with Intel(R) Transactional Synchronization Extensions (Intel(R) TSX) +#if ( __TBB_x86_32 || __TBB_x86_64 ) /* only on ia32/intel64 */ +inline void __TBB_TryLockByteElidedCancel() { __TBB_machine_try_lock_elided_cancel(); } + +inline bool __TBB_TryLockByteElided( __TBB_atomic_flag& flag ) { + bool res = __TBB_machine_try_lock_elided( &flag )!=0; + // to avoid the "lemming" effect, we need to abort the transaction + // if __TBB_machine_try_lock_elided returns false (i.e., someone else + // has acquired the mutex non-speculatively). + if( !res ) __TBB_TryLockByteElidedCancel(); + return res; +} + +inline void __TBB_LockByteElided( __TBB_atomic_flag& flag ) +{ + for(;;) { + tbb::internal::spin_wait_while_eq( flag, 1 ); + if( __TBB_machine_try_lock_elided( &flag ) ) + return; + // Another thread acquired the lock "for real". + // To avoid the "lemming" effect, we abort the transaction. + __TBB_TryLockByteElidedCancel(); + } +} + +inline void __TBB_UnlockByteElided( __TBB_atomic_flag& flag ) { + __TBB_machine_unlock_elided( &flag ); +} +#endif + +#ifndef __TBB_ReverseByte +inline unsigned char __TBB_ReverseByte(unsigned char src) { + return tbb::internal::reverse::byte_table[src]; +} +#endif + +template +T __TBB_ReverseBits(T src) { + T dst; + unsigned char *original = (unsigned char *) &src; + unsigned char *reversed = (unsigned char *) &dst; + + for( int i = sizeof(T)-1; i >= 0; i-- ) + reversed[i] = __TBB_ReverseByte( original[sizeof(T)-i-1] ); + + return dst; +} + +#endif /* __TBB_machine_H */ diff --git a/ohos/arm64-v8a/include/tbb/tbb_profiling.h b/ohos/arm64-v8a/include/tbb/tbb_profiling.h new file mode 100644 index 00000000..20f8f512 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/tbb_profiling.h @@ -0,0 +1,355 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_profiling_H +#define __TBB_profiling_H + +#define __TBB_tbb_profiling_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + +namespace tbb { + namespace internal { + + // include list of index names + #define TBB_STRING_RESOURCE(index_name,str) index_name, + enum string_index { + #include "internal/_tbb_strings.h" + NUM_STRINGS + }; + #undef TBB_STRING_RESOURCE + + enum itt_relation + { + __itt_relation_is_unknown = 0, + __itt_relation_is_dependent_on, /**< "A is dependent on B" means that A cannot start until B completes */ + __itt_relation_is_sibling_of, /**< "A is sibling of B" means that A and B were created as a group */ + __itt_relation_is_parent_of, /**< "A is parent of B" means that A created B */ + __itt_relation_is_continuation_of, /**< "A is continuation of B" means that A assumes the dependencies of B */ + __itt_relation_is_child_of, /**< "A is child of B" means that A was created by B (inverse of is_parent_of) */ + __itt_relation_is_continued_by, /**< "A is continued by B" means that B assumes the dependencies of A (inverse of is_continuation_of) */ + __itt_relation_is_predecessor_to /**< "A is predecessor to B" means that B cannot start until A completes (inverse of is_dependent_on) */ + }; + + } +} + +// Check if the tools support is enabled +#if (_WIN32||_WIN64||__linux__) && !__MINGW32__ && TBB_USE_THREADING_TOOLS + +#if _WIN32||_WIN64 +#include /* mbstowcs_s */ +#endif +#include "tbb_stddef.h" + +namespace tbb { + namespace internal { + +#if _WIN32||_WIN64 + void __TBB_EXPORTED_FUNC itt_set_sync_name_v3( void *obj, const wchar_t* name ); + inline size_t multibyte_to_widechar( wchar_t* wcs, const char* mbs, size_t bufsize) { +#if _MSC_VER>=1400 + size_t len; + mbstowcs_s( &len, wcs, bufsize, mbs, _TRUNCATE ); + return len; // mbstowcs_s counts null terminator +#else + size_t len = mbstowcs( wcs, mbs, bufsize ); + if(wcs && len!=size_t(-1) ) + wcs[len + inline void itt_store_word_with_release(tbb::atomic& dst, U src) { +#if TBB_USE_THREADING_TOOLS + // This assertion should be replaced with static_assert + __TBB_ASSERT(sizeof(T) == sizeof(void *), "Type must be word-sized."); + itt_store_pointer_with_release_v3(&dst, (void *)uintptr_t(src)); +#else + dst = src; +#endif // TBB_USE_THREADING_TOOLS + } + + template + inline T itt_load_word_with_acquire(const tbb::atomic& src) { +#if TBB_USE_THREADING_TOOLS + // This assertion should be replaced with static_assert + __TBB_ASSERT(sizeof(T) == sizeof(void *), "Type must be word-sized."); +#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) + // Workaround for overzealous compiler warnings + #pragma warning (push) + #pragma warning (disable: 4311) +#endif + T result = (T)itt_load_pointer_with_acquire_v3(&src); +#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) + #pragma warning (pop) +#endif + return result; +#else + return src; +#endif // TBB_USE_THREADING_TOOLS + } + + template + inline void itt_store_word_with_release(T& dst, T src) { +#if TBB_USE_THREADING_TOOLS + // This assertion should be replaced with static_assert + __TBB_ASSERT(sizeof(T) == sizeof(void *), "Type must be 
word-sized."); + itt_store_pointer_with_release_v3(&dst, (void *)src); +#else + __TBB_store_with_release(dst, src); +#endif // TBB_USE_THREADING_TOOLS + } + + template + inline T itt_load_word_with_acquire(const T& src) { +#if TBB_USE_THREADING_TOOLS + // This assertion should be replaced with static_assert + __TBB_ASSERT(sizeof(T) == sizeof(void *), "Type must be word-sized"); + return (T)itt_load_pointer_with_acquire_v3(&src); +#else + return __TBB_load_with_acquire(src); +#endif // TBB_USE_THREADING_TOOLS + } + + template + inline void itt_hide_store_word(T& dst, T src) { +#if TBB_USE_THREADING_TOOLS + //TODO: This assertion should be replaced with static_assert + __TBB_ASSERT(sizeof(T) == sizeof(void *), "Type must be word-sized"); + itt_store_pointer_with_release_v3(&dst, (void *)src); +#else + dst = src; +#endif + } + + //TODO: rename to itt_hide_load_word_relaxed + template + inline T itt_hide_load_word(const T& src) { +#if TBB_USE_THREADING_TOOLS + //TODO: This assertion should be replaced with static_assert + __TBB_ASSERT(sizeof(T) == sizeof(void *), "Type must be word-sized."); + return (T)itt_load_pointer_v3(&src); +#else + return src; +#endif + } + +#if TBB_USE_THREADING_TOOLS + inline void call_itt_notify(notify_type t, void *ptr) { + call_itt_notify_v5((int)t, ptr); + } + + inline void itt_make_task_group( itt_domain_enum domain, void *group, unsigned long long group_extra, + void *parent, unsigned long long parent_extra, string_index name_index ) { + itt_make_task_group_v7( domain, group, group_extra, parent, parent_extra, name_index ); + } + + inline void itt_metadata_str_add( itt_domain_enum domain, void *addr, unsigned long long addr_extra, + string_index key, const char *value ) { + itt_metadata_str_add_v7( domain, addr, addr_extra, key, value ); + } + + inline void register_node_addr(itt_domain_enum domain, void *addr, unsigned long long addr_extra, + string_index key, void *value) { + itt_metadata_ptr_add_v11(domain, addr, addr_extra, key, value); + } + + inline void itt_relation_add( itt_domain_enum domain, void *addr0, unsigned long long addr0_extra, + itt_relation relation, void *addr1, unsigned long long addr1_extra ) { + itt_relation_add_v7( domain, addr0, addr0_extra, relation, addr1, addr1_extra ); + } + + inline void itt_task_begin( itt_domain_enum domain, void *task, unsigned long long task_extra, + void *parent, unsigned long long parent_extra, string_index name_index ) { + itt_task_begin_v7( domain, task, task_extra, parent, parent_extra, name_index ); + } + + inline void itt_task_end( itt_domain_enum domain ) { + itt_task_end_v7( domain ); + } + + inline void itt_region_begin( itt_domain_enum domain, void *region, unsigned long long region_extra, + void *parent, unsigned long long parent_extra, string_index name_index ) { + itt_region_begin_v9( domain, region, region_extra, parent, parent_extra, name_index ); + } + + inline void itt_region_end( itt_domain_enum domain, void *region, unsigned long long region_extra ) { + itt_region_end_v9( domain, region, region_extra ); + } +#else + inline void register_node_addr( itt_domain_enum /*domain*/, void* /*addr*/, unsigned long long /*addr_extra*/, string_index /*key*/, void* /*value*/ ) {} + inline void call_itt_notify(notify_type /*t*/, void* /*ptr*/) {} + + inline void itt_make_task_group( itt_domain_enum /*domain*/, void* /*group*/, unsigned long long /*group_extra*/, + void* /*parent*/, unsigned long long /*parent_extra*/, string_index /*name_index*/ ) {} + + inline void itt_metadata_str_add( itt_domain_enum 
/*domain*/, void* /*addr*/, unsigned long long /*addr_extra*/, + string_index /*key*/, const char* /*value*/ ) {} + + inline void itt_relation_add( itt_domain_enum /*domain*/, void* /*addr0*/, unsigned long long /*addr0_extra*/, + itt_relation /*relation*/, void* /*addr1*/, unsigned long long /*addr1_extra*/ ) {} + + inline void itt_task_begin( itt_domain_enum /*domain*/, void* /*task*/, unsigned long long /*task_extra*/, + void* /*parent*/, unsigned long long /*parent_extra*/, string_index /*name_index*/ ) {} + + inline void itt_task_end( itt_domain_enum /*domain*/ ) {} + + inline void itt_region_begin( itt_domain_enum /*domain*/, void* /*region*/, unsigned long long /*region_extra*/, + void* /*parent*/, unsigned long long /*parent_extra*/, string_index /*name_index*/ ) {} + + inline void itt_region_end( itt_domain_enum /*domain*/, void* /*region*/, unsigned long long /*region_extra*/ ) {} +#endif // TBB_USE_THREADING_TOOLS + + } // namespace internal +} // namespace tbb + +#if TBB_PREVIEW_FLOW_GRAPH_TRACE +#include + +namespace tbb { +namespace profiling { +namespace interface10 { + +#if TBB_USE_THREADING_TOOLS && !(TBB_USE_THREADING_TOOLS == 2) +class event { +/** This class supports user event traces through itt. + Common use-case is tagging data flow graph tasks (data-id) + and visualization by Intel Advisor Flow Graph Analyzer (FGA) **/ +// TODO: Replace implementation by itt user event api. + + const std::string my_name; + + static void emit_trace(const std::string &input) { + itt_metadata_str_add( tbb::internal::ITT_DOMAIN_FLOW, NULL, tbb::internal::FLOW_NULL, tbb::internal::USER_EVENT, ( "FGA::DATAID::" + input ).c_str() ); + } + +public: + event(const std::string &input) + : my_name( input ) + { } + + void emit() { + emit_trace(my_name); + } + + static void emit(const std::string &description) { + emit_trace(description); + } + +}; +#else // TBB_USE_THREADING_TOOLS && !(TBB_USE_THREADING_TOOLS == 2) +// Using empty struct if user event tracing is disabled: +struct event { + event(const std::string &) { } + + void emit() { } + + static void emit(const std::string &) { } +}; +#endif // TBB_USE_THREADING_TOOLS && !(TBB_USE_THREADING_TOOLS == 2) + +} // interfaceX +using interface10::event; +} // namespace profiling +} // namespace tbb +#endif // TBB_PREVIEW_FLOW_GRAPH_TRACE + +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_tbb_profiling_H_include_area + +#endif /* __TBB_profiling_H */ diff --git a/ohos/arm64-v8a/include/tbb/tbb_stddef.h b/ohos/arm64-v8a/include/tbb/tbb_stddef.h new file mode 100644 index 00000000..aed4d1c4 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/tbb_stddef.h @@ -0,0 +1,565 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +#ifndef __TBB_tbb_stddef_H +#define __TBB_tbb_stddef_H + +// Marketing-driven product version +#define TBB_VERSION_MAJOR 2020 +#define TBB_VERSION_MINOR 2 + +// Engineering-focused interface version +#define TBB_INTERFACE_VERSION 11102 +#define TBB_INTERFACE_VERSION_MAJOR TBB_INTERFACE_VERSION/1000 + +// The oldest major interface version still supported +// To be used in SONAME, manifests, etc. +#define TBB_COMPATIBLE_INTERFACE_VERSION 2 + +#define __TBB_STRING_AUX(x) #x +#define __TBB_STRING(x) __TBB_STRING_AUX(x) + +// We do not need defines below for resource processing on windows +#if !defined RC_INVOKED + +// Define groups for Doxygen documentation +/** + * @defgroup algorithms Algorithms + * @defgroup containers Containers + * @defgroup memory_allocation Memory Allocation + * @defgroup synchronization Synchronization + * @defgroup timing Timing + * @defgroup task_scheduling Task Scheduling + */ + +// Simple text that is displayed on the main page of Doxygen documentation. +/** + * \mainpage Main Page + * + * Click the tabs above for information about the + * - Modules (groups of functionality) implemented by the library + * - Classes provided by the library + * - Files constituting the library. + * . + * Please note that significant part of TBB functionality is implemented in the form of + * template functions, descriptions of which are not accessible on the Classes + * tab. Use Modules or Namespace/Namespace Members + * tabs to find them. + * + * Additional pieces of information can be found here + * - \subpage concepts + * . + */ + +/** \page concepts TBB concepts + + A concept is a set of requirements to a type, which are necessary and sufficient + for the type to model a particular behavior or a set of behaviors. Some concepts + are specific to a particular algorithm (e.g. algorithm body), while other ones + are common to several algorithms (e.g. range concept). + + All TBB algorithms make use of different classes implementing various concepts. + Implementation classes are supplied by the user as type arguments of template + parameters and/or as objects passed as function call arguments. The library + provides predefined implementations of some concepts (e.g. several kinds of + \ref range_req "ranges"), while other ones must always be implemented by the user. + + TBB defines a set of minimal requirements each concept must conform to. 
Here is + the list of different concepts hyperlinked to the corresponding requirements specifications: + - \subpage range_req + - \subpage parallel_do_body_req + - \subpage parallel_for_body_req + - \subpage parallel_reduce_body_req + - \subpage parallel_scan_body_req + - \subpage parallel_sort_iter_req +**/ + +// tbb_config.h should be included the first since it contains macro definitions used in other headers +#include "tbb_config.h" + +#if _MSC_VER >=1400 + #define __TBB_EXPORTED_FUNC __cdecl + #define __TBB_EXPORTED_METHOD __thiscall +#else + #define __TBB_EXPORTED_FUNC + #define __TBB_EXPORTED_METHOD +#endif + +#if __INTEL_COMPILER || _MSC_VER +#define __TBB_NOINLINE(decl) __declspec(noinline) decl +#elif __GNUC__ +#define __TBB_NOINLINE(decl) decl __attribute__ ((noinline)) +#else +#define __TBB_NOINLINE(decl) decl +#endif + +#if __TBB_NOEXCEPT_PRESENT +#define __TBB_NOEXCEPT(expression) noexcept(expression) +#else +#define __TBB_NOEXCEPT(expression) +#endif + +#include /* Need size_t and ptrdiff_t */ + +#if _MSC_VER + #define __TBB_tbb_windef_H + #include "internal/_tbb_windef.h" + #undef __TBB_tbb_windef_H +#endif +#if !defined(_MSC_VER) || _MSC_VER>=1600 + #include +#endif + +//! Type for an assertion handler +typedef void(*assertion_handler_type)( const char* filename, int line, const char* expression, const char * comment ); + +#if __TBBMALLOC_BUILD +namespace rml { namespace internal { + #define __TBB_ASSERT_RELEASE(predicate,message) ((predicate)?((void)0) : rml::internal::assertion_failure(__FILE__,__LINE__,#predicate,message)) +#else +namespace tbb { + #define __TBB_ASSERT_RELEASE(predicate,message) ((predicate)?((void)0) : tbb::assertion_failure(__FILE__,__LINE__,#predicate,message)) +#endif + + //! Set assertion handler and return previous value of it. + assertion_handler_type __TBB_EXPORTED_FUNC set_assertion_handler( assertion_handler_type new_handler ); + + //! Process an assertion failure. + /** Normally called from __TBB_ASSERT macro. + If assertion handler is null, print message for assertion failure and abort. + Otherwise call the assertion handler. */ + void __TBB_EXPORTED_FUNC assertion_failure( const char* filename, int line, const char* expression, const char* comment ); + +#if __TBBMALLOC_BUILD +}} // namespace rml::internal +#else +} // namespace tbb +#endif + +#if TBB_USE_ASSERT + + //! Assert that predicate is true. + /** If predicate is false, print assertion failure message. + If the comment argument is not NULL, it is printed as part of the failure message. + The comment argument has no other effect. */ + #define __TBB_ASSERT(predicate,message) __TBB_ASSERT_RELEASE(predicate,message) + + #define __TBB_ASSERT_EX __TBB_ASSERT + +#else /* !TBB_USE_ASSERT */ + + //! No-op version of __TBB_ASSERT. + #define __TBB_ASSERT(predicate,comment) ((void)0) + //! "Extended" version is useful to suppress warnings if a variable is only used with an assert + #define __TBB_ASSERT_EX(predicate,comment) ((void)(1 && (predicate))) + +#endif /* !TBB_USE_ASSERT */ + +//! The namespace tbb contains all components of the library. 
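A minimal sketch of how the assertion hooks declared above can be used, assuming TBB_USE_ASSERT is defined and the program links against the TBB runtime; the handler name and the messages are illustrative.

    // Sketch: installing a custom handler for __TBB_ASSERT failures.
    #include <cstdio>
    #include <cstdlib>
    #include "tbb/tbb_stddef.h"

    static void my_assertion_handler( const char* filename, int line,
                                      const char* expression, const char* comment ) {
        std::fprintf( stderr, "TBB assertion %s failed at %s:%d (%s)\n",
                      expression, filename, line, comment ? comment : "" );
        std::abort();
    }

    int main() {
        // set_assertion_handler returns the previously installed handler.
        assertion_handler_type previous = tbb::set_assertion_handler( my_assertion_handler );
        int x = 1;
        __TBB_ASSERT( x == 1, "x must be one" );  // passes, so the handler is not invoked
        tbb::set_assertion_handler( previous );    // restore the old handler
        return 0;
    }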
+namespace tbb { + + namespace internal { +#if _MSC_VER && _MSC_VER<1600 + typedef __int8 int8_t; + typedef __int16 int16_t; + typedef __int32 int32_t; + typedef __int64 int64_t; + typedef unsigned __int8 uint8_t; + typedef unsigned __int16 uint16_t; + typedef unsigned __int32 uint32_t; + typedef unsigned __int64 uint64_t; +#else /* Posix */ + using ::int8_t; + using ::int16_t; + using ::int32_t; + using ::int64_t; + using ::uint8_t; + using ::uint16_t; + using ::uint32_t; + using ::uint64_t; +#endif /* Posix */ + } // namespace internal + + using std::size_t; + using std::ptrdiff_t; + +//! The function returns the interface version of the TBB shared library being used. +/** + * The version it returns is determined at runtime, not at compile/link time. + * So it can be different than the value of TBB_INTERFACE_VERSION obtained at compile time. + */ +extern "C" int __TBB_EXPORTED_FUNC TBB_runtime_interface_version(); + +/** + * @cond INTERNAL + * @brief Identifiers declared inside namespace internal should never be used directly by client code. + */ +namespace internal { + +//! Compile-time constant that is upper bound on cache line/sector size. +/** It should be used only in situations where having a compile-time upper + bound is more useful than a run-time exact answer. + @ingroup memory_allocation */ +const size_t NFS_MaxLineSize = 128; + +/** Label for data that may be accessed from different threads, and that may eventually become wrapped + in a formal atomic type. + + Note that no problems have yet been observed relating to the definition currently being empty, + even if at least "volatile" would seem to be in order to avoid data sometimes temporarily hiding + in a register (although "volatile" as a "poor man's atomic" lacks several other features of a proper + atomic, some of which are now provided instead through specialized functions). + + Note that usage is intentionally compatible with a definition as qualifier "volatile", + both as a way to have the compiler help enforce use of the label and to quickly rule out + one potential issue. + + Note however that, with some architecture/compiler combinations, e.g. on IA-64 architecture, "volatile" + also has non-portable memory semantics that are needlessly expensive for "relaxed" operations. + + Note that this must only be applied to data that will not change bit patterns when cast to/from + an integral type of the same length; tbb::atomic must be used instead for, e.g., floating-point types. + + TODO: apply wherever relevant **/ +#define __TBB_atomic // intentionally empty, see above + +#if __TBB_OVERRIDE_PRESENT +#define __TBB_override override +#else +#define __TBB_override // formal comment only +#endif + +#if __TBB_CPP17_FALLTHROUGH_PRESENT +#define __TBB_fallthrough [[fallthrough]] +#elif __TBB_FALLTHROUGH_PRESENT +#define __TBB_fallthrough __attribute__ ((fallthrough)) +#else +#define __TBB_fallthrough +#endif + +template +struct padded_base : T { + char pad[S - R]; +}; +template struct padded_base : T {}; + +//! Pads type T to fill out to a multiple of cache line size. +template +struct padded : padded_base {}; + +//! Extended variant of the standard offsetof macro +/** The standard offsetof macro is not sufficient for TBB as it can be used for + POD-types only. The constant 0x1000 (not NULL) is necessary to appease GCC. **/ +#define __TBB_offsetof(class_name, member_name) \ + ((ptrdiff_t)&(reinterpret_cast(0x1000)->member_name) - 0x1000) + +//! 
Returns address of the object containing a member with the given name and address +#define __TBB_get_object_ref(class_name, member_name, member_addr) \ + (*reinterpret_cast((char*)member_addr - __TBB_offsetof(class_name, member_name))) + +//! Throws std::runtime_error with what() returning error_code description prefixed with aux_info +void __TBB_EXPORTED_FUNC handle_perror( int error_code, const char* aux_info ); + +#if TBB_USE_EXCEPTIONS + #define __TBB_TRY try + #define __TBB_CATCH(e) catch(e) + #define __TBB_THROW(e) throw e + #define __TBB_RETHROW() throw +#else /* !TBB_USE_EXCEPTIONS */ + inline bool __TBB_false() { return false; } + #define __TBB_TRY + #define __TBB_CATCH(e) if ( tbb::internal::__TBB_false() ) + #define __TBB_THROW(e) tbb::internal::suppress_unused_warning(e) + #define __TBB_RETHROW() ((void)0) +#endif /* !TBB_USE_EXCEPTIONS */ + +//! Report a runtime warning. +void __TBB_EXPORTED_FUNC runtime_warning( const char* format, ... ); + +#if TBB_USE_ASSERT +static void* const poisoned_ptr = reinterpret_cast(-1); + +//! Set p to invalid pointer value. +// Also works for regular (non-__TBB_atomic) pointers. +template +inline void poison_pointer( T* __TBB_atomic & p ) { p = reinterpret_cast(poisoned_ptr); } + +/** Expected to be used in assertions only, thus no empty form is defined. **/ +template +inline bool is_poisoned( T* p ) { return p == reinterpret_cast(poisoned_ptr); } +#else +template +inline void poison_pointer( T* __TBB_atomic & ) {/*do nothing*/} +#endif /* !TBB_USE_ASSERT */ + +//! Cast between unrelated pointer types. +/** This method should be used sparingly as a last resort for dealing with + situations that inherently break strict ISO C++ aliasing rules. */ +// T is a pointer type because it will be explicitly provided by the programmer as a template argument; +// U is a referent type to enable the compiler to check that "ptr" is a pointer, deducing U in the process. +template +inline T punned_cast( U* ptr ) { + uintptr_t x = reinterpret_cast(ptr); + return reinterpret_cast(x); +} + +#if __TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT + +//! Base class for types that should not be assigned. +class no_assign { +public: + void operator=( const no_assign& ) = delete; + no_assign( const no_assign& ) = default; + no_assign() = default; +}; + +//! Base class for types that should not be copied or assigned. +class no_copy: no_assign { +public: + no_copy( const no_copy& ) = delete; + no_copy() = default; +}; + +#else /*__TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT*/ + +//! Base class for types that should not be assigned. +class no_assign { + // Deny assignment + void operator=( const no_assign& ); +public: +#if __GNUC__ + //! Explicitly define default construction, because otherwise gcc issues gratuitous warning. + no_assign() {} +#endif /* __GNUC__ */ +}; + +//! Base class for types that should not be copied or assigned. +class no_copy: no_assign { + //! Deny copy construction + no_copy( const no_copy& ); +public: + //! Allow default construction + no_copy() {} +}; + +#endif /*__TBB_DEFAULTED_AND_DELETED_FUNC_PRESENT*/ + +#if TBB_DEPRECATED_MUTEX_COPYING +class mutex_copy_deprecated_and_disabled {}; +#else +// By default various implementations of mutexes are not copy constructible +// and not copy assignable. +class mutex_copy_deprecated_and_disabled : no_copy {}; +#endif + +//! 
A function to check if passed in pointer is aligned on a specific border +template +inline bool is_aligned(T* pointer, uintptr_t alignment) { + return 0==((uintptr_t)pointer & (alignment-1)); +} + +//! A function to check if passed integer is a power of 2 +template +inline bool is_power_of_two(integer_type arg) { + return arg && (0 == (arg & (arg - 1))); +} + +//! A function to compute arg modulo divisor where divisor is a power of 2. +template +inline argument_integer_type modulo_power_of_two(argument_integer_type arg, divisor_integer_type divisor) { + __TBB_ASSERT( is_power_of_two(divisor), "Divisor should be a power of two" ); + return (arg & (divisor - 1)); +} + + +//! A function to determine if arg is a power of 2 at least as big as another power of 2. +// i.e. for strictly positive i and j, with j being a power of 2, +// determines whether i==j< +inline bool is_power_of_two_at_least(argument_integer_type arg, power2_integer_type power2) { + __TBB_ASSERT( is_power_of_two(power2), "Divisor should be a power of two" ); + return 0 == (arg & (arg - power2)); +} + +//! Utility template function to prevent "unused" warnings by various compilers. +template void suppress_unused_warning( const T1& ) {} +template void suppress_unused_warning( const T1&, const T2& ) {} +template void suppress_unused_warning( const T1&, const T2&, const T3& ) {} + +// Struct to be used as a version tag for inline functions. +/** Version tag can be necessary to prevent loader on Linux from using the wrong + symbol in debug builds (when inline functions are compiled as out-of-line). **/ +struct version_tag_v3 {}; + +typedef version_tag_v3 version_tag; + +} // internal + +//! Dummy type that distinguishes splitting constructor from copy constructor. +/** + * See description of parallel_for and parallel_reduce for example usages. + * @ingroup algorithms + */ +class split { +}; + +//! Type enables transmission of splitting proportion from partitioners to range objects +/** + * In order to make use of such facility Range objects must implement + * splitting constructor with this type passed and initialize static + * constant boolean field 'is_splittable_in_proportion' with the value + * of 'true' + */ +class proportional_split: internal::no_assign { +public: + proportional_split(size_t _left = 1, size_t _right = 1) : my_left(_left), my_right(_right) { } + + size_t left() const { return my_left; } + size_t right() const { return my_right; } + + // used when range does not support proportional split + operator split() const { return split(); } + +#if __TBB_ENABLE_RANGE_FEEDBACK + void set_proportion(size_t _left, size_t _right) { + my_left = _left; + my_right = _right; + } +#endif +private: + size_t my_left, my_right; +}; + +} // tbb + +// Following is a set of classes and functions typically used in compile-time "metaprogramming". +// TODO: move all that to a separate header + +#if __TBB_CPP11_SMART_POINTERS_PRESENT +#include // for unique_ptr +#endif + +#if __TBB_CPP11_RVALUE_REF_PRESENT || __TBB_CPP11_DECLTYPE_PRESENT || _LIBCPP_VERSION +#include // for std::move, std::forward, std::declval +#endif + +namespace tbb { +namespace internal { + +#if __TBB_CPP11_SMART_POINTERS_PRESENT && __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT + template + std::unique_ptr make_unique(Args&&... args) { + return std::unique_ptr(new T(std::forward(args)...)); + } +#endif + +//! Class for determining type of std::allocator::value_type. 
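A hedged sketch of a range that opts into proportional splitting as just described; WeightedRange and its members are illustrative, while tbb::split, tbb::proportional_split and the is_splittable_in_proportion flag come from this header.

    // Sketch: a range that accepts tbb::proportional_split in addition to tbb::split.
    // "WeightedRange" is an illustrative name, not part of TBB.
    #include "tbb/tbb_stddef.h"

    class WeightedRange {
        size_t my_begin, my_end;
    public:
        static const bool is_splittable_in_proportion = true;

        WeightedRange( size_t b, size_t e ) : my_begin(b), my_end(e) {}

        bool empty() const { return my_begin == my_end; }
        bool is_divisible() const { return my_end - my_begin > 1; }

        // Even split, used when no proportion is requested.
        WeightedRange( WeightedRange& r, tbb::split )
            : my_begin( r.my_begin + (r.my_end - r.my_begin) / 2 ), my_end( r.my_end )
        { r.my_end = my_begin; }

        // Proportional split: left()/right() describe the requested ratio (1:1 by default).
        WeightedRange( WeightedRange& r, tbb::proportional_split& p )
            : my_end( r.my_end )
        {
            size_t n = r.my_end - r.my_begin;
            size_t right_part = n * p.right() / ( p.left() + p.right() );
            if( right_part == 0 ) right_part = 1;   // keep both halves non-empty
            my_begin = r.my_end - right_part;
            r.my_end = my_begin;
        }
    };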
+template +struct allocator_type { + typedef T value_type; +}; + +#if _MSC_VER +//! Microsoft std::allocator has non-standard extension that strips const from a type. +template +struct allocator_type { + typedef T value_type; +}; +#endif + +// Ad-hoc implementation of true_type & false_type +// Intended strictly for internal use! For public APIs (traits etc), use C++11 analogues. +template +struct bool_constant { + static /*constexpr*/ const bool value = v; +}; +typedef bool_constant true_type; +typedef bool_constant false_type; + +//! A template to select either 32-bit or 64-bit constant as compile time, depending on machine word size. +template +struct select_size_t_constant { + //Explicit cast is needed to avoid compiler warnings about possible truncation. + //The value of the right size, which is selected by ?:, is anyway not truncated or promoted. + static const size_t value = (size_t)((sizeof(size_t)==sizeof(u)) ? u : ull); +}; + +#if __TBB_CPP11_RVALUE_REF_PRESENT +using std::move; +using std::forward; +#elif defined(_LIBCPP_NAMESPACE) +// libc++ defines "pre-C++11 move and forward" similarly to ours; use it to avoid name conflicts in some cases. +using std::_LIBCPP_NAMESPACE::move; +using std::_LIBCPP_NAMESPACE::forward; +#else +// It is assumed that cv qualifiers, if any, are part of the deduced type. +template +T& move( T& x ) { return x; } +template +T& forward( T& x ) { return x; } +#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */ + +// Helper macros to simplify writing templates working with both C++03 and C++11. +#if __TBB_CPP11_RVALUE_REF_PRESENT +#define __TBB_FORWARDING_REF(A) A&& +#else +// It is assumed that cv qualifiers, if any, are part of a deduced type. +// Thus this macro should not be used in public interfaces. +#define __TBB_FORWARDING_REF(A) A& +#endif +#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT +#define __TBB_PARAMETER_PACK ... +#define __TBB_PACK_EXPANSION(A) A... +#else +#define __TBB_PARAMETER_PACK +#define __TBB_PACK_EXPANSION(A) A +#endif /* __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT */ + +#if __TBB_CPP11_DECLTYPE_PRESENT +#if __TBB_CPP11_DECLVAL_BROKEN +// Ad-hoc implementation of std::declval +template __TBB_FORWARDING_REF(T) declval() /*noexcept*/; +#else +using std::declval; +#endif +#endif + +template +struct STATIC_ASSERTION_FAILED; + +template <> +struct STATIC_ASSERTION_FAILED { enum {value=1};}; + +template<> +struct STATIC_ASSERTION_FAILED; //intentionally left undefined to cause compile time error + +//! @endcond +}} // namespace tbb::internal + +#if __TBB_STATIC_ASSERT_PRESENT +#define __TBB_STATIC_ASSERT(condition,msg) static_assert(condition,msg) +#else +//please note condition is intentionally inverted to get a bit more understandable error msg +#define __TBB_STATIC_ASSERT_IMPL1(condition,msg,line) \ + enum {static_assert_on_line_##line = tbb::internal::STATIC_ASSERTION_FAILED::value} + +#define __TBB_STATIC_ASSERT_IMPL(condition,msg,line) __TBB_STATIC_ASSERT_IMPL1(condition,msg,line) +//! 
Verify condition, at compile time +#define __TBB_STATIC_ASSERT(condition,msg) __TBB_STATIC_ASSERT_IMPL(condition,msg,__LINE__) +#endif + +#endif /* RC_INVOKED */ +#endif /* __TBB_tbb_stddef_H */ diff --git a/ohos/arm64-v8a/include/tbb/tbb_thread.h b/ohos/arm64-v8a/include/tbb/tbb_thread.h new file mode 100644 index 00000000..48c11711 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/tbb_thread.h @@ -0,0 +1,345 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#include "internal/_deprecated_header_message_guard.h" + +#if !defined(__TBB_show_deprecation_message_tbb_thread_H) && defined(__TBB_show_deprecated_header_message) +#define __TBB_show_deprecation_message_tbb_thread_H +#pragma message("TBB Warning: tbb/tbb_thread.h is deprecated. For details, please see Deprecated Features appendix in the TBB reference manual.") +#endif + +#if defined(__TBB_show_deprecated_header_message) +#undef __TBB_show_deprecated_header_message +#endif + +#ifndef __TBB_tbb_thread_H +#define __TBB_tbb_thread_H + +#define __TBB_tbb_thread_H_include_area +#include "internal/_warning_suppress_enable_notice.h" + +#include "tbb_stddef.h" + +#if _WIN32||_WIN64 +#include "machine/windows_api.h" +#define __TBB_NATIVE_THREAD_ROUTINE unsigned WINAPI +#define __TBB_NATIVE_THREAD_ROUTINE_PTR(r) unsigned (WINAPI* r)( void* ) +namespace tbb { namespace internal { +#if __TBB_WIN8UI_SUPPORT + typedef size_t thread_id_type; +#else // __TBB_WIN8UI_SUPPORT + typedef DWORD thread_id_type; +#endif // __TBB_WIN8UI_SUPPORT +}} //namespace tbb::internal +#else +#define __TBB_NATIVE_THREAD_ROUTINE void* +#define __TBB_NATIVE_THREAD_ROUTINE_PTR(r) void* (*r)( void* ) +#include +namespace tbb { namespace internal { + typedef pthread_t thread_id_type; +}} //namespace tbb::internal +#endif // _WIN32||_WIN64 + +#include "atomic.h" +#include "internal/_tbb_hash_compare_impl.h" +#include "tick_count.h" + +#include __TBB_STD_SWAP_HEADER +#include + +namespace tbb { + +namespace internal { + class tbb_thread_v3; +} + +inline void swap( internal::tbb_thread_v3& t1, internal::tbb_thread_v3& t2 ) __TBB_NOEXCEPT(true); + +namespace internal { + + //! Allocate a closure + void* __TBB_EXPORTED_FUNC allocate_closure_v3( size_t size ); + //! Free a closure allocated by allocate_closure_v3 + void __TBB_EXPORTED_FUNC free_closure_v3( void* ); + + struct thread_closure_base { + void* operator new( size_t size ) {return allocate_closure_v3(size);} + void operator delete( void* ptr ) {free_closure_v3(ptr);} + }; + + template struct thread_closure_0: thread_closure_base { + F function; + + static __TBB_NATIVE_THREAD_ROUTINE start_routine( void* c ) { + thread_closure_0 *self = static_cast(c); + self->function(); + delete self; + return 0; + } + thread_closure_0( const F& f ) : function(f) {} + }; + //! Structure used to pass user function with 1 argument to thread. + template struct thread_closure_1: thread_closure_base { + F function; + X arg1; + //! 
Routine passed to Windows's _beginthreadex by thread::internal_start() inside tbb.dll + static __TBB_NATIVE_THREAD_ROUTINE start_routine( void* c ) { + thread_closure_1 *self = static_cast(c); + self->function(self->arg1); + delete self; + return 0; + } + thread_closure_1( const F& f, const X& x ) : function(f), arg1(x) {} + }; + template struct thread_closure_2: thread_closure_base { + F function; + X arg1; + Y arg2; + //! Routine passed to Windows's _beginthreadex by thread::internal_start() inside tbb.dll + static __TBB_NATIVE_THREAD_ROUTINE start_routine( void* c ) { + thread_closure_2 *self = static_cast(c); + self->function(self->arg1, self->arg2); + delete self; + return 0; + } + thread_closure_2( const F& f, const X& x, const Y& y ) : function(f), arg1(x), arg2(y) {} + }; + + //! Versioned thread class. + class tbb_thread_v3 { +#if __TBB_IF_NO_COPY_CTOR_MOVE_SEMANTICS_BROKEN + // Workaround for a compiler bug: declaring the copy constructor as public + // enables use of the moving constructor. + // The definition is not provided in order to prohibit copying. + public: +#endif + tbb_thread_v3(const tbb_thread_v3&); // = delete; // Deny access + public: +#if _WIN32||_WIN64 + typedef HANDLE native_handle_type; +#else + typedef pthread_t native_handle_type; +#endif // _WIN32||_WIN64 + + class id; + //! Constructs a thread object that does not represent a thread of execution. + tbb_thread_v3() __TBB_NOEXCEPT(true) : my_handle(0) +#if _WIN32||_WIN64 + , my_thread_id(0) +#endif // _WIN32||_WIN64 + {} + + //! Constructs an object and executes f() in a new thread + template explicit tbb_thread_v3(F f) { + typedef internal::thread_closure_0 closure_type; + internal_start(closure_type::start_routine, new closure_type(f)); + } + //! Constructs an object and executes f(x) in a new thread + template tbb_thread_v3(F f, X x) { + typedef internal::thread_closure_1 closure_type; + internal_start(closure_type::start_routine, new closure_type(f,x)); + } + //! Constructs an object and executes f(x,y) in a new thread + template tbb_thread_v3(F f, X x, Y y) { + typedef internal::thread_closure_2 closure_type; + internal_start(closure_type::start_routine, new closure_type(f,x,y)); + } + +#if __TBB_CPP11_RVALUE_REF_PRESENT + tbb_thread_v3(tbb_thread_v3&& x) __TBB_NOEXCEPT(true) + : my_handle(x.my_handle) +#if _WIN32||_WIN64 + , my_thread_id(x.my_thread_id) +#endif + { + x.internal_wipe(); + } + tbb_thread_v3& operator=(tbb_thread_v3&& x) __TBB_NOEXCEPT(true) { + internal_move(x); + return *this; + } + private: + tbb_thread_v3& operator=(const tbb_thread_v3& x); // = delete; + public: +#else // __TBB_CPP11_RVALUE_REF_PRESENT + tbb_thread_v3& operator=(tbb_thread_v3& x) { + internal_move(x); + return *this; + } +#endif // __TBB_CPP11_RVALUE_REF_PRESENT + + void swap( tbb_thread_v3& t ) __TBB_NOEXCEPT(true) {tbb::swap( *this, t );} + bool joinable() const __TBB_NOEXCEPT(true) {return my_handle!=0; } + //! The completion of the thread represented by *this happens before join() returns. + void __TBB_EXPORTED_METHOD join(); + //! When detach() returns, *this no longer represents the possibly continuing thread of execution. + void __TBB_EXPORTED_METHOD detach(); + ~tbb_thread_v3() {if( joinable() ) detach();} + inline id get_id() const __TBB_NOEXCEPT(true); + native_handle_type native_handle() { return my_handle; } + + //! The number of hardware thread contexts. + /** Before TBB 3.0 U4 this methods returned the number of logical CPU in + the system. 
Currently on Windows, Linux and FreeBSD it returns the + number of logical CPUs available to the current process in accordance + with its affinity mask. + + NOTE: The return value of this method never changes after its first + invocation. This means that changes in the process affinity mask that + took place after this method was first invoked will not affect the + number of worker threads in the TBB worker threads pool. **/ + static unsigned __TBB_EXPORTED_FUNC hardware_concurrency() __TBB_NOEXCEPT(true); + private: + native_handle_type my_handle; +#if _WIN32||_WIN64 + thread_id_type my_thread_id; +#endif // _WIN32||_WIN64 + + void internal_wipe() __TBB_NOEXCEPT(true) { + my_handle = 0; +#if _WIN32||_WIN64 + my_thread_id = 0; +#endif + } + void internal_move(tbb_thread_v3& x) __TBB_NOEXCEPT(true) { + if (joinable()) detach(); + my_handle = x.my_handle; +#if _WIN32||_WIN64 + my_thread_id = x.my_thread_id; +#endif // _WIN32||_WIN64 + x.internal_wipe(); + } + + /** Runs start_routine(closure) on another thread and sets my_handle to the handle of the created thread. */ + void __TBB_EXPORTED_METHOD internal_start( __TBB_NATIVE_THREAD_ROUTINE_PTR(start_routine), + void* closure ); + friend void __TBB_EXPORTED_FUNC move_v3( tbb_thread_v3& t1, tbb_thread_v3& t2 ); + friend void tbb::swap( tbb_thread_v3& t1, tbb_thread_v3& t2 ) __TBB_NOEXCEPT(true); + }; + + class tbb_thread_v3::id { + thread_id_type my_id; + id( thread_id_type id_ ) : my_id(id_) {} + + friend class tbb_thread_v3; + public: + id() __TBB_NOEXCEPT(true) : my_id(0) {} + + friend bool operator==( tbb_thread_v3::id x, tbb_thread_v3::id y ) __TBB_NOEXCEPT(true); + friend bool operator!=( tbb_thread_v3::id x, tbb_thread_v3::id y ) __TBB_NOEXCEPT(true); + friend bool operator<( tbb_thread_v3::id x, tbb_thread_v3::id y ) __TBB_NOEXCEPT(true); + friend bool operator<=( tbb_thread_v3::id x, tbb_thread_v3::id y ) __TBB_NOEXCEPT(true); + friend bool operator>( tbb_thread_v3::id x, tbb_thread_v3::id y ) __TBB_NOEXCEPT(true); + friend bool operator>=( tbb_thread_v3::id x, tbb_thread_v3::id y ) __TBB_NOEXCEPT(true); + + template + friend std::basic_ostream& + operator<< (std::basic_ostream &out, + tbb_thread_v3::id id) + { + out << id.my_id; + return out; + } + friend tbb_thread_v3::id __TBB_EXPORTED_FUNC thread_get_id_v3(); + + friend inline size_t tbb_hasher( const tbb_thread_v3::id& id ) { + __TBB_STATIC_ASSERT(sizeof(id.my_id) <= sizeof(size_t), "Implementation assumes that thread_id_type fits into machine word"); + return tbb::tbb_hasher(id.my_id); + } + + // A workaround for lack of tbb::atomic (which would require id to be POD in C++03). 
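A brief usage sketch for the deprecated tbb_thread wrapper declared in this header, assuming the program links against the TBB runtime; std::thread is the suggested replacement per the deprecation message. The worker function and its output are illustrative.

    // Sketch: running a function with one argument on a tbb_thread and joining it.
    #include <cstdio>
    #include "tbb/tbb_thread.h"
    #include "tbb/tick_count.h"

    static void worker( int id ) {
        std::printf( "worker %d running\n", id );
    }

    int main() {
        tbb::tbb_thread t( worker, 7 );   // dispatched through a thread_closure_1 instance
        if( t.joinable() )
            t.join();                     // completion of t happens before join() returns
        // Block the calling thread for about 10 ms using the interval type from tick_count.h.
        tbb::this_tbb_thread::sleep( tbb::tick_count::interval_t(0.010) );
        std::printf( "hardware_concurrency = %u\n", tbb::tbb_thread::hardware_concurrency() );
        return 0;
    }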
+ friend id atomic_compare_and_swap(id& location, const id& value, const id& comparand){ + return as_atomic(location.my_id).compare_and_swap(value.my_id, comparand.my_id); + } + }; // tbb_thread_v3::id + + tbb_thread_v3::id tbb_thread_v3::get_id() const __TBB_NOEXCEPT(true) { +#if _WIN32||_WIN64 + return id(my_thread_id); +#else + return id(my_handle); +#endif // _WIN32||_WIN64 + } + + void __TBB_EXPORTED_FUNC move_v3( tbb_thread_v3& t1, tbb_thread_v3& t2 ); + tbb_thread_v3::id __TBB_EXPORTED_FUNC thread_get_id_v3(); + void __TBB_EXPORTED_FUNC thread_yield_v3(); + void __TBB_EXPORTED_FUNC thread_sleep_v3(const tick_count::interval_t &i); + + inline bool operator==(tbb_thread_v3::id x, tbb_thread_v3::id y) __TBB_NOEXCEPT(true) + { + return x.my_id == y.my_id; + } + inline bool operator!=(tbb_thread_v3::id x, tbb_thread_v3::id y) __TBB_NOEXCEPT(true) + { + return x.my_id != y.my_id; + } + inline bool operator<(tbb_thread_v3::id x, tbb_thread_v3::id y) __TBB_NOEXCEPT(true) + { + return x.my_id < y.my_id; + } + inline bool operator<=(tbb_thread_v3::id x, tbb_thread_v3::id y) __TBB_NOEXCEPT(true) + { + return x.my_id <= y.my_id; + } + inline bool operator>(tbb_thread_v3::id x, tbb_thread_v3::id y) __TBB_NOEXCEPT(true) + { + return x.my_id > y.my_id; + } + inline bool operator>=(tbb_thread_v3::id x, tbb_thread_v3::id y) __TBB_NOEXCEPT(true) + { + return x.my_id >= y.my_id; + } + +} // namespace internal; + +//! Users reference thread class by name tbb_thread +__TBB_DEPRECATED_IN_VERBOSE_MODE_MSG("tbb::thread is deprecated, use std::thread") typedef internal::tbb_thread_v3 tbb_thread; + +using internal::operator==; +using internal::operator!=; +using internal::operator<; +using internal::operator>; +using internal::operator<=; +using internal::operator>=; + +inline void move( tbb_thread& t1, tbb_thread& t2 ) { + internal::move_v3(t1, t2); +} + +inline void swap( internal::tbb_thread_v3& t1, internal::tbb_thread_v3& t2 ) __TBB_NOEXCEPT(true) { + std::swap(t1.my_handle, t2.my_handle); +#if _WIN32||_WIN64 + std::swap(t1.my_thread_id, t2.my_thread_id); +#endif /* _WIN32||_WIN64 */ +} + +namespace this_tbb_thread { + __TBB_DEPRECATED_IN_VERBOSE_MODE inline tbb_thread::id get_id() { return internal::thread_get_id_v3(); } + //! Offers the operating system the opportunity to schedule another thread. + __TBB_DEPRECATED_IN_VERBOSE_MODE inline void yield() { internal::thread_yield_v3(); } + //! The current thread blocks at least until the time specified. + __TBB_DEPRECATED_IN_VERBOSE_MODE inline void sleep(const tick_count::interval_t &i) { + internal::thread_sleep_v3(i); + } +} // namespace this_tbb_thread + +} // namespace tbb + +#include "internal/_warning_suppress_disable_notice.h" +#undef __TBB_tbb_thread_H_include_area + +#endif /* __TBB_tbb_thread_H */ diff --git a/ohos/arm64-v8a/include/tbb/tbbmalloc_proxy.h b/ohos/arm64-v8a/include/tbb/tbbmalloc_proxy.h new file mode 100644 index 00000000..28f6a405 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/tbbmalloc_proxy.h @@ -0,0 +1,65 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* +Replacing the standard memory allocation routines in Microsoft* C/C++ RTL +(malloc/free, global new/delete, etc.) with the TBB memory allocator. + +Include the following header to a source of any binary which is loaded during +application startup + +#include "tbb/tbbmalloc_proxy.h" + +or add following parameters to the linker options for the binary which is +loaded during application startup. It can be either exe-file or dll. + +For win32 +tbbmalloc_proxy.lib /INCLUDE:"___TBB_malloc_proxy" +win64 +tbbmalloc_proxy.lib /INCLUDE:"__TBB_malloc_proxy" +*/ + +#ifndef __TBB_tbbmalloc_proxy_H +#define __TBB_tbbmalloc_proxy_H + +#if _MSC_VER + +#ifdef _DEBUG + #pragma comment(lib, "tbbmalloc_proxy_debug.lib") +#else + #pragma comment(lib, "tbbmalloc_proxy.lib") +#endif + +#if defined(_WIN64) + #pragma comment(linker, "/include:__TBB_malloc_proxy") +#else + #pragma comment(linker, "/include:___TBB_malloc_proxy") +#endif + +#else +/* Primarily to support MinGW */ + +extern "C" void __TBB_malloc_proxy(); +struct __TBB_malloc_proxy_caller { + __TBB_malloc_proxy_caller() { __TBB_malloc_proxy(); } +} volatile __TBB_malloc_proxy_helper_object; + +#endif // _MSC_VER + +/* Public Windows API */ +extern "C" int TBB_malloc_replacement_log(char *** function_replacement_log_ptr); + +#endif //__TBB_tbbmalloc_proxy_H diff --git a/ohos/arm64-v8a/include/tbb/tick_count.h b/ohos/arm64-v8a/include/tbb/tick_count.h new file mode 100644 index 00000000..bbc92476 --- /dev/null +++ b/ohos/arm64-v8a/include/tbb/tick_count.h @@ -0,0 +1,136 @@ +/* + Copyright (c) 2005-2020 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#ifndef __TBB_tick_count_H +#define __TBB_tick_count_H + +#include "tbb_stddef.h" + +#if _WIN32||_WIN64 +#include "machine/windows_api.h" +#elif __linux__ +#include +#else /* generic Unix */ +#include +#endif /* (choice of OS) */ + +namespace tbb { + +//! Absolute timestamp +/** @ingroup timing */ +class tick_count { +public: + //! Relative time interval. + class interval_t { + long long value; + explicit interval_t( long long value_ ) : value(value_) {} + public: + //! Construct a time interval representing zero time duration + interval_t() : value(0) {}; + + //! Construct a time interval representing sec seconds time duration + explicit interval_t( double sec ); + + //! Return the length of a time interval in seconds + double seconds() const; + + friend class tbb::tick_count; + + //! Extract the intervals from the tick_counts and subtract them. + friend interval_t operator-( const tick_count& t1, const tick_count& t0 ); + + //! Add two intervals. + friend interval_t operator+( const interval_t& i, const interval_t& j ) { + return interval_t(i.value+j.value); + } + + //! Subtract two intervals. + friend interval_t operator-( const interval_t& i, const interval_t& j ) { + return interval_t(i.value-j.value); + } + + //! 
Accumulation operator + interval_t& operator+=( const interval_t& i ) {value += i.value; return *this;} + + //! Subtraction operator + interval_t& operator-=( const interval_t& i ) {value -= i.value; return *this;} + private: + static long long ticks_per_second(){ +#if _WIN32||_WIN64 + LARGE_INTEGER qpfreq; + int rval = QueryPerformanceFrequency(&qpfreq); + __TBB_ASSERT_EX(rval, "QueryPerformanceFrequency returned zero"); + return static_cast(qpfreq.QuadPart); +#elif __linux__ + return static_cast(1E9); +#else /* generic Unix */ + return static_cast(1E6); +#endif /* (choice of OS) */ + } + }; + + //! Construct an absolute timestamp initialized to zero. + tick_count() : my_count(0) {}; + + //! Return current time. + static tick_count now(); + + //! Subtract two timestamps to get the time interval between + friend interval_t operator-( const tick_count& t1, const tick_count& t0 ); + + //! Return the resolution of the clock in seconds per tick. + static double resolution() { return 1.0 / interval_t::ticks_per_second(); } + +private: + long long my_count; +}; + +inline tick_count tick_count::now() { + tick_count result; +#if _WIN32||_WIN64 + LARGE_INTEGER qpcnt; + int rval = QueryPerformanceCounter(&qpcnt); + __TBB_ASSERT_EX(rval, "QueryPerformanceCounter failed"); + result.my_count = qpcnt.QuadPart; +#elif __linux__ + struct timespec ts; + int status = clock_gettime( CLOCK_REALTIME, &ts ); + __TBB_ASSERT_EX( status==0, "CLOCK_REALTIME not supported" ); + result.my_count = static_cast(1000000000UL)*static_cast(ts.tv_sec) + static_cast(ts.tv_nsec); +#else /* generic Unix */ + struct timeval tv; + int status = gettimeofday(&tv, NULL); + __TBB_ASSERT_EX( status==0, "gettimeofday failed" ); + result.my_count = static_cast(1000000)*static_cast(tv.tv_sec) + static_cast(tv.tv_usec); +#endif /*(choice of OS) */ + return result; +} + +inline tick_count::interval_t::interval_t( double sec ) { + value = static_cast(sec*interval_t::ticks_per_second()); +} + +inline tick_count::interval_t operator-( const tick_count& t1, const tick_count& t0 ) { + return tick_count::interval_t( t1.my_count-t0.my_count ); +} + +inline double tick_count::interval_t::seconds() const { + return value*tick_count::resolution(); +} + +} // namespace tbb + +#endif /* __TBB_tick_count_H */ diff --git a/ohos/arm64-v8a/lib/libtbb_static.a b/ohos/arm64-v8a/lib/libtbb_static.a new file mode 100644 index 0000000000000000000000000000000000000000..8e9c736f5b9e095f2d738d49ac4b63d7d04daea7 GIT binary patch literal 556120 zcmeFa3w&JFbuYZnjAWmYWNd8tfiXxnw3Qs5VMHcSH8p2zLk4euGe?-?JW1>`u=te%XNOgxs2uh-|xpFgIT{{$kM)l zIKgI3_x-ag*{uIv-}jfWS-<**lxq|IyzU2ifdjvah+if|(mO&FtIR#7z6U?#5L; ziLI+*v2A-eZy@~qFBr3Z5dz1Bz8)r(yC%14!F&Y!R zxpMbIEVmIQN7WgjdZ*M2@W=m#mq&{o5*Ok?0O?O>*25BQ6Y0Jx8>Xm{@#{0T@xSmzTdwWyc zI_o!Y+!BttN;@5Pb-Zm?yt6fN0U56iBhMK%fa#b)9em6bJ*7V#8|!2>Y)tC_rf)yJ zshflxo7SiI9X_bkLpYOGcXq{EySn#!J*&ktS=ofSCHqVqNz*cMJac67g={m4wYJAQ zx1kegZ;wGeJ5rtTz66^|4;zVSioPq+suVnIcM@DOT+urkuie$Si@HyDKbr-iZ9QGP z>pMCGm6?#JusWO8_C&m=p0m+W-{NX`zD(HNmELd_C!n{bBF4+LrOw@)eW_Mid{NP2 zEqhTq$y@fU*+i%p?%9Lp5x2x!chq-m&YnQT77Z^%AJn}s1~K>cT|jB7 zl8JVV>EVjRp1wq9TdX&+t%F9E&9a+xv~EA;tF{Ddjc>f~-_jS3v?n^Z^(AA8o}R8A z^j(b`Ta~cF<-Ogh&REaxSVw)9Kd zLAksUJ#wt*i}&uRZcFs`^>popDxnUu=;FdfVl^i*-C(#O>um@cIscu<>%ed-o zjkhMMBISe|>upUY+WOlQJ+ZD9-avb1j-c5XS`O8g&V?)5pg`Nud$;*AY%%7K=EGP9 zglQS8}-Cv5i@@xH!zYf_h~7mF?eR-x@7YsNHctqvm9nd(bn0wJvcK5}}C z*vW`yNvnxOZ+lnYOnHgeqfl<+xpsFv)i)DOvJrG0;w?{EW@(05INaLZA8YMv zk0m;H`J@o7rw?6R$NEjF^7?uzv{hT%RRJu02!~x5a>91~mKbNHKE)@0;j(KnJZ+A5 
z&ZhpCqnzCAzaDZa=y|;S74v`no4ym^#=jW(qL3?3_R&nAonu7kWsr=>4|FH|*`Aa8Cjy;-Doz3+BZnC@nrxpKX zQjP_ae>eQZP5-Pc$wX~74<>&r?B>Q_ih4z1B>U5vq8on@^@>7=JlRJx{VY7s@5aAX z@$2`Zr)Nug{XJ7H#^hhC+dsqqgO^B-<0PB~lmBkClOlbCKYf~H+RAt-YVyB>dgjLe zl;T&-pe@DZALWt$bBcex%_$=$f09T0Q#f7H*N@QuMRfaTQ`~3;mMg9#v zt;;1)Y~ZH;zxME-V#R;g2>hRgpSY#JTk(%n|GLM!mtSt4Ot^A{^e;dIEYdgZ*Qxl+ zN8tanhyC{|{)!R!Z%%bjKP@B~pBaIFzeoP}D1NiN%-^?pM^> zk^jnk$=Edl|6GsycR=wMj=;ZGkAIEwPcD#*BbEQpFwS%Hp9;mle}wecd+=u|#Yd_? zzt-)Cq5oFJe{6*G>pjx%Dw2$*znH%t^eDge3G&zK{?pLETk%7^xL~Fq^3Z>?;xCQk zcDDb2@X-ID;;$Nkf1ii`xw9l=_XzxNWV-v$bBbTTUq_2E^Z!i`{r9Ve+oU&t(|eHI z{P%$7r;99@{In+L#(%IxGCc$NJed6ZC%E%>T`9SCj*xz{2Y+y`A+?@q-Zv^izO3=5wh8%bMeLO+_?<;;Y|MiN0r1GEZVZTnrkIQkvwEtTk{F@Wx|G^~p^6ORn zBb8r;NB$2b$iHc_d-{Q^Wwu93|2f5P`cJXq$M7;PnB{-aBmGLnKa%~wig~J=|JNs| z|6#>%>c36#kL15oJlcR;%ge|otz`^Vy}FKxx46(060R{SH`KW~b= z{VNs!NdEIV#c$fbOYv85Wf3?1=ZHu82NKkO8w^A;&GutJ@du3b&EK!6{@cvwamC+i z@N50`xgP6`I?hR}7w})PxJi47T7NdWKTGq|_?!phr}cC2o4iE7?-Prfy zLJ)|b;wJxhV1Kvt^}<~>cqkD6HTXCAC+YkfMy&22E}15siT_&soBW*~>G!(uQ(Z9m zr;l;h|G45m1Re^M{&n~_`Hziu=ij%IxGbw$93}q^h@1RSU)s;ef9@*DsOtr#U+KcX z1jcu>|6;}8E5!)?$tBJ7|A6tI8^3jjWUOZV3tjk&(EbX3L;q^U&-GvA!v77dKf9&B zS@D;%^s8L>S0G(C{zESOH@fhTALY&;?34+2`Q(}Gf3pk!5|8rVxmxm7%agGGVi*1& zKzTR)^-I4w|0iAeKZEr@H~#87Wx{*qNu+;^3;%zj|8?UJu91xUP(LV8{y&9((|u${}$bT82Rt~tmNnZTjRoi1oGYNzt@F-i3|TPJjy?Czf51bvB-a|3;#N- z|GK5$rT8~XF@nF&h5ud*PzAq{|MYIjSk3sOF8mWR-T7Nx_?NoyzwKfF1B##Pzs!Yy zA=Yo)^sn9^6|ZOIU+=>Iyhr-`9+3P2#=pXaf1*eF2R2InPhFzPf4_(R+Z4Y#iS(&U zGx_a3Wk&vUd!*v}`wNtR>e5X9A7K5`&HuZ$Nd9Vh66xRN!r$P*-|xbIw+sKpSpRfO zKfF~YTrE!`{m;1YU*u7LcPf5v|Ftgsn>^aDJ&K>#pLH($8}#_mu>XJy|9Thxr}g;5 z;Lmy(#0cf`B=UbB;%51MJLqn|Y8U?dUHCr(1G(A1SMih2Qy~B8#=n{VOF8cR!AFP- z-!+jZ!B0)H$v?*<{Yu4O&iJWGHTnPO!N1LgpPD3-|GSgi_3u~wJpa@rnEY3JaxjyzsLOHfD1oWX_Nn_9{lCoWcuauBWk$;otHx_w)}a{w&7-MHl`YtiOozQ*`k-}#lACsi>k$D@KN>ZnS%d&tgeNJ@&7x2_AX#sgGttG3==n_tJplv7am-J z|8a<)i+|W-GrI82Y50HEXZ4(eGxntPr1-4CYw`a%{QngHL`O0(p6$VN3_X}O&pL^w>Iqub#<%=Fp~GIw+8waQZ+z1~{*{0)`Cs}@-T;|vlKRfvYxRsr zy7fun=U@D||L55$*6n*!tg1|4*z?^}gnVO2kr{VAU{&eg1NekZ_j{4_z`QG=x_6f4+<;4`j1)rFSi)GcUt}yR668CRs-@cdS~Ql=MwH2f{#A@mVEJC(C!dUB1NU17Fa}j!R3+OGHMO@TZs@Q*Y%XgXFWqUwtV@r^Lz;X-i#s zn)OLxMIRm{owLEyKWWDW+Aq0XD396(loy^yGhZXop1k2-;oGv#P<_ESt8Cdxw(QJZ zZ}`XWI(fr)d&7p^Mv~vyumrE{YZC9Bo!Q=E&8?`oVgB5z8y4afpEcssBX~^=eM!Yq zCmW{sJjaOei2kx>ycx`-(zu|8QU1rn4dpj13g#Bf3YCPW1;x8igZP?AJ6;$VESZ@% zGcTCi*tn|cGQ2Wvd9=18Sc^Bp*A*>|E-5IeEvZ{tTwH=rtt^d})a2DIojt3tq+|(R z(N|MAYg#aO3EoIr7lfROikq&J!lv2ZX7$|(_rL2R--T(};yaJJ@1k*oRi5^d^e2+O zNPQLIg@hrL(xF0K+Y%76DhWeKmnSQTbDfHd^@vJMKYDB6;nYXab_?2WMFXD}fzqKq zmqIlijCA%8hCuujhG zPYPUHD|WG5;1gniihW<;6CJqx)-!b>@nMmmNAG2Xp9vh9Ho`v$+;5;LbAf9wh+UR@ zhcHqw!jpm?BLyQoB5;f{_CH7BrtmmV8#sAG8Kf;93$qP6u6&HwgNepJ@7{ z0+0DywPNfM>Bh>dRN*UdPO@Tl7Ig^Nr9w{3ueAK<1RnD%{1o99AwO1*x=o^QCi<+H z-)cD+;~?FbKj?Hxm>*Z%g5Py?P4rrecrL#s3LzfEFTdjnDM{kDoc=&p{Psv+(&?k0 zZWy=(zvc7?BJ*2Le~`GdzDPV~ANs9>--_8;V|4bja*W5d*Km~RV}8P~_?aKZ<1v4W z$7AIikH`Ec9*_Bt#^)fC!R%kHE=-62xM`1h1n6V+TKm;|0*~2Q%Q+XW;3kKfe>eP( zaKQ{#zQszPj|lm(^3^%NS>Q3h-Kyvx5_rrH^*w;MaXo|imG*$09{K&52mX2BL6sIi zz2t$f^T5C9fjGA0=G+J1y{`l*WdLf{`~a&$XE&ySWd zJWs_5zlz~nCOxmo>2-M$&gsbyDUkjjlTmRDt2p6&8ehurA{D1oIm2f$yn^Ag8D6R4 zWCxyZ6{FWW6SSDobA4`A_(hPz^{Hk!*Qbu*T%US|^K=^(uFHM8`rT^a7pUJI>a5FM z({~!Urtea?maqM$+o0F@W`$pX{9dWzbjnn5@*iHl+HZ-T`-7H8>6W<2p?c48?Vl!n zuHvQqa(eBDrX1}*Ca&#o;_F$yu4Q^^`*;{P&3bx_;k;aqE8MK7CmGJ^Pbr-8#p}yyhVy!%7sH62*NbHQra(Ba7y4c+ z;k;g?DSEO4Pd8oRM9=F>CZp&2==qFUPxUjFgmZm@N)F*%pQ#Mz>E%Tx5a+%>(PTWMU=&oSid`a{nUP|)-mFIPDE39qMB3OD^$*Ds>~dnr;Jb5-0d 
zml+B-?Ngy}Q;vc~xpTWEGdUbjQ#hr|%SGqUEH5wpwS1Dp{V>4N<@njAo5}RNT*c{> zrQ%dB9Ort@XY|_6X1;XYH}%)HB6_ZWSjjZylq%fpugeu~_RHGNRE}Ivo?l)rI=^PR ziOAvlb2%K>>6-dzdec8AGduA5n}|Nzwj@H!r+jf-i5Krxr24MwsrDQudcB(kZ(QP(CFEIR46{phyhRHoVQeth1x7<|erSyMR8$PW#4c9qF;xR=a#GJpOI( zao<*}2+gir+0?W;(u$96V~LuUqQzIhy+0LH5el6JaY-qOziMQ$ao;Q-M&pq_U+%4V z{_NOS+3b&=x3-Aa#?!~7he@k0!@HY3ZSZ_#Wjj`KZVb#TBVzDA;0Qo(|g^~QMmd2HCZy^YaC`yD)I{q6KfaM zpG0tH3ze|Sm>^4ts$)4maL^cS8$o)3EWsG@lJ<6dPf2{!$Zm5g@ZCuLy+iqVDe=Ln zIMMc}m<-lXkxT(~V?`BxdBOQzg@ivHL2t{By=b0Z3=bgoVt9IyI{@_}cZ$Ydu+H9V zZoitmbEf@vaYXgoz!8;i=SGE*pNYa}cS3kgHw1pj@5r8tB{u3ZMvGo1b_mC!aWOsK z*5D_=JRJX4O{mT$|3;7W4=Q>+uGL~p{%yG6mcE`3rfWuZHu*Ps=&$E{^(34HlmCYv z{F~L%Wq|Qg)a0izq?`WR6n`}_v0(E53D?~CX#)oZJ>QYPoaqmE*gqNTloa%MOa5~5 zPx8qB9L2B4^*XEXmVcJ*Zf)XvQ+{3rcLNltGXq(QwgW}H$_eU&jNJgPgl3%bUMUOWn}Vi0p`Y^>%vcT zCjb2&`iB+&W=a%+(x)OZ`DyOvmi__Na8~0w1>z@Dn*2wAx$);szy$>DSHw@eCjaj| z>{qS$%Si|V@zdQ|lRv{F|Gh5hr@HXJ<-vbQ@dt370;NxPAgX&voJd6`0-l^}<$=*^gY( zOn;FFe^6~*;q`}hQ<(gp@!&6Z;UDk9|0e2(oBow9{8S~(^n)Jh*SqkOOPTy_9_e=} z{z_&)f=vEjfnQZ|T#bbTV$PF}?E@=!=_IS$G&;vVJvJVlcD9%XtDqz+*b z38O9ttUz*jkoKIW1j2)~pECE%ov*z)d&dRXAA4c|dn+wpN-FK$wEhP6?9n)0`(m!n zjK}#F;BI@$z9Q(IrS+u1je-L9HGwnZiQn?zwx@>L=M2^xYCZW{W?U-v$HT?f5$(Bd zeyF|%pPFu}p-(U`Yr?0-#Rm`b3+%5CE~C#Jwzfs<8t~!ysLRq#OLOhYwl?C_qU5p- zzP25mdG+--UWVCo?FzLXW3N$|YYCWciutUV!|I-d8R0CWbbpon|NYK2iFY`DZEvlX z*{*y-@tZlRR}8J1QJpC*3i{l};^Klj9O6G%T=Qw@enbc}JxcSN`UfHpnxC!c!fW*_$dLg(svR?pg{Vtd3^luu?q_M(%Vl^K?3-ziTIkG6X1msllz??nQ= z@1+KQgl8)nk}P~AuxsIL{14Id64--3WIr!4$??3zyLes#zDwQk)?Uc@r}t6?Q(hX(_rte)=C)?oKJ;lYL+tB=kbpabsX4Q@Hd8oct%jsV&8 z%$oVg=R7>;Ft_jBF{R=elef?P$SM9z3GJbu1|941Y{)*JRn+iaYWFUrTc2VT(laLE zWcduq0?;f6O~e;i@dloSAi4+GGbFvx6ZgI|3qf}y{^_}r9V*{D?0jcB^1YSiyB^QB zG$4&Dkp?}7vhux@?hQx-`7Nx1OtMdJvQ-#PLmbas%m+r#1{}t-G{49HfBLhhll{1y zbH)3|o4*L^NBuc-_h?^x2|lt``3TyrOv?Mk%9lS3Ki0CTK0S$cfP9wQmi##f{(Ld) z!+n|REc`i>)9QLWbg%E#Q;{UpZ9nRGGTO3Hc=pWKQ+43uKkuH1@;DdIuAy84Y1VEu zggsQxC&JGrOp(6!_AjlzA-^^FL$o#fSer%VQ;h%Y`LfN*9;2V}%5v0?tTnMVYaitN zl9f?C+AJ!gdX!N;%B9>V^cbu@=cPFTYv~jH_jb*h0H334*yl0We!bPV_MYrh17m2s zJmKhGlnK?7>>8_tp0y%u9X)e~vUvwIze$>KG&sttddoj?*Dv6|ygaGSYn!7_%A`C| zIi77h%-a!akF-oGN8jn8Dqe1G?a(jLHc(l-d9IuTP`i0%O&Q9o6y-L*?^I-7AIh;u z_j%NI>HkE_xA*_U<+~TE|qtfmG>RXt&-u&oP3zd{A~SrxH5m^FC&-vFFrt- zqwHn)8=Di-po9XQ^_n@Y_gJOpq(6Z{Z;+z&n}QGjRmO8#Uq}~YzNwH;F1NWgt)s}1bUe#4fs+RD;TnO{ic);o zByhE8%r5;@;Bm8OarMgrH)oWVb-%!EtJsq65jd@6#)rEFPP2me&?#_Qd5jND0v8?@ zccD_?v{D!!3Iwhewb`W}fjjeiqr$acYnEb#=V^eQ|Nr!S&OgEqQ<7-rCW9{NEf|11 zy$$PZ@Y@{r9FeHsU_Ao*4E7w6sMo-cf}TIGBH9e#G-u2ZvtvXMboo4mcm{ipi0T-H zPvbv>J(opwmBO3&&tP|FMOy*v0g-OZpQ-*+sPVu_oLheHLcVg6v{hpFfg>LF`3~&q zCg<-x=)dQI&+@=^O`-jrIU+~$pw}D-&t*88Y!L{b#&De@!Y^TXv5FIZ5yN$xPxusu z?_~7UHtMiVMTnl`{S3cM6XRS6ItnKJT@rTStsZ#LLymr)mY%QR>2C6nQ|%#VC&Rg% z0}Q`Z$)wXkhT}CRb~xmLzwUv1>Dj^Pxt(`2oa=d*;atxFhI2iSdEm!AaGgO~%b^Ln z4!Zu)K1<$Pq@VMl#iw5BeCOSIWh?28-c~|v??vl(F9eQwFGu+Ta6n4xD8O>(iRO>p1ty= z`QR~k(z)5HG(^J5*s<-OGjWq)Wvk^ZpOH3zx+M_G$K8t7nzm@iQwe@jZOC#dF61aep zw_VVR+}M0aLvvkZVYGc^Q`B+qV_9u&B-&XUZS9D3&?Z5=TO+m`)4Ve>%>Gx`?1yS= znrowt!{y$Z+Bvhtx_bWnI4$Bf7@8M7Hyd`&EFy7*bSMzJ1|5R!4Th*0GsV8o(wmX1 zr86UuWu2XoR%{7xX~qWRj@6M>c_hcK*|NgF&*OK|UeJALGlUh8rq0xc(Re_Q1N7O8 zzrux|#sQj7pS}1OyYN%{s`>Q!AE7@rd9e|iWUN#C=1KEuobo;#ziw|ey~+P6kMuhg z1Md?EGWoxW@foF$Q(V}perY@Eizfdn#Xs5*(61PD`FZjG-b4Rmnt_yK!Au|T3zGIT z(%1cc-3a`Lwf-y(ipqK0$lpZ#9cU>Dv;1c$MfEtIWKuBoCqHxJ->s&${UhlAga`jt zHHADm0{^oZ=M#zfO@MASCD!9>kceRZejd!kZ?*>X0x}A%@?;;iI{HleWpvyoqKI?q zEiRk1Use1@@!R}*ysTxL`u`G)4qgzqswsY@Dllq0s4i)KJvO@u3Yld}Y3)^0QEeA$ zzsN^S{@anb;5U>w?!r&JCO`RyTl&Ft5F?b!lNbY1k(m5bHNTO5y$e4TiOK&Pt-ne~ 
zpPp0v+D?>zGO5Y`AHdx7-{+D(wIwG1A`gDO->(P7u=E5B+rIpF!HA~nTw8IFopyE5?5{{!h1jr|-|pe7INPyw_Bp79M=ppFI7f-@oe}yuXd^yU~573FGD+ zx#jrX4}rhx6z+w!M%ozvaUYYc}GfCcytw;Kusn%dN?pHp7 z`)BQneiHT;2zySE_N04Hzx7Yp^*h`PBio{GWQzN|F1CP=$g;paH7W~}m+r^Wet>CH ze*PHVSGlVm_la|dBKrb#-*6~pUmxsg-oyIyvblYyTfT4a-SA2q^=&f>PrOTWCGysd zeC8spzt(;4@=L59VPEWtXi&BzyKTTb8U>E@QROq?JA;H1)&RZgVRPC;K|X_aA`|r? zll+=~>-`@T_sXYfFQV1_$KhM_duV%^ucR{YT(WQRKIpL1U%73x@3?;t(&`8*S|u0M$?#hPD^^a;=>tA}MWqveo5J?jPlQ*`Ky8 z*>^lS`Snr0-J{YDjq>dsH8pNJE4*TZp8|{0+AJ{+IcgkOJ0LSFO zp3%NNqbl}}_N^aH%SL;~ByS6#?ktOzLhFJ3I+Fu4d@deDfxik*PpZvY^VIm>yv$_{du^{lbm%Z z+1i(!jL%FTNKQYT46#Xf>pbi8(DoYNwbp*H?)RlbWLHw&`egreN$E)BWs%6iq%0(I zIw^oe*83YR>$pGAm5f}Xd3-84`GsVwH#r%v3EQ2VzRxQS>+X-@*Zcb4`B9%-ja|}Mgo8~a4)yg~hbK%K7zZw2VlAHRaJ*7OF9vr5KY z^S}m-r*h9N-C%NU)}5oR zS7{8h;o@IChW&O&s!>O(d{!a(O5h{p0~6qrpA2xR9`)m6gY~BM%BB} ziDO~ZC(<2rsLXo*1sgBKSnquBbGlQ|SMX!c2gO0n%{%7XDb}7pE8kBFNFYv%xluvd%5T`&qA?YMltoMSo~ngG)$ea@RsEL1eDI$~qx_Cv`2v z;4yRUXVq^X=63XZv`<{`M!v{D){ag(k&AfXqur0KO|?$Y9^_Q3>!F5wHaxZ<<%f?2 z@!PkcYvFI<`x|n6eN?_l_>J+%^e*&A^t&5vUrLSDlUkG1ll@VvB;{#eANKDJW>2%8 z&;DpqNorq8U-kn0?!#VE?CGZWnbW(*`TlgYc^LVkulL~}`ND6MT|o2`e9wB&3LbJd!s#pF2R5`dlJ?P2H>v&wN?Y&;G?J`{bS&xmq;HS`m$B>2K1qE7s)2Mri}e=zNvVK-u9nM;G35+-()mq z8h;?)WY?*$(&aetHN!8z5iaR{cc^gStH5#AbbBD8iwf1`QQ0HsNFfYQWSrzzIckOrkgWlLv_?QegH+Fe?= z!8k&f1zn^6A?QgmDNgOYj*|@tIY2L;FYj)1C9co6zp7;_Mck~45W4C1~AJc1FST{sk zm?zF2K)*$4zBx~xr}+Y}Tsg`*u>y5D1%27u??v{#h_T~3*x=3gQuonwDwI#!4?F;0 zrTJk$+G#3d)CD1{AG+)If%+BFk@|gVBi?`Nbke{9hDvO zHv##hv{S~Fi?z;ODi13H;lb&?)K?-H!(W4O`E=+%9&P^Gq!gil>Y1_oHbQ^u4@rM~ zM`jm2$5KCfe60PuQMP}-744pEKhREyeC=qg`W(?GuDGeU2Vh5uomPW>L) z<{jk=!{zNT`mVFd-}Db5f5JaM#JrhnH8;XPdr|f}UlT>SO|;AHM=s^I{Qc(Zld{~V z!r!Ru=-DZrzrC#NNGI)&)MugIW3C;0zDt+gNtE5&s_baY5|}INNDAUKexZ7kN;u|{ z*I;Z&zb*J5mHTk?*C;ty&k%j4DI)+G_Lu8X&ry)N?H%e>wYW{um%Zp zdj>5tr)wdtkx`wj$C!=u%5Eq;we?F@&yIO1CunRzxUZo@)StOSL%VxX7TG@E(Jrj% z*CS4CVgTzGG;aPAY+yc@MfIB6CNuqD^4=@RK5xOFMO&EMbww_IQ{Sz|6Vn6AcBB{0 z1?W2QX!{C$F2j{=1%5um=gxbcFiD^EJlT92{%QO&1^-n3G-mC?zfRkF64&tWdTs8g zo+gT1hYwtpskC{Shf5tgz}`bi4bQ&JvQ@pQ!dR6G;$ ze^K!)#9vkM$%y}6#e;|ss`ymM9yeF`ULaSlhoy`{`;7k-l)tV65%dW)=o8kCwg&Nz zsm#=GhWjoZl`?h(#-o98V>e#cH@7b^Ed)0s^`~^N)>XaLGGovM&B^I6!TP~zZ9~r zL4PqF{_<<@C`3d*<$c@;OT}K2pzF%5kl8EmE&TQk^sYEgyitMu>l1W_}^t@_YNr=*p;g z`v|=v){3W`-Q0pb+O4ZiyaaendpkZzSC+84iRLXKy1#*E6LBZoeqT*LS;6tFpnNWm zI)gA>N5EX#eqNEbLoKG?2t-367ypeShWGQ)+3i`sM>8HHdfL0Tjc5=K)4}*w8_C~8 zI0EKvA`rG;#RKY0SigZ~;rB^}mD1Uk9T3kgrmI3aM%UbQ1WfuBE4tT-F3vt_;`-uY zt_Q)=sjl}U9?u{9?%sAqlSecNl%MAmRwWRLy{s^8YtrwK!a9l0covl8oKn}dt%xTL z3t|++K13Iu8S1*`Avs|KBRTY5Nea6u4Hr2b>iSM1LLfP~cPavxy zN7v?a^B?8ol-6d&L;G)Sp6cP#3W8^!;-hqJKK6Vq;lh7$uIKuA%FYsb2r8ebNRTH8 z`BjQ%2I96ZcDV$^_hGcJPv&yHYzjHU1^Nb|+cUhU&Lgg|4%XMw37YA^Ul91^4*Y3> zlLh0$T>=lqgZMQogP?o|{$rsJ?E;JsUlcf%etcLaaM~dlA3_4hEXW9_g+8+l)Uv)H z=!+eAjlfGB_@x5pU)7Q%*Nc3mI_S#y!cKN#k|7Z+Qv9AgIV-EZt zfnVal9}?-}4vi7!2s`8&sAat-=%+dGe-ij~2i_#`OC9(%0;kp`J|qeJvUm``ZWXxm zS@leTtBvZ6>-QqR;=oYL`iX!9Qyl(nfs0JX;SUOYd>lqUrV3JLyYw3&daNAZFs{`K zFIDZTzVu~<_Zs+Yg}-3n=PSHi3sC1HLeH^Qmw|6l_yGe?f+>CMs~)#~-!5=|$6 zM}!^$bYsI2e-}i*VwfuEW9==Ciz(neW}uIaD`=cep%P7G21{8#llD&n7mb;qJ0y;l z^(mZP^~6{@hrRtk%mYAsHhZ&?JMkCp6xtkO8}9u=>?0Ve7vz ze$KFh>Q`)j{s8f?r|FL=im=c#)^1RKC~Ot!#@Y>yzXiKx*t4kEoZuKbj12tga9Bm? 
z+uIrTDnV>M{|SM|`qMl`e+2f=U~dV~_@_m>vHGsdSfTD|9;NHK3ouzAC5j-(cuDg?(*Qr}Z@EB5T-dK6qv5|~s#H4))v;BN98Jn%vf{0c~OlXEVrmK%Pthn$z; zKW_8~Jm@d_lqoF|z+X{aw0Nd68P6~|15M;V^QaBhcE zhI9E94CnHz82&{jzYuy*NXEHVM#XWP2d?{h)Bd_XnfBK`f@y!E49yT)ir&OHu)Nz8ee5uw@Gr}#I38d)x92H_^K>m%zqnr|Gn~sw zV>qvOa~RI`*Y{aS&nKCl<&2)=x_=~k?*FGJlR(|=h0bCRXY<)3Ca_aE+8yj%jT zytw~NW%!fQB;uIsffq8I`w90qj&pzG`tbJWc9w2f#i{+_xSmg#{-gVM!g)O%P;v<8 z^>j1Cd417stSN{4=U+n(Z~wVIyuIc5;_WThCrjna9AA|y-0TN=J>~wa`z*4Yw0 zZfA}kR5T|2aSuEVaw(YfxgNM)*EZ=J6+QV0*N5k8l}t(;z3^N`L-Kk5%jxf9a&(l^ zHr4r#}RK3M7Zi=lv1) z!*td@L?x>@W_sX!+{@_$ir0s9bsZrno#8uGoKD*RiJs#@x`;sdIvEv5ZD+#QGCWVk zO*zFLc-RA1ut@hY{Lp&yd_Ag(*qPFO+=E`19g*|=KEn8QbT~O*&_sruhZ(M;Bxj5M z!Onz#iT$3e;`GkUD;ci+pYSllZ)Nmz8Q#ip%}bDOmq<<-qknN+VTNDB z@KX%0V0b3dqd@6iC!^x1#}$NM&u~4?B>V=3-^JuqGJGe)Kf&QO$?_sc^!5#T>A!H+s*LX zG%?P4o=bX07(S%vX}zM3;d=dq=&xY-NBr^!(f>We^*RaB&ttfJSGimdA$sjwT}lqo z&u8=-4EbH^+8#!K6{CO2px1tkSEz_U@)t1r-y8JWzx6zq=xME0hx3zVOy{>0*C~`L zoajF-A#uFUpzl@m^^Crn(cf;+zo6(X`QFXQEylObL!qXI+P0R6*nAnvZ>m`ljWjf` z!pk`8%9_fG>Q**2t;Q=-Yw-ruwkY0}+j58FZM%+_zUE(l)uPDU#>SRf`e0mBQzhQ_ zN-xE=KSB5Zu=h6bRaIxY_&(^L(hPDe-UFm2}=TW4-Cbgs01BnsyL zJnQ3}b=KZHiPV|-|L*UuUvl=^&wk%`t@W<8-nG{LSZ`6=nl(uebiVI^8cyh%o0{Ww zYw3or6^`ag&s1WxFyYn?5-G;cdp%q zs<1VU&25{8-i+A)9b!X{*jV{Yvl)rt=tu+)k$L|@BhN!@gf55u<_*|M{g%3x zwhfgd*&AIl)nM;)jQ&-h!d~fO=5PiZrgNY1p{mrgJr}Ee{M8=*OW9_kMG5b>@rIh4 zOG`^{xpsMZd??wz8ZW^Vi$k|?H-L%Ewb+|}C_y7=9&^>_FV`*~MwV};ifj>0VvqLt zm&&j6`v^z2HbDke&2DLeX)9mqM{PHHPt}iP+xkQz(Si-FH7#*DtPiEY6#G)FXE=>; z3NWNGSb1GBdX1Zc&P?mpqfXl5m{l}#;wSwsd;%Q~k;uHn3`{4II`{n)(~2hN z?`F^PP5}+_hLzC*8oiB`YwB)^7DnUoTN>-=6{58@-nOZk%c-?Zr5H{v%IG|B*;MFB zK7G&p&}}5WFNJ+>mM${HvJ!eq|q;E8$ea*AflSKYVX1k_*OREyswBM6^J9dl5G2j}kkAL1HA6 z6IacTMD7UYCiDqXG@oug3dL_!{Q9^bDS{Ic5Q^WZ_{9(S;eG_L_>GER;g6(9c#AI- zzfti!*X4W!-bt!w7YiRHMdG(m{6@v^92YtKm{iZ6BYumET~~{wn-lR{D1M{j7eC+< z?oW#G3r{HGx3I`1Au2A&UqL^oP-L*sMOlOk5F~yJ#cx!`1-UG`r20R{XWY)L$X4d-0b{ zlqn3C{_7y~;*TkQ{XVeHuU-DIHQXhdQ5|jmdwkM=S@EA3hW~P8-Yfsls83^m zJq-W-KKg&;T*){b{{uehx2aDcdWK2A8+OVo|K-_|ak%z-yN~|wQT*k@r2oHt+W#@d zKb-yH`_^9hKcVe@-ZV{Ty2t!Os5)AN}o_B02VGMs>9L{|tNgJGCBty#&-1b0Ws2WU z&pv<5M}I36|2@Ox|1k*k%KyuXf7USkO`6}bv!@h)^)T({6ci-VxA-fj%7mv_xLtno z@*mZh654a*>G%{h`TnwoPh*^A9ohWb6~A47_IWMFS+Ddpf0<559c}*aDSoS%&M}pK zzQxb9%rH~U|ALl);5134%t(U8=Kns*Pt4|@toT(o^5y?0#UHZLFH!u%>Azg%-{vn< z{JDvgWyIFs|MJm)^9cD*LLkd;*Z(7m-%igy4`Q4b{K3?W?inHfPu2Kgk5gTWzd2Q` zNQ;oJ$E#HSEixy2x{b(l&8M&aQ1M&YPw>N?O-iuK{}uq7mkj$Zk!)RR9<~Oj)%e%^ zx?TP&>fcI2Fs_~_`S+;|bNvWX7yls$^x_ZCm;73<Fx5bgMPfyAAh+_SnG-Db4qXXpYOw;tN7oMVnqH~6*hk2Fv3WfX$I^f?yT{BwNtf6RlQUuE0;TYU5%Sp;GPGyN$z zxB0jFq+jd7&nbz`KL&>1tNi;Ezp1|*5B?i`_f4|~C3?2sRpC;YT ze=p{LUim+`m|V&Cbjd#h|2F@1KKw6x@L%M?{}(>?v)6pr~g$2=w5ei&!iOel77FVJr zB_$FWxsTeI8cUgQOr##e4s=m5heh|pz9n)#%kk+l{Bv*KkFf8o-~PPXm%@UGb21&i zg1A5O+=4ATCsjC|^sVqM_>}tq`#*2I%jx7EyrZ#g$OrI2k~{sMcZLTxKR7jTZr>X0 zv)zULYq{_G)KIwZP3)h0+gayrD?58pf7i!@2NuBRta&|vJ>}_hwhVja;#;BpC)A$$ zCe53t2FrQ9>(tD1Tk_H$na!=z=~cD8tX6RkrePM}KO$ z55&YC{qMt9-3K`59?EyZ&mH%)rjNR!oY!{ZKJ5YR+wgZPf4L|t_0kb=4$$Xe1Ijv_ z&qMkl(|l6{S$%oftN2_c=Y6CRMx1GBUt#~C(jwl4blzCx+(&hrJWe%^rnS-M+NQ~mHo~r=gCpwx9G8Jl(ZIBq1L2O4^KKyRo{;lWFx(yrycR6O_dKESlcB&9p_B}W@+}M$3(wxKg~s+G zx1oKgqV3`E^I>OSIQ&f5IT%hm9CnU{!zaRlo^aYb;lNwr@n$i+eZHbU&`$MEa1PW> zTh!f_A@}{KKb~c1|KULFN6TQF=u2SIgu}nQ3pU^u)I0oWb{@G+`nc#qyJej1SNlfQ z?Uy*)?^N^?w%_4=q6&Jn-#5VbF8nIhVxRB>=f?hNH~RPEVffa7O=0?I6Z?d)B&uGw(RseJyxNPr7u)L;5wyKj}j# z53hxhCK`4pdDw4BLmPuNa``&QShh*l)3U+A13ah9rI1J8Im`!rPSZadkKp?KU+4{^)CnnPW}yiZZ$w>^xYDUZbjS6JX!&r1-~|S 
z7k%Z?9&k*03;8yE%4nazr*D52KI<^nqCGpx&Zpn$bRzG4?I;_|oerNAlwEU<0o%uTiZn~~SMArE~7dd}#pLOswf=0ZOXbVWZ8VRp^{ z{7!AEJc#tNy!a@Wb`WiaWuWiPrGYIwGdA4ysqXVS$#i4eX>8lah7uu_d&~1rYzEjrzdG12QIFot?6``-km6m^vx>#UF^`; z!bp26`pOa1^>oOVH1wf%&R*6_uTR~cYWfNMj|Jd|EC_#OA^0VOzvkQF%NG8vO5mG{ zbz71y{2?UtM*q#)XA6D$Qjh8IM>$eGo`SwOUpoXn{u2Lxg8X7Gm_?s&Z^AZ7zc;d7 zhC@@1(q~8T&i0*U&XjG;&k?luBdCkxzyI2+Ja_8q`n4Vch*ty`yw3SwE#`ky&~B!N zru3yl=bB~^dar^W7hoQUo0$nbyy;e6Rz&y4e`ltU>+#k!*W!f4%|7dvAZT;P_6U@_8%x7ac=k3Y~ zWF2)v3tlI-ANlN3*PqdJkW1T&ecxW`Mp-#7h0zyCKY3E@pYkBHcG@p@o=;m9x}dME z=sP=Ulglvo)$^V=lpcNx`NGGU>R)+i7j+?b-+Fs&vTyCmbcEmCezb?z&=$Jz&pABj z^_(MR!=Enu&-ppWc3%F`<_9;2CifjrcM|n7^=w&}wA(!x$8>##H*FMkxfB>_rW^g} zIQ&}BF8lYTjs3Zp^K0E|S-=uy=DaHDBbh!BWqZZ^O86;-U*e;A0q6?poPXz*ogK*c zIe+-ttLe}+^(^WraDLzAcX#ZJ)V21%pfLIt&cS&v=Hc{XH6L=yHlMfaJFrK81wGSV zZu($wzm`G&%z2=HQSnCt;cZKCo`YjA{9#bW4^{pA1od zbWu0jsPO&9IsdLd3?ASZ=51R8d)ep1@QGskChZ8ywlfLshIRHV`cQVxFLq+yHbDLB zx_JS0oYX)2y!7q&&#DaBsOR3>Hi$JuSI9ZAJDA<~rm%aYog{3U);I3)W|5Y26nl9kzGv7NC_-i6N> z()|{589YKl+oKpu$hp!R$ScRR9@r}_k7HCV>XU126JuZKLj8RKZH8%GYUP7` zr0GCi^7^b1YX;7 z>*D#~dlCH_?*w#m&haA0(bE}6_F~<>05Mc<2gAa%ANl-)P@E6f?EV~mGC;1B;f)O`f)3yb+I+6VfPCQTjM0msAXIDV*P zl-<5b=pFP2WSaD2P9M_J*73TYb7;NxL9bKc|Gf-8+PS{}2zH!#q#YMH*5{{`{Epi< z3i;66fwJkx68CC;hCJ|EE^y8bcyC3oJBYKd6XUvLxce=n=ek4MYwC@@%#&r{b(SG) zw2fb&4Ajj>7$@F7CpHj=4d)#GZM1FrR6cRp;%?4Me+L`QxoIB!ro}OjpN%mxJ#hcd z+Q3;yx8eWz4};zGvAcKEy8aW><(Si+m$~aVDBm3LXFxCX-_||t#hu6D`?(MD-iCbI z#tQIozP|?bJq`19_I1u{df`X*?MoJQ(|_aO^iX#O?p?i&yt2KMK4{U>&oSjO|2@hd zE!*z#OSm4;{*RZTeJ@2jFGG933hn+%j0vUi+n(x=mi35xkK9^?+f-yRFO*L^W~ZV3 z)6$-pzR*nj$1#MyPS*qj+k&un-Dvl`H$oozNTUqxCyoBJhx4V4J^0YCI6ejUQty-1 z94a0P^sT`>fd0(o{PJA%$I+O>rirxJPtL>r$1wEBcn0cyshYR&JP-X8e)0MS`vTp) zGo9|^p#1=SiaO=GZV=})QJ)Q1i*Y@-4((wZ+QJ9H%%filWgcCP_05NvU-zGlG{&WG zOAkQzq4WB#zvSFq*ow8B<5nj0$#u`MX-9UF?l|rX(H9r#K13S1HRL8P5~dhkFm+cVmUE^jZ!>r7SVbj+2f( z>+#QVvwi-#yEY^LT;Io$2Jd}vKD!*ged$MZ5IkFgj#%TaoA$y^^7la>)cY*Rtf*^% ztcLy*(2FT&8v07+$7AL$42(Uxhkb?ZeCEP#+9b~T=o6GYKY;(_fI-9lBl3ZM*%OjJ zjd7bli+cCc^>2cyx)$j`MxqZW8M#bf`F*C{_VP1s`xs~2XaAyY&~kPEx=Yx8nZI;g zC#_8*`ngKq?1Q|={{9!z(9f~9yB%wfy91rt?;Y>YGL5dgFYFJfJ~K58znE&x9!`_( zVsJ3f&9;6V?cvSAV0RhrL-3sKfcG#4$H3m9t|DlkSPKp;Kpnr0I;M}-HK=3u6}I(! 
z)N_s#K1zCwJEESCpq|M~-*l|!nW*PjU48#D{KpMBi=cyi)bX34@ZNUR@tf4~^o89g zR2}pFI@4K)I_CA`u&L|RcvFutbS&2G^p#hSd~hyZAGmkt3fz0xopSFXQ>@$HhU`97 zujkC!vXgwKe(7_r`|hvw(~ipd={cBBB=4!~`{=AE{tKJQ{6eO#Q&V1QeT6UwNrx@Y zfKASXZ61rd8;80(3w0Ojd<*t%HRd5<$VFZwY#+(B7?(@AL$LCTu z_>B+adCA~Rw8xpa*R@VPBgv7^ zNGJpM7Lg~c@y8-hSZDU19q7(Q`{LOBCdS*GoL}zj!TJdcO|eem_)&p;MR1+U7=GwyxXVvgJen_o2Qp`DbO8|c`%IXJoRE|h&Y@_roc z9Jags_yzFKi1p*!nENgNMvTvcPR{&>)BhOs`9AbAFa~WE>9TD3=!?9!ysR>F*W0&2 zFBn&-2lz}Dy6r$&sK?7u?tb)-DIVqNLwXsi3~`j9ViVSsD8pg)sZWr1-jDhn=1`kM zcI(YM#3EX+r5FnwF!y1M>*am>RrF5Va1Hp&^O_0 z^F}#OXQ15Q!}Z%xS1iW@l!N`C`|dA^zPM1>NzUz8qR(87zOw>-XgT`QGR$k1VqOzU z_-(r^_vqq7p~SvcMle!-lvc1<{0|s_s2*(2aLMqe2=t)W5#s%&hxf)9M9fG zozM<(Tua)!F$sHDOM3@?mgzWT``J5=X({$@0qCb+Dec{5u>smU+B&_~XL;D4gsma% zJj>SLz8-1#Z`vE4>w8am7z5Df_hzIy`{5&W?+J_#<6ui>;(z71Mg0%pKOPv@cTK<< zxF7#3N5uvn!2dO9AmKvcrumnP02U|(|J|G9moLPlLNHe3R>;DnHracU7xM)@o)B2!e1Y>V#ngazArN%!Ca*<= zeBGjwc(0(RMM({l1wG%wO%1;h^nANEHT+0s(OPXvw?g%!36+?KxJcl_vZY+f68N|jj3@K~;$i)r?<<`B#0(7z-)Mq}uT=OE3olSO=E5oA zVTHR}7T?Kow<|J-#7LDQMx2D)qnE{Y%`fe~G*GFLAg2 zCGPGQ5_k6tiM#uS#NGWu;_iMSad*FvxVv9S+}$rE?(P>7clQg4>rtI&GW4T8%uuh)fOUYH>S;`3xw{OWd1e3^lF z8T8i~_-YlmK-IBE32iNV3^5-g$1VZ|ZH95=8kX zuE$H_SL(!YoPl!&;-xYwejinF8!s{FO*y|d=*@JW@S!*B%cSo$=*|2z81l_@_ZYZI z|BQi~?L2JAH`7(*!q?3beS%|~AxC}2C6L1gZptq+Kbqz8s~1!MrkqE7 z()H7)-V5ArC%-n@|5d7tJdB4uVIaNP-}%r<<7WHU;}YpjIb{aD*-!Nz+oU(;6iD$R zT~q(74Sb=B^DqVc40gF1e9DFUT%Pp%4BX7W9+zxA=)Fno{P!Ai%yMzB0S3~W^8NH# z;6qnft&enFmO{3%|5sm$#=7P!&-+aD#E?hBO1i9 z%)qA`xE{Yqf02QA81xq#xVGD*zr?`z8T7dZe$2r03|!ku%E>oyZGVW*H1G1%Qi+&^Z&e0#^#)NSeYZiUu{<8zD+zHh0iGB);(Cil-wZ1FtwPW1k==GN9UG}pAu z;~wbiH#BpbYOzJH&Q$8o)BbWici%1?f}Xo>3l{?J9kJEUk+WAo^G)kpy>^@T7d*sv z?2WVQs#`ra+?LvvJ8qv*;iGdC8=BIkLCY=NmvzV;sE5#us*%D1?6c2}ms@LYYJ``B zcw6O~25fwdZGq2Jvc0GOn#!vBQtTgVRr!e9;ZUlEmkI1gT2&vfs;!T&t*rZR*=%~~ zeWuT-s#Wboi)+@iK6LHKdTCta%x-OKK~3SfsZkuyC>5VwYa3TxdCRI>upRG}w_rzO zY|7hMdfmbycbp!gof)noum%0CY_UST6kB}8Vv~J=V&iKz(Ps$r{YMHmJaGuYZB6mY zHf-Tt+1mCgrBIZ#t9A<27zW=7?;htP&Vf?m7m<@8FfSl0c_37 znL@I^Tvvp&T-)MCOGdaMGImc*-SL)yy|XR$w6*tLl^fjZZEjsxhurN}ZfmPIx5fUk zdVgA5%&m%j*!6j+y=5ur)80Ln_cM}rLG|RzabhZ}@7HkO z3$7p3vLSKw<6q&y&-t&-|7E0)Ki88By-rqQ5(mw%fd!a94#lt6Z<^lb|5vQby!g}9 zyj0)+_2d7E;#cIF3}3FwXpF**V5k2Su6d=OJxVg^b-NZ}^S|fA-#;`G?Meo&I-x%KwgfL{l(K`d>gh@k&3!2LT9n zdiMDtAO0DN|M_9k|GrQ8dsW5h=SVU)?)-f0!(WmmIhJTfb+q%}igxZ*{yP-EzMrSX z*!+z?_5X}|7^2aP%-Ph^(jAYUF zTMMxHe~*4betYk{M-;#6Mw;Dtq9vh9mA-k;fLHb?{^kUyj4&+*edK*r9hX<+?|44g z^awWXJ|Fw5`NPT%Yf*N2=0YL1?ns+GO>&G^47|s|@k{e(%gfH?V77J2OS$UiQ-z{q zb&}5J=Q!cT-=X-o8~hv+Z2m4KSCJ(S+3F@&nL?O8hXkAd9bjJRS15j6ujD7M&3~KD zzoo?O9{e<^HveyZ^mo`J|Ct{Aze9uY%70kh%qs&A1M|{C{4~in|FvioUi>k2(^9unra#_;f4>j^YQh{Y2F?G|@%>Owa{Oi%Ez4+fz{QABx)8~+2 zr;m4~J^6E0#cu}>1NkT6-{wEdC;fwp{}`DO$bT;WZT{Z@5c#+JkGiR9mY;8h*!(}x z{SVj7(53jBm0YIJd&4&WY$R@(NFgS>SW3Gn`O1I*Z2NmzVPQ$c2y$O13COto)q6k9 zixw*Ck?;LfW+WODxrkxCfQI{D{BEw)i2IlQ%IlrPs|2Z4vBa!9u(;h?4Ig`O0dz}c zOC4`;$-AQPqgFHfs+HGYD(>mIZgOEcs47XMbLKx(FaOaMQbH`{EO}!0H;yahS4}=a z7*nz41R^84I`#1qz&dV{?lVt}xJ|nN*F3cBX#O(A%kh(K-R=W?Z^Ww)^ees<>ZlKP z`j-{XnQL@`$Z&li8l9_1K0zNa3K$a^iQz51u*3J+|2z7@c%_WMZPz-1J3bS*VC+;e zzG!=8w-K#7edM!6uRgO&5V&jOTLNbdriMM@)KHFbIponQ*j6VRxX!13ZpPT>)QonO_0E?Wg(!mW5pB>HZ#ai?EAL2225J8rJ$EjFWfUl%v9Yii?bTPhoCyt+Q_ zV8L4w?;;f#MLy%`VsNrupQ#NX?@;_&&pLzl`BKGi7iNiKXx5DC$g~*r@g^fKE~uaV z=7|y4d>Xm~5n^^rI;aS8HK96c8=~R+5wUrBt%pAdUGgAo4|S~hb$_`Pn5`4Cbf}DK zI`Xo8+Wg-H=I%9EhbjI)$8|H<<-e@3L=ViulnrOYilS;W$H>TSSclFuG7=M+iQ$dS zv!o5f-YB??eVDMP7aphU{h>gH@3x;4dtuPl_2By_v7f@?G>UPgwpc$-c4Ph$^F%%oVhw3e8aXs2j5ar=6l#bgK2HP);Y<& 
zNbtT!IrimEGjJ!aO@i#dz_)6c6K1`R^6EU*BCci53yz)lP;holeT(wTwUC+)dfUmMZxXlnd=K3uVg}-?4?{cWgQM9_sY% ztDviEptF_G-PO?H3Vg%1ynAqQTJjsV|Ax8MNXHcR1+_eq??S8|d*S#$3x1dVhce3W z&!GDgu^R=F>^B8CZ&p|ZJHr`%k3tdO#lP2Bc0hD8jyS1dufVN|HAoH%+-*gEDsXr2 zmiMM~DU)gZxu8!O#l%Hv3v3H7`MkzTUI}?x72@u6CA~XciM!K{BA*kD5l7Smh@Zp1 z`3^Svh6rN?p47t(h0hguGCw+2CGey^HO?`fZNd(lfJbZ)=l>J`UibkYT&FV+^0?;I zL6?JTQWMv>cc@46L^d85O@PRGc{P4e3y!BE;EdbZAo% z;=GrW2t&Ml6D_XZw7#+t9$q)BZ>U>eU%E<7=-gAnl2pogWkZ9W{kez>3+Yxpt~n?< z&gXxcA^CcaNg}3-)eTL}HS3+~2E6MaSCTI2(ZU2*Q)M;$2v^a0F`P9QC2)~oO)Dm& z&3MNN9?XY*b7UwbCDRis8bNxiR^3vEGE_A*wPJcZ%xrff7P@rfDLio|{deler=Qjw zm~%KZ4Z|YI77~*O*RSs9`odQb8H(Sgx6f%Iz4(`?1ZL=j)Y0Z=Kk?$%{okgy&wuK} zukD1kUpgB$KfihLN?-T83SKmV&A$WJNTU0@Ce_bin>C=0TAV(fi-?Y!q)U=RfC;;3 zzp40b@-hYNN#Rb;B{cj9_-$TZ)9+H}s*7g<~k$+rgh4ok=OfVa5NqET9prQ|Gdx z0=YrFSb;=_y^m5jf1X-|jC>8UkDXM7m^j9qFF9WnZJ`+FU%hJ#&ab7u&fv4fHWFH{i_o!t@&E-E00DYm@-?Ld8B+{hNQ^gU@wp?^-m! z3vz2$LI&pcwJV(gq=R|-Ld?^DCe}V#NMi@~lxPpe4pBb$E3U;_f%`|G&a>+B7tTK( z%F9PS9?T1zgMC??z8L2Eq~Ts8q^Si>Zzwk(Ype$;CjdIi()(g@-(IE%EX&jZ%TKzn zTAw)qnTNyj)1DpJ_nvZ~C#e&b74fXWP~esKGM#~UuL+{eA?!=)bn1QgF+bVP{c5-; z=K+*!Fob(PbSVold>?Y!A}Lc|NbPd^mP?1*PtLxGp4H{?#X(8vp+VEI$vlP*k4A}d6veI&-vU_qYU*R?m;4Ny3Kgd%t4v4H;CNhmSqXI z2j}ZP@%|V73iX0MaXZ?{-Pp5O=?v@keC{KKy=vKJb$UV;=B3+TN4q(!`a=cQ(^>tS zAFLSd%wyYPo7#hG74os|jy*x=5ZC(J7XsU?+|!u*UCMm1y<}ni+_4P%AKiIle+P7* z{+KiH8rJ==;i3*;tFxXR?L=`eU}6CMOXuSkNb6yh7V~>prHB2~^11gg%g_IJk?uJt zJLaE>y6iCe_T{jv>D+Q&H&0<&hiYI-`*!^%)0Fvp9Ch~={;^M_(B+ApEHlTUrIemTiNr`U@_J}+gs6ID~$G9 zf%e1vz~0c_Cv5Ah*sF$i!o;+_#9nIS9W-?Or=k5l5iG|2N9c>~!Th)T8`G~v+U(C$ z)1905U_VV5U5qzQC-~y^u1{F9UsR#I&ggW<8Vh8_FFt3^xQvG(@Y)Z z+)UpmdN18^=r_p!j+E=z(}{cPa!=Ud{AO@ZSn^Bz(~iCOokOQ@Pwf}~Hj@Vvwy-M5Ybh0j3ubM_&N4@IvUesros@K1;>Q&R|dbRiZO}UPG{Um$+ zn)TYD`T)k!LkrM`>d|hrUE#iQ%isI@tF!9r4|2cLhIKO!W(L|1#j&r`aY^Qc%?_ojcJvWlPU;GFCyRP2OLK~;H}r#I_5n@vpP{Q1yAcqtC+$#n z14T;q53NVo*a7EXS`T{ZCXDtH!1ai-lV&^wyh7nY#AApr2s=^gz?{#9F;B#Jjr;BJ z*Ms}i$V0*=Bi^ZP@|#!4z3_Hp9IZf|V(*5VXGxpjNuTpsNssX_pK}q6*PKJ`(DN+l zwuSS%a2duzjN9RbI4)fED(7m@hZrC9JleJkEXNJ+e*MA6kp2zuC#3C&U)~okfNutj zy||9K5X&3ne%m81Gv^L0^Rpu^^CXm+J{R0&W}bDKGf?KGC^M8kZ}~vegS&&9e@`D6 z7!9(P1)6^pfi5x@VvZp6k!ma7JNv?ZbN)36<@y84gt2>I|A(7@kGX5PpFSRo$-2>V z7}f>pIfr^S82dfzBOQ6mM;`K!FYXhWj(n(j%}LH{4vV?D%oFD??~Pg9&+)1fb1ZYt zL!5dy+guj%%6vXF!hGsAkj`f<^0|A6d=9lH^IA9kck7aE3;SEP!KjZwW0!Usc~{-qUwu4exKu1*Q~8+ zLBW%`L{^}tx|>kYnpSEOdLwD1wWh5Nnsi#dN{foAX=$umPt~#tnl`jWs+u6VwVCfS zp#nTqbou4UrjeWHBv0bdLuI_Jsky14>84E%s<5dFlH;rv$cooiuCGS>LGFDokrWc* z>zk@;9ApCxrY2swb}bXxgf{!%76uVg68|rLt~y>NZoa33HEd!(N%36N*hi4hV2TwF z2f5^ahogD_NAPhlU|7N*i?^8=g5XWr4{)q_=Ej8#gHD6_dqNF;JV z9qaHVMR$+_%zWWAg8vihI_}|kmW?vTs~gn&5Kk@tSn+;rfx7-EuP65Ulx?5db^9v8T%m6U5&2wc4^ZCp8D;DrVvqwkAy6}j*) z3VLnTl9%TSoYSV%aE`$F=4op9ywHzoM8>6$1$~-<$mnuGpYk+YT!3E|1U_X-4Zjk& z`nb!u^w$Ds4^0h^37l{Kr-pk3E|igSVWz-Erc&^*z_FsXLZ1*}zGWZ@-W2re!Ii;N zDn#@4PipwRpr^%64ZjdL?nqhTpui_vsN?Js_ysP!Uf{FbBPio) zP~cdxSmED<9wHX%IL`}u^#IMdxo&Q}GVw1=AhZ-xA1 z|JC@9U>btPTSYou&Y1#kT{ zEb#~rt`KtEc9=vV$8Cot?zY1cpTUDGgi>};OI{8+?)e+9hEjG=GwG9dAc>ORJ%5vO z-19eyC+%UX9QXW9(!1wx5}$3dC*q-$oz)UB@|m<(NtF3d+SOD%X+Kl(q}|r|g)n(J zd|;-6wtEvXbh&3m*yjvGlo$RTFivzLT1;Xc+lQ&lM5CREH43QS#Sl2rXeav=C)Xhp zje6H5FGRW%6DtMG4-|cdke_T$UZq%cfeD*{y{A$sDlJ-Z-ISxg5(XRtIQG8u0 z)6?|#puW85JB9v}c2d*7=xw6$R$P}1RIm z$Zr%T8an_LC^-=yJ)gq7aiZZLKwWTH<D*ByxzcP zsQ4rc&s8|(+-=a$GVlityg-QPBS&vT|xUNU$)5LW>Qof#3kWJU4oqt`A zlw;;UqSB(A!z#{$Szpf^xULsn@0Y4;x?Xg>Yh3G-^0yjtVk%BO+;8A|1Z4jA7`V<4 z^Kat1UMT;&2EDFd$}#n}LebdzT&-}**=fkBR&ib48`OD&I_mOjyiMVDd3C)LKcr}Q z(B-o0YnGz1>r0oHdV9{0GgifIIl7(Law3YJ`7zt=rVlmgolm=5rkpMM 
z1V>YjiR*SwIo~$u!z#}FnDkn2%#Vrdc1Zo0a%Lzw#P2oa>-^jDb$_(w>waX**Zs)W zo9;)p-eN|6%z9jH%0X=4D`5OO$k`+-$ zZ67qfZa*5=^s^K_^ZAg9^Pt;_mapaLcB1hgD<0iWw0uo}g}SbBO&?P@^=78KMB%K* zCsmw>G6Uak;44&|^=s1Wc1YZ$*X>Z}|0jxnwL0p2YP>?>)X#nu=fSK;Gu^R@#?H@p zg|oc7RGfz_6=!+(s5lS0-YLh#wS3Z>c0>6-L= z6i$7b^iLRaT2!2eE(15q`=r7t-=yECaLPC7bsu4Qzo=+zhYZa$_lfFmclyB0% zrEuo|Miu9w*T8Qz@DnOd`6m543a5ON9uHYWpnS6(rYW3qOgyY`_S3&mGI`MN#n|Ji z9`9&p%yB$o$T7!peLs)+G24%Rw}$QC#0!)hJO9NBxAV_@FfjjrDWl?diI1EzA2}-w zIR^|ml=2P1QLRkuDxlDY94}Gq}SuT^l*hkJ1AH3EFzenNJ&)=vx4;?CQ z=ks}mlm1UtoCn=sZ2JGF2dN~}ZkTpn+YRc?#C1Ed+fTOAxvl3}3b*MC6mGYlD}3av z@WGpX@Ou?*x1a4k^e-9oX8SSqV77B@KUpqQKRt$g6F;GF(wpPjI|`>gG;wX;Z2fCJ z+xnbhq-*9=>)EE){nXZf%ty{DAH2;6*Lt?~zr%z`!$8zcDqO6lyBN?t!LW~ z^ecM1eV$S{_0z24;(&7o)=LU5VTbg(?1a)3PI?npu%I``Ym?r@O?q?uHR(;hj9Hm6Gol>-=arwm!9-D^wmdy_Qp^aIObUJy$4Pr>o^_{b*d*qt=hcbv-s% z>1sJzZ#q9(j_xBG*K+Pqa%??s_sQoD3)l7dh=uEVd{p5&U7gQ87Ov$yVc}ZNlM1)< z`HX>^?O~+-?q5}Vu-nNSRy(otZ}&Sb$8INDPRz)s*&cMc?E61c6uqu@t+$AU>vZk% z>U4F#u=B6Wt8pzy_X~|{Il5f7{&l_E`M2|<^RM%x%caxR`O&ye*UpcY^J(c{w`Z2? z0i|0W_NzE?^S<0zh1=uic!k^JrmjcIF~?0s?gW6><76(L?=pntLEz?n4=tbcf2C-6 z$TsLH=Ncs^V&H8m&V$xJ<>-4?#I>GverlB*UGK!raWh6H1me06^1O^c2wKkFO3o?? z3)~z}R~z_GRh)+k71wg~`~S5DZjQ6f2L3Y@HxHC^&;WG3a8ECtXI|FjCBD?4*ZCyA z%)qs7i7z*B-L8nQFz_xFCw{eoYaJ3_Y2e2U`fCjQgn?__3JClRl=FER6~B5tPW)N} z*XwBF*BQ88&k`>;@LHTR5WikV#qT`^euIJUF>v#~&3*$n@3$N=@Gl#3er@148u&W~ z{uKkyQtgiU{HlSM7$-s+4@`SkVBSgv#yw0F+Gw^i=zDwb}546d^_ZjqGFz`bLz3wC8 zvh77PI;UmB`nI~pn)t1iE$i`l9=Dgf&E?>$FuJM*?iourwzX8=P}9J5$fb~EAC zvucy01YUK+rzleR?mFX4Mr9`P4P(v5p?tBG=x`LhG^Dx0Dw;hyU z=f?3(tp2tZ+YZ|GTv)j97JSpo-RR41UEH+3Y6Cv+Twk?`I~4k-WLHa+A7r*d{b(~o zb0+c_Eoxd{6BjLvb?TM1;w4Sjtgk6YBV4LKHWszsI?^QOp#*SYSVP}}O&eOOYDUgk zS#|SS`LQ6M4W+3pCbsYwM~K*i8oJ>Pqv))C${Kq)e>f>9@*lON->@32}HEhaD-c zBhe5hJ84Pb;T9VdC-%H3t%I9oS5D^LRI|RO1)C!$=%R&{ZP-MjR+$*lCFe+UnAuWzc^=V7kTA)~%&CUu&b6md@1|9fa5|nyVC39|Nnginu*pR|cegOb zKp&}oz+GcUVYCXJRyszOTcOCQfb#WRRSJK*x}Ku*DqBG##%pf7vZ1Ah^Y1!17sj8a zN)#wr_w(CoYtX!|6I-oZDYsuK)$`_|=i!C%t5#gIXd#@)uZ%|vi}i%ZFlX_GI{1!0 z18LE@V#;sMC`Z;5=BZJS6JDva(rY(Yt2WRhYH^qWtzSwsL(=YBMV+e@G zqr)?sbF668*ukdsTJ#MqBAF|V=p3oV$|q2Iv{0x?EG96o^{?i_!X9UYbIsav zxk-&r-K;1qa9Qi-wiaJ%v@5+4Q0fLR@gcY{F!^rsG8A87iQiB*6jyYvX`B`12#m|z zI?S(Y+WgJQ$lB#xbWaQ+Be72-5;(6F`}oA0)(@30&-p;I=NGm$w89Hn!-g8{S~QfL z5ooDIW@qgg%E4y!a)T(>w8m$i=R4U^Qx^Z0r3&hrIr{gV=sYw5y(&^`p))XPg~g(; z>aCB&PEy>jsjbG{Z==QWICk2KbIE(n`l^~}31ea{FIIU=uxv(W6cr{asCSJ}3zDHss_0MjP;EAS$TMa3T&^T5cNUGW+E%$gFEm9_1jrry@ z!oO1HV%WkSW9}3ZnTi%&-&%v&6D>IgxKcV4D4I{-0xl=>#c(o@i^A2s%i;W-dAwLG zev9x6e(|gQ&C@lPaL`pOev5>=`9cwoinxf*f%`5<5x+&kl~>^$x1)G*A>oW3^wJlC z_$?H_+C4pm!AAldC5Ycb@f($hNJ+R(D3T5o#4o+mBk$s3RB(#lLh-BJE1(gH_Jl=7 zbavEiVZ%79FBZ<~i^T7z?YRE&Q6uwP&*uQV|Cy%lb?W0+z>iy_4nF%z3FbY1^MdBn z81L&2#c$r*CXda3E113b^>YP%)alv$@EzsJzluT;mdHcm*XDl$55TXM#a}%P|9w8`>-+MlBStzHM{yH$OF+491iC;VYd>{L{0^=IPaP}_* z1I6?U1$FZ04#oeNW!Q9g(8mK4B)=Li5(2ny0K-OkNc`INY&yzG%;tYY@!RzF`4>^o zL-E`Cvd`O5PebwB>DlL*!=!K1+voJvL|yuo-_EaneiwKt)8>Cv>CdLO&tLW7-!nq} zcly-dNcq3`NAmyuAIbloPyLTH|Fh8kz4Wj7ZGGG4=c)b|vih0kx7&+-zCqa^yPtQd z`fEo0nnCwleauz%-=twY`y3mQ=bBGnz1U~|qxoO98m6898(@E}q6^Wz=gJf|s&epM zBd*^xf402r^nuydDKAyzNdmnN;yjpHvH4q2ATR!X`I2ATFY=Sl=6~8J{oI+7Uw?-{ zeoiTE{@t)2Ug;OilKk(;gLsdPYgC(m9_+Lif8%otzcJX)k zq`%LDpHm_`{qx3qr{C|v&nbb;ztTtlvDqL-D3b@FKd9QpzZHe|Du0LKuQ2$>dGNmm zrFil8D*i`QhhhCg)vok+Kq+4Q?W#j(8|iaMw##3QM(xG_h~n4p-!Xlt+QrZJ+r9WJ z3T46@w%)h<(_gqdcC$Vog`6uJw=3nB&AGw0usr(n<+~)tL5C5Le3iibO zjp_4VfXyHG=|3-f@L%Y`|93DdUgdW_=T84K9{h`tAus*{4}R`uX6OHYAN~r(Uu)D~ z#Dl*F?aM3uBMU%`FdprLf%?nAzn%VZpZ<4jA-R%!)R2D~{%!t2B<_{|Ba6tL%0C_F 
zHvc6sYF_-S7YlZ=e=TE7e+K?-{)>G2f58%RrP{xXaBlPOL#Osif1~0z^?!*6|3u7R z1i#WV(`4u=mB(y(5cw}g+)h8o$A0!J{tA?jf#sivf17_L>Q9l7ED=^OmC+lJKQpN2 zlz=WPDlCi`p}-ZF$Xw-NbY8+W>BxPJ#*9Q`BIOwFH>2VH7yE!n6xxb#T=3d6UUng? zV#zDtSDRhc)Yyo}YlJ)JNJg9XCn$nI}fvre%Bf z(5|ET%M>qKis#(*GKDL$#GzlsR2NR*cKY&VK-SsprZwv>8{W@uNwjFL=pQ3*`?bbL z1u>D67~Ze16nbO+nJ4O>$EZ4T>cI24>PS8DoHl^#0LD4I^E(5^IaKrbC+f(dm**S; z`ON|298PXjaklHPsQ5x0;I`iREDnAc?tJ36g zA$&sSUm6=&4qtOQmyYUQhEI#=qbeP|;XwMXKoowga85Z|rjRx6%KmsD&=&|g<;zCJ z1~NgP8OYpKk2E$1GWv4huPK2+{^oS&*OPzt- z8L{qMT+4+Itn^VbebaXnY^Lx%n-K|gX52BVJ7b13ka0NBNuQS)r^a+=l;FG~h-(?$ z8So#P(H`myFLXL1$7W|Be23HRjBWpHV29XZ zY)3_g^s%P>TYhx*Qv=S;Pr}#V z2e(b-;}lx_9bw2hcQ8Ht0_5a=EPPRJpO5tB@*h52kGvQ=bRP9G%9(KFMd#25@Vj|E ze1RVMdF&8$IN^n#JBOwY?hcoo?acYbWa0C62ksRWhtr(T-;6XK!q&eL5|5%6Nw=fcLil;ikoAZ$OGSH(gvM9F)J{fz)I&-R3eo5QG>tAy&m>NjCIX%*T z?Rj@@;goa7V9=?bT<~oXufQj*%md{dh76p~Ar1M7PYq<%cNBI^h7UW$C)a01za{vN z{QhgNo)<{#WS-v)ohN+V{t4pbrN8Ij0DB+h;JI!uFH8$`R-7GP`NG#nb-(ztKXJ5i})KiU}j>j%j5 zdq2PW;IqN3`kx^WNMk$xJKvN(eMOpKr+wma_C`o|LVlf~#Si@HC&X<_$AW3Ckb?^qu2h;o@nGj7wE*WeHK z3(iZhf-eQqPK|bY0^wt$ol}9dmq$BY!L%czo#%sTCq_Fj2dBI{+UW?Ty*1j|5z1;G z<8*}@kZf8tq8+0Ozd$DdNOz~t*yY%;^tAa z!Y`u->=R&r{27!lfO01Nfa`wquIm3Ud>juS`8e7Ci_kvkd-hrSgoWv0Cww&Tr9G%cn||YCsdLih zW;(_6G28YT=TIelLARle?+7__aBX1fb2)vpLQZ+!9kGXKt7jswm}g{74W#$cJ}*Q2 zx%BFcT~ES(KLuO=Y+!8PV`u~Y@UM3-#sQx1flu7~$E9!k`PH$3^@D@^x0Q~XzkYV? z(9e;V^|PHr^uWZz;RwDn&6N9g@EL+{irbb2#&8iQY1 z>eci++8=*j_^+--dFdPV=(~gE>n>j0z2Le0z6QjXeJ%D7$3^ON0me!AcztE>9|p^> zzB+SwotBo)zaRcrk^bj*L#MUSCG`$_I>3BT51ZG7=Ii{>uDt(+h26A=LDUWNMjxk! zU+W#(KlZ@c7xZI5M|%#UJ;SH{?HJqOOJ28U`YfkEP_*m4U%;&oFCCXa$Mf;OV6kl5 z3&!ZSeZH%0&%Zr6wy|v=Gj#mpQO?{Bj9b*-Lil2(TkI|nm>Vf`j7ppqpHR0zO zJa3{s^1Al>TqSMPjt`(KGp&x(gTu*hiZ*}MW2^WAZxkyB^#asJAk>} zQ^Yc^qZM9I||7n)Av6C*1dD zD0A=cP&czcyR9yEXe-j2iMBf%=P$u_V2)V-bJ&l@r?*ai12*F@{3%n`JNU+LL-@hRBjdi2Le z%zO7Dk5AIRA>X^vPcbl*$B{1Qx;%dj=YN1K&W#r=88tsVOV)jOmUBqVm(gZ(llTOU zI~9xP^SbR%m40bCW(1tTay<`w6XQR|RL&*$VhGx?A2ywJ+7pt#P3x|dbNpHyr*e*e zy`1B(gFWCJf8A(#ybf(l&+&^e$KQ&%o|t!EpT6rH%M zZp;JGPx=l+2F9U2+VwE%nb@7E=dRCz2J=OeCW0|Cg(e>~m|vPS4%(MVQyYxUKZ4sn zwbb_n=apKeZ`{Yt5*WrcjbYNC75*;Ec5dHu$XCbfG0dmolm461u7$Cd@RNu2wM(9^ zPx^Rf`|G$;*5~dkN6qh=E!%Dv#*+@Tr7fymiS{Jcd0Aqew{^6t&+9$bdBp5>9@=Bd zIuGs5T<3LMEZ2D*n8R?L*YRhpi`4q;TH_3Sl5($@@6=?R7RhQ|jx(@)1c@hw(suAOs4{Q>4mN9 zQ}*ss*t>0)W?axP?q`#?Jr%rQH*D6uPv1AWBM{D<1-k>em~W@|9fpmX1$(y}_LlZ; z9PHhtuy?ay?{>q+pMbra3fuZS*t;hKV}-qAI>=)_^Dzzn%c&3AXxZL_;waiFFt&I4 zswOS%5RbO43g@b^6?r%cJ+W_bUckPEIfCd{9hg^!G1r+h+SyNAlm7YiT{Fj(_UGp~ zuh5r$1;&aK7$bHECiV58ebb)DVb7WV4Aj|s(A!sWJb^jv{a2SAyl>&^evao4JQqJW z7+qQa0{aB?jeJk&IGx_Xx!_9B#{=W~8Upw2d^R{%)L~y}?A}>{*gzxXuUB>0ggPYM zy`ZBk)?wX*U}rw$!M;z(KRvbs>+d=5A%8Q+NL$Ge!z_%6V^OYc zC>LzcD{mvM>&GqZzYk^H4EgPl`ya^v57BR4Mj4p@r|&}hDO^=why3^98g5r-JsUW? 
zZ++kaV+LgZ0S$pqCo|};8W|ilgk!RBV9CYMop4TJK%;QVQH}p5*rPJd&opg(Ow=_>jBJ(ZiYT5bif|ver-!e-l~jU;R9R~WbN(1 znjlxrT{^^?VA66so###s7IQwO)8m{fh_wOJ<@|RfWnmdQFy2uIupv8e-(#+wj+U>> z^DN5J@%rMtE%~xM%rC|RG5=bMdEpk6X93E_{*bpSUChNkjq=#}Vt)PR^NVoLC$;W^ z`5&l0dmLj-ZeUU-_22utbLc*_QRa8j(+^K(yJ39rF6Yocq&wyBh0>RAnO&NPzHlfH zJX`XyuEV&&vTX+4YRohL<=XUJc@JE9kn%{A7jh19&D#q)%80d3DV$C|I1O1NjU!dGTpp)0{3p?fc-}uVG*GD_AY{mR?5%T#v z*d)*%+JbQncZ&v|!!_n%MtbZv%^x1MdotS*+Ry;|3iH7E1L|X-7kZc)3fG5!+%dTq z>9!(O}QJznz^_FLYE(&cC0)oUO2VXlAJhfxoQwxWL7UL4%l zU4Z&sg8I!x`H=2FJMPzY;Qk!zHdoc_&oN)u^VR$%axSwB_pFy=jD~y_o;+##qIB*$E7DoZaePHrMepdCwbjZQ5GuIurM#^DfQUH7h+fa4*J$S?SI| zUO}vT3$Ev(EVD4rnYP3kn1%FtKb`aTt)S0$9NdGC4PgBwj+LNa4jB!|`@*N4?roT} zQ$Fv#=U~pB7fa)Kbp-bhaLhdN1K53?ei-udUW*+Xn-e>9Zyju_t(0&MA zb6u&|lx8|%-lxX+R)ln%fV>x-)xY_{-NAGEDxho5BPe@!&^h@g<}>=7^4K;h6W6nN zo+IqXIR^GD?prvWHFuD9NTyr2vUpqMYx3TI7;_su z7dzP->e$J7o-WS^XHV(3@BO9P)sR?cXxrMm6?KidP7dY_@4^1bKFj;o^8T@~IharN z@!ke(4Cnog=uf@4&i=dwee+q^ojYN7Xm7e{1F^17$9+3}onO2Jty98 z`e4kGezq;I`=h3gxDHF`hhqZD%yk^)+gTl8JI&F+G^?z zHoxD}>(tP>eZTn8BRkm^IG#}c+h|`Sv5j~JE6Ry?RKyVv9kLd(Zq#_8^&{#k6z+SD za+wC=>kwz1q+f?RdJNCp(8gcHya?Ywbn<%MV@~HIsMF{J2>*u;GD$y(}B+Pg)r z`P6xO-tT0`gl%wYV*}5xFl@Tf?SE zLtSHCTTOMOttoQRhS?EvwA9?Z0Uyyu);6^OGuiN^a8pZ7_3X%%8(Q$$WQ!(k6|{}G zmY}{B+n7Yw)I?hA8t~K~2`SzP_W^5&fLkUVslr|pH`TPx&cLm8>@!mx5$^$j@3xu< zwj_d#n)Q*&2430}!S~aCIc=zHxd}Ov+_@KR$ct1}uE)y-A`|%5wRLSBa-QMf3(iPm z-A%P^Dt+d}ts|k1$gQ==J#_$;DXj<@sUOy>eR4{jRCwO{CTwBVyvfGr1IIVq&`0Sf zsqlaYacY9@oe5SFnyYsO5NFZ3YBQ*s>NuX`*HmExga4EOTQ>6WU;J*%c$KrjZPz-1 zJ3bS*VC+=!z9Z&)BCJwJ(&4d`2<_@fnwW~;!{dsv7mok4;CIvTaSGPfiM^2zQUT@q z+UOnOhcmu4_Sh@DaxWic?l@k-7<0;|kCj}>xsI^sG=BKX(76UMTfDLg1AAf{ETa|zV>AEI%$^#ou9U+)>Gc1hdW(eH2YK7=EnU6{U)y4i(fHSs#h7Ry*SNiO_6+|ZJ5Go@a7UeI5j3gYbh z0_U4fsbQM&lp$lIna6H|&!leQ~&q5uiU*unp(#gxe z5;&ebtsW7TAZ>7rbdTs|>2igexf#yY(+| zcYY-9)|_F21BpW7 z%K{fUaJPMs^ltkgakm|ixZ4g$+-(OW?zRIGciRDpyX}C)-F86YZaW}xw;hnU+YU(l zG9FwZBxWuy82WVgcT$C1Uq4IUkmH^we1v|N6HJbGdcJ~p=EU67PPN$wKOXR==RWG9 zYg>X?BVPDc%>BIZLq0gfd(t=H#L4A zvSfeUr}Xrbam17TQR6>^{wF%gb}rTq;3~5LWhv>KGh@yi3 zb(R0eMSha~NUS+Pb%T$dzleI7=p_4>Sbu=NSm;v>J|c}(Dy3Tmo*XaQ72Yf4B<;Ld zE92U`_@5}gypx3Gihi7+XRDS$uhsHUj)_jvZfK47i*zs2k(z4z?0*GmLC-IlXk3D$!WuJqLUn_H2u-B#FO?=)Bi-&uTBj{RAf_h2XbD#a>22IpQl53FIpLanyTGf-0%QKZtab>n}Z1nI!P!dJWA_ zgbPJ}lIy#O!rMiDlIu|2@*WrYnX9Ohm+dzk&{04JK2Z+DBB=iEmUm<#ZeL_o_JMbQt({6{j2%-(%1}V9-Bl(3|*63TL`H4}`QmBmNad!@~&` zXMRjM?`U9F6OHhqjy*ik|q5l2!bc``|bF;1xc2tq+&8SHe!5*GM273X2RiqpSa zrGe{m+2x(0XzcQ8n_#Egqv&niv@@oD8dUym{WL3_>6-d6?St9gt}xOy^%F7hW)
v|<&RZ3+f}EE^Dth; z*{)37%zu|bZ|dhh1COaV%T;6Gt5n?fpIB|+CVkkDGm?Hf%b?dX37NQAk1Gs%vmW0u zaI+rG`ZDEo81$wb&hr_l2UCtNR{;2TB#HRd`X~OdfouCi{6Pbcs5tS53|!}v_{|2c z>x=ki11~Y;=srRyX5jazI1gn8{!IhdQObWDC%Qalc^}h6#*y?-_|We$=uAEAGH@Lo zD&0plk#Qvb_i*l&&+i&^`a0=(Z^sTp$@w#b*p%~a1J_Z?@zcX@O@yPVhn)s)*83v{ zuA`LyKlBNXx?j_eo^AuYT%c%(&sT9CmKgZu23}#{pEd9f1OJ?Xvy2Rs9}_Y8^Sp`^ zUufXR41AG+pD^&n2JV2LfpT=7d9LTD#7haNP;Ag&`Tw!^Htr<>R8@ zS1w3vKvWU}BvyyZM>MEa)KqC}dlN|ThOdMW5JsoO>U4rS4z#qT)ehJ?Gl^rLIxVy^ z(|N_w&Kp}hkG48>-f17fah{2#GovkP(gJz^|9#dvci(-^NrH8r>HGV=8*#YCU zYp?xz_St7ICE{3T(XY1X+AmX%)&<_~wD^}<{JSmsH5R?wqF-y#do8-QQM}z}(XY4o zbsrz#ReHLB&8s2`^qJPBV|Aj@r#iC=9 z*#zo&t3^*)^c5Dp+@gQfqF*ia63F?uMb~jP<>zM`&^KB9+P4z6(~`f?lJg~p|2DaI z$m0L7#s8ec-=X(hhX5n=7goS5Xvg9 z1>!#<%B$TF&JEQ=Mxi4fkvYGubzRGFP7sB+vT}19HqM8mOnEBw-k-$d7_8bd{6TU)lRu^rA&n&DPwJ$&n64PZoqeHTm>)$|P5ya8%# z#-cI0D5T>Q(Mc1Z2KMA^_yXY?3THea|35b>h|!CcC0aMKSdB%{29;?o|`mD z)s3sxZfLy&o+55n z*1$oa<6KqmR+zgZFv#57*R`%}TnDdT^juiG5;8}C8i|8LSL2EW9qn`shwZ9bw?+9a zi*YQYSvOy*IXw_{aAU06bweX6=P8127~yjebvV-ABS>*&MS-k1InLHxZth6gstTMa z`5xPhmn=yp@@*vQRHFQ4;W4ZQ6$@)|we70~SR2*?KfdusG?7=p4IO+NZlXB)4O>RP zT~Zut>Df`+F8HTJyR^D-RdY>6l@uMc8_3dia7pLuWBeKo?M+eDncLX3X7i>Fl#2C@ z8=4Ag0pdJG4}eil|Aut%7vT^m@Pg-eCg=f2=8jJt=?JVKN9mAn*p^9M<<_p`PHu%e zx($=1Z~$b=5!@R2PTxk5rOWBRZfLo41ECw5;ZJD9LdA_u_G9%o7p}I>FYorvQRqFX zepwv1TZ^>tRAC|dhfTBfVRoc+%H(^{2}d1X=sgk@!YEgjyvL)FJHLyzdLRkT{95VC z*|^F>AB9YnflcjA*G9Y3D@?b&fITji!}8oxm>JqQ4Pj!7rFmWBr9)?K!0Z}SZqPHL zpS5nTLi4Bv`9mZ$irtMiZ~a(jeJ;dr9T&8nrBa ziSgFPl`UH;DsjH41?2=^Mr1p-wKe+=D$!EAj1}4Jmh~gytXKf?uDrs+1uu?4zixk) z5oiCQ`G@MNEX%SU&gE!rB<-lUF-3-0u%RNVtZdmp_nko_3Xku`ccy9aq!S#vt*oq+ zF|OZau5NGTBLduSj$8yBH#b`jT?>K3_8yhhzS}np<2In?wPK)UxdQe5?u{rVbE}$K zHqn7xw4`jy`ZoAlwnhg-8)oj@q4bK9HhfayCp03BS<%~3Zdh>@PB&MOHn+|P&BXia z0x!n-UA)`7tCuVh7x8$uL|*w`?#!?22yg49!#To(V>-+0-8^`@^L^jJDWBu}j{C~G zQN!yU$iDA8^ZUEw4bN58ca8TxJIMP(%+E~FIT`Dik#n85=3F7?<4c6D$2h#s3EFth z+bd3wad@4pv+@H$s!<2kRdI6cPU8^Ft8eN}4&NnsTYt{uD^8Db_?;1WzE4n`9^>$PBJdBN z0)C(1ZTs=Pg~qGLxcDEB!1H~D;`A7Ye=-8kG$~GxarkE=@SM+8oE}?Ie=(&S+T;2K z!Mg{=Dg9EcYa9x%^-%d;{Kgo3nnDpe&2Rq4#lIe-|31Orn}?b+7k@DbQTpq=zpleo zF)qFc>&u9jX?bPy9v|hOYCX^!F8(#}Ma5rrnvc+Ru5oz2+m6C>eK14HKLo#S{QE_J zi7w#tO8wI(DclM!{y%UpD*h+2u8+Zfr@?C%|1tRF;NAYm#jlb2 zr)NZe7e7bv7lrEeJ0DXbk={f|q6>km2Gd zW1K>ooBn-*AI^TWwfs8zKOuNG|6TiCCH8aU?;Rn2pT-}g&_6jW_-cpl#{ag|A2nFyVskg{hZ^(lNNlNLs$Fj@e9&^I&9uf!Mk+DDZO9%PsjWL{;<=qxcdJq#vd-0 zvfF0(F}x|`phjHs`AhMdudhPB+a?p)AOn{=ULe%sk3#|%Pygak_-?^h%T40h7rOYL z!G2Nr!;nkKe!MIu z+X)N<9l(H#-9V z#Tfif!Pij<1nPeY{@nPl9~G_tD-rlhBk{;oAgn$4`^E_=`0CPW<~K;%Aav{37(f zQStXh;7M}v4{7|kXNNf%|EfWV%>NAjT>P8kqvPL6W(2$ZEyA^n|5Z%=?amO5Zw00Q zWBg0;=i+~c_FGuU9)vWR5w3P#4!HfVSpt}M5tkm$KjPe~%Br*pBli)vPnCu~87hU} zN%-2>-iOrRFFo-X{60JehTWa`L>;>BoaTSVvHNpH{{Ex7->B{XjC+_u<^XZqg-?By z4HZWQdr+#}GJIxm`0TyssC@Maqx-w=v-Ht@RC(lnkcc;B?0EU*o3MQ#R$O9r>)OlH zZ5?KNL|JA@T6%ROwkK@zd~S!KWkU;gaP%4eTU?94r)NlVZ^7nrlp0@_wnq!sC>+^@>_33f(hVAej=sD5SN4j|LQxG)K(#Mno(6$)s`r6#qZP@c(BFo#mE+-$dy-uhaZN;5WAzRt8~)obj7m zo33`EoXG)q{+$8o_WXgyMLNgQ8ZZPffpmMmgU=fbst?B>3@S(UQF_|kMVyX4LN;_h zgJ@vez`$=3?AoN|f^>O}VE9&;U!PxfCTXihmuYW9*=^A=OwNbGhE-PdMy$=5XWH3{ z`N8V;{Feg$dLesRjxYzSi@1OV>mWB{yVa}zW=%+i zzsQ5@Jt`_mH9r->ia_L5#ibRzF(+SAeyg*u1C1gadN-Sik*e*O-u^3qhr(+;QGOTC zdJ%=cO90w_#Nqjkbrk+l0aPncj&A(RK##)zRQR=hRxvLAycm3vLJ?ek_xcQUePng# zhQ|q>{e>NbY%rvM3xA}!<>BMP*hVHRxcIX{P@2W&{UC}62A9llXGwE=@E3$}pDh&U z?nq@7_*^U@Z9?pl<3!NduPJ^CnGhZa-nA*|{X%Tu1p?b>igNLrK#Rh!kS;iS3DYqP} zaQuTPV~a)R;x9{AWB-}v#;dq;3DiF@7H+9Y{8eG7PR@p;PLFTd=#*Hd?uOp4!tWe?`C<%~ zqAzOt^Boj_v?KLL-3uL5W&*ybRA%|{!Tl|#!DkxuVtk#5uM}yWb-FizvXP=sLSC-{ 
zK8yQ_Uk}#*9R5CDfd7Fm#}6Tj3x{;ObKvcTzmX^(6E24QgY@Cc4S60-lsr-7btfLi zIYP1oK3B%k?g^t;c@lXcdX<*I-#D-FT)hzY8=yCN)37b_&Gbq-~Rwn1By>Xv-z7Jr4V->V%=6Zm%^?F)gY?I^1XX-Atq z;Nt1e#g+k`Qop|aZ{XiF?0>cf`9a&#&XZv8Tk+_n{$SoIp86P`Weyxfyk8pU?XRAc z8Nm9317AX!FCPp0gSHSpK=whO0n`uPuf{#p^VD;wAE+<;QHJ5O&%5zC*zPgd;vLj; z<$dME;C{5HDZO5S>+(Tky9wokzU|Ig;_a#%gg;04;i((+eG;4eosjqMxCK7&dW*zo zo|SjsxAKnhGw;d~AM2x?cbeW_$+uoB-&oF`Gx>Jb(KPa{SMn_hIYtl5rzEDvQdqw+ zK%O3xS7(`eK>SQ8r*EK~mZO}ySoL$BET-?8DfHJhD+wPy^iwP8e^JxF2X)!C!4kyH zyf_RWDy|K0DS`iL$0z1U?S(Pfi0$<>>|@*Ouw^eiZ>7G3vg+!=Hqx~P%a3bEmU-H7 z9_-kyaxB}c{TPS(wo<=VzrOu@W?n6>4ruvj*>vkbKl}r|gF1jQQK}bp{4C_jKcQ?- zE%FY08|C}eqSVne{E@v{H05Xl{$_e{ulFSS1iviZsAZ`ivgsR_?bLtbNF3{p@J&#@ z%STY&(ckY!ISKqPYg;u2bw}H((!t@&d(y9K8%JZNm;}8!*0$E$b5RMvYf(wZpUxDdRV^OWFp^IfTk|T$yA!C(gHD>L|-OWe!!& zqhxuAGs+(n=dvfKyqiP>m%XQXZzaZi^>`-OpMzhg zKGZSK>zp_p<0YLVCx-fW{GGQ8bAqY;xb8+fMjyf)XFYW?+1rISooxvH^08fiRmNGb z!k^v`zytqYA>Zi#`1^U^=(bFbvsC6W)UhuhjW1jM`re7iE9oP&9NdlewikRsIiPPu zDF@U`?EqWU&~|lrM?l+8f_)hGCj2!Zj?clir01B%V%P*Sb$o*{bLu%MC(q&jLZrVz z%E^4Rhx8%Vi*}EA+9!#2v0KIx!59xZG5&66oTomo`Y>kT_^AW&}HOu?Oy|Z7s79M8noohMK#c6r{}R9@!DU*xZNIGGEUr2 zbs;TDjI+BwZTvRRV>+0Ia^z*#r@b2H{~r9Mo+;jS_C3WWoj(yfB{#uNC_8o<;NSggHI3J!Ka7q;M2o)@abVY`1Ei-J_7qp zE6SHQ)$AFR+i+>~JoqP!q(6!XqWD{5=wCw-oM^qRG3^6*{|0q?V(_(^=b5?=`fY}u zPp@e&Kz|*76LlgC=d7m;A2agv_RRTY?-z!ysm!-?A4k!T(x1zh?01UVW=upp6RlZ2 zZ94}cXQG(}^dqd2cn`+VsYz74mw|tRmv0|BMGl^$AwQpwrhUV`Nw80p9Ii`?qSH)K z^smR{Z#T+G6#rlhKi_Rf@uwvnd`CD<&UwhlIq8*h&cmY?eZEE4?-9sv(=)hcAl;f$ z^wNgv)2GQf54Xk8-xNCELE8GvvG~(+&O;h}3~stO-KBI}&T{0J(rtaXj$Y|De_Xu! z4#JIhXH2}+mV7&2-PhSoua;}dpX;k;ejkwY1)x`0^qq3feNS!r9-&hX2eKNTmlM)8 zjU>&%H3R8({@!NMr}-@Acel`~Pq{_kBXsIx)4PRE`IlJyd{@ChIhXpU=6Ao)DaWQC z5<2Bfv-myB&UQZ9`-K;zOZZ)THVECdXU>vu+w%cSj;-fTOO8!{TGr^Sokhp|Y7n%1 zl7DJI#k(juC*5At@QOu0*Wxb$9|QR{ExcwvG4THR{;B!B&f>>&To68D@t+k?agzO5 z)4GVg2P>f+)>NjBP+C0B5#mFyI0O@YRw`oEvvU-k4CPT&?I9QAG7^Og;Szx zhupx+$yV;)6o}snz4OxFgYe4vCNqPy2hy-@lG-AC-oEarT87M=y$*;R7yFR2cBlb9`c!V7Jxvf28@i?eJ`ZrHG@ zxuUwTWm$^7AcxqcDcI^|WYrY=GMc@6hRV2bY;Hy@sTheAF2$+giW~gkw3?^pX93T! 
zAIw&+--+3_W$UH>ek}%S*6naDPq1~@(pzz{`qFIn_N`mFL}^ni))!%?%IuaoM7+=2 zsCb`4KY{l_p^y>EI61PwGC7Vt&Ie+Y6J^ts!=3a9@- z4E|XukcUN&IQ+yI{U4P_d41lE!_Pv3lDH}ADx0T|%$<3PIU}zf9>Q}j)9-%AsCbv( zy}l1%;_YV{%1_E8dmAsTKwbtteoOE^HUGOc|Hz*wS~w;CMDUK8J;k?aL;`U2Uk0Fy zC2QXqKA_ypr}<-Hwm*v3dht)7xn+vT@iLg-$_oUxUn~+X{sv@R6uwLF8V~U-5-xsE z41S;BHD8G5teT6z5s8nAU-wb4_2+Dqi^sZ_NPL|<4A^+KbN#5Ik5f5U&C63L~-X$eCPHW)+RuVdG}aV+PQ%^QgD$XmROd z*}E^O&WHe^}~Ol{RE*W{&k1nid^gH9O~fSc4(M+wJzb1-UPYhmKYXFL zD>1tBropcnS6{o)UhO328muLmiF_(uURIqP@0oRmxHpi*yU8QSJFoK-{WA}FH4Eo> z+saekz(TBdOyZsD^tAV)t_^BHdK&Q#l=Ohcccf={2U>8iyfIVL=gpa44!e8v{CDx` z)A4>8_ma5RLfw(hSruz_QBi=i|hIgwhUl_Z_VyMd@tnVm2;!RwLwR6}9rQX<8>^%$i z9&=)B?_a{+d_Ua6H3%!c8gvIKrXhzk&U!jiGv?%A74K8$a-`++j1%c&+L(`&$-I0n zE-z^p*gttDY((4qH|6X0zw>HL9zmDfS>8b3XxN5!9qZe*W4gENh+lVS(Y6h+H~Go) z+?ewDOhfq?Z~xi2{?6cK3n*g>@`JWH>mF|x*5xI8PmW1`ANFi1@~Ub;zZ`$rCEmvC zV5jV8T|aS7X#K>;?#!>B=!KkrI$4^0j`?yD>jW-d)KiKzJy@^z;uzHVB)=r4`T?I*pS z?Umj(+KTnC#n>F>3g@}SWz`thq_)Fu=n7NRh370+-LUf;W{p(o(P^-izuxjHv^BM8 zbC#m*S<>_C+ZXqsE!rjDbM78oQuN}$h2B7ab`*F{1Mg_GOUa%S#YMYrLA%ronZGJ7 z!a7WU4eHsQ-mbnf&*azO_9DpgYTkgY`>Z^xhMvr~g_36rgFKrW%Cj0P&-x&z-^#Nc zf0wE0LVMkVI#HIGe6$m9+6R$d^lhoP(bn!j9qz)q3AE?Cn18VIi!+h8*lTT|EM5F^ z`LwYQl`cYCiu}C))84K(pzGnWomZA#lG!$9+M*{@wP-s(J;*;e5qIC)+?_6W;8S;h8l*0a(kM>@?rZ{=uvP9`aYmj#fVBa+fuWH9)wC72$Y7hH^;^fgrq_5@g z(PkvPd05ZB0Bh`LVZ4*t%64M3x2px^nS6)Q=awa=9$krie;aFlF&0V9{LS_sFD&-n zx(~XuJ}yh_`10Er4{&YF%!2X5p??@H%fm&xi(V-4o+v2?+4BY;o|+h!Drrij zleZ5Ddkvn~Fdov{mNE+PPe_z3Em>hRM$!foSmIag;jiT3)4W%T$01ktmz3;2 z&3kfG$#U3%)rf9&zwSjKuT&8?EYh}ms^Q%N=9AK-pTPN^6ev7 zhu)22631_&ZF?8(b_w|3MH@C)lsvi^Wxfab{QaV{?;nfyYz@lPiVvh78e4{b3S)>I z?p^Z-sfVVutm$X_p2V6(u1zf~_G&2iKTvkuHEq-xnQPWPmNj_W%I-?^@cV@1U0x5@ z-j${N^^;}e{qG&#x%F?+_9V~`6k&V{KEbk0F*px>5xyC zTc0s&q+Ok;%l~N|PM%rDvHfoTx%!mSMkoU_(Kk>xt}_O0;J?a7e&3$A5p+t~dq7YJ z5946!+5wyIkI0kUYaD~4jhOF`zdLb`+PHI?XIzTzK5^$Fl!L$1y&p`vqtZUVAydO| zRGu4C){ZJv-@x`enHl&w^9XBGul?gp|F~)1uI>0%>YBs@UtT!b+juk9LT*Pnn3VJe z-oUsB`-9Hk;+4;5pOt(T{b;iDrkBqe#mD2_y{~x_%P{ulH!w%AX7jb;$!2Vc`GFMU z$zhFdGL3QyS(6|)hcPJ6DgTxP#-)%IEvL-L;WM3E-ngclWB`OAYRq zsB5>JTo8M|^G6&Dmpw-LjK9;dFVoufnpblKa@c0D9>M-8=AG6X<}+0+fx50(@C4$D0AjPkz^D=k63@fm2` zB3ZLL>FBb%S%-Ffxg2@gfwt|Xh%#)}e6P&Zl%gGKL0M~oUiw_M1mlM7Xj3>owsjp` zl6dj$N#4M*8;a4+lwhoze+KX4^EdSD##-^iV_{d=lWin=s)6LQnVMr53p1^(H#yWR zKG*C(x#f3#I*va!+8Z#>*04Qo$#Edt2eu8&^Ky(G7iPSgao}p$zR_Rnci(wq}4$4v^#=mKdIj8e^ z9BqKn4SfXj6zfuq-jg8jbEr4a%k)*hf^FGXbwh3rw$`)^K;GdcetTd)Upx)l>R94! zd|w5d24mBLax7Ra#{*}c+aLA47xkoFO@DUxZKj>whc+RHvYLXf_2^qCOW(R1cE>n< ze;4%CzV!>x`4;r4y*RRO{R#S3e%r#nbqdB=Wf)trZ+#B(eqJ;s=vzPffyobDEPZPi z?p^Z-lTF`BJxm>h-7wy)pi#UL_h=L`1}+>=u&lD~v#zu5 za~?(Oe_Xv!d+C>`b2p#ok-llLx6D1In0DH~6mv7oM{^JQ4K;6p^@A@!Z>sIf?oSsBh3=GVdQIL(X@s zXV4vZ2E7ky>lkMFzG|NEXOw}V<2Oe%=_{AwM*`E z&W3$L>FB~f0c#ag{1%CQNeeJsDJL3(r!)ub2Sq;Q@Teb+92`()Y}q?2uX z8rQwR>Ri*^X1*eRo=NK!%LKhc?JmB+24r7xH+5pb! z;yG?V>oD8>eMlSo4bx{JuQ=XgKd*bDF`kjgPK#{VDv;gj$fjJD3(BV6lr8N&+K8eT zkGGD(+}dfFs~L?sneMiVbcaE%I43j}_2oCHAD6@SN!0t?Sd1CYhMlGS zwBWl+ZO85&gKybzeJ13v`^Rv?}?pv%HPF6;mKg{*TJ<6p?O5%Yzsyluyele6vs zX3MHf&D&@fSf8Ji`do+hj%nU2-^=X{zL!gd=B0L9-^`sL566VBAwNu=k@n5R1^KMkltq~=XMLmn{C?vie+&RT=94Gm z`~7QBwwIxd*P*OmjWT~#&+*n;%me4=CTTC{50~#>FiX_tuWngK&&^)pg0;Nw%R;*C zLvJLSBu2sno{{nmC9Y7@iaGGL3}4Y)kLaeZ#w=27*|55`b|`_?J>+W;>~j)}(AYBg z@wc_K!RI!Y=(*u4vsYufqctu2ilu$qr|Yk~*~4ENi#*fnYPTu9y0u*xTtOqecI;8D znq}KtTRX%qBNFvH@J%Gwv!SkqOX*E?zG*>&yV@S zxG&;c!Y}#XD9*s`CgFXOvLbm?=DU~&gpVQ_h~38%g#G*pePif z@%vN%)Ku)kG(bn(Bac>wdwE`vPM?RJ<_xJY#CS{382_hGFpbkIgwBQ|3QIdS2+tn! 
zID2eT?@qa&=6wWWb_?wg2@#n6a$oIa(l4}fVh~9CsnDd#m~TDa6q;Ex7MT}*z7c<( zR6fO%Hb-c7+)E>9D;yfr#Sr&h=^WgfVL0#^E3g{paNo495KIoo0OQ!K9QQhfuLJ21 z_#*O~Iy^4CEOP;`Ba?hD3Ey3|%z`pH&XmXF!q;cZi~O!PW%6jY27;{i8Fb3;tMe(0=+rx=OxKaZDi%~W{b*Ke3LdyKI# zY&UdH2Ns6qhJJY=5m#7ufxs0(h2dwWl3r0r#MPG#ohyb4!ySgs)kB4$-q05o5^=TC z#Cu+dUTf9@W~upPPoR1tn~@(bWhI6_HN?Nh(5Hv! zCyjkB4AK9=&{>NM!=D=Z2MUR}y3NqJvaK*IH1vxKiMTq`(91*g=S_NNhUoVgIv+0! zLxZ7TTu8*#OGdu^wU*D)Wb80I#Q(2`|B?{>fT3R+qW__xUlyX@V(4tG3&RHu-Tu1K zzyFTWzp{`SSKl)9xgq)=7OUu>)5U7KT?1|AImyu4)Y( zD`cE7+0ZX=sGj#DqmTU+pwF_y(9FY+5e@+msd0xQPAC*MQKMV0rx4vQr zn+abr^7G{+Ep)#4nP}y^mdNWtpKk3gpmM$s`4g@3qx3&A^n7_cB=R@n8De7aG>A1Y zLZ>gaiB>yfo?mhAJP4d_eWlH@GQ)p@f4cRRztY)~MA`ZKkTcP0-^}wfc)t#NPPf`I z^DGSd2Ms;nj_GsLzaihJ8?QoOLSTL~JZi>77=}^yb?oK3_3>do1bs_EzQm4EH8lyHIGl-ax^d=v`#6 z{MjRCzcKdAw?E2%o#8JJ`1KiQj45x^0{Xpj_6<``^6j~n7`~{U=!NNZCf*AJIlJZT z%Z5K4qF2VyXTttbcKht8X!^t$`i(JkcE3^b_rd;A^ygu>3Esj$f3;vI_$PQv1G+w; z{bCIFaJVL*>xldVkTcE8KR2nIn+*Sz0l(^S56a0jZ%K$=g>l6M@2U{J+vu5p?lZ#) za9wZoTomx@lh`*+yvqXm3^{uj^=q2-oT&OwH~d!w{5q1p+32}AL_fpmSsTzbUB|$$ z=fO>!JQ73yehmFVlivLE?;H{KL6hE0AX@34Hu~3v=)W}lntOOo77ki0O3#BR=TY>T zY=4TT2lA1869!_+;XlUEXCdRFWehLc$Z}Y<$u$nr(rh+(hvBj=6AP6|GGu* zl5ztaaf0fw5)nDgWDaVetBPQN*kwbnve|5fE3uX>XIF-s1bJrhXZ zY0=~KpCUZJt^OJp>z$o0wGZ{N^J|90OZvAgJ!e~VTaNZDv*_J&&cnSHeVaw!Z_#c3=PkO;f6SuW{3Xzv zf%@D0DT{9N+x6P!*ZNC2-|$76-z!9p+F3`U8KJA4mA=%9*S0g(y_i7xcK+%&2KTfQ3{Y?_d7P-u3vWfIaN9J5|7&-#mQH_T>dt>LA(92GU2Fo zgyqE6zf*WvPHg(KLU;LlWB5CPXK?fJutm4^sj~9XE+^w+@@u?BZ}Ouuzo{7dloQ`@!5KE&QpV}&li~8X%DL8}>o|w>>nysCKS}?v zMQ_421L-&Vr{?!wi~bRdzQ>~9V$q+p=(k$*=PmjQi>~c6t3ytQ)|)Tqs9dCUuDsswd7xK(SPXh zYkl~I#eajv|F*+lCHJ&{p+3tk{%O*`s6OlDUPkETzscfX=J0pQy`05=v&FyB;om3s z_E`Knmf)>N9RBYL|FahVCxphs4;}s&ZE_SVg9bWF>QXw9yqq3v6BQE0_nv%PE9Uy^EeuMO`|s?7PVU_(3=HR8p5W$R|B z=8LPC&uvSwLF=mKO`EbU9nI^h#|W{NF3#RnS8W03w41W)TictnceJ*zjg+3>7&Kxf z?1JX4&8s#efa-D>tuRJpyhkvA$&6@Kr2eaS^Iw!`;W`qeB+ir4{-6 zY88tV zbw?vkq)HouSIp(6#@WWzXg;vHX)S=JY!(!1HlrY3Cl%uQRft8y*tOrvIYbRJSb z<(xo(j|xMw_zYmxS_oa6U5y3!UN*aGYh$);Gn$GmE$tnf8`p(*iZ(1On{RJnRqa@n z3w&y?ZQh0+VO?`abAh!`0UI~m4&HTeX{~)k0ccfwGr9#Y`#$VJjaGkCTl1=x)h(-n zD6oTgK$G$kh~rrQ+-g#%S|<*DsnT>!5^x0LN@UE93D-yqof@*--J>d4-s zvO-lW>_Vah8y@@NGzK?Uwa7)zkHOYg1!KBld2tzq#I<`JHzjMv5=#}O-*d`rPuunQz z%U)5 z*MO_cPVIB?yYF?qWtiXGsPJza`DO?Cx#qXBBFyh^Fz^2c4J}+aR9t#0yUx!ud(T(R3CW*} z8`#j^{J!5?(|do0=h+GR3~L=Taz0LuoKxUB3witvj&XRMGqUkpPXVuUJT{*1Cq^27 z6L=YHJl_|M6yIjhg?PTV9Vz~<2s~*c#dir_=Obx<&X`Z@nRQvAabzm4ZS z^+@sEg17bOJnl&GPejDe`QDM@b)Lx9pYy^a#rI15w*IuqNbx%FWaIh%Vx;(2Bz{|e zzPA`D{`Ckv-&c$j|EAz={rMhYq3H({|N{r>uLD2!yLieG@fhxN`H13ysqC?eiwfn@kGUch0Gi4`tmsZ zUyK&K@GIX-k|3SWSGpU2ItKrMWSp)ikHc?7Jk;Nb-g|wFk4QUqbmMxe7<`}LS!V6v#?N{d75`nQ`>1J>08v`o%r_&J{QF7pSba#TN)jI@=TwqXt@0U7nJj;^v{t&*&Zuyp1Sd06@%{;yna`pVqE-=82c@qN!lGkB-|)eGK@ z&%J&_^UuMr5d3YbxEx*lSs+Bkzkh^yuFHwSk3Ywc(2dW%ek}%{8X^Ag5P4MmQv|Qy zHK`fg`2SV&=jSOb|E`EjJ=M012CwM3Oye`2@GaN`^Tz2?R zDZl)#%?@7>{1lsra~FRO`gzh^{QnTVm@$yz;%fzu{XYvrui&K_5AM13VT#(%!M`H- z@&M_dxcH|O@9^m-X2ap9#^itb1-`)H^6x6aJL-EY1b?2RmutUkrT=v8-y!(n?7v0u zuKjlkemMJYib?-I!B;!_yY@c~0~Aw!9KKg#;&;=3k>K6>(-(tx<1a-8q0CAr{(iw< z?$9-UJz~FlgLmy$UFHLa%m2q>>Q7GaGh*UbGWHmcjDM%#-TLdM|9nZmtADrPU4Hj^ z9U4U1!cFHBf^Snr<*0Vk;~S%Vyu;?D(=NroR-E3wJ?8mC@foMVcJ-fx_Rp<*l-(eP zFPCxL7SLAVPw{Hkry$>nf^e^Eo;d_|;rtOXF8*C4C<_0i;M0;|;@PFT_^(2LgLe|~ z%6u>*Z1oQw@oWoR{JAmsEyzl>WVf*K)!5StQ)}FN(qU3%&su2IABB zbMb?qMa93O8aEK^_^098#b1E(6J@`y2>kR2{FMMl#s8$>ZTnpqf&Vu0Eh_#wHDE?~ z(?6K>&%n8>|5<3iqvB6X!?oNhKOcy||1>83Z6ER@-f6}E!3g~Sf*xs{HH|{1k!_h`$zpE`Fu zLjwERW1r`zjo+WadlD}WU|;Y4Y!UW+F2>%^@RxEYe3!!ChwcA>`ysv_PED3 
zJF>?+d_c;c)9@$3J@s=%nS1N&S?Yj0vwrkxeZ^CsaPVf3U=zm$#zhD_? z*fameWhcgV(f4N=d?@r4!QTV+GKVj^wRfWk4PsC89k@3c>7hTw?84%gPuVA;B{Kp4 zB`UM8xO`SJ;Z4jToqvtJ!OOA#I@37|X~Zf`{gLuWpB~hYc4GNK zy-WRf>E6IRlrociYcMD?e~2^{lmVpAlpXRTPvEnF_MZTs1zq%YOMh#*&j$33N^iuz zj1`cK>m zKWR(6T~EW$l-BWm@HvqjLA^c`c7MO>uj`MDeg?RgJG$}|%iU(Fb1f&cT?-SF*EYf* zUG8LQ7t3e&Y2JWk{h`l5RF!Iu!z)AbLFpTvFG zz^|jYZ*0Ii`dh?#P-RM-MrK*5w{aS5ninTUsKg?_RdtmwR zoai0Q!8ZcSz50ggnwUA*3BLz>&%)np@I{Hb#WK+KLGK`sd#B40{?fH?0*oY}cso*LQ1|*tr|NwU}=^;EUtu7s5|pk+<;@ zl*fw`e&4z_F=;G(Ic@v_>is6P6&U85Hbw8FZ`^o4?p=a5`KiR%wbRhH%tl}N$7owL zFR$HJ-oF<8xXD-K=K~Tqd|d8=?6uIxO|)Sd;1I*`c_(@`(w4e>aesuHLhV~$xIrKls^PNYK-$fkfpw6)j)Pc7F=_UU{ z*r^fip|-o%i5*AM=CZ%mwv27nJji9c{*<(jPX+DcN8ZEsklb3 ze*yBGL-|y)Q^p91#8@38xa}zOnPcq){6=aUM?G0DnWw%lpwwgP8x!N@S(FhfH{&x0 zDJzHZz%i6(`fbUf9D0e={gi`zKX?FsUw0Q3?RpBnUoT{z95IG@ntcPuJ;RJ)Z2Lsn zi!|zKw_lS`W;dZPXh1$S;MoK`yBmtUgG{4W;@zmel-#s2U2NxbDAyP_%xgL6kHd1M z@YRjH&A{&)^L`n4)_^CAGR1mFzWY#Lra~9yUpIX9U@j!Z^7Rq?y&q+4vy@x(VO8~z z$@T9>6Uty-Q3i;G@- z=hjgebDoB0Ecju6CjX4ZvWIz$UGVR1`gZydq|SIJKkopZwRrD{jOQCp^X45!n|K7Y z*U^OcV3P7ufmuYK7HT=p5-OUz7+lFq#>SB z`3x}s#>|BK*{RU{ktf z1AfN@e2pC)?f!?1D#lRw@tXM_Zxg8d`A26=uXD7AqkMf23=08X)q@SvH zCSSM22vtP=4}yIj5bcxS0iQvfC*&W)3jSC=NJwW##!$~6=N{+07!1Bne2(CH2L9IZ z27=kQC(>U{%HVeUY~BMrTJqDle1bHDei}c*=L^?pyhH{Bv+g;-!=DjoDf}JdO@wj$ z5g$O*PuB-OO4(69T|lOM-QVnmkPhR_-YNA`FesCO81%s=m{)0zPUCT>OYT!g24Wr- z+CCBr((1?aUAbSv`v{a{biz?-%!k*7uL|e3JsF251(Iv;+&I!8rR9Dd?-%;5F@Bef z4BU$I1wC$LjMF+BDU*9=GbrkRcgJnum&j~Hd>m*fd@ z);ESHmxky+Fm%q^7KZ(X&J-1fuNgXLbqhnMp}Q+vupgA6bH=wYa1ZBvP0Qb#Yv>#% z6^8Q+T^1Txw+4-Td4X@yUo-S-i{hVt&(Lc^^s|h7d68}19Weazg4&}0)X=TTcDeU} zpM_FnJ!|Bou~Ajh?1eEx7R+L!VVZ<3)|3UmT)O zFm#N5op8*=dyYf(yq_34SDO@uLx!%kKY#BVhJJpYAZK?PI^VDqhFyjpE)Sm&I@@bE z6no)vLY`tTTuyv?*ls>OYzLnnwu4U(+rg)Y?cmeHcJS%p^!oI$eSCVFhfw(ceU$hn zDYkq=sF9jb&ZiaS+biusiqSDnw8}}n0A4^pGSMnmD};Ur=+nJ?Ju>|u?y;t`zjec{ z=r5<6w}1W})34%m0Dn>Z=i{BL?HgjB5A&8AJ@f67wnQ(1KHa==^cit!LYav--_9ug zD>#|x<=aCuwgFGUGeYZ^NJMgF|cb&%i$&|1{{hXQPpmuh+Uq@(A8$#llD%@D;~5Qac|o{u;}G- z&NW{)eU8v6=TjEH)*$XNz6f+}8MUS;T_G7c;ojpm|5Asp{MX3|q)E-Sny*G+mlr*ADXOW6)l@>CK3Ir7M4(6|bG%Nq+jiynqU3KkMz4N`e9r%xct@Vr<88%QNG( z%i#)PaP_}U=&t@vLU;Ag3EkCyokQ1hrDaC-RQE{9zfU1gH=ywJH9 zx1GNwxMrZ7yZlr0yETUXq($Fu@jq|TKW@=q7dqp;*`kk=bJ}@>MISHcw6jg`6gu_1 z%Hr4hHBvcgOO9;^EpL?HX2~zH_%~W~jhFnNwCHi^nj$=Iy0m?x99y3%;iuhNE%{o` zT|Kp&kZ$Lr&d-s4izNqZX-pv9uGcD`_WX#&ul0rc+w?l&p`2Qa{}qdF>!Z&ZuAaMr zXJC5ma9_F*f%I+u zsrk*xIq6$0y6Wk+pAQQU%a1*7dDN0)xBuNjciYd$9r`@E-Y-XOC)JKmI&|efBy?BL zXNB(S`Ml7XUR%!>gzoD3l0#o9*FTk`tLI^duKayMclG>*&|N)`3EkDRU+Au$#~r%1 zaoR7qdM1&62Bj;%K4&vtyFEz>zpMWgp}YF0h3@J;dTM#ldZBdX zzru=lv!6usyVRn0SoEZvCvac$iX^Sqq<_NVpJMSZv*>Ayew{@xx9HbebgjRXf15?G zw)pw1ry(OJ&N7C2gTGL~vx7p&;Q}VBi;a{yx)_wA4ExMi#CFgc! 
[... base85 GIT binary patch data omitted ...]

literal 0
HcmV?d00001

diff --git a/ohos/arm64-v8a/lib/libtbbmalloc_proxy_static.a b/ohos/arm64-v8a/lib/libtbbmalloc_proxy_static.a
new file mode 100644
index 0000000000000000000000000000000000000000..9a29c61a6f626d86f6938ae2bc9a5b8aeb89f19f
GIT binary patch
literal 16244

[... base85 GIT binary patch data omitted ...]

literal 0
HcmV?d00001

diff --git a/ohos/arm64-v8a/lib/libtbbmalloc_static.a b/ohos/arm64-v8a/lib/libtbbmalloc_static.a
new file mode 100644
index 0000000000000000000000000000000000000000..fc1ce19c19c8d5f52057c529f8f19fa72bb8524a
GIT binary patch
literal 179614

[... base85 GIT binary patch data omitted ...]
z;IU+k^_8FzM6%mj~*Eqv}k06yi2b8-J-@VL)N*t=Mc zD-ZhNI+Xt=5jSp4-^^nu!=#Qhod)r7l)r>Ln zIOuN1b%|rf$YR*I>!z^a_291`eUa^kuLhr={B`1+_@`$j_kwpk!+1RQWAOeMcwYkE z-*hg{+6UhEf%hLT{PBU!Mq3-}!YuWvz)iNJ4qH#D#s|Nn$~pMy@H zhTd&~jA)&W_jp{MCvl!3*o#fihcxH^VXO)bnHC-MbEP#J7wYJugF^&wu+X z2cCyKDQ`RZzR5YPq7q8;2mkU9{^kD$|I+^(_Ae`?e>sAf`gs0jfC_QCeG5R#jSEwyt6YHpZ5%t4`0zE$|e3GSeHH z@N28-_zmO^{Ay`>@sjK%+36WgOTc4ZTP~5nosHoUe zv|)p#A4ac9yJv4Q{>ud2}PyAElvH!E=11SeZj`+nudF~SM(}8?#gn>~frcDq*}`;qoCjN%Nk3?v*su2A4zJ6w!GCSyR0n4`(GqYFC9U z;|3~U<2@a>hz;}OG5WOWZz^-2#39+!L9oLnSUO+>64s6DNIg<703sQ@BysK}9Fq*v z0UDLh|AL-}d5x4oIG2;&;kbv|W9Atks>dsFSX7&-oJ9V-PClpK2#Kl(x75B}^7&%P zPdZ@xCsA+sJM}T@eMy$_Sd4l{P4%K$+v9fd701qp%n$#AcE9s)lSV&zj>EH}fw=n& zhlh9kho8^q@a*utx@hNcMCeS%EgVkYLWw?};PC0uK-|5=;q%&-^x51%Cvh~~+FntPwY>svZLfe^+biJK_6m5c_FBigz_*Tf0k@8K0k@8K0k@8K z0k@8K0k@8K0k@8K0k@8K0k@8K0Z*rs<@iJHc_9)f_a-gkfu38(HDNjVw?ysmt@D?_ zx6WU9x5MXmb197&63HxLZ`H{BArO;U#C~$W1+c%xJe|za!^J!%ufBk@p1Yd+LV*1v zhGH^{*lkt*XYkB!l-}zg{D^(#{t$3)OR&dJ1ioE4-$lFZ;LpLtEYjqu{G0-q=0%$P zxu1f1*Mi|iEMm_U{sWlO7V&RS0|^KBr*PiMonh<_Va+;hJl zmz!dgQ;v4o)l0?M!MDf2UlpVL`(ofPivd?HqTGaQWE1F=EwAU8;CT{GxdZQ(*K}H; z!MAB}Ww!|b^BR1ghQC9Dzb3DVpU9|ZY*@l6SE3GA-Lk}Q$nYK#*|=@r3m57#PwlYwptzFC8>kkXZb-C&K(5986{1?L4<)+&iU4Is9{OEiteWr4B_-h*eom#!R zd_Jeam48a*==H`)92!SAY4}QiXg<~9lac3Et(^af^1MUiUzg`+HTV(uCv=P_&kBLU zKXrNPaHZ#z+ft7@dL=Ev^=OY8YXnz1L#TcYUtbm8qrsQRYdRg4*Hq5+@|sSkHF&lL zSAHVldo=h}xTk~QIpUIkmTB-@4X*T`@bfhIehoiggFmjp3pDtTGn|QiI>1!J9PrjT&72b}5xZYgl#sg@#|EfH*&)!Mz&% z7aDx22EU-emuc{Hr?{bdmuv798eHi%JytpF3BOdszhA>&slgxC;AI;8Ney1E!GEE_ zS84DI8ho_|XUHt$YZ*65r!~dlWD}LXkpEt@PyW)<(-M*Su%4Tk0`eOrAQ-q@cegwmsBIwp>egE1Eso#l`s^PeD#$VIlEb5%9?W zy#M*!7x6Bp40Q#i+k6dOoxXK#zV`aga6V&Cp~5gf$c-|;%33T7AYnH0pVn_Huh`_t zC4mLT&8hi&P`KZ6qqB2zWvgkl*Q50^PpjE@da`9!PfQG+>=xf$D#@osV4dsNd$N&F z+}FMVW}u*mU$)dYd5Y>cZp6e?-`Uz;D--&buZ{MQY&x(sN2FJ+_1CvFHeu#feNKZp zuhx^V`g~ufB7Q+_NGHh9?*+P_G^*&K==Hb#UbB(J(=wfsk~3lc2As;HVgR3IQ9=N#Mj)H zuUc4V?Lmn0r9aCUDE9vYIj^a!&f<%G{LKIM3iNxH{_m>*zspMaEezKwl>B#@%imto zXPnuuVBEuG!)* z0xg4%&vhD!C)aH8Syl!epX($NPp;YG%e?s@!^NMg=6Z_6lWVs4TWvg@KGzW?o?NrV zeffaOc@8hz1(i&yBCQk_=VUq z!}yt-<&1=gl)Jlk+}+P zd~6R4P5%fVASBKIng87Q699zezvNs$j_kh^4sQJYS@J&%?Sdg;{?Gj9#{XuP^cUIq zV>40Bog4oZ)O%R|`As&4Q`H~8@f{Yw+s02CE}8$_^jE?VgvFl*oI&;l`oDtue^HkB zE384He(B(*e@&MB*A4MwQNML?ia`0-`mi{#mj z-ye25EdB<&Y4cR_d(Osp<=1ktpW>z~>9wixNBA`ZDuN4@dBBu(Mp zykz6AaBztySAKg_?loz0oc@)$cWIZ~_>TH`5-4fXFNL2|F;3Ul$={fpUa!Y{8tL9|8-D-yM=ZO|l#Ru`KB) z?PiyCh{M42X;R(z{2iA59vffhe|SjzafloizstriM;r#GPn~ho|69mCEPjdIl%vzX zAS8a>pz!!rHoh+Zr$XY7&JzDM8@~l{7?}T0+;i< zy6OKISXln|*!WeJBlABNe{TF$S@dVb3_qey|H6>?>2EiJj}AEd-h>tlGX0D2=cc~| z_S59wi65QmN7U(mHYENZ!v6@1KhMV3`d=6le-rF;Sp0^N`0P^M{9g(GAuRqb8^6Ub zhS7g^X>R;4!G8#gf6FXCV!4XXF3pX9vGgB~{F-fiUH(f#;`5cIu=HOFi9bFh{u1dw zY(CSM+5X*#T>r}Y=RQg||CgfvEs0NP{~uus{+1ob$is--*z4_B_*+%8m^-1yV^>t4 zPx*y?yKmZ*NmCOhoc_1_MvTlPnl!gbe12|}$8S0G;C$om${d{2n~U>$`{6uf|GN%y z9`c(*yrw6;=J`Csncw+9^z98epDXU++Z-Drtp|rB(S#cAk9*Ubc#dxr=O7k5<(0;o zz0!u9NK;PtH_i7W7%zphUt_)B#P=SWSFQb-*K}u2Yzxn%-SM@Y)*Y+7^*g>CY27jA ztfKv2E$rs`hMi+F@l!ans{SnG72i$~UI+7&O}!uGwD#WXttX$}??qaB$DCdCnvKsm zJfCv?nCVTEac*NGH^Z;%T(4(cp4W5itO&QG+Ppn%@+SEz+nCc@w%S`?RvBq6`^;HI z|LE}5`NO%RCU5J(bMmjfG|$t`qs|Y@4n5n3J>lm2QusbrG0#^%cs9>yj~yCq&MS}O zn;)}LXR}auGjZ1bjMg5U;oIlC6oY(u^Njeiv5{8FC^j&|dod4N~Gn*qgLy>ile6>x%S(f;Y$b5d^ zM$Vi*fHS^t!kLkz&wVmkN?guG{tjpl;9D;|9~t!!7d(Al@@eR3toM)SQ#Ye1FXgio zuSDQ1_Tsz2kNFz*6u&*uF#q|FDx)1AqxgQ`&U0`c zE6%lT`lFlyThi_Qa2MShJL^=u zRYtaNA&=BCesh8{V_o-tv?KZk{&YKuK+dOXBkpaI=Y9Zf(|K*kLKv~i6#ifl^;VkYd#3f&Thd|nqoOvHb-eagkw)Ht^r@Sx58Mgc# 
z+mO7U=hbgPdU?-Ozf$=x1uJX)Hnh{&1kgs$fvv{b#nJ1Ew|Jv*MmKE*ct-IocKpk) z6qVk6fAqS9p}|?>TOy-eNxY3b2)1urP zt=y^GlzTz1$lceys{Y8?|6AG4#=7|KMMUS0zKm~P6}t~w&NMwSb6UG*B-od z$%r_yrE1v<@ z_nW)Ye^6(JP_LjvQR+}+$dk{`+HLwQT`~-3*LzD@wVgJEEt1t!*^Ou-de%4 zMcDwh|1;@NTEOQ=I&Vxn;-}4Xxi{v-4z;5_viHOX%=at#BvdlSbaq}*F5|0@s`xk>%*n8nI3csP3CamNC zea_w*XvT<*{$sBS#w1&M2|U_m>AHCB!^c0r4QNawQIA=qB3j)kn z{N+r95s+todc=*UVtHf91b8N&V=Z28@JxBTEdB$-C!=p-OYSrD8MzsI0qlgK&(zCg z%cmXr9Gu?KlF?v(WHn#O!L)0{c@Qk}RlOVg2fV`InR+yKj5zp*wSB$=9ESfR;)LOU zl?9Jw;UneayR^lKE1}Nb5Whq~T=!(bqo8AO>2J$|KW=fp`^>pn3C$M&w1dBHaq{O} zk_3@~%ZK)s!R0f}hfO{?rzK&Qy>aOmS)A|qYx->#XMQ-hB%$5j5ZCfNtZ*$)={uNz z&M~J0elxKCHIDIx32wUgS=^OFqs85FC7`zq+ar58LK6LXh_3q+VWTBTdPM5#f3Aeln%gE&;@^R}+^v%uZcFV_=&+7`;@+rwu z?@wi^U)n1MH(hymo9Sx)(q7zhi5y%$4Tig~KU)8vvA9eBqQzZ#zGQJ%Zlki4cUqQm zjk0|B4z=C>VUSr0mogH%Md4gqlTc@G$cJlY61LhK;#>=p&}MIli>wH}rttIaJuin9 zuBK+w3&G^WwJr(c@SB0Sl+geudpCryCCW=ie5Fsnfi!#G$zSZAWFzABF1&{{VbbE- zWs9!DPU#tS%NO2ps<*WJ#>Km|E7#PluD`zK&INq0G3)!mY9jn&zcaadVX9_P<@(y1 zx<%PvTb+W9<2G-8PUfxU`;n)3aZC0}Pnd!iO#4J%Ixq0<)~cm-nfJ9$_Quz8ya8K1 zKCo-MuiU4SXu(%!d*g3ky*fZVcfy3Frh2Z!n|$j)WL_f;e|fkhllPDLhS8~CPYTc5 zBvvP0X!11(xeL!&-vUpVQoU$V#qz6`uexb|4+PiAGaIpn)_R*hA_?IywHV@&ku|Iu6sW*1rc=O*h-d)E3rt-fj z{BJVNq+b5P70RON19c*mpnk-FD#xpk7#!LE_!2i-e%Y#3L ze|xg{oz|WSy&E5M-l6fuUzscs?bVI{{Vee-tY4#TG+DUuug{WyIb&qqDboKMo`;qH zZJWM~GlY$s{){Z~XW1cxjBB#R&(Bi+akfG$B%-~#>60uh{|z?&*i*#+CL)K$f6B&} z@tv@7)1R9q|1EaND&wnc@%PyDt+<8E8)b*EZhG$T`w)>yK@IwZgpF^rk$&d>?n3$} zi%;3;;HLjmJS2_y)j})juMogqg`ZsC0WSBNbeqk;#(3}2_FBx9pTu|f^t+(~j-1t{#$^Sg-e77SG1JkEUxbc4jEG+#&)=Agtvr63f zvygwc?Sd@r%#U!GQ(ih;I&qTOVIlwd`!M`pVS6$rl}wxFKRx|D8G|I#NH}Q{Phu|T zJl4myLaeDJu=d}Hb#bn<>F;&(`Gt7il^ZF38*7!nz#0|5!`+$N58p437V|sY&0fK; zNLL(5mTtznAfL~n} zwQyDK3dXwJJo(RFs6gfi;g3Oow}zP55UZ+Yyfw~H;tXS6oo|7VZmxaqsFg+gUarE= zu(|u~5OhY9GjTQ1eb{NBu#Xi)PJQpaY`m%Ssf|iTL#a_!RP?6auh_*?TWsX;F;(a zZqs2A?!xdpvfyP|aAC+f6um+t`Q@C8#-$!ye4u6I@{v5c^ph36&=8tt@nJ|;C&2I^*D@bQO&AVUYgFFyf%K>^7@+bS6_eAC9CT(T=y0)u3NJ@2dJd^$7xWr=Uuz7cEQ9-{KJ7y)pe_Csu!&{4{H_$=37oW zay}XBY_lRNKmL(7t4=yMGI0Xt$tp7mNt}6CtyRU~=o$lYyrFK!*=8i(cR0?0xcl1q z?_w-o3_XD=C=2Qw{uetS5BS}Hlf@UiCG>9mf5C&W_|jg*9%hTrxMA@R+X!xY z?(cj=rVYzjU_as!8(-|3{h9U!<@dd~Iaz$>O$YZk=hlf$)I_+K^k+M`#Fy(-JmwqIe#E<96(lX{MUem#c#1-%9exisUvRu)mh@>JMSjw^uxw79iVb` zj}dRN@j2I^gY(n7bn-+QMwsiV#uC$};v_XgdHQ3C?(;LLCQW9N-amQ&X!M5lunm8t zpe^|mdp`r$KezYP;UC!ht8x9Iyj5Tz_Tn+4uef{vd0!{||crOA8|2%Ff|C zMsiGW;;wzz8hdeRA5OLr6jMv1y zal3P(TRuM;d(7YRS`RM7{#NWkjP?3^iLnRH>_I$}y?*3#zTL~ny?!6qJ@%9j^8(rS zFh2*dXBw5*gYTvEH01a)>B4?+=x192u-NEiX)U%+k-h+X_u9tzd(`lLTaUX}mHX7r zWDgzpB|fC~skLA%h_d&TBM(FRdp$q=hS$ygXq5fI;mAjNuR8a8UyVAt3UxIbbv6ro z1826P4x3oltltmM8`bUZ5quBz#=2tN6(Y}J3Ik33@jv!@gL-G>!IKaM5W$Gn`Tj-2QVEN8UP>-i3JxByq~9gQZu zo*nmk7?%%2{DR^)QGeXm%CZvEd-gs&dsa8)@B#K`lBOH`ox|jD46>lU4o&!VTh`C( z`NZ}hQWmyuXfF3gH?KnBmy4!ub z*R(HZ;L&F6t*p-(x~0?Z>E4d?u?|oi@9k}Cd^fqC`&1h*g1$rksU=goH;qC29_{sP z#&r|kGr0!!SAL<_Q+Bu4dMD)Hh<)cfzUuAW@ouD)*p83-w>B1|PQDgt?OL+7yW{`# z?ryl;pUe1X$eVnYkS}=s@h14pulO41XbSSAjM>iF?$->2enR$?H++uxJD5Y-G9G*PVLKYfLq1n}tDx5vl(Xpd ztNpy)H-R<`7ZA_f}A@*WfpwU5o3+{@&3>yt6^Nr`Uc`C)62t zPjt`OvrSu%d64VaWKUTna_C8vMb zDn)x7vXk?JuuXCHAGEiuyDHm0NY`N6ZHCT$Zz4MQiTSa7KOaTNOB{I^g8r6vyB7UM zO6`+AQ~ghY&l=VLJOF)NfOi?*r>s#1-_9BzV*6@5!#?fe1$gfWer23AjbpqfwvmRZ zGmKAB1)0(wlTO~3XhxmMI}>4c4KgmKe8dKSQudej59?fQ=o5-pBTu%yq95_g$LGI7 zzAJ3`NXNdn-RRs*^tCh4*G_L0o0X+3rf>bE%hHIv7DR>|YR7vqDcnQ1+W4MNtiS)h zkI`+|C+1({;Ylc+##2 zPqg(m>EqZom>;~CQCxrT^q!%Sp~Y)+2HL#De`k5wpUzACkj_gzeVGcsufR-lT>9l> zxx;tf4Be~RJiX`PG4N$ztD?1QyQ9!GvFCioy3l24$%z)5alqL-Muyd66n+bR>AVy0 
zolgviz5p4cKbcH>MtcZj)W$W5;=$*X7QqhT{kq+3uY4Zu=iL~+J9h#0uue2-a72snj0o zg&7;5ybqxc9)|q8j4lj2^el9t*Xjc1664f`(YVrgrcQj^cThe+JqT~sgYce;v>Wqg z?v0`^Y0S&r+X0#HxP4$L$3~6!MtU$li&M@w{;kFv8_-J zg}Fnv#Ew_JQqT{dn2C6MV=-@i=UHCGYS_(??1e+ZK;Ah4xkK78|`s@Eygub z(8NF+pXBun#XCxmpXfz<8IE%LbY0N7qES|_bd31)#%UZlOIHyq;dN^@yA&= zpHv^0!q|kiuO8zP-~XP0GEGOBes0Fm1;@SCf`R_KcG@SPE%eZ)Qbw0OV@ozFyT*54 zslz+$yRSRa?<>Xw?@?`6@4i-{&V(m*Sa>F(!>B`}!}#{)-e>|o(d~FY2|AoY{;O|K zmSP?yJ{sv(1J`SkVef|xL*7{i%GghDSfC8_BUpxgwha5yWf&SL!){fEwdpbpGxku* zfHq^wP;ig8GJ^34+hKHc@NX!vg3<7Ckv_``|DkO-?C53idwe@O)acRs&{@n6wN;_7 z*+X6^Q?&L@V@I2Zni;y8~b&s~G@}8HgleBS_{UR%A z)4qfLwj6dd2D_QJbW}dR$v3eK^E7iJpPKl7?v{~7qrJf>$I57=Vg9fQn?_!Wv@81! zS~-L5mhmwEP!#c+^6oxsLD3hM>?sD_fI$uOuZhI2U2@?sM_wHBZX9so3nOb|u>s|g z=zw=}^RAt8qj%%5NM0V+J>r#K*~)U{p|W4`%G<%SvfrqcQH%v+^X{3S*V!n{mMa?|Hmi z`U`*Uy$kQf(k2(;9l&Vu*7;Si-7;45{aoMXC7^$7NAI0I!;C3p-mMX7b3DLwF-vFC zqWyB`&|KY_6NP>*cw+vwqmp|WKk2n!$+B!kf0-9;Fy$Es96EGV$}}gk>Cq+qBMa_` zaQb(kNz0Vuo_nsyL%9Z^Trrf9=}Wm9@m^&;`>=6RHhGVmV^aS zs0Rh9!tUrzPs}e!6?Ye;F77sUluC8SFxSX2lNncF%rpe^2BbZJd`umY#6FNAJIjve zKz+RJ2hiWHgnu+&Jqj#yYdIWPRkJ{#b9Uzl}=@yEiT=>Q3SO0?K&f64aq1W2V=FakJ<()7XK! zkn!fG{OLVxgQgu|uDlksn-J%DTIeMIrrx#Q(e|^wieuN0 zc>dNUhTM_p1L*JiKI#X#7wmit`uxwJ>A+mo2KW(Oe3u`7 z>l>K&d;@*q8|Vk$z?@tM`nWeR@7Z|AdNU{2Zha4&5rw&*48NaX-V^&^;#KLi#E%)a z^H;y=ZDV^GhB?5ykXCGEa{cp2Zy(ayhqO2c*n+h8A-xu)w-4#1Y&>|xZvT~@w}#y? z?Fs!)m_6W_f$ixH>uS4Fz@R=K77iwwqpw~R#j%XCdvxyOVL+kc)9;emDmB*m**|q|?ASM$+J%?@air zoc~4N*=OFDbH2IA&&lR~H;qRh1bfW6c0I?=d$&(@>w*10`jGB}xuc6&4^tw^`(e-0 z^W8WnCOzMMrsZx#xrHakZ^Bc`U2Dr-tLD2;Rc_MO+H$8*ZnvD#BhyQxd03k)^ydQT zOFw&-KVJ|%f^{^kr4`;Cy&!h%59ebo?T)U8cZ`gJCKdH6?&5k5wEQOsFfTi&<1ksQ5MO&BMQ72V-Vig;#m>KA=r8lpM!gj)nXWX z^v=g37*702O#45!S$!r+BiigHwAB>a=mTi0Y^QoWD!z%HgNUMh z%pd1f8?dHSdy&7Ulo|tDg|cr!?ZcxQ*yv zHllCI$vOEtMhfdnjGwY=N-3-<={X15=hP|IIGBHvchsfW(IV6#>ylTg%QDm(X$w_d z76B9c%esu%x)dAC^<%aJjIYd^S9C*vGoB2qWB7|ZW9LlorcI#k(cV>J%{gIgQEX=s z^dokHb|UZ4X!wAU69YQ7U~ct8^q0k0+pCA2#r)3EcGSUxsKY|oOxV6pvD^2c?TX#z8sM4EX|?2p_XV_t zVi##sv`zesk$w0YAn9luXgBYGUF6y;?IPD+n~~>KF?0oOo^w9f`x&o=uJXP%C-y=S z<|S&OtF%GX;i4D2O6IcCLka4x1k|^IV?aACx<5 z1DMY-{XwPY_Xm8QhyEaj{@@z)2XnqqyQdiaK@@XXQOsRMZGXV|st%N+()0TRnXh79 zMD6?(Y_*x6;+#|pIvd3t7TaXG?GK`ykD6Dw<$3B9&MG^GcPgTT;#-D~_S@Y-v^#x{ zALlU7#P{q1pZAqd^(xX^1D$Fa!?BjXX9e$V6nEsn@3rF)<`e6nJE@z*NT+SM%qfQ1 zpmNx&gS|Vjr-OT`Xw%V8=QfUn-mK)>N*CmYI`6Ya^MS300^2Y1`N`^Gn5~Bb+b@(~T3U+wnuAq1)Um@p1K!uaC+B&4sUyF_c=V00c}Ek2y^5~fWY5UK!_3-IImV-`lN7rxJPCfwEk1oMTPc45T{>?FNf4aWl4*u_pGIaGbg0YZ!CFFGxWa zG0eZyZ-~~`c0)I?X2ksf{#cXtrVC~J32YhG6o<=rl>K8j#?kNN?}GDYVV}te_}^dZ z-lTkPlr!yfpXnZxM&wU;(&rYQtxo^as(kK~^)IB8{^k3&e`&G(OFQlO`t+<$LMgNRv^yiYumy8o1 z{HW@cji_7ttyl}{*@*cY`n#mCYYXJzG=uk-7DgM4br_ z(X-~$sGo0#)X#cVKa>^a(eh#M?w_J9u&!9nf5d&5zkoKpO?-K*kA9+Kjuh-DeTxG4 z*D180cGMsDE5^Q6*}Vk5c2Ok0g=3@X=(j2NZ2cwcBZhHd8RmJbL0b!ZVAd5*F>b{f z%9!`u5lz=K+AQmsbE0gcVfD=V)qS?q8T)SB$0d&JkC(mm^oM;~vxX6O?JCV&Bqhv+zk{Jm(f;q%w>*QmC^; zLuEH*M4z)3zsp7>-$k16(;_?Jdt$yOaxL1qiBlZw7&+&=(F^j>_OXVt67xup;;i%+ zqLIi89@Z4HjXlb}+$}i6a;NuXq>1lM%RY6MufQ8}=ys&XxqhyvcZ2SI$e!?hU|!$f-$Fd`5%xUi4BHa>lSmWv_=U?N(amenhM$KX{xWCC z3v-Y+oUyz)8X5jV3(or8krOfRPekv&>J_euNAFJFAC2vQv~|YJy%@I+W;x4I&f#B* zKh`=UcQ4bMIldtgf`*zy(Wc@F!M2-fCMSA0GPXPT1-_FnWX8Hs%?k@yR+ z>o=zIaL(>XZ!gClu(g3cmT}ld(J%G%&@SHaKQYhb?+0L8tVnt@Q#1Oz^2AgF?F@MCDId*E8pM-Bb4S(qKjEySrMjZOgP;)G)vEMjvcV`0gOypM;$Nbg6 z#`)YI692`j`{_?7U{j(ku*vh1dubmVVOKW6t~6M?(me*>#>n+{Z-+i?L_M%RbX|yz z;j<*3u|0J`&)o7h48q(m@*hFIafLtKyo!0EuibW@*Omhu-v?Ob`<}iZ?%xlCo%P3r z4R(((#sxiZAe|kQA_gYyOctZ~*^DvM4SF7%C z!TcQOL^uagg}AK~v1b+Sn|XP1sxNo&n(WWV__Dvr*b&ap_`h$&Z}MS(wFx|Sj7+{e 
zoxF15fqsgsov0c_-x zkU#u{Z65684C_BMqkRyEjE$d6SyNxCkpHEqN4^)>a{<3;6MQdFzG*WIaj|#4iF<*= z$~_6Z&ScEo0r|b6=H%i?rxAIjo^u{KaW3==d1X8P5IEPUhhToA6YmrDzTvfYA%3Tg zpTanY^MYqG&LJQDjgR-7b!kZ-a_;Hu*D(J*XyF1t;Lo zR%3n^`?*M4b(S{;b%?WMu#f8`d(=2CC4FZmpEyX<33kG%PuZ5-EaDCESECjDnU}DB5Oa-foHOS0 zeXzUmlZt73o2~6_gY9iZ89I#ZjVs$5KZJL6E81Xt8tXOchGV~EJKh^WA8>R* zva35ySUwfgB5SVJrC@^YBT;?aV#jv>}ug z^W|ty|R?D z_6jj?2f-g@=#g}*u$IhxHQ*}mWRN%WNdC-20(-iAU(2`gNvwe}JwA&iK7N`gn@O`1 zSEj@A-3Qwe^H7FMydJYA^nMcWX6C?Gr+<`p=-{MHkH#<$7sLD<$G4frz0cf_hBC4b*vaovF@4VeF%NT)#;<{ejm;T4a^6b5Hyta?)r#=8%nNqeAHnE~s`Ki@?{8X-)_i~U$T#^vVA1x_pK)>xYywj z+Dv^;Wq0F67$0C>YZ|^AM*eRhO`SL68(29;d#AsG^DYiW@7eJv*C_6V?S5#Chxw)< zJHLT+9~lGR3-e)7wAp6FdHdeRM>*HZ`L(y~URUO`>O61v@ZZo@aV+NVfhFF2F6t9` z$eHxequP$WkGA&;{?=nZuGGO=)ImquPx14@x-RgBnK9y;k3B~8ZKmJ$GUraP?X{@3 z$9kC#8MWpFWvs`Dt4}dTtaaMn5apk+?fnq`$wv4mYoMDV_z~FVr1zC4yjw6jn^rPD z>Rt3{%)1<&?vMA4z2`izJI@}4FA&A}fWEcN|8k6Hbj!{aByzo}`*H>y?Lu9_k2udQ-uz*Fv-{ooCLiaBd!S@vy$IDn!oJ@Wo`EXf)~~TOTQXU|9e7CTyiV z|L;E1Z>O!81NbDqP1k7Wa=3R(_JQhl`bqSWVY)=WisSJGu-DX|2(JI_8~x|*>-=5S zJLh4ve-U97i_RVg5}ocf(f4=Pz^XCEp`SL{$=@3I2Q6v;z?nnF zypiOO{Rr0?iZJ)aIi+Ugy$$o^EN4}&_aiB16yrDUW&0VPGmixx*0*raym_NB9|gNh zoMSEa3(sRt31hsy=ll3c%iD%MS7AP53Vj*#5d{zJ7kZGP_=Q|6l|D%Jo}7;V$G(U1 zphj#yXDDK(Ma$i9U))-^>u5jx|{vOrB{@p)*0P9Zjt{eNO zwe~&9Lg?8iHOJinUy1s{dCUEh*IEOawVs`>#`8>>pLyyc@ z-NXJ9mXSPI-cNcD1=86T#rux9!q3AR5Z+78!~258xX<(Qws4#l$2+N&mZoZ$ue6EImy@GbL4(sV&uD{k$h4~Ya?fHmThv%-Smz0ns4}f zT<@VQ{@lGUW9FsG`qqA)z?16(D|7ZR|C~z~dmwu?T$`DW=XpnCnA4dQX@D<`c`nS6 z$g?+#KG&dCYA$hOMLq7%9b z{>J{xek;antlvfi=tr(Ep@R-yIw$VNVms z%92;U$Lh`vwL>R3H-xqh5qR5WZm84Dqx*9~Za)n=jFGZG`_G;;`mg%F3w4n8opR+E z)?MqYMm^CV(QBRUC(gc%xRyEM8-11WPMAI^(*T_BVz9kL z`_mr84}Avr^q&u$?e#nhJ!t${<+?_k6-OPaMIPXrv~geDo6w<6_g3zSns0u`j9sGc zw-5C7scKbnZ|a@odhg1rAF`|mu-?Sy9(Yz`{={!bnfXnYBZ2Q#P)}qn{r?Yon5X70 zn=v2v`0>XU2ip?YWbA@!{z&VihXF1`uBxC&`e2k(UrivDt*ne*P% z5k8wd*xOyBY;|S2U%1!w3(-^TX@S2qS#;j*7qE7NIg?Dku>Txy_cch%9WS2DUh-WC z*Ip*jW@s<*vQ{5^>7C6zADOvxt}ja7+&&2T4)24a-tN(er}>nT&nMjH9K+fU#zb-U zIorA3W9lc<4|0t0pKa6gjQ((sd*5{v87Ho>>!_WWm&V$0=)Tu8eGj$~erTUFkL@}t z<-<9UAKJM}?t^9>hqX1>zN~F6+2h-IX&03DFS7LWoabnO|EqP$-{Xh(ui*drxY+k0 zhsx9TeaNA=;G3+4eWz_tS=)XmZ2Q5qZ69uIdu_nB4?hHbuDBCp=}y@8E@j&byy1s1 z-(T@6&d(M-ZO$qC75y0;2Df`2_0{%6@CmJ-xBa*F^Eyy2Y0pgSrn9}>Q&d`R8*2|~ zV~qD8-Yx$u;>BTGME?F-LB=*2{%8lEBL6dJdqd6MxS@Ua#?jujVE(!l^VrSs)mY~b zz~38+wFS04zN3eRXWAa`Gd^hF_w_{o*QzxMnJefpK40Ix6P?&QQIC3TRDEp&^x@=t zCw#lfd~`xD*#^Z|CPtr>{A68F)+Vr?6IlNVu1#d$J8{yr36za#KZCR%uxk_z7{_3~ z#kBoTYVI>-*CWrs`_8Cw4dm@@c##1}& zcq(eznZF*)cJMy*jCL}q$UDvOecLgo#<@W` zbB*)R;A_W_Iap`(_i}^Q?6u$=HReHjIEHROy>QM;r|}B-wWEKapA@zZbpUB{{3~hT zy1gB?;Q{dCeHWf_eqadZ`E~n$Rp<;g%8i0;&bL#u(0vVG4O2239iW-9oyge zV;uN+_IIZL>j1q!z8P|8A@0w(|AVZu^?y8T^i?H0zyBBt-`3avB<3Kv$56{9D8r$U zp-DcE&N^B zee(V$_=UBH|DNCZuYaD2s|UZU`5nCm){c2O+CG`*0zm-d3b&gf(h`#^U-xY-%ku$;j;t08L6YKWP$ zs#kMXGOppeTWfk;Gc?P%W~f4#Tw-T>*k`?FGzG3(?V$VacWH9N9@6VFda=6q*eWd17d!)%M&kFxKTKK69v z&p!1nJ7*~JKizRGe)eanAFeCI7bwP=>?^rWJ`U}+3qCvF4Z~b*1lbwQZp8PAF3xE^G%2^0>5)F- z$20HZ^rEd1hmN)l14bmQFF4~IQGlD3(?9@FSTJH(nqobPKN9N>>rnU`OlgEg^8Y$xjPDCkVS zK1BV!lk4XV?K+(|=Br2Lt>EnJc`KE?<@U{65Arr*_#=JuhCSs4|0{B|3(ssGiw~ZM zGYY=vmEQRs=+~=jU*Y^*2jt)S^;^5SuOb4OoVe=-)Y0{*tLtzk&fM0XTlua|zw|kp zy+6;v`J!jzc@%Up)MXyd7tLvscgNTsDTC1{cgxpDnYkMe<6B)1{1yxA;f*|$72|_W ztTFeg2lV6X@P5&7loxstug=5w3_wGfOI`eG*h3?SV;#9V5BpHmvky^TKBEnidgA*? 
ztzW;*l<@%iiW7HTgS^i{8Ol+{t8q?f<~wV8>=`TZb+ZiL+sHeDGZgSX{WW;Ewi4$k zzkYo=^A=XPC#d+JXf*5n4@e$Iv8f2;`a4s)$5 z$$t8xlb>0Lvmbio+y|cbkdm_su}4A9DlEiag+#4?-YaDx-&_)T=ram=_KP0JLtb8z z|C!M62hT!49(;xG_bn&X?%j#>EEizyAU9o(5-RLV9)hw=FQ&)do&C+@J z?X6l`clqUfQdzqQ-vh2)yuPp~v9NmCXX_KU*Cf`g##h@HFJF~dy|B8j`u4h-O8;xZ zpsA@}m{?V_8mv~2_bO)0sJw3a+_^W*th{N#?8@?+uAW^vZ~E1<7kK4szEZQI`YSc7 zYrk59Z@Smjtx4Q5!=UqsR#q&pudeeSc$cnNwS3{FOKXMlUP=>wgy{LE+zFHHLI4@)LpuC>816ns+X-^ zQN0R#CQ1CCFp$oEaISo0i?NZ26tbLa1hC7$ve&RZO3M^=v6wR;rmdTzB2{>n+0& zDyj>0eAet4H(ed;BR+XFeZhj+^DFv3n{`9woQjId>u;!-bJZ7OYnGuI)-J1Al;?RA zU!ri*)Cr{%(DEpT1oU`y?eb-b((xtZOA>{3b!(Sig0``wW?_9|;lz@Ki>55DxqZ^4 zg{6xYPn%XcVcO!wHKo-hixyutb#iIx?P%cDlcyFX3U9Aw^97%|b3cEb&#VZVnE4)i zz+atx{x{!2H%zmgus!k26jJe-)OVj%ipqna(;(!*VlFep@ZJhkzqL*3HxQ`4ypMLfwESfxrEh zu8MRaawc83r6D~7^F9y`8N*iomrfsVisNUYeOAI}N|zJ(ImSvdwAw5F^f6rO$83F;F9c0=q#)l>(Snh1?&DJ4hjd$iv7{R;*c>1!Y z9Fvxgdcu&vUxmdQ88?eO)>)b^kc82Y_v4nPoO}=x_}gW%rwD|!bL!E4OY)1JbXywJ=1!wc7t*Og=;VJPb;>f$wKTzYKhR_T z>+Ex^b%pB`Y-<9Kwp+T2EcvHQ_gET{53v_5W~;r_Thw;1F&T;)dT?HXH`lTNZwl*<4E}vL;q#v3|H`pq;3<*z2cZ zixQxJ%F<5`O@ENdV~3?_U|GWKmF>5WSh_-xF(mNUj}AFPNua(m{c@qDnVTgK3B;dk zX}Yu28+BuarFjJR>3XBzgP(l9%|17E@8}Nmyvsg6%zT8FGvhbCZfTEzw67les80HPwptp@m4xV-PqWL?tjkhY zY#aM6%_63lZeM;se&i(224eD!p@siBeSD#PHs~bzpDQ$3bi~xZrFo5Mr1Pfg-#*7W zfm3G#jjVUs=goYcMb`T*jj7WR8p^2K($wKzmz8!QZX57ROe54+@a@7CmaY?Ip}Gi| zWnN_Ic9CDY%+&bo;WNx1Ncf)UNI1v$LAZ| zDsu+*78#r)$i88q!7(fDgr6q}40BLO(%{1bc(K9RlKO^!GV(dUFNj}F2FGbJPAE6{ z1rCZiB!e3Z-RIeljeM}v&HHWOaQ;n;IuM*!wiF8*caq4gI^TDe_)FA*#Q2{1fg>R_>Cs(g#o-L zL8vHz?=pCC0IxFm#Q}Vf!N&#gzc%^5B!GX-;Nt`M6$Zx+Iw$tN^~l;Ijkx6NV2x^1cDzghaTiFNj~iP7u19|Mdyi zC-`$tAB;afGW@Rz;JXceZ2+$`_}lc$X>H^#T0b2EQSI-(>g<4&bGxyz>J1 zfdru&`Cp&#NP<7-_rdt1Ji-4L@V`Ex+oW632jh=FH~38fe6hhlAHc^N{N@1urYY|& z0sH}je<6SmF?{j^_#`8TF9z^`P7t~^fZuQM+XDCugMTT2ziZO{asY2Ncx3>uGI&)0 z|FprY1NiHvytfDNZy9`H0DsQlLjw3EMh<4A(C68!3I43*J%d;6Z=brw{tWJ|6Ukef+{K{4dIa2mN86Phl4Npx^7$7iXcrI14^5 z3m)`)ef~ke*T*l-!YAnW`t(7+*T*Mj;WH@<9`tX0K2x&L2mM>0KIq^2c+kJ~@zN~( zFVBL1E(;#?V}1TtW}*LsEO=QKJedWbo&^v3u_Vm(f_|)z2mM$d5Bjk_eibi)kQ?H! 
zk|@_J577E}(2w=;YqHR1`ZZ0E>jnK-pHI+__3@w|>*JYztuOzezv|Nm{Z$_i`l~*k z>970p5BjSpFaKCEcAC~!PjNM*Jr`Mng!4Jftny!`FnlwjDOe{&-lZF=fPFV_mc9{ zELHHwV{onVy^Nll^%>x|!et!eW#raLVuXLdU%r=VFJ`?5*w7KgGj;>+t_lCcq?@r< zVln%JAm7W_lYW-|&EdqAUCQwv{8@~6#-5lpH9TU9`Cg`7KVtbjWcXz4p;=SJv-|T% zpRqIIVZ3GdXY7etF9UrIA@3p4BtD8 zc&0xRx&08Xrs_-~=NVfneEe>i>#DkzNp zb6NQOJrp}1ADoczw!Qf`41w}dG6`Gl&8^VSF#dNy&iP)(4==Iw_0W@i<^P*KMtJsp z(j zhYNiU@;}&HV`KaKiTUG8#_mDcZ4mnJLiB^ZdV$UUWB$0=;7ilE6|?sm?ATznzA5;# zMt?3&(~q-v?;9LKvLR`&-!yn;Ty~qqe{J-(#Qw5-7xC+M)2{K*2D9%H_m`UXm>CZ~ zWuG4~<&|7`UZc$@-gAOpO%XO}%l@Z8#&~sUe3zv!H1#qyjX!Sher)VsB8^vB`oYEy zOibeS`v`uJ6E zF#VqoQy7MC zf(Ee;7Uv@)?uf&4>L`R-TW7Pd+qV!V!DJ{Z#pC9WVjD z3JNxHz(xAwOaH{12!|82;-l z`tSn~htWTtg@1b%`hU&R@BC$!bU_}PF2xs?pMS{0zc)*H{~B@&zk?H+tYZ9z1w8yYty(`#u|gC(zw`}KQ(+ZefU{V7i5_?a2%}2&V zZhkrxy_Q>2@n5C*S6G~SNL~`OJaxJo6up+i3X8ktdR*aOQ2g5!uJhBO@GmNQogZDU zM-;v0pN(GEqvqeN_-OvRy$n^n`3fK9GD?an>JxRr` zGe5mHE-%k1`g0WhVTJ2@?`QilVpl1883z#8`Jbogb$Mleg7mtbR@uA}*X7EFXKRo9 z6(5~$x59NkZ?pIir2D_^Jug-Eo_1d23H)YYKD9j@>%)QmVw%O>{zBV_)s`nob-6B8 z_&Upn>3&|}((f?;jh2R&5%%6qca+7+N2hy*#a%vP2V6cnpPJ8B#b4+D5rymgiyfft z@2ASuZ0XrwX#B9^qxD2=IP?D>mWCIJ%lvCx%TwmkNU!rRHkSAv{)a%h>Hb&Sft&G6 z!i)CCm8a;ZE6>*~y<0DFi@Wj}VR2VJ^@@*{kItvo|3aH*rmM?4S>d|8RSLhw-ZRM( zmF}$y-=J`vpEirLe;J_Y@5@5}lESq;A8X?>|8h^#l)`m+r7WahU};FdP0?#UuPI#f zd0XL{kF+n=*XJw^`HOvEeQ8|UEc2t~)67T+#MdZ3Px-J%{@Ra{eu4Zo{vp8`|A^(|=BJ;^kCx{sg=_iWr*O@Gn({X^F8O!U-LCSZ)75rf zryE!F+K-a@rQF!R46<~E_DZ>FyhQQQewxUi`PBZH*m?3lXyfuCc7}P>c)8-Q^LdNK z-F!;AZa%f#bUwE!{yLw}DE{x*dtTb@J^5>VAbvBr^&>`yfwwK&t2u^}O;7nbW{{7KO5 zPTMiDS1j)#5U;lp#ICZu8h_kAC;w%N|5FwhK9lUT7K;lX!F9j(hlOEeTrW5nQUn&A3c5_Xz9u0Ibnk92zyWc z)Od--$>+GD7deoR)~j0-AC0FJpTAaoR@i&$oA%S{E$;en4T@g-fg%UeYkaGvXa3jV zPr@S#U#akBi!(2mEBq;olfTB>El&PgPhPY*<-_(a;U#-R9nrYh6Y|meGYG#Kh->{3 zJHYzVa=t~;YdKe0+|7@SBi#I~P<&qSvta(;s&Jv@Fzj+Fx!#FJxk-}XN1DnC_ba?J^AbQF8we0Yh3zY((_M3p}i-) z#`&9p`T3^3=kF`*z39&)R)3NfmwFdmbd~gtWMIQQMX&Ki7H7Jv?LDK^*?UP>^h(;G zq$_xX;-lxOboyX?xH7A0}YKE{jvI zG%o%s`Fz&WFv>Ibo_y#JOtHV8vscPN;~f@v_4Y-JlfU*K#9j&i>6Y#_dlmkIcPc*L zu=mX8F?;XwIi7_NUM4bu`O$o2+~?+ho26&H+-G@`!FG$YUNruQ#hIU@ppz(%Tb$+6 z_)`{Ve(Ho6@pdUbl=IE@S)1ac@pi>$sp9jZ!$<1iC5MmT@_rTbqxrn;@R@1T=yLc7 ze$3)7A8a-@f%qLNpC!sZ>vBz2_F3ae#Yg8;_ZOs>a>;tK=#}8NWZ|Rx3-Y z8efrx&jy9-@@`eQo(CJ{i^teQ?m3ZAuJC~hU!m|d3fKLP?%%}*QvS0PeY4`D`}cZF zLweo6i(R6g>wf$xOYipY+Ft4Y{W(i7e8jGb-C%u@zu+%fdge#>@2_X!BX+~(^S0u{ zv?cVjH}wB;=6xU#e?g&D_MZ9C<dHHwSIOe zT5enD(Fju9k%cb-Acb0~iaVkGJzc?LMDExed zH!57q^C^WdSM*x{b$+@Py_U1?@3fo;sq$($Cls#btnIm$b2h(S{Bzcemb2JZ;#$sP z_o#1Lp3?3~tNrA-m8{!emsp(q^|(yNxo-PySM*x`;v*Pb-5<1baj16c}c71ALd#5 zp$;x~mGp-!4KG#pUT}E>VUdGRvcFUIO8%Npor4RX6%HC^-6F$=JgkJdQcBkbt zN~L?)-t&^M_wM+s(BjO$rZ2HL)7AKG7N`7g#h-*0h3odK^Ep7#AGh>UUosCPzJcq% zMQvQBtL7g4S&5?8xX7RRU!(ZX zvh>7t|2S9izg*GJQ~Whvr}zw3^cyVh>Q#fqT{&!1e6-*7h~lH$#p4!t>+vbYNBd*D z6t4RPsTa!O5|#h1Ea@K3l5V%+&pb&OXYa}X9EB$>0dd_ArWCI0YlXsfeTm%YA8LQ* zX-iLB`!iw>nXYa}d+?h<+KbrpXDsg8^LC4qkG4OrJA4NDY`wQFF7+jNm*S)SB^h@} zeuTcCwPP-yffi?Z8Arkhd*kZiD2uy#I8M=1-&WY)CH5-m3f^FGw;n}5UA$A#>-rUa zBfYL)={Mc_9c#%m>0d;iQom!Z+}!#tRQ-jn-&qzHK2k5T?l1BYe4gT? 
z>vuyIJ`EOk`7~PGt>3X$o{}H?v(I{pwi}a`{Pnzq*hAM2ms@(*4p&*+wZn@Py|%;J zPU?DJXX)K~Z&3U%SNeRP;;->$i<6JG!}}Gk?aXn7>->msEY1+<=vUzjcSHj|M z`zon!f3yTRgax{ZntpGkN| z;o8pcSGewXUQ+Esw^Pr~>q>s4o%XZ1+fJiu9$2?icl}h_qs+HRxdfN_bIMJ((<_|( z2z}DxE}vN{KP-a;vA4vu|C6u;uAWFc6*;W1bkpor(iL3V1@o`^A8&enh3aM#(|9J8kEs-7!D9U5ouEe~pX%XSsBHpKQ}|<;?gDuAIv) zPCmLl&QpA}{j9LK$lvFh@pZ{Zx5pJ(_|#|NbDzb@N89Je6|UQFi^WAxBtK7E+|B16 z#fQ%%99Fn)zv6E&Kf3)&e@9&FTfJ)6y56O~BfYNot%_dT)nJe2ytTtV;V%ca}t>x#d$Swe>uuG?L=#octpzPag&9b>wDCgBQuPh7VDw`iy4x+cN{6#-qIjmFV z(sF)8;dk3QA|&}_y1E}vT6$M+CI6C7$;UiPFZmSwaf>sb+OF=&f=fRx{4JUPtuy=( z{(`@*_-HvxKkmx;xTSab%RHycU-InAIc{a>^1n^t5|>HpesGPYA-+}7>wZM{kGdb( zrRa4(vQ70bx*ut`xRh7=k>@P#_9GpNkM2j_cK8fp<`E8CT;wMB5yeOMBgeDw5kJ}G zBYraF%rZzw+8g3p&MAd!Ig37$Udwr#r5_1g%lT=AYdLSQc_1GxXYoT^IX`3RT{-{1 zGT$P0*p;)`CF0u7#I4*Y2W?MASlqQIg^FI!&y2IU=!4YX28+A-(e_R2!?7&%k{>sp zqb$9fPq%-O{D^$qd^ToD_o*!SYYNx$lySFPUN=93Y^+p8-q zAL3t7{O39RMLrc4mwXC-i{c|TpOEB-`8izkPPZ!6pB?->3YAzMqXJ`4oET3*7Q* z{U;yEN1?+1SDf|ljW+48@{;m0cp2D{&e1*cB6uw>I_bXidAMzJl$j7fM`t6F|vwk4yA5?f; z;SVWX)-Oo^u)=Rq^rCxwEb}v@|F)tRKbiQW3g4ypJf`s16#lrv#g8SQKT-G~JMTgK z35Cl%E%6-+m;K4an-yNC_&llbZ3_R6!ew5FeEwA79g6-bh0A;n>A$P+QT_ZM#J{KT zSqlFj3cp3+qQiW=LE+MO5pP!bP69TxDg65a;`*AxpH_Ic!v9R+192WB1M~m?`M2i( zgu?$^;j5h@Qljv7 zpTzusi^88(_&SCEOyQ3x{O1aPM&Z9u_-hLPD}~EJsm$kJE4(o3{~+F>@OcVV1Kc?`%C167|=HD=%e;e!VFKbZf2RrolCzoGDX3hz{Sy}}PDe7nNmRCt@h-%|K%3V&PS#})p+ z6h300|AYDeH-#q^{%eJ&6yBw9Z)wfag)7#3U#Y3DS+>?2zpAFLdVGD&y87{zUz|H> z;?nB6y5$QiQmbmJ7cEGwsb94G&Slpxudn^TrJa3{TUT|!MVbL8ki-O2Oq+m`E~y=y zjh?SI%d*y&b{~T)qUr_x1n#W^FDNc_ndp~xu4R? z%~riOXnEZk1KF49ciK*-=XvdFtKoJbJ+ zH=C+DDS0>DSVmyPkWn@_2__wJGR?m0?tiemf8nrOAMlKmB1<}c>o?u<%)VJrv)Ao- z{goLokVi^HVBS0+eY0;5mNY1jv?<$cbsNHJDq~|;?{hZyy=5uTOFZ9g~>>igepAOccC4iQGELB{(f(%)^!^qKGu`# z&2+SN=n=l2p3PnaZIH+;q?51I`irgzXDXmIsj9{xK}02HCR^Qfst#`J&I}jYt@>_g zYn7Hi@S`h7MX8e~RyF|0EQWJ35FzWWKJCaV+u3ecyn3xIs?J&ms%1k>4ZOZPIqdXi z2K~6?!bYmKDd;*~1Va8DHOwh2)%p!#&0Xl7VK3StP6jOT9 zI@UxJMmrzo5?%v_H<-~Fvyh6DS!yjVLCcr4%4kO!QH_`NZ?r7e^ekVsaPbW=y*X8!AX6L^_6BXm2-RqFw4io`qkDrD(c?#hqdk65h>g@!8rMcR z5yf5~mtOvo*KV*i78Fswc3f^fR=Vu_cw;M zc4E8>L-F|C`wrel$(Rr^6=r-KKh!L>+&*j>b(|7J=`4nam`+Z28}9OSWg%5Amk$FJ zF`qT&z*GyP<%;B`X9_(v+y};*nV9plU?@(b?a)JudT%BC6~FFvn+YE}Za66)`fj^9 zQW9+>sQR_0+pp3>R8JFh{h@cNVLIpwaXFi4KG<&!+(7#gcct2Z#fb5!4t+H|YS!DH z?@GwwSgJOv zD=oL(NHn1`{OYg^869!q7Do6wYo+Lm*te9O;UtXLAaUc&FWb=->yo}qKpv#3Tjy7Ums-1f}wnWOCko$?HfN3-c_r`%as(2E^vAadUz zyz)Q}rbdGro>w|_wl{InMAMm|iH@!d%aKNyQ`CZ79Y#~Q3UHuLp!r$Z=rDyn(HV?P z3-Wz;FzmyiI@<9D`IwH=H@lov*L_g!gc&7G7aLy0-rMNHKj`Kc!sB$I=KF3RH|(g} zAHtL$CLt}TWcVk$Y%xLx%Q<&?ZVoD6&74Pdjg-FF5Nofy`>$Wy0Go%Lp@>ec+@9gF1>c9c{;0*#OV3ijQ?GW0&=Z2?99}8m$hudg zF!Od&(CkzdhBh<9wUL3DuXoh76A>*}4rh7nVns7i`=3JbZg?)A@FgiNsVy!`QKLNt zlgQz~0}Z$dVvP@FrHd`1-1UZwO9!yqkBkcmm1>Q{Lm!u8u*K(fr@J^^rLh_tZ8_}D z&_q`R6(PpvPncGNd=wK=9rjr{j7xILD(n-hBQb^GO$)1WvNRzV;a)xJSR#G~@*7=$ zG0jgd8rA0Qh7hr06nm6mAk+*-2NkD;F{-TvaF5UGl2Em(ig92^13OnOqm5@`RKJrm zOtR<1_X19#+3NeSWxu`y9~sh)IeEpXXjApe!ND?Lrtv9Hy#!8l<~&$+cBS_=K&E;_ ze+fqCpy_OYaXe_)O%}2b-DqbzU28N(hiom(1I_kK%#BRgp~V{KGU57WeLQh;uzP}I z2nJgPJZI_TDoz2GIQTJ&81`gRxlJ@Wbigjry)JB8dfnMUztvr|cY98;)6xT#w(Xd< z7_6eq$%~U8cKhxk*zWelz%L@S1l4i?291Fm6K*?4Hnnl5CMGk^p_Oz>4(W10uKK-N z9aNhw9@D`z&s&j0+1?r&E=*SKH%vLS(-(<^yF$%nY5_;_;Ro|QYE|o6BeIYm2$e*? 

Date: Tue, 3 Aug 2021 12:07:49 +0800
Subject: [PATCH 2/2] fix cond

---
 ohos/CMakeLists.txt | 11 ++++-------
 1 file changed, 4 insertions(+), 7 deletions(-)

diff --git a/ohos/CMakeLists.txt b/ohos/CMakeLists.txt
index fa8aa3d2..be7364b6 100644
--- a/ohos/CMakeLists.txt
+++ b/ohos/CMakeLists.txt
@@ -144,7 +144,7 @@ set(glslang_libs_name glslang OGLCompiler OSDependent SPIRV glslang-default-reso
 
 ############################# TBB #############################
 
-if(USE_JOB_SYSTEM_TASKFLOW)
+if(USE_JOB_SYSTEM_TBB)
     add_library(tbb STATIC IMPORTED GLOBAL)
     set_target_properties(tbb PROPERTIES
         IMPORTED_LOCATION ${ohos_lib_dir}/libtbb_static.a
@@ -158,6 +158,9 @@ if(USE_JOB_SYSTEM_TASKFLOW)
         IMPORTED_LOCATION ${ohos_lib_dir}/libtbbmalloc_proxy_static.a
     )
     set(tbb_libs_name tbbmalloc_proxy tbbmalloc tbb)
+    list(APPEND CC_EXTERNAL_LIBS
+        ${tbb_libs_name}
+    )
 endif()
 
 list(APPEND CC_EXTERNAL_LIBS
@@ -172,12 +175,6 @@ list(APPEND CC_EXTERNAL_LIBS
     mpg123
 )
 
-if(USE_JOB_SYSTEM_TASKFLOW)
-    list(APPEND CC_EXTERNAL_LIBS
-        ${tbb_libs_name}
-    )
-endif()
-
 set(ZLIB z)
 if(NOT USE_MODULES)
     list(APPEND CC_EXTERNAL_LIBS ${ZLIB})