diff --git a/sources/jemalloc/CMakeLists.txt b/sources/jemalloc/CMakeLists.txt new file mode 100755 index 00000000..08b08b58 --- /dev/null +++ b/sources/jemalloc/CMakeLists.txt @@ -0,0 +1,57 @@ +message(STATUS "Add dependency target: jemalloc ...") + +# Define target name +set(TARGET_NAME jemalloc) + +if(MSVC) + set(INCLUDE_FILES ${COCOS_EXTERNAL_PATH}/jemalloc/include-win ${COCOS_EXTERNAL_PATH}/jemalloc/include-win/msvc_compat) +else() + set(INCLUDE_FILES ${COCOS_EXTERNAL_PATH}/jemalloc/include-linux) +endif() + +include_directories( ${INCLUDE_FILES} ) + +set (SOURCE_FILES + src/je_arena.c + src/je_atomic.c + src/je_base.c + src/je_bitmap.c + src/je_chunk.c + src/je_chunk_dss.c + src/je_chunk_mmap.c + src/je_ckh.c + src/je_ctl.c + src/je_extent.c + src/je_hash.c + src/je_huge.c + src/je_jemalloc.c + src/je_mb.c + src/je_mutex.c + src/je_nstime.c + src/je_pages.c + src/je_prng.c + src/je_prof.c + src/je_quarantine.c + src/je_rtree.c + src/je_spin.c + src/je_stats.c + src/je_tcache.c + src/je_ticker.c + src/je_tsd.c + src/je_util.c + src/je_witness.c +) + +add_library(${TARGET_NAME} STATIC ${SOURCE_FILES}) + +if(MSVC) + set_target_properties(${TARGET_NAME} PROPERTIES COMPILE_FLAGS "/wd4996") +endif() + +if(NOT COCOS_PLATFORM_IOS) + set_target_properties(${TARGET_NAME} PROPERTIES OUTPUT_NAME_DEBUG jemalloc_d) +endif() + +set_target_properties(${TARGET_NAME} PROPERTIES FOLDER External) + +message(STATUS "${TARGET_NAME} Configuration completed.") diff --git a/sources/jemalloc/ChangeLog b/sources/jemalloc/ChangeLog new file mode 100755 index 00000000..a9406853 --- /dev/null +++ b/sources/jemalloc/ChangeLog @@ -0,0 +1,1043 @@ +Following are change highlights associated with official releases. Important +bug fixes are all mentioned, but some internal enhancements are omitted here for +brevity. Much more detail can be found in the git revision history: + + https://github.com/jemalloc/jemalloc + +* 4.5.0 (February 28, 2017) + + This is the first release to benefit from much broader continuous integration + testing, thanks to @davidtgoldblatt. Had we had this testing infrastructure + in place for prior releases, it would have caught all of the most serious + regressions fixed by this release. + + New features: + - Add --disable-thp and the opt.thp mallctl to provide opt-out mechanisms for + transparent huge page integration. (@jasone) + - Update zone allocator integration to work with macOS 10.12. (@glandium) + - Restructure *CFLAGS configuration, so that CFLAGS behaves typically, and + EXTRA_CFLAGS provides a way to specify e.g. -Werror during building, but not + during configuration. (@jasone, @ronawho) + + Bug fixes: + - Fix DSS (sbrk(2)-based) allocation. This regression was first released in + 4.3.0. (@jasone) + - Handle race in per size class utilization computation. This functionality + was first released in 4.0.0. (@interwq) + - Fix lock order reversal during gdump. (@jasone) + - Fix/refactor tcache synchronization. This regression was first released in + 4.0.0. (@jasone) + - Fix various JSON-formatted malloc_stats_print() bugs. This functionality + was first released in 4.3.0. (@jasone) + - Fix huge-aligned allocation. This regression was first released in 4.4.0. + (@jasone) + - When transparent huge page integration is enabled, detect what state pages + start in according to the kernel's current operating mode, and only convert + arena chunks to non-huge during purging if that is not their initial state. + This functionality was first released in 4.4.0.
(@jasone) + - Fix lg_chunk clamping for the --enable-cache-oblivious --disable-fill case. + This regression was first released in 4.0.0. (@jasone, @428desmo) + - Properly detect sparc64 when building for Linux. (@glaubitz) + +* 4.4.0 (December 3, 2016) + + New features: + - Add configure support for *-*-linux-android. (@cferris1000, @jasone) + - Add the --disable-syscall configure option, for use on systems that place + security-motivated limitations on syscall(2). (@jasone) + - Add support for Debian GNU/kFreeBSD. (@thesam) + + Optimizations: + - Add extent serial numbers and use them where appropriate as a sort key that + is higher priority than address, so that the allocation policy prefers older + extents. This tends to improve locality (decrease fragmentation) when + memory grows downward. (@jasone) + - Refactor madvise(2) configuration so that MADV_FREE is detected and utilized + on Linux 4.5 and newer. (@jasone) + - Mark partially purged arena chunks as non-huge-page. This improves + interaction with Linux's transparent huge page functionality. (@jasone) + + Bug fixes: + - Fix size class computations for edge conditions involving extremely large + allocations. This regression was first released in 4.0.0. (@jasone, + @ingvarha) + - Remove overly restrictive assertions related to the cactive statistic. This + regression was first released in 4.1.0. (@jasone) + - Implement a more reliable detection scheme for os_unfair_lock on macOS. + (@jszakmeister) + +* 4.3.1 (November 7, 2016) + + Bug fixes: + - Fix a severe virtual memory leak. This regression was first released in + 4.3.0. (@interwq, @jasone) + - Refactor atomic and prng APIs to restore support for 32-bit platforms that + use pre-C11 toolchains, e.g. FreeBSD's mips. (@jasone) + +* 4.3.0 (November 4, 2016) + + This is the first release that passes the test suite for multiple Windows + configurations, thanks in large part to @glandium setting up continuous + integration via AppVeyor (and Travis CI for Linux and OS X). + + New features: + - Add "J" (JSON) support to malloc_stats_print(). (@jasone) + - Add Cray compiler support. (@ronawho) + + Optimizations: + - Add/use adaptive spinning for bootstrapping and radix tree node + initialization. (@jasone) + + Bug fixes: + - Fix large allocation to search starting in the optimal size class heap, + which can substantially reduce virtual memory churn and fragmentation. This + regression was first released in 4.0.0. (@mjp41, @jasone) + - Fix stats.arenas..nthreads accounting. (@interwq) + - Fix and simplify decay-based purging. (@jasone) + - Make DSS (sbrk(2)-related) operations lockless, which resolves potential + deadlocks during thread exit. (@jasone) + - Fix over-sized allocation of radix tree leaf nodes. (@mjp41, @ogaun, + @jasone) + - Fix over-sized allocation of arena_t (plus associated stats) data + structures. (@jasone, @interwq) + - Fix EXTRA_CFLAGS to not affect configuration. (@jasone) + - Fix a Valgrind integration bug. (@ronawho) + - Disallow 0x5a junk filling when running in Valgrind. (@jasone) + - Fix a file descriptor leak on Linux. This regression was first released in + 4.2.0. (@vsarunas, @jasone) + - Fix static linking of jemalloc with glibc. (@djwatson) + - Use syscall(2) rather than {open,read,close}(2) during boot on Linux. This + works around other libraries' system call wrappers performing reentrant + allocation. (@kspinka, @Whissi, @jasone) + - Fix OS X default zone replacement to work with OS X 10.12. 
(@glandium, + @jasone) + - Fix cached memory management to avoid needless commit/decommit operations + during purging, which resolves permanent virtual memory map fragmentation + issues on Windows. (@mjp41, @jasone) + - Fix TSD fetches to avoid (recursive) allocation. This is relevant to + non-TLS and Windows configurations. (@jasone) + - Fix malloc_conf overriding to work on Windows. (@jasone) + - Forcibly disable lazy-lock on Windows (was forcibly *enabled*). (@jasone) + +* 4.2.1 (June 8, 2016) + + Bug fixes: + - Fix bootstrapping issues for configurations that require allocation during + tsd initialization (e.g. --disable-tls). (@cferris1000, @jasone) + - Fix gettimeofday() version of nstime_update(). (@ronawho) + - Fix Valgrind regressions in calloc() and chunk_alloc_wrapper(). (@ronawho) + - Fix potential VM map fragmentation regression. (@jasone) + - Fix opt_zero-triggered in-place huge reallocation zeroing. (@jasone) + - Fix heap profiling context leaks in reallocation edge cases. (@jasone) + +* 4.2.0 (May 12, 2016) + + New features: + - Add the arena..reset mallctl, which makes it possible to discard all of + an arena's allocations in a single operation. (@jasone) + - Add the stats.retained and stats.arenas..retained statistics. (@jasone) + - Add the --with-version configure option. (@jasone) + - Support --with-lg-page values larger than actual page size. (@jasone) + + Optimizations: + - Use pairing heaps rather than red-black trees for various hot data + structures. (@djwatson, @jasone) + - Streamline fast paths of rtree operations. (@jasone) + - Optimize the fast paths of calloc() and [m,d,sd]allocx(). (@jasone) + - Decommit unused virtual memory if the OS does not overcommit. (@jasone) + - Specify MAP_NORESERVE on Linux if [heuristic] overcommit is active, in order + to avoid unfortunate interactions during fork(2). (@jasone) + + Bug fixes: + - Fix chunk accounting related to triggering gdump profiles. (@jasone) + - Link against librt for clock_gettime(2) if glibc < 2.17. (@jasone) + - Scale leak report summary according to sampling probability. (@jasone) + +* 4.1.1 (May 3, 2016) + + This bugfix release resolves a variety of mostly minor issues, though the + bitmap fix is critical for 64-bit Windows. + + Bug fixes: + - Fix the linear scan version of bitmap_sfu() to shift by the proper amount + even when sizeof(long) is not the same as sizeof(void *), as on 64-bit + Windows. (@jasone) + - Fix hashing functions to avoid unaligned memory accesses (and resulting + crashes). This is relevant at least to some ARM-based platforms. + (@rkmisra) + - Fix fork()-related lock rank ordering reversals. These reversals were + unlikely to cause deadlocks in practice except when heap profiling was + enabled and active. (@jasone) + - Fix various chunk leaks in OOM code paths. (@jasone) + - Fix malloc_stats_print() to print opt.narenas correctly. (@jasone) + - Fix MSVC-specific build/test issues. (@rustyx, @yuslepukhin) + - Fix a variety of test failures that were due to test fragility rather than + core bugs. (@jasone) + +* 4.1.0 (February 28, 2016) + + This release is primarily about optimizations, but it also incorporates a lot + of portability-motivated refactoring and enhancements. 
Many people worked on + this release, to an extent that even with the omission here of minor changes + (see git revision history), and of the people who reported and diagnosed + issues, so much of the work was contributed that starting with this release, + changes are annotated with author credits to help reflect the collaborative + effort involved. + + New features: + - Implement decay-based unused dirty page purging, a major optimization with + mallctl API impact. This is an alternative to the existing ratio-based + unused dirty page purging, and is intended to eventually become the sole + purging mechanism. New mallctls: + + opt.purge + + opt.decay_time + + arena..decay + + arena..decay_time + + arenas.decay_time + + stats.arenas..decay_time + (@jasone, @cevans87) + - Add --with-malloc-conf, which makes it possible to embed a default + options string during configuration. This was motivated by the desire to + specify --with-malloc-conf=purge:decay , since the default must remain + purge:ratio until the 5.0.0 release. (@jasone) + - Add MS Visual Studio 2015 support. (@rustyx, @yuslepukhin) + - Make *allocx() size class overflow behavior defined. The maximum + size class is now less than PTRDIFF_MAX to protect applications against + numerical overflow, and all allocation functions are guaranteed to indicate + errors rather than potentially crashing if the request size exceeds the + maximum size class. (@jasone) + - jeprof: + + Add raw heap profile support. (@jasone) + + Add --retain and --exclude for backtrace symbol filtering. (@jasone) + + Optimizations: + - Optimize the fast path to combine various bootstrapping and configuration + checks and execute more streamlined code in the common case. (@interwq) + - Use linear scan for small bitmaps (used for small object tracking). In + addition to speeding up bitmap operations on 64-bit systems, this reduces + allocator metadata overhead by approximately 0.2%. (@djwatson) + - Separate arena_avail trees, which substantially speeds up run tree + operations. (@djwatson) + - Use memoization (boot-time-computed table) for run quantization. Separate + arena_avail trees reduced the importance of this optimization. (@jasone) + - Attempt mmap-based in-place huge reallocation. This can dramatically speed + up incremental huge reallocation. (@jasone) + + Incompatible changes: + - Make opt.narenas unsigned rather than size_t. (@jasone) + + Bug fixes: + - Fix stats.cactive accounting regression. (@rustyx, @jasone) + - Handle unaligned keys in hash(). This caused problems for some ARM systems. + (@jasone, @cferris1000) + - Refactor arenas array. In addition to fixing a fork-related deadlock, this + makes arena lookups faster and simpler. (@jasone) + - Move retained memory allocation out of the default chunk allocation + function, to a location that gets executed even if the application installs + a custom chunk allocation function. This resolves a virtual memory leak. + (@buchgr) + - Fix a potential tsd cleanup leak. (@cferris1000, @jasone) + - Fix run quantization. In practice this bug had no impact unless + applications requested memory with alignment exceeding one page. + (@jasone, @djwatson) + - Fix LinuxThreads-specific bootstrapping deadlock. (Cosmin Paraschiv) + - jeprof: + + Don't discard curl options if timeout is not defined. (@djwatson) + + Detect failed profile fetches. (@djwatson) + - Fix stats.arenas..{dss,lg_dirty_mult,decay_time,pactive,pdirty} for + --disable-stats case. 
(@jasone) + +* 4.0.4 (October 24, 2015) + + This bugfix release fixes another xallocx() regression. No other regressions + have come to light in over a month, so this is likely a good starting point + for people who prefer to wait for "dot one" releases with all the major issues + shaken out. + + Bug fixes: + - Fix xallocx(..., MALLOCX_ZERO) to zero the last full trailing page of large + allocations that have been randomly assigned an offset of 0 when the + --enable-cache-oblivious configure option is enabled. + +* 4.0.3 (September 24, 2015) + + This bugfix release continues the trend of xallocx() and heap profiling fixes. + + Bug fixes: + - Fix xallocx(..., MALLOCX_ZERO) to zero all trailing bytes of large + allocations when the --enable-cache-oblivious configure option is enabled. + - Fix xallocx(..., MALLOCX_ZERO) to zero trailing bytes of huge allocations + when resizing from/to a size class that is not a multiple of the chunk size. + - Fix prof_tctx_dump_iter() to filter out nodes that were created after heap + profile dumping started. + - Work around a potentially bad thread-specific data initialization + interaction with NPTL (glibc's pthreads implementation). + +* 4.0.2 (September 21, 2015) + + This bugfix release addresses a few bugs specific to heap profiling. + + Bug fixes: + - Fix ixallocx_prof_sample() to never modify nor create sampled small + allocations. xallocx() is in general incapable of moving small allocations, + so this fix removes buggy code without loss of generality. + - Fix irallocx_prof_sample() to always allocate large regions, even when + alignment is non-zero. + - Fix prof_alloc_rollback() to read tdata from thread-specific data rather + than dereferencing a potentially invalid tctx. + +* 4.0.1 (September 15, 2015) + + This is a bugfix release that is somewhat high risk due to the amount of + refactoring required to address deep xallocx() problems. As a side effect of + these fixes, xallocx() now tries harder to partially fulfill requests for + optional extra space. Note that a couple of minor heap profiling + optimizations are included, but these are better thought of as performance + fixes that were integral to discovering most of the other bugs. + + Optimizations: + - Avoid a chunk metadata read in arena_prof_tctx_set(), since it is in the + fast path when heap profiling is enabled. Additionally, split a special + case out into arena_prof_tctx_reset(), which also avoids chunk metadata + reads. + - Optimize irallocx_prof() to optimistically update the sampler state. The + prior implementation appears to have been a holdover from when + rallocx()/xallocx() functionality was combined as rallocm(). + + Bug fixes: + - Fix TLS configuration such that it is enabled by default for platforms on + which it works correctly. + - Fix arenas_cache_cleanup() and arena_get_hard() to handle + allocation/deallocation within the application's thread-specific data + cleanup functions even after arenas_cache is torn down. + - Fix xallocx() bugs related to size+extra exceeding HUGE_MAXCLASS. + - Fix chunk purge hook calls for in-place huge shrinking reallocation to + specify the old chunk size rather than the new chunk size. This bug caused + no correctness issues for the default chunk purge function, but was + visible to custom functions set via the "arena.<i>.chunk_hooks" mallctl. + - Fix heap profiling bugs: + + Fix heap profiling to distinguish among otherwise identical sample sites + with interposed resets (triggered via the "prof.reset" mallctl).
This bug + could cause data structure corruption that would most likely result in a + segfault. + + Fix irealloc_prof() to prof_alloc_rollback() on OOM. + + Make one call to prof_active_get_unlocked() per allocation event, and use + the result throughout the relevant functions that handle an allocation + event. Also add a missing check in prof_realloc(). These fixes protect + allocation events against concurrent prof_active changes. + + Fix ixallocx_prof() to pass usize_max and zero to ixallocx_prof_sample() + in the correct order. + + Fix prof_realloc() to call prof_free_sampled_object() after calling + prof_malloc_sample_object(). Prior to this fix, if tctx and old_tctx were + the same, the tctx could have been prematurely destroyed. + - Fix portability bugs: + + Don't bitshift by negative amounts when encoding/decoding run sizes in + chunk header maps. This affected systems with page sizes greater than 8 + KiB. + + Rename index_t to szind_t to avoid an existing type on Solaris. + + Add JEMALLOC_CXX_THROW to the memalign() function prototype, in order to + match glibc and avoid compilation errors when including both + jemalloc/jemalloc.h and malloc.h in C++ code. + + Don't assume that /bin/sh is appropriate when running size_classes.sh + during configuration. + + Consider __sparcv9 a synonym for __sparc64__ when defining LG_QUANTUM. + + Link tests to librt if it contains clock_gettime(2). + +* 4.0.0 (August 17, 2015) + + This version contains many speed and space optimizations, both minor and + major. The major themes are generalization, unification, and simplification. + Although many of these optimizations cause no visible behavior change, their + cumulative effect is substantial. + + New features: + - Normalize size class spacing to be consistent across the complete size + range. By default there are four size classes per size doubling, but this + is now configurable via the --with-lg-size-class-group option. Also add the + --with-lg-page, --with-lg-page-sizes, --with-lg-quantum, and + --with-lg-tiny-min options, which can be used to tweak page and size class + settings. Impacts: + + Worst case performance for incrementally growing/shrinking reallocation + is improved because there are far fewer size classes, and therefore + copying happens less often. + + Internal fragmentation is limited to 20% for all but the smallest size + classes (those less than four times the quantum). (1B + 4 KiB) + and (1B + 4 MiB) previously suffered nearly 50% internal fragmentation. + + Chunk fragmentation tends to be lower because there are fewer distinct run + sizes to pack. + - Add support for explicit tcaches. The "tcache.create", "tcache.flush", and + "tcache.destroy" mallctls control tcache lifetime and flushing, and the + MALLOCX_TCACHE(tc) and MALLOCX_TCACHE_NONE flags to the *allocx() API + control which tcache is used for each operation. + - Implement per thread heap profiling, as well as the ability to + enable/disable heap profiling on a per thread basis. Add the "prof.reset", + "prof.lg_sample", "thread.prof.name", "thread.prof.active", + "opt.prof_thread_active_init", "prof.thread_active_init", and + "thread.prof.active" mallctls. + - Add support for per arena application-specified chunk allocators, configured + via the "arena..chunk_hooks" mallctl. + - Refactor huge allocation to be managed by arenas, so that arenas now + function as general purpose independent allocators. This is important in + the context of user-specified chunk allocators, aside from the scalability + benefits. 
Related new statistics: + + The "stats.arenas..huge.allocated", "stats.arenas..huge.nmalloc", + "stats.arenas..huge.ndalloc", and "stats.arenas..huge.nrequests" + mallctls provide high level per arena huge allocation statistics. + + The "arenas.nhchunks", "arenas.hchunk..size", + "stats.arenas..hchunks..nmalloc", + "stats.arenas..hchunks..ndalloc", + "stats.arenas..hchunks..nrequests", and + "stats.arenas..hchunks..curhchunks" mallctls provide per size class + statistics. + - Add the 'util' column to malloc_stats_print() output, which reports the + proportion of available regions that are currently in use for each small + size class. + - Add "alloc" and "free" modes for for junk filling (see the "opt.junk" + mallctl), so that it is possible to separately enable junk filling for + allocation versus deallocation. + - Add the jemalloc-config script, which provides information about how + jemalloc was configured, and how to integrate it into application builds. + - Add metadata statistics, which are accessible via the "stats.metadata", + "stats.arenas..metadata.mapped", and + "stats.arenas..metadata.allocated" mallctls. + - Add the "stats.resident" mallctl, which reports the upper limit of + physically resident memory mapped by the allocator. + - Add per arena control over unused dirty page purging, via the + "arenas.lg_dirty_mult", "arena..lg_dirty_mult", and + "stats.arenas..lg_dirty_mult" mallctls. + - Add the "prof.gdump" mallctl, which makes it possible to toggle the gdump + feature on/off during program execution. + - Add sdallocx(), which implements sized deallocation. The primary + optimization over dallocx() is the removal of a metadata read, which often + suffers an L1 cache miss. + - Add missing header includes in jemalloc/jemalloc.h, so that applications + only have to #include . + - Add support for additional platforms: + + Bitrig + + Cygwin + + DragonFlyBSD + + iOS + + OpenBSD + + OpenRISC/or1k + + Optimizations: + - Maintain dirty runs in per arena LRUs rather than in per arena trees of + dirty-run-containing chunks. In practice this change significantly reduces + dirty page purging volume. + - Integrate whole chunks into the unused dirty page purging machinery. This + reduces the cost of repeated huge allocation/deallocation, because it + effectively introduces a cache of chunks. + - Split the arena chunk map into two separate arrays, in order to increase + cache locality for the frequently accessed bits. + - Move small run metadata out of runs, into arena chunk headers. This reduces + run fragmentation, smaller runs reduce external fragmentation for small size + classes, and packed (less uniformly aligned) metadata layout improves CPU + cache set distribution. + - Randomly distribute large allocation base pointer alignment relative to page + boundaries in order to more uniformly utilize CPU cache sets. This can be + disabled via the --disable-cache-oblivious configure option, and queried via + the "config.cache_oblivious" mallctl. + - Micro-optimize the fast paths for the public API functions. + - Refactor thread-specific data to reside in a single structure. This assures + that only a single TLS read is necessary per call into the public API. + - Implement in-place huge allocation growing and shrinking. + - Refactor rtree (radix tree for chunk lookups) to be lock-free, and make + additional optimizations that reduce maximum lookup depth to one or two + levels. 
This resolves what was a concurrency bottleneck for per arena huge + allocation, because a global data structure is critical for determining + which arenas own which huge allocations. + + Incompatible changes: + - Replace --enable-cc-silence with --disable-cc-silence to suppress spurious + warnings by default. + - Assure that the constness of malloc_usable_size()'s return type matches that + of the system implementation. + - Change the heap profile dump format to support per thread heap profiling, + rename pprof to jeprof, and enhance it with the --thread= option. As a + result, the bundled jeprof must now be used rather than the upstream + (gperftools) pprof. + - Disable "opt.prof_final" by default, in order to avoid atexit(3), which can + internally deadlock on some platforms. + - Change the "arenas.nlruns" mallctl type from size_t to unsigned. + - Replace the "stats.arenas..bins..allocated" mallctl with + "stats.arenas..bins..curregs". + - Ignore MALLOC_CONF in set{uid,gid,cap} binaries. + - Ignore MALLOCX_ARENA(a) in dallocx(), in favor of using the + MALLOCX_TCACHE(tc) and MALLOCX_TCACHE_NONE flags to control tcache usage. + + Removed features: + - Remove the *allocm() API, which is superseded by the *allocx() API. + - Remove the --enable-dss options, and make dss non-optional on all platforms + which support sbrk(2). + - Remove the "arenas.purge" mallctl, which was obsoleted by the + "arena..purge" mallctl in 3.1.0. + - Remove the unnecessary "opt.valgrind" mallctl; jemalloc automatically + detects whether it is running inside Valgrind. + - Remove the "stats.huge.allocated", "stats.huge.nmalloc", and + "stats.huge.ndalloc" mallctls. + - Remove the --enable-mremap option. + - Remove the "stats.chunks.current", "stats.chunks.total", and + "stats.chunks.high" mallctls. + + Bug fixes: + - Fix the cactive statistic to decrease (rather than increase) when active + memory decreases. This regression was first released in 3.5.0. + - Fix OOM handling in memalign() and valloc(). A variant of this bug existed + in all releases since 2.0.0, which introduced these functions. + - Fix an OOM-related regression in arena_tcache_fill_small(), which could + cause cache corruption on OOM. This regression was present in all releases + from 2.2.0 through 3.6.0. + - Fix size class overflow handling for malloc(), posix_memalign(), memalign(), + calloc(), and realloc() when profiling is enabled. + - Fix the "arena..dss" mallctl to return an error if "primary" or + "secondary" precedence is specified, but sbrk(2) is not supported. + - Fix fallback lg_floor() implementations to handle extremely large inputs. + - Ensure the default purgeable zone is after the default zone on OS X. + - Fix latent bugs in atomic_*(). + - Fix the "arena..dss" mallctl to handle read-only calls. + - Fix tls_model configuration to enable the initial-exec model when possible. + - Mark malloc_conf as a weak symbol so that the application can override it. + - Correctly detect glibc's adaptive pthread mutexes. + - Fix the --without-export configure option. + +* 3.6.0 (March 31, 2014) + + This version contains a critical bug fix for a regression present in 3.5.0 and + 3.5.1. + + Bug fixes: + - Fix a regression in arena_chunk_alloc() that caused crashes during + small/large allocation if chunk allocation failed. In the absence of this + bug, chunk allocation failure would result in allocation failure, e.g. NULL + return from malloc(). This regression was introduced in 3.5.0. 
+ - Fix backtracing for gcc intrinsics-based backtracing by specifying + -fno-omit-frame-pointer to gcc. Note that the application (and all the + libraries it links to) must also be compiled with this option for + backtracing to be reliable. + - Use dss allocation precedence for huge allocations as well as small/large + allocations. + - Fix test assertion failure message formatting. This bug did not manifest on + x86_64 systems because of implementation subtleties in va_list. + - Fix inconsequential test failures for hash and SFMT code. + + New features: + - Support heap profiling on FreeBSD. This feature depends on the proc + filesystem being mounted during heap profile dumping. + +* 3.5.1 (February 25, 2014) + + This version primarily addresses minor bugs in test code. + + Bug fixes: + - Configure Solaris/Illumos to use MADV_FREE. + - Fix junk filling for mremap(2)-based huge reallocation. This is only + relevant if configuring with the --enable-mremap option specified. + - Avoid compilation failure if 'restrict' C99 keyword is not supported by the + compiler. + - Add a configure test for SSE2 rather than assuming it is usable on i686 + systems. This fixes test compilation errors, especially on 32-bit Linux + systems. + - Fix mallctl argument size mismatches (size_t vs. uint64_t) in the stats unit + test. + - Fix/remove flawed alignment-related overflow tests. + - Prevent compiler optimizations that could change backtraces in the + prof_accum unit test. + +* 3.5.0 (January 22, 2014) + + This version focuses on refactoring and automated testing, though it also + includes some non-trivial heap profiling optimizations not mentioned below. + + New features: + - Add the *allocx() API, which is a successor to the experimental *allocm() + API. The *allocx() functions are slightly simpler to use because they have + fewer parameters, they directly return the results of primary interest, and + mallocx()/rallocx() avoid the strict aliasing pitfall that + allocm()/rallocm() share with posix_memalign(). Note that *allocm() is + slated for removal in the next non-bugfix release. + - Add support for LinuxThreads. + + Bug fixes: + - Unless heap profiling is enabled, disable floating point code and don't link + with libm. This, in combination with e.g. EXTRA_CFLAGS=-mno-sse on x64 + systems, makes it possible to completely disable floating point register + use. Some versions of glibc neglect to save/restore caller-saved floating + point registers during dynamic lazy symbol loading, and the symbol loading + code uses whatever malloc the application happens to have linked/loaded + with, the result being potential floating point register corruption. + - Report ENOMEM rather than EINVAL if an OOM occurs during heap profiling + backtrace creation in imemalign(). This bug impacted posix_memalign() and + aligned_alloc(). + - Fix a file descriptor leak in a prof_dump_maps() error path. + - Fix prof_dump() to close the dump file descriptor for all relevant error + paths. + - Fix rallocm() to use the arena specified by the ALLOCM_ARENA(s) flag for + allocation, not just deallocation. + - Fix a data race for large allocation stats counters. + - Fix a potential infinite loop during thread exit. This bug occurred on + Solaris, and could affect other platforms with similar pthreads TSD + implementations. + - Don't junk-fill reallocations unless usable size changes. This fixes a + violation of the *allocx()/*allocm() semantics. + - Fix growing large reallocation to junk fill new space. 
+ - Fix huge deallocation to junk fill when munmap is disabled. + - Change the default private namespace prefix from empty to je_, and change + --with-private-namespace-prefix so that it prepends an additional prefix + rather than replacing je_. This reduces the likelihood of applications + which statically link jemalloc experiencing symbol name collisions. + - Add missing private namespace mangling (relevant when + --with-private-namespace is specified). + - Add and use JEMALLOC_INLINE_C so that static inline functions are marked as + static even for debug builds. + - Add a missing mutex unlock in a malloc_init_hard() error path. In practice + this error path is never executed. + - Fix numerous bugs in malloc_strtoumax() error handling/reporting. These + bugs had no impact except for malformed inputs. + - Fix numerous bugs in malloc_snprintf(). These bugs were not exercised by + existing calls, so they had no impact. + +* 3.4.1 (October 20, 2013) + + Bug fixes: + - Fix a race in the "arenas.extend" mallctl that could cause memory corruption + of internal data structures and subsequent crashes. + - Fix Valgrind integration flaws that caused Valgrind warnings about reads of + uninitialized memory in: + + arena chunk headers + + internal zero-initialized data structures (relevant to tcache and prof + code) + - Preserve errno during the first allocation. A readlink(2) call during + initialization fails unless /etc/malloc.conf exists, so errno was typically + set during the first allocation prior to this fix. + - Fix compilation warnings reported by gcc 4.8.1. + +* 3.4.0 (June 2, 2013) + + This version is essentially a small bugfix release, but the addition of + aarch64 support requires that the minor version be incremented. + + Bug fixes: + - Fix race-triggered deadlocks in chunk_record(). These deadlocks were + typically triggered by multiple threads concurrently deallocating huge + objects. + + New features: + - Add support for the aarch64 architecture. + +* 3.3.1 (March 6, 2013) + + This version fixes bugs that are typically encountered only when utilizing + custom run-time options. + + Bug fixes: + - Fix a locking order bug that could cause deadlock during fork if heap + profiling were enabled. + - Fix a chunk recycling bug that could cause the allocator to lose track of + whether a chunk was zeroed. On FreeBSD, NetBSD, and OS X, it could cause + corruption if allocating via sbrk(2) (unlikely unless running with the + "dss:primary" option specified). This was completely harmless on Linux + unless using mlockall(2) (and unlikely even then, unless the + --disable-munmap configure option or the "dss:primary" option was + specified). This regression was introduced in 3.1.0 by the + mlockall(2)/madvise(2) interaction fix. + - Fix TLS-related memory corruption that could occur during thread exit if the + thread never allocated memory. Only the quarantine and prof facilities were + susceptible. + - Fix two quarantine bugs: + + Internal reallocation of the quarantined object array leaked the old + array. + + Reallocation failure for internal reallocation of the quarantined object + array (very unlikely) resulted in memory corruption. + - Fix Valgrind integration to annotate all internally allocated memory in a + way that keeps Valgrind happy about internal data structure access. + - Fix building for s390 systems. + +* 3.3.0 (January 23, 2013) + + This version includes a few minor performance improvements in addition to the + listed new features and bug fixes.
+ + New features: + - Add clipping support to lg_chunk option processing. + - Add the --enable-ivsalloc option. + - Add the --without-export option. + - Add the --disable-zone-allocator option. + + Bug fixes: + - Fix "arenas.extend" mallctl to output the number of arenas. + - Fix chunk_recycle() to unconditionally inform Valgrind that returned memory + is undefined. + - Fix build break on FreeBSD related to alloca.h. + +* 3.2.0 (November 9, 2012) + + In addition to a couple of bug fixes, this version modifies page run + allocation and dirty page purging algorithms in order to better control + page-level virtual memory fragmentation. + + Incompatible changes: + - Change the "opt.lg_dirty_mult" default from 5 to 3 (32:1 to 8:1). + + Bug fixes: + - Fix dss/mmap allocation precedence code to use recyclable mmap memory only + after primary dss allocation fails. + - Fix deadlock in the "arenas.purge" mallctl. This regression was introduced + in 3.1.0 by the addition of the "arena..purge" mallctl. + +* 3.1.0 (October 16, 2012) + + New features: + - Auto-detect whether running inside Valgrind, thus removing the need to + manually specify MALLOC_CONF=valgrind:true. + - Add the "arenas.extend" mallctl, which allows applications to create + manually managed arenas. + - Add the ALLOCM_ARENA() flag for {,r,d}allocm(). + - Add the "opt.dss", "arena..dss", and "stats.arenas..dss" mallctls, + which provide control over dss/mmap precedence. + - Add the "arena..purge" mallctl, which obsoletes "arenas.purge". + - Define LG_QUANTUM for hppa. + + Incompatible changes: + - Disable tcache by default if running inside Valgrind, in order to avoid + making unallocated objects appear reachable to Valgrind. + - Drop const from malloc_usable_size() argument on Linux. + + Bug fixes: + - Fix heap profiling crash if sampled object is freed via realloc(p, 0). + - Remove const from __*_hook variable declarations, so that glibc can modify + them during process forking. + - Fix mlockall(2)/madvise(2) interaction. + - Fix fork(2)-related deadlocks. + - Fix error return value for "thread.tcache.enabled" mallctl. + +* 3.0.0 (May 11, 2012) + + Although this version adds some major new features, the primary focus is on + internal code cleanup that facilitates maintainability and portability, most + of which is not reflected in the ChangeLog. This is the first release to + incorporate substantial contributions from numerous other developers, and the + result is a more broadly useful allocator (see the git revision history for + contribution details). Note that the license has been unified, thanks to + Facebook granting a license under the same terms as the other copyright + holders (see COPYING). + + New features: + - Implement Valgrind support, redzones, and quarantine. + - Add support for additional platforms: + + FreeBSD + + Mac OS X Lion + + MinGW + + Windows (no support yet for replacing the system malloc) + - Add support for additional architectures: + + MIPS + + SH4 + + Tilera + - Add support for cross compiling. + - Add nallocm(), which rounds a request size up to the nearest size class + without actually allocating. + - Implement aligned_alloc() (blame C11). + - Add the "thread.tcache.enabled" mallctl. + - Add the "opt.prof_final" mallctl. + - Update pprof (from gperftools 2.0). + - Add the --with-mangling option. + - Add the --disable-experimental option. + - Add the --disable-munmap option, and make it the default on Linux. + - Add the --enable-mremap option, which disables use of mremap(2) by default. 
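+
+  As an illustrative sketch only (assuming a build that exports the unprefixed
+  mallctl(), i.e. configured without --with-jemalloc-prefix), reading and then
+  writing the "thread.tcache.enabled" mallctl listed above might look like:
+
+    #include <stdbool.h>
+    #include <stdio.h>
+    #include <jemalloc/jemalloc.h>
+
+    int main(void) {
+        bool enabled;
+        size_t sz = sizeof(enabled);
+        /* Read the calling thread's current tcache state. */
+        if (mallctl("thread.tcache.enabled", &enabled, &sz, NULL, 0) == 0)
+            printf("tcache enabled: %d\n", (int)enabled);
+        /* Write false to disable the tcache for this thread. */
+        bool off = false;
+        return mallctl("thread.tcache.enabled", NULL, NULL, &off, sizeof(off));
+    }
+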
+ + Incompatible changes: + - Enable stats by default. + - Enable fill by default. + - Disable lazy locking by default. + - Rename the "tcache.flush" mallctl to "thread.tcache.flush". + - Rename the "arenas.pagesize" mallctl to "arenas.page". + - Change the "opt.lg_prof_sample" default from 0 to 19 (1 B to 512 KiB). + - Change the "opt.prof_accum" default from true to false. + + Removed features: + - Remove the swap feature, including the "config.swap", "swap.avail", + "swap.prezeroed", "swap.nfds", and "swap.fds" mallctls. + - Remove highruns statistics, including the + "stats.arenas..bins..highruns" and + "stats.arenas..lruns..highruns" mallctls. + - As part of small size class refactoring, remove the "opt.lg_[qc]space_max", + "arenas.cacheline", "arenas.subpage", "arenas.[tqcs]space_{min,max}", and + "arenas.[tqcs]bins" mallctls. + - Remove the "arenas.chunksize" mallctl. + - Remove the "opt.lg_prof_tcmax" option. + - Remove the "opt.lg_prof_bt_max" option. + - Remove the "opt.lg_tcache_gc_sweep" option. + - Remove the --disable-tiny option, including the "config.tiny" mallctl. + - Remove the --enable-dynamic-page-shift configure option. + - Remove the --enable-sysv configure option. + + Bug fixes: + - Fix a statistics-related bug in the "thread.arena" mallctl that could cause + invalid statistics and crashes. + - Work around TLS deallocation via free() on Linux. This bug could cause + write-after-free memory corruption. + - Fix a potential deadlock that could occur during interval- and + growth-triggered heap profile dumps. + - Fix large calloc() zeroing bugs due to dropping chunk map unzeroed flags. + - Fix chunk_alloc_dss() to stop claiming memory is zeroed. This bug could + cause memory corruption and crashes with --enable-dss specified. + - Fix fork-related bugs that could cause deadlock in children between fork + and exec. + - Fix malloc_stats_print() to honor 'b' and 'l' in the opts parameter. + - Fix realloc(p, 0) to act like free(p). + - Do not enforce minimum alignment in memalign(). + - Check for NULL pointer in malloc_usable_size(). + - Fix an off-by-one heap profile statistics bug that could be observed in + interval- and growth-triggered heap profiles. + - Fix the "epoch" mallctl to update cached stats even if the passed in epoch + is 0. + - Fix bin->runcur management to fix a layout policy bug. This bug did not + affect correctness. + - Fix a bug in choose_arena_hard() that potentially caused more arenas to be + initialized than necessary. + - Add missing "opt.lg_tcache_max" mallctl implementation. + - Use glibc allocator hooks to make mixed allocator usage less likely. + - Fix build issues for --disable-tcache. + - Don't mangle pthread_create() when --with-private-namespace is specified. + +* 2.2.5 (November 14, 2011) + + Bug fixes: + - Fix huge_ralloc() race when using mremap(2). This is a serious bug that + could cause memory corruption and/or crashes. + - Fix huge_ralloc() to maintain chunk statistics. + - Fix malloc_stats_print(..., "a") output. + +* 2.2.4 (November 5, 2011) + + Bug fixes: + - Initialize arenas_tsd before using it. This bug existed for 2.2.[0-3], as + well as for --disable-tls builds in earlier releases. + - Do not assume a 4 KiB page size in test/rallocm.c. + +* 2.2.3 (August 31, 2011) + + This version fixes numerous bugs related to heap profiling. + + Bug fixes: + - Fix a prof-related race condition. This bug could cause memory corruption, + but only occurred in non-default configurations (prof_accum:false). 
+ - Fix off-by-one backtracing issues (make sure that prof_alloc_prep() is + excluded from backtraces). + - Fix a prof-related bug in realloc() (only triggered by OOM errors). + - Fix prof-related bugs in allocm() and rallocm(). + - Fix prof_tdata_cleanup() for --disable-tls builds. + - Fix a relative include path, to fix objdir builds. + +* 2.2.2 (July 30, 2011) + + Bug fixes: + - Fix a build error for --disable-tcache. + - Fix assertions in arena_purge() (for real this time). + - Add the --with-private-namespace option. This is a workaround for symbol + conflicts that can inadvertently arise when using static libraries. + +* 2.2.1 (March 30, 2011) + + Bug fixes: + - Implement atomic operations for x86/x64. This fixes compilation failures + for versions of gcc that are still in wide use. + - Fix an assertion in arena_purge(). + +* 2.2.0 (March 22, 2011) + + This version incorporates several improvements to algorithms and data + structures that tend to reduce fragmentation and increase speed. + + New features: + - Add the "stats.cactive" mallctl. + - Update pprof (from google-perftools 1.7). + - Improve backtracing-related configuration logic, and add the + --disable-prof-libgcc option. + + Bug fixes: + - Change default symbol visibility from "internal", to "hidden", which + decreases the overhead of library-internal function calls. + - Fix symbol visibility so that it is also set on OS X. + - Fix a build dependency regression caused by the introduction of the .pic.o + suffix for PIC object files. + - Add missing checks for mutex initialization failures. + - Don't use libgcc-based backtracing except on x64, where it is known to work. + - Fix deadlocks on OS X that were due to memory allocation in + pthread_mutex_lock(). + - Heap profiling-specific fixes: + + Fix memory corruption due to integer overflow in small region index + computation, when using a small enough sample interval that profiling + context pointers are stored in small run headers. + + Fix a bootstrap ordering bug that only occurred with TLS disabled. + + Fix a rallocm() rsize bug. + + Fix error detection bugs for aligned memory allocation. + +* 2.1.3 (March 14, 2011) + + Bug fixes: + - Fix a cpp logic regression (due to the "thread.{de,}allocatedp" mallctl fix + for OS X in 2.1.2). + - Fix a "thread.arena" mallctl bug. + - Fix a thread cache stats merging bug. + +* 2.1.2 (March 2, 2011) + + Bug fixes: + - Fix "thread.{de,}allocatedp" mallctl for OS X. + - Add missing jemalloc.a to build system. + +* 2.1.1 (January 31, 2011) + + Bug fixes: + - Fix aligned huge reallocation (affected allocm()). + - Fix the ALLOCM_LG_ALIGN macro definition. + - Fix a heap dumping deadlock. + - Fix a "thread.arena" mallctl bug. + +* 2.1.0 (December 3, 2010) + + This version incorporates some optimizations that can't quite be considered + bug fixes. + + New features: + - Use Linux's mremap(2) for huge object reallocation when possible. + - Avoid locking in mallctl*() when possible. + - Add the "thread.[de]allocatedp" mallctl's. + - Convert the manual page source from roff to DocBook, and generate both roff + and HTML manuals. + + Bug fixes: + - Fix a crash due to incorrect bootstrap ordering. This only impacted + --enable-debug --enable-dss configurations. + - Fix a minor statistics bug for mallctl("swap.avail", ...). + +* 2.0.1 (October 29, 2010) + + Bug fixes: + - Fix a race condition in heap profiling that could cause undefined behavior + if "opt.prof_accum" were disabled. 
+ - Add missing mutex unlocks for some OOM error paths in the heap profiling + code. + - Fix a compilation error for non-C99 builds. + +* 2.0.0 (October 24, 2010) + + This version focuses on the experimental *allocm() API, and on improved + run-time configuration/introspection. Nonetheless, numerous performance + improvements are also included. + + New features: + - Implement the experimental {,r,s,d}allocm() API, which provides a superset + of the functionality available via malloc(), calloc(), posix_memalign(), + realloc(), malloc_usable_size(), and free(). These functions can be used to + allocate/reallocate aligned zeroed memory, ask for optional extra memory + during reallocation, prevent object movement during reallocation, etc. + - Replace JEMALLOC_OPTIONS/JEMALLOC_PROF_PREFIX with MALLOC_CONF, which is + more human-readable, and more flexible. For example: + JEMALLOC_OPTIONS=AJP + is now: + MALLOC_CONF=abort:true,fill:true,stats_print:true + - Port to Apple OS X. Sponsored by Mozilla. + - Make it possible for the application to control thread-->arena mappings via + the "thread.arena" mallctl. + - Add compile-time support for all TLS-related functionality via pthreads TSD. + This is mainly of interest for OS X, which does not support TLS, but has a + TSD implementation with similar performance. + - Override memalign() and valloc() if they are provided by the system. + - Add the "arenas.purge" mallctl, which can be used to synchronously purge all + dirty unused pages. + - Make cumulative heap profiling data optional, so that it is possible to + limit the amount of memory consumed by heap profiling data structures. + - Add per thread allocation counters that can be accessed via the + "thread.allocated" and "thread.deallocated" mallctls. + + Incompatible changes: + - Remove JEMALLOC_OPTIONS and malloc_options (see MALLOC_CONF above). + - Increase default backtrace depth from 4 to 128 for heap profiling. + - Disable interval-based profile dumps by default. + + Bug fixes: + - Remove bad assertions in fork handler functions. These assertions could + cause aborts for some combinations of configure settings. + - Fix strerror_r() usage to deal with non-standard semantics in GNU libc. + - Fix leak context reporting. This bug tended to cause the number of contexts + to be underreported (though the reported number of objects and bytes were + correct). + - Fix a realloc() bug for large in-place growing reallocation. This bug could + cause memory corruption, but it was hard to trigger. + - Fix an allocation bug for small allocations that could be triggered if + multiple threads raced to create a new run of backing pages. + - Enhance the heap profiler to trigger samples based on usable size, rather + than request size. + - Fix a heap profiling bug due to sometimes losing track of requested object + size for sampled objects. + +* 1.0.3 (August 12, 2010) + + Bug fixes: + - Fix the libunwind-based implementation of stack backtracing (used for heap + profiling). This bug could cause zero-length backtraces to be reported. + - Add a missing mutex unlock in library initialization code. If multiple + threads raced to initialize malloc, some of them could end up permanently + blocked. + +* 1.0.2 (May 11, 2010) + + Bug fixes: + - Fix junk filling of large objects, which could cause memory corruption. + - Add MAP_NORESERVE support for chunk mapping, because otherwise virtual + memory limits could cause swap file configuration to fail. Contributed by + Jordan DeLong. 
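+
+  For illustration of the MAP_NORESERVE item above: on Linux, an anonymous
+  chunk-sized mapping that is not charged against swap/overcommit accounting
+  can be requested as in the following sketch (the 4 MiB size and the error
+  handling are assumptions for illustration, not jemalloc's actual
+  chunk_mmap() code):
+
+    #include <stdio.h>
+    #include <sys/mman.h>
+
+    int main(void) {
+        size_t chunk_size = (size_t)4 << 20; /* assumed 4 MiB chunk */
+        /* MAP_NORESERVE reserves address space without a swap reservation;
+         * pages are committed lazily on first touch. */
+        void *chunk = mmap(NULL, chunk_size, PROT_READ | PROT_WRITE,
+            MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
+        if (chunk == MAP_FAILED) {
+            perror("mmap");
+            return 1;
+        }
+        munmap(chunk, chunk_size);
+        return 0;
+    }
+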
+ +* 1.0.1 (April 14, 2010) + + Bug fixes: + - Fix compilation when --enable-fill is specified. + - Fix threads-related profiling bugs that affected accuracy and caused memory + to be leaked during thread exit. + - Fix dirty page purging race conditions that could cause crashes. + - Fix crash in tcache flushing code during thread destruction. + +* 1.0.0 (April 11, 2010) + + This release focuses on speed and run-time introspection. Numerous + algorithmic improvements make this release substantially faster than its + predecessors. + + New features: + - Implement autoconf-based configuration system. + - Add mallctl*(), for the purposes of introspection and run-time + configuration. + - Make it possible for the application to manually flush a thread's cache, via + the "tcache.flush" mallctl. + - Base maximum dirty page count on proportion of active memory. + - Compute various additional run-time statistics, including per size class + statistics for large objects. + - Expose malloc_stats_print(), which can be called repeatedly by the + application. + - Simplify the malloc_message() signature to only take one string argument, + and incorporate an opaque data pointer argument for use by the application + in combination with malloc_stats_print(). + - Add support for allocation backed by one or more swap files, and allow the + application to disable over-commit if swap files are in use. + - Implement allocation profiling and leak checking. + + Removed features: + - Remove the dynamic arena rebalancing code, since thread-specific caching + reduces its utility. + + Bug fixes: + - Modify chunk allocation to work when address space layout randomization + (ASLR) is in use. + - Fix thread cleanup bugs related to TLS destruction. + - Handle 0-size allocation requests in posix_memalign(). + - Fix a chunk leak. The leaked chunks were never touched, so this impacted + virtual memory usage, but not physical memory usage. + +* linux_2008082[78]a (August 27/28, 2008) + + These snapshot releases are the simple result of incorporating Linux-specific + support into the FreeBSD malloc sources. + +-------------------------------------------------------------------------------- +vim:filetype=text:textwidth=80 diff --git a/sources/jemalloc/include-linux/jemalloc/internal/arena.h b/sources/jemalloc/include-linux/jemalloc/internal/arena.h new file mode 100755 index 00000000..119e3a59 --- /dev/null +++ b/sources/jemalloc/include-linux/jemalloc/internal/arena.h @@ -0,0 +1,1528 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS) + +/* Maximum number of regions in one run. */ +#define LG_RUN_MAXREGS (LG_PAGE - LG_TINY_MIN) +#define RUN_MAXREGS (1U << LG_RUN_MAXREGS) + +/* + * Minimum redzone size. Redzones may be larger than this if necessary to + * preserve region alignment. + */ +#define REDZONE_MINSIZE 16 + +/* + * The minimum ratio of active:dirty pages per arena is computed as: + * + * (nactive >> lg_dirty_mult) >= ndirty + * + * So, supposing that lg_dirty_mult is 3, there can be no less than 8 times as + * many active pages as dirty pages. + */ +#define LG_DIRTY_MULT_DEFAULT 3 + +typedef enum { + purge_mode_ratio = 0, + purge_mode_decay = 1, + + purge_mode_limit = 2 +} purge_mode_t; +#define PURGE_DEFAULT purge_mode_ratio +/* Default decay time in seconds. */ +#define DECAY_TIME_DEFAULT 10 +/* Number of event ticks between time checks. 
*/ +#define DECAY_NTICKS_PER_UPDATE 1000 + +typedef struct arena_runs_dirty_link_s arena_runs_dirty_link_t; +typedef struct arena_avail_links_s arena_avail_links_t; +typedef struct arena_run_s arena_run_t; +typedef struct arena_chunk_map_bits_s arena_chunk_map_bits_t; +typedef struct arena_chunk_map_misc_s arena_chunk_map_misc_t; +typedef struct arena_chunk_s arena_chunk_t; +typedef struct arena_bin_info_s arena_bin_info_t; +typedef struct arena_decay_s arena_decay_t; +typedef struct arena_bin_s arena_bin_t; +typedef struct arena_s arena_t; +typedef struct arena_tdata_s arena_tdata_t; + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +#ifdef JEMALLOC_ARENA_STRUCTS_A +struct arena_run_s { + /* Index of bin this run is associated with. */ + szind_t binind; + + /* Number of free regions in run. */ + unsigned nfree; + + /* Per region allocated/deallocated bitmap. */ + bitmap_t bitmap[BITMAP_GROUPS_MAX]; +}; + +/* Each element of the chunk map corresponds to one page within the chunk. */ +struct arena_chunk_map_bits_s { + /* + * Run address (or size) and various flags are stored together. The bit + * layout looks like (assuming 32-bit system): + * + * ???????? ???????? ???nnnnn nnndumla + * + * ? : Unallocated: Run address for first/last pages, unset for internal + * pages. + * Small: Run page offset. + * Large: Run page count for first page, unset for trailing pages. + * n : binind for small size class, BININD_INVALID for large size class. + * d : dirty? + * u : unzeroed? + * m : decommitted? + * l : large? + * a : allocated? + * + * Following are example bit patterns for the three types of runs. + * + * p : run page offset + * s : run size + * n : binind for size class; large objects set these to BININD_INVALID + * x : don't care + * - : 0 + * + : 1 + * [DUMLA] : bit set + * [dumla] : bit unset + * + * Unallocated (clean): + * ssssssss ssssssss sss+++++ +++dum-a + * xxxxxxxx xxxxxxxx xxxxxxxx xxx-Uxxx + * ssssssss ssssssss sss+++++ +++dUm-a + * + * Unallocated (dirty): + * ssssssss ssssssss sss+++++ +++D-m-a + * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx + * ssssssss ssssssss sss+++++ +++D-m-a + * + * Small: + * pppppppp pppppppp pppnnnnn nnnd---A + * pppppppp pppppppp pppnnnnn nnn----A + * pppppppp pppppppp pppnnnnn nnnd---A + * + * Large: + * ssssssss ssssssss sss+++++ +++D--LA + * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx + * -------- -------- ---+++++ +++D--LA + * + * Large (sampled, size <= LARGE_MINCLASS): + * ssssssss ssssssss sssnnnnn nnnD--LA + * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx + * -------- -------- ---+++++ +++D--LA + * + * Large (not sampled, size == LARGE_MINCLASS): + * ssssssss ssssssss sss+++++ +++D--LA + * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx + * -------- -------- ---+++++ +++D--LA + */ + size_t bits; +#define CHUNK_MAP_ALLOCATED ((size_t)0x01U) +#define CHUNK_MAP_LARGE ((size_t)0x02U) +#define CHUNK_MAP_STATE_MASK ((size_t)0x3U) + +#define CHUNK_MAP_DECOMMITTED ((size_t)0x04U) +#define CHUNK_MAP_UNZEROED ((size_t)0x08U) +#define CHUNK_MAP_DIRTY ((size_t)0x10U) +#define CHUNK_MAP_FLAGS_MASK ((size_t)0x1cU) + +#define CHUNK_MAP_BININD_SHIFT 5 +#define BININD_INVALID ((size_t)0xffU) +#define CHUNK_MAP_BININD_MASK (BININD_INVALID << CHUNK_MAP_BININD_SHIFT) +#define CHUNK_MAP_BININD_INVALID CHUNK_MAP_BININD_MASK + +#define CHUNK_MAP_RUNIND_SHIFT (CHUNK_MAP_BININD_SHIFT + 8) +#define CHUNK_MAP_SIZE_SHIFT (CHUNK_MAP_RUNIND_SHIFT - LG_PAGE) +#define CHUNK_MAP_SIZE_MASK \ + (~(CHUNK_MAP_BININD_MASK | 
CHUNK_MAP_FLAGS_MASK | CHUNK_MAP_STATE_MASK)) +}; + +struct arena_runs_dirty_link_s { + qr(arena_runs_dirty_link_t) rd_link; +}; + +/* + * Each arena_chunk_map_misc_t corresponds to one page within the chunk, just + * like arena_chunk_map_bits_t. Two separate arrays are stored within each + * chunk header in order to improve cache locality. + */ +struct arena_chunk_map_misc_s { + /* + * Linkage for run heaps. There are two disjoint uses: + * + * 1) arena_t's runs_avail heaps. + * 2) arena_run_t conceptually uses this linkage for in-use non-full + * runs, rather than directly embedding linkage. + */ + phn(arena_chunk_map_misc_t) ph_link; + + union { + /* Linkage for list of dirty runs. */ + arena_runs_dirty_link_t rd; + + /* Profile counters, used for large object runs. */ + union { + void *prof_tctx_pun; + prof_tctx_t *prof_tctx; + }; + + /* Small region run metadata. */ + arena_run_t run; + }; +}; +typedef ph(arena_chunk_map_misc_t) arena_run_heap_t; +#endif /* JEMALLOC_ARENA_STRUCTS_A */ + +#ifdef JEMALLOC_ARENA_STRUCTS_B +/* Arena chunk header. */ +struct arena_chunk_s { + /* + * A pointer to the arena that owns the chunk is stored within the node. + * This field as a whole is used by chunks_rtree to support both + * ivsalloc() and core-based debugging. + */ + extent_node_t node; + + /* + * True if memory could be backed by transparent huge pages. This is + * only directly relevant to Linux, since it is the only supported + * platform on which jemalloc interacts with explicit transparent huge + * page controls. + */ + bool hugepage; + + /* + * Map of pages within chunk that keeps track of free/large/small. The + * first map_bias entries are omitted, since the chunk header does not + * need to be tracked in the map. This omission saves a header page + * for common chunk sizes (e.g. 4 MiB). + */ + arena_chunk_map_bits_t map_bits[1]; /* Dynamically sized. */ +}; + +/* + * Read-only information associated with each element of arena_t's bins array + * is stored separately, partly to reduce memory usage (only one copy, rather + * than one per arena), but mainly to avoid false cacheline sharing. + * + * Each run has the following layout: + * + * /--------------------\ + * | pad? | + * |--------------------| + * | redzone | + * reg0_offset | region 0 | + * | redzone | + * |--------------------| \ + * | redzone | | + * | region 1 | > reg_interval + * | redzone | / + * |--------------------| + * | ... | + * | ... | + * | ... | + * |--------------------| + * | redzone | + * | region nregs-1 | + * | redzone | + * |--------------------| + * | alignment pad? | + * \--------------------/ + * + * reg_interval has at least the same minimum alignment as reg_size; this + * preserves the alignment constraint that sa2u() depends on. Alignment pad is + * either 0 or redzone_size; it is present only if needed to align reg0_offset. + */ +struct arena_bin_info_s { + /* Size of regions in a run for this bin's size class. */ + size_t reg_size; + + /* Redzone size. */ + size_t redzone_size; + + /* Interval between regions (reg_size + (redzone_size << 1)). */ + size_t reg_interval; + + /* Total size of a run for this bin's size class. */ + size_t run_size; + + /* Total number of regions in a run for this bin's size class. */ + uint32_t nregs; + + /* + * Metadata used to manipulate bitmaps for runs associated with this + * bin. + */ + bitmap_info_t bitmap_info; + + /* Offset of first region in a run for this bin's size class. 
*/
+ uint32_t reg0_offset;
+};
+
+struct arena_decay_s {
+ /*
+ * Approximate time in seconds from the creation of a set of unused
+ * dirty pages until an equivalent set of unused dirty pages is purged
+ * and/or reused.
+ */
+ ssize_t time;
+ /* time / SMOOTHSTEP_NSTEPS. */
+ nstime_t interval;
+ /*
+ * Time at which the current decay interval logically started. We do
+ * not actually advance to a new epoch until sometime after it starts
+ * because of scheduling and computation delays, and it is even possible
+ * to completely skip epochs. In all cases, during epoch advancement we
+ * merge all relevant activity into the most recently recorded epoch.
+ */
+ nstime_t epoch;
+ /* Deadline randomness generator. */
+ uint64_t jitter_state;
+ /*
+ * Deadline for current epoch. This is the sum of interval and per
+ * epoch jitter which is a uniform random variable in [0..interval).
+ * Epochs always advance by precise multiples of interval, but we
+ * randomize the deadline to reduce the likelihood of arenas purging in
+ * lockstep.
+ */
+ nstime_t deadline;
+ /*
+ * Number of dirty pages at beginning of current epoch. During epoch
+ * advancement we use the delta between arena->decay.ndirty and
+ * arena->ndirty to determine how many dirty pages, if any, were
+ * generated.
+ */
+ size_t ndirty;
+ /*
+ * Trailing log of how many unused dirty pages were generated during
+ * each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last
+ * element is the most recent epoch. Corresponding epoch times are
+ * relative to epoch.
+ */
+ size_t backlog[SMOOTHSTEP_NSTEPS];
+};
+
+struct arena_bin_s {
+ /*
+ * All operations on runcur, runs, and stats require that lock be
+ * locked. Run allocation/deallocation are protected by the arena lock,
+ * which may be acquired while holding one or more bin locks, but not
+ * vice versa.
+ */
+ malloc_mutex_t lock;
+
+ /*
+ * Current run being used to service allocations of this bin's size
+ * class.
+ */
+ arena_run_t *runcur;
+
+ /*
+ * Heap of non-full runs. This heap is used when looking for an
+ * existing run when runcur is no longer usable. We choose the
+ * non-full run that is lowest in memory; this policy tends to keep
+ * objects packed well, and it can also help reduce the number of
+ * almost-empty chunks.
+ */
+ arena_run_heap_t runs;
+
+ /* Bin statistics. */
+ malloc_bin_stats_t stats;
+};
+
+struct arena_s {
+ /* This arena's index within the arenas array. */
+ unsigned ind;
+
+ /*
+ * Number of threads currently assigned to this arena, synchronized via
+ * atomic operations. Each thread has two distinct assignments, one for
+ * application-serving allocation, and the other for internal metadata
+ * allocation. Internal metadata must not be allocated from arenas
+ * created via the arenas.extend mallctl, because the arena.<i>.reset
+ * mallctl indiscriminately discards all allocations for the affected
+ * arena.
+ *
+ * 0: Application allocation.
+ * 1: Internal metadata allocation.
+ */
+ unsigned nthreads[2];
+
+ /*
+ * There are three classes of arena operations from a locking
+ * perspective:
+ * 1) Thread assignment (modifies nthreads) is synchronized via atomics.
+ * 2) Bin-related operations are protected by bin locks.
+ * 3) Chunk- and run-related operations are protected by this mutex.
+ */
+ malloc_mutex_t lock;
+
+ arena_stats_t stats;
+ /*
+ * List of tcaches for extant threads associated with this arena.
+ * Stats from these are merged incrementally, and at exit if
+ * opt_stats_print is enabled.
+ */ + ql_head(tcache_t) tcache_ql; + + uint64_t prof_accumbytes; + + /* + * PRNG state for cache index randomization of large allocation base + * pointers. + */ + size_t offset_state; + + dss_prec_t dss_prec; + + /* Extant arena chunks. */ + ql_head(extent_node_t) achunks; + + /* Extent serial number generator state. */ + size_t extent_sn_next; + + /* + * In order to avoid rapid chunk allocation/deallocation when an arena + * oscillates right on the cusp of needing a new chunk, cache the most + * recently freed chunk. The spare is left in the arena's chunk trees + * until it is deleted. + * + * There is one spare chunk per arena, rather than one spare total, in + * order to avoid interactions between multiple threads that could make + * a single spare inadequate. + */ + arena_chunk_t *spare; + + /* Minimum ratio (log base 2) of nactive:ndirty. */ + ssize_t lg_dirty_mult; + + /* True if a thread is currently executing arena_purge_to_limit(). */ + bool purging; + + /* Number of pages in active runs and huge regions. */ + size_t nactive; + + /* + * Current count of pages within unused runs that are potentially + * dirty, and for which madvise(... MADV_DONTNEED) has not been called. + * By tracking this, we can institute a limit on how much dirty unused + * memory is mapped for each arena. + */ + size_t ndirty; + + /* + * Unused dirty memory this arena manages. Dirty memory is conceptually + * tracked as an arbitrarily interleaved LRU of dirty runs and cached + * chunks, but the list linkage is actually semi-duplicated in order to + * avoid extra arena_chunk_map_misc_t space overhead. + * + * LRU-----------------------------------------------------------MRU + * + * /-- arena ---\ + * | | + * | | + * |------------| /- chunk -\ + * ...->|chunks_cache|<--------------------------->| /----\ |<--... + * |------------| | |node| | + * | | | | | | + * | | /- run -\ /- run -\ | | | | + * | | | | | | | | | | + * | | | | | | | | | | + * |------------| |-------| |-------| | |----| | + * ...->|runs_dirty |<-->|rd |<-->|rd |<---->|rd |<----... + * |------------| |-------| |-------| | |----| | + * | | | | | | | | | | + * | | | | | | | \----/ | + * | | \-------/ \-------/ | | + * | | | | + * | | | | + * \------------/ \---------/ + */ + arena_runs_dirty_link_t runs_dirty; + extent_node_t chunks_cache; + + /* Decay-based purging state. */ + arena_decay_t decay; + + /* Extant huge allocations. */ + ql_head(extent_node_t) huge; + /* Synchronizes all huge allocation/update/deallocation. */ + malloc_mutex_t huge_mtx; + + /* + * Trees of chunks that were previously allocated (trees differ only in + * node ordering). These are used when allocating chunks, in an attempt + * to re-use address space. Depending on function, different tree + * orderings are needed, which is why there are two trees with the same + * contents. + */ + extent_tree_t chunks_szsnad_cached; + extent_tree_t chunks_ad_cached; + extent_tree_t chunks_szsnad_retained; + extent_tree_t chunks_ad_retained; + + malloc_mutex_t chunks_mtx; + /* Cache of nodes that were allocated via base_alloc(). */ + ql_head(extent_node_t) node_cache; + malloc_mutex_t node_cache_mtx; + + /* User-configurable chunk hook functions. */ + chunk_hooks_t chunk_hooks; + + /* bins is used to store trees of free regions. */ + arena_bin_t bins[NBINS]; + + /* + * Size-segregated address-ordered heaps of this arena's available runs, + * used for first-best-fit run allocation. Runs are quantized, i.e. 
+ * they reside in the last heap which corresponds to a size class less + * than or equal to the run size. + */ + arena_run_heap_t runs_avail[NPSIZES]; +}; + +/* Used in conjunction with tsd for fast arena-related context lookup. */ +struct arena_tdata_s { + ticker_t decay_ticker; +}; +#endif /* JEMALLOC_ARENA_STRUCTS_B */ + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +static const size_t large_pad = +#ifdef JEMALLOC_CACHE_OBLIVIOUS + PAGE +#else + 0 +#endif + ; + +extern bool opt_thp; +extern purge_mode_t opt_purge; +extern const char *purge_mode_names[]; +extern ssize_t opt_lg_dirty_mult; +extern ssize_t opt_decay_time; + +extern arena_bin_info_t arena_bin_info[NBINS]; + +extern size_t map_bias; /* Number of arena chunk header pages. */ +extern size_t map_misc_offset; +extern size_t arena_maxrun; /* Max run size for arenas. */ +extern size_t large_maxclass; /* Max large size class. */ +extern unsigned nlclasses; /* Number of large size classes. */ +extern unsigned nhclasses; /* Number of huge size classes. */ + +#ifdef JEMALLOC_JET +typedef size_t (run_quantize_t)(size_t); +extern run_quantize_t *run_quantize_floor; +extern run_quantize_t *run_quantize_ceil; +#endif +void arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node, + bool cache); +void arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node, + bool cache); +extent_node_t *arena_node_alloc(tsdn_t *tsdn, arena_t *arena); +void arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node); +void *arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize, + size_t alignment, size_t *sn, bool *zero); +void arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, + size_t usize, size_t sn); +void arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena, + void *chunk, size_t oldsize, size_t usize); +void arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, + void *chunk, size_t oldsize, size_t usize, size_t sn); +bool arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, + void *chunk, size_t oldsize, size_t usize, bool *zero); +ssize_t arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena); +bool arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena, + ssize_t lg_dirty_mult); +ssize_t arena_decay_time_get(tsdn_t *tsdn, arena_t *arena); +bool arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time); +void arena_purge(tsdn_t *tsdn, arena_t *arena, bool all); +void arena_maybe_purge(tsdn_t *tsdn, arena_t *arena); +void arena_reset(tsd_t *tsd, arena_t *arena); +void arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, + tcache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes); +void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, + bool zero); +#ifdef JEMALLOC_JET +typedef void (arena_redzone_corruption_t)(void *, size_t, bool, size_t, + uint8_t); +extern arena_redzone_corruption_t *arena_redzone_corruption; +typedef void (arena_dalloc_junk_small_t)(void *, arena_bin_info_t *); +extern arena_dalloc_junk_small_t *arena_dalloc_junk_small; +#else +void arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info); +#endif +void arena_quarantine_junk_small(void *ptr, size_t usize); +void *arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t ind, + bool zero); +void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, + szind_t ind, bool zero); +void *arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, + size_t alignment, 
bool zero, tcache_t *tcache); +void arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size); +void arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, + arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm); +void arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, + void *ptr, size_t pageind, arena_chunk_map_bits_t *bitselm); +void arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, + void *ptr, size_t pageind); +#ifdef JEMALLOC_JET +typedef void (arena_dalloc_junk_large_t)(void *, size_t); +extern arena_dalloc_junk_large_t *arena_dalloc_junk_large; +#else +void arena_dalloc_junk_large(void *ptr, size_t usize); +#endif +void arena_dalloc_large_junked_locked(tsdn_t *tsdn, arena_t *arena, + arena_chunk_t *chunk, void *ptr); +void arena_dalloc_large(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, + void *ptr); +#ifdef JEMALLOC_JET +typedef void (arena_ralloc_junk_large_t)(void *, size_t, size_t); +extern arena_ralloc_junk_large_t *arena_ralloc_junk_large; +#endif +bool arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, + size_t size, size_t extra, bool zero); +void *arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, + size_t size, size_t alignment, bool zero, tcache_t *tcache); +dss_prec_t arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena); +bool arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec); +ssize_t arena_lg_dirty_mult_default_get(void); +bool arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult); +ssize_t arena_decay_time_default_get(void); +bool arena_decay_time_default_set(ssize_t decay_time); +void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, + unsigned *nthreads, const char **dss, ssize_t *lg_dirty_mult, + ssize_t *decay_time, size_t *nactive, size_t *ndirty); +void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, + const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time, + size_t *nactive, size_t *ndirty, arena_stats_t *astats, + malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats, + malloc_huge_stats_t *hstats); +unsigned arena_nthreads_get(arena_t *arena, bool internal); +void arena_nthreads_inc(arena_t *arena, bool internal); +void arena_nthreads_dec(arena_t *arena, bool internal); +size_t arena_extent_sn_next(arena_t *arena); +arena_t *arena_new(tsdn_t *tsdn, unsigned ind); +void arena_boot(void); +void arena_prefork0(tsdn_t *tsdn, arena_t *arena); +void arena_prefork1(tsdn_t *tsdn, arena_t *arena); +void arena_prefork2(tsdn_t *tsdn, arena_t *arena); +void arena_prefork3(tsdn_t *tsdn, arena_t *arena); +void arena_postfork_parent(tsdn_t *tsdn, arena_t *arena); +void arena_postfork_child(tsdn_t *tsdn, arena_t *arena); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +arena_chunk_map_bits_t *arena_bitselm_get_mutable(arena_chunk_t *chunk, + size_t pageind); +const arena_chunk_map_bits_t *arena_bitselm_get_const( + const arena_chunk_t *chunk, size_t pageind); +arena_chunk_map_misc_t *arena_miscelm_get_mutable(arena_chunk_t *chunk, + size_t pageind); +const arena_chunk_map_misc_t *arena_miscelm_get_const( + const arena_chunk_t *chunk, size_t pageind); +size_t arena_miscelm_to_pageind(const arena_chunk_map_misc_t *miscelm); +void *arena_miscelm_to_rpages(const arena_chunk_map_misc_t *miscelm); +arena_chunk_map_misc_t *arena_rd_to_miscelm(arena_runs_dirty_link_t *rd); +arena_chunk_map_misc_t 
*arena_run_to_miscelm(arena_run_t *run); +size_t *arena_mapbitsp_get_mutable(arena_chunk_t *chunk, size_t pageind); +const size_t *arena_mapbitsp_get_const(const arena_chunk_t *chunk, + size_t pageind); +size_t arena_mapbitsp_read(const size_t *mapbitsp); +size_t arena_mapbits_get(const arena_chunk_t *chunk, size_t pageind); +size_t arena_mapbits_size_decode(size_t mapbits); +size_t arena_mapbits_unallocated_size_get(const arena_chunk_t *chunk, + size_t pageind); +size_t arena_mapbits_large_size_get(const arena_chunk_t *chunk, + size_t pageind); +size_t arena_mapbits_small_runind_get(const arena_chunk_t *chunk, + size_t pageind); +szind_t arena_mapbits_binind_get(const arena_chunk_t *chunk, size_t pageind); +size_t arena_mapbits_dirty_get(const arena_chunk_t *chunk, size_t pageind); +size_t arena_mapbits_unzeroed_get(const arena_chunk_t *chunk, size_t pageind); +size_t arena_mapbits_decommitted_get(const arena_chunk_t *chunk, + size_t pageind); +size_t arena_mapbits_large_get(const arena_chunk_t *chunk, size_t pageind); +size_t arena_mapbits_allocated_get(const arena_chunk_t *chunk, size_t pageind); +void arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits); +size_t arena_mapbits_size_encode(size_t size); +void arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, + size_t size, size_t flags); +void arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind, + size_t size); +void arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind, + size_t flags); +void arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, + size_t size, size_t flags); +void arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind, + szind_t binind); +void arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, + size_t runind, szind_t binind, size_t flags); +void arena_metadata_allocated_add(arena_t *arena, size_t size); +void arena_metadata_allocated_sub(arena_t *arena, size_t size); +size_t arena_metadata_allocated_get(arena_t *arena); +bool arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes); +bool arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes); +bool arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes); +szind_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits); +szind_t arena_bin_index(arena_t *arena, arena_bin_t *bin); +size_t arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, + const void *ptr); +prof_tctx_t *arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr); +void arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize, + prof_tctx_t *tctx); +void arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize, + const void *old_ptr, prof_tctx_t *old_tctx); +void arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks); +void arena_decay_tick(tsdn_t *tsdn, arena_t *arena); +void *arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, + bool zero, tcache_t *tcache, bool slow_path); +arena_t *arena_aalloc(const void *ptr); +size_t arena_salloc(tsdn_t *tsdn, const void *ptr, bool demote); +void arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path); +void arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache, + bool slow_path); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_)) +# ifdef JEMALLOC_ARENA_INLINE_A +JEMALLOC_ALWAYS_INLINE arena_chunk_map_bits_t * +arena_bitselm_get_mutable(arena_chunk_t *chunk, size_t pageind) +{ + + assert(pageind >= map_bias); + assert(pageind < chunk_npages); + + return 
(&chunk->map_bits[pageind-map_bias]); +} + +JEMALLOC_ALWAYS_INLINE const arena_chunk_map_bits_t * +arena_bitselm_get_const(const arena_chunk_t *chunk, size_t pageind) +{ + + return (arena_bitselm_get_mutable((arena_chunk_t *)chunk, pageind)); +} + +JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t * +arena_miscelm_get_mutable(arena_chunk_t *chunk, size_t pageind) +{ + + assert(pageind >= map_bias); + assert(pageind < chunk_npages); + + return ((arena_chunk_map_misc_t *)((uintptr_t)chunk + + (uintptr_t)map_misc_offset) + pageind-map_bias); +} + +JEMALLOC_ALWAYS_INLINE const arena_chunk_map_misc_t * +arena_miscelm_get_const(const arena_chunk_t *chunk, size_t pageind) +{ + + return (arena_miscelm_get_mutable((arena_chunk_t *)chunk, pageind)); +} + +JEMALLOC_ALWAYS_INLINE size_t +arena_miscelm_to_pageind(const arena_chunk_map_misc_t *miscelm) +{ + arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm); + size_t pageind = ((uintptr_t)miscelm - ((uintptr_t)chunk + + map_misc_offset)) / sizeof(arena_chunk_map_misc_t) + map_bias; + + assert(pageind >= map_bias); + assert(pageind < chunk_npages); + + return (pageind); +} + +JEMALLOC_ALWAYS_INLINE void * +arena_miscelm_to_rpages(const arena_chunk_map_misc_t *miscelm) +{ + arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm); + size_t pageind = arena_miscelm_to_pageind(miscelm); + + return ((void *)((uintptr_t)chunk + (pageind << LG_PAGE))); +} + +JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t * +arena_rd_to_miscelm(arena_runs_dirty_link_t *rd) +{ + arena_chunk_map_misc_t *miscelm = (arena_chunk_map_misc_t + *)((uintptr_t)rd - offsetof(arena_chunk_map_misc_t, rd)); + + assert(arena_miscelm_to_pageind(miscelm) >= map_bias); + assert(arena_miscelm_to_pageind(miscelm) < chunk_npages); + + return (miscelm); +} + +JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t * +arena_run_to_miscelm(arena_run_t *run) +{ + arena_chunk_map_misc_t *miscelm = (arena_chunk_map_misc_t + *)((uintptr_t)run - offsetof(arena_chunk_map_misc_t, run)); + + assert(arena_miscelm_to_pageind(miscelm) >= map_bias); + assert(arena_miscelm_to_pageind(miscelm) < chunk_npages); + + return (miscelm); +} + +JEMALLOC_ALWAYS_INLINE size_t * +arena_mapbitsp_get_mutable(arena_chunk_t *chunk, size_t pageind) +{ + + return (&arena_bitselm_get_mutable(chunk, pageind)->bits); +} + +JEMALLOC_ALWAYS_INLINE const size_t * +arena_mapbitsp_get_const(const arena_chunk_t *chunk, size_t pageind) +{ + + return (arena_mapbitsp_get_mutable((arena_chunk_t *)chunk, pageind)); +} + +JEMALLOC_ALWAYS_INLINE size_t +arena_mapbitsp_read(const size_t *mapbitsp) +{ + + return (*mapbitsp); +} + +JEMALLOC_ALWAYS_INLINE size_t +arena_mapbits_get(const arena_chunk_t *chunk, size_t pageind) +{ + + return (arena_mapbitsp_read(arena_mapbitsp_get_const(chunk, pageind))); +} + +JEMALLOC_ALWAYS_INLINE size_t +arena_mapbits_size_decode(size_t mapbits) +{ + size_t size; + +#if CHUNK_MAP_SIZE_SHIFT > 0 + size = (mapbits & CHUNK_MAP_SIZE_MASK) >> CHUNK_MAP_SIZE_SHIFT; +#elif CHUNK_MAP_SIZE_SHIFT == 0 + size = mapbits & CHUNK_MAP_SIZE_MASK; +#else + size = (mapbits & CHUNK_MAP_SIZE_MASK) << -CHUNK_MAP_SIZE_SHIFT; +#endif + + return (size); +} + +JEMALLOC_ALWAYS_INLINE size_t +arena_mapbits_unallocated_size_get(const arena_chunk_t *chunk, size_t pageind) +{ + size_t mapbits; + + mapbits = arena_mapbits_get(chunk, pageind); + assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0); + return (arena_mapbits_size_decode(mapbits)); +} + +JEMALLOC_ALWAYS_INLINE size_t +arena_mapbits_large_size_get(const arena_chunk_t 
*chunk, size_t pageind) +{ + size_t mapbits; + + mapbits = arena_mapbits_get(chunk, pageind); + assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == + (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)); + return (arena_mapbits_size_decode(mapbits)); +} + +JEMALLOC_ALWAYS_INLINE size_t +arena_mapbits_small_runind_get(const arena_chunk_t *chunk, size_t pageind) +{ + size_t mapbits; + + mapbits = arena_mapbits_get(chunk, pageind); + assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == + CHUNK_MAP_ALLOCATED); + return (mapbits >> CHUNK_MAP_RUNIND_SHIFT); +} + +JEMALLOC_ALWAYS_INLINE szind_t +arena_mapbits_binind_get(const arena_chunk_t *chunk, size_t pageind) +{ + size_t mapbits; + szind_t binind; + + mapbits = arena_mapbits_get(chunk, pageind); + binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT; + assert(binind < NBINS || binind == BININD_INVALID); + return (binind); +} + +JEMALLOC_ALWAYS_INLINE size_t +arena_mapbits_dirty_get(const arena_chunk_t *chunk, size_t pageind) +{ + size_t mapbits; + + mapbits = arena_mapbits_get(chunk, pageind); + assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits & + (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0); + return (mapbits & CHUNK_MAP_DIRTY); +} + +JEMALLOC_ALWAYS_INLINE size_t +arena_mapbits_unzeroed_get(const arena_chunk_t *chunk, size_t pageind) +{ + size_t mapbits; + + mapbits = arena_mapbits_get(chunk, pageind); + assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits & + (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0); + return (mapbits & CHUNK_MAP_UNZEROED); +} + +JEMALLOC_ALWAYS_INLINE size_t +arena_mapbits_decommitted_get(const arena_chunk_t *chunk, size_t pageind) +{ + size_t mapbits; + + mapbits = arena_mapbits_get(chunk, pageind); + assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits & + (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0); + return (mapbits & CHUNK_MAP_DECOMMITTED); +} + +JEMALLOC_ALWAYS_INLINE size_t +arena_mapbits_large_get(const arena_chunk_t *chunk, size_t pageind) +{ + size_t mapbits; + + mapbits = arena_mapbits_get(chunk, pageind); + return (mapbits & CHUNK_MAP_LARGE); +} + +JEMALLOC_ALWAYS_INLINE size_t +arena_mapbits_allocated_get(const arena_chunk_t *chunk, size_t pageind) +{ + size_t mapbits; + + mapbits = arena_mapbits_get(chunk, pageind); + return (mapbits & CHUNK_MAP_ALLOCATED); +} + +JEMALLOC_ALWAYS_INLINE void +arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits) +{ + + *mapbitsp = mapbits; +} + +JEMALLOC_ALWAYS_INLINE size_t +arena_mapbits_size_encode(size_t size) +{ + size_t mapbits; + +#if CHUNK_MAP_SIZE_SHIFT > 0 + mapbits = size << CHUNK_MAP_SIZE_SHIFT; +#elif CHUNK_MAP_SIZE_SHIFT == 0 + mapbits = size; +#else + mapbits = size >> -CHUNK_MAP_SIZE_SHIFT; +#endif + + assert((mapbits & ~CHUNK_MAP_SIZE_MASK) == 0); + return (mapbits); +} + +JEMALLOC_ALWAYS_INLINE void +arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size, + size_t flags) +{ + size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind); + + assert((size & PAGE_MASK) == 0); + assert((flags & CHUNK_MAP_FLAGS_MASK) == flags); + assert((flags & CHUNK_MAP_DECOMMITTED) == 0 || (flags & + (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0); + arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) | + CHUNK_MAP_BININD_INVALID | flags); +} + +JEMALLOC_ALWAYS_INLINE void +arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind, + size_t size) +{ + size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind); + size_t mapbits = arena_mapbitsp_read(mapbitsp); + + assert((size & PAGE_MASK) 
== 0); + assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0); + arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) | + (mapbits & ~CHUNK_MAP_SIZE_MASK)); +} + +JEMALLOC_ALWAYS_INLINE void +arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind, size_t flags) +{ + size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind); + + assert((flags & CHUNK_MAP_UNZEROED) == flags); + arena_mapbitsp_write(mapbitsp, flags); +} + +JEMALLOC_ALWAYS_INLINE void +arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size, + size_t flags) +{ + size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind); + + assert((size & PAGE_MASK) == 0); + assert((flags & CHUNK_MAP_FLAGS_MASK) == flags); + assert((flags & CHUNK_MAP_DECOMMITTED) == 0 || (flags & + (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0); + arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) | + CHUNK_MAP_BININD_INVALID | flags | CHUNK_MAP_LARGE | + CHUNK_MAP_ALLOCATED); +} + +JEMALLOC_ALWAYS_INLINE void +arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind, + szind_t binind) +{ + size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind); + size_t mapbits = arena_mapbitsp_read(mapbitsp); + + assert(binind <= BININD_INVALID); + assert(arena_mapbits_large_size_get(chunk, pageind) == LARGE_MINCLASS + + large_pad); + arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_BININD_MASK) | + (binind << CHUNK_MAP_BININD_SHIFT)); +} + +JEMALLOC_ALWAYS_INLINE void +arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind, + szind_t binind, size_t flags) +{ + size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind); + + assert(binind < BININD_INVALID); + assert(pageind - runind >= map_bias); + assert((flags & CHUNK_MAP_UNZEROED) == flags); + arena_mapbitsp_write(mapbitsp, (runind << CHUNK_MAP_RUNIND_SHIFT) | + (binind << CHUNK_MAP_BININD_SHIFT) | flags | CHUNK_MAP_ALLOCATED); +} + +JEMALLOC_INLINE void +arena_metadata_allocated_add(arena_t *arena, size_t size) +{ + + atomic_add_z(&arena->stats.metadata_allocated, size); +} + +JEMALLOC_INLINE void +arena_metadata_allocated_sub(arena_t *arena, size_t size) +{ + + atomic_sub_z(&arena->stats.metadata_allocated, size); +} + +JEMALLOC_INLINE size_t +arena_metadata_allocated_get(arena_t *arena) +{ + + return (atomic_read_z(&arena->stats.metadata_allocated)); +} + +JEMALLOC_INLINE bool +arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes) +{ + + cassert(config_prof); + assert(prof_interval != 0); + + arena->prof_accumbytes += accumbytes; + if (arena->prof_accumbytes >= prof_interval) { + arena->prof_accumbytes -= prof_interval; + return (true); + } + return (false); +} + +JEMALLOC_INLINE bool +arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes) +{ + + cassert(config_prof); + + if (likely(prof_interval == 0)) + return (false); + return (arena_prof_accum_impl(arena, accumbytes)); +} + +JEMALLOC_INLINE bool +arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes) +{ + + cassert(config_prof); + + if (likely(prof_interval == 0)) + return (false); + + { + bool ret; + + malloc_mutex_lock(tsdn, &arena->lock); + ret = arena_prof_accum_impl(arena, accumbytes); + malloc_mutex_unlock(tsdn, &arena->lock); + return (ret); + } +} + +JEMALLOC_ALWAYS_INLINE szind_t +arena_ptr_small_binind_get(const void *ptr, size_t mapbits) +{ + szind_t binind; + + binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT; + + if (config_debug) { + arena_chunk_t *chunk; + arena_t *arena; + size_t pageind; + size_t 
actual_mapbits; + size_t rpages_ind; + const arena_run_t *run; + arena_bin_t *bin; + szind_t run_binind, actual_binind; + arena_bin_info_t *bin_info; + const arena_chunk_map_misc_t *miscelm; + const void *rpages; + + assert(binind != BININD_INVALID); + assert(binind < NBINS); + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + arena = extent_node_arena_get(&chunk->node); + pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; + actual_mapbits = arena_mapbits_get(chunk, pageind); + assert(mapbits == actual_mapbits); + assert(arena_mapbits_large_get(chunk, pageind) == 0); + assert(arena_mapbits_allocated_get(chunk, pageind) != 0); + rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, + pageind); + miscelm = arena_miscelm_get_const(chunk, rpages_ind); + run = &miscelm->run; + run_binind = run->binind; + bin = &arena->bins[run_binind]; + actual_binind = (szind_t)(bin - arena->bins); + assert(run_binind == actual_binind); + bin_info = &arena_bin_info[actual_binind]; + rpages = arena_miscelm_to_rpages(miscelm); + assert(((uintptr_t)ptr - ((uintptr_t)rpages + + (uintptr_t)bin_info->reg0_offset)) % bin_info->reg_interval + == 0); + } + + return (binind); +} +# endif /* JEMALLOC_ARENA_INLINE_A */ + +# ifdef JEMALLOC_ARENA_INLINE_B +JEMALLOC_INLINE szind_t +arena_bin_index(arena_t *arena, arena_bin_t *bin) +{ + szind_t binind = (szind_t)(bin - arena->bins); + assert(binind < NBINS); + return (binind); +} + +JEMALLOC_INLINE size_t +arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr) +{ + size_t diff, interval, shift, regind; + arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); + void *rpages = arena_miscelm_to_rpages(miscelm); + + /* + * Freeing a pointer lower than region zero can cause assertion + * failure. + */ + assert((uintptr_t)ptr >= (uintptr_t)rpages + + (uintptr_t)bin_info->reg0_offset); + + /* + * Avoid doing division with a variable divisor if possible. Using + * actual division here can reduce allocator throughput by over 20%! + */ + diff = (size_t)((uintptr_t)ptr - (uintptr_t)rpages - + bin_info->reg0_offset); + + /* Rescale (factor powers of 2 out of the numerator and denominator). */ + interval = bin_info->reg_interval; + shift = ffs_zu(interval) - 1; + diff >>= shift; + interval >>= shift; + + if (interval == 1) { + /* The divisor was a power of 2. */ + regind = diff; + } else { + /* + * To divide by a number D that is not a power of two we + * multiply by (2^21 / D) and then right shift by 21 positions. + * + * X / D + * + * becomes + * + * (X * interval_invs[D - 3]) >> SIZE_INV_SHIFT + * + * We can omit the first three elements, because we never + * divide by 0, and 1 and 2 are both powers of two, which are + * handled above. 
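+ *
+ * A quick worked check, with illustrative numbers and the 2^21
+ * scale used above: to compute 42 / 7, precompute
+ * (2^21 / 7) + 1 == 299594; then (42 * 299594) >> 21 ==
+ * 12582948 >> 21 == 6 == 42 / 7.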
+ */ +#define SIZE_INV_SHIFT ((sizeof(size_t) << 3) - LG_RUN_MAXREGS) +#define SIZE_INV(s) (((ZU(1) << SIZE_INV_SHIFT) / (s)) + 1) + static const size_t interval_invs[] = { + SIZE_INV(3), + SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7), + SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11), + SIZE_INV(12), SIZE_INV(13), SIZE_INV(14), SIZE_INV(15), + SIZE_INV(16), SIZE_INV(17), SIZE_INV(18), SIZE_INV(19), + SIZE_INV(20), SIZE_INV(21), SIZE_INV(22), SIZE_INV(23), + SIZE_INV(24), SIZE_INV(25), SIZE_INV(26), SIZE_INV(27), + SIZE_INV(28), SIZE_INV(29), SIZE_INV(30), SIZE_INV(31) + }; + + if (likely(interval <= ((sizeof(interval_invs) / sizeof(size_t)) + + 2))) { + regind = (diff * interval_invs[interval - 3]) >> + SIZE_INV_SHIFT; + } else + regind = diff / interval; +#undef SIZE_INV +#undef SIZE_INV_SHIFT + } + assert(diff == regind * interval); + assert(regind < bin_info->nregs); + + return (regind); +} + +JEMALLOC_INLINE prof_tctx_t * +arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr) +{ + prof_tctx_t *ret; + arena_chunk_t *chunk; + + cassert(config_prof); + assert(ptr != NULL); + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + if (likely(chunk != ptr)) { + size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; + size_t mapbits = arena_mapbits_get(chunk, pageind); + assert((mapbits & CHUNK_MAP_ALLOCATED) != 0); + if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) + ret = (prof_tctx_t *)(uintptr_t)1U; + else { + arena_chunk_map_misc_t *elm = + arena_miscelm_get_mutable(chunk, pageind); + ret = atomic_read_p(&elm->prof_tctx_pun); + } + } else + ret = huge_prof_tctx_get(tsdn, ptr); + + return (ret); +} + +JEMALLOC_INLINE void +arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize, + prof_tctx_t *tctx) +{ + arena_chunk_t *chunk; + + cassert(config_prof); + assert(ptr != NULL); + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + if (likely(chunk != ptr)) { + size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; + + assert(arena_mapbits_allocated_get(chunk, pageind) != 0); + + if (unlikely(usize > SMALL_MAXCLASS || (uintptr_t)tctx > + (uintptr_t)1U)) { + arena_chunk_map_misc_t *elm; + + assert(arena_mapbits_large_get(chunk, pageind) != 0); + + elm = arena_miscelm_get_mutable(chunk, pageind); + atomic_write_p(&elm->prof_tctx_pun, tctx); + } else { + /* + * tctx must always be initialized for large runs. + * Assert that the surrounding conditional logic is + * equivalent to checking whether ptr refers to a large + * run. 
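+ * (When the allocation is small and unsampled there is nothing to
+ * write: small runs share the sentinel (prof_tctx_t *)1U instead of
+ * storing a real tctx.)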
+ */ + assert(arena_mapbits_large_get(chunk, pageind) == 0); + } + } else + huge_prof_tctx_set(tsdn, ptr, tctx); +} + +JEMALLOC_INLINE void +arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize, + const void *old_ptr, prof_tctx_t *old_tctx) +{ + + cassert(config_prof); + assert(ptr != NULL); + + if (unlikely(usize > SMALL_MAXCLASS || (ptr == old_ptr && + (uintptr_t)old_tctx > (uintptr_t)1U))) { + arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + if (likely(chunk != ptr)) { + size_t pageind; + arena_chunk_map_misc_t *elm; + + pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> + LG_PAGE; + assert(arena_mapbits_allocated_get(chunk, pageind) != + 0); + assert(arena_mapbits_large_get(chunk, pageind) != 0); + + elm = arena_miscelm_get_mutable(chunk, pageind); + atomic_write_p(&elm->prof_tctx_pun, + (prof_tctx_t *)(uintptr_t)1U); + } else + huge_prof_tctx_reset(tsdn, ptr); + } +} + +JEMALLOC_ALWAYS_INLINE void +arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks) +{ + tsd_t *tsd; + ticker_t *decay_ticker; + + if (unlikely(tsdn_null(tsdn))) + return; + tsd = tsdn_tsd(tsdn); + decay_ticker = decay_ticker_get(tsd, arena->ind); + if (unlikely(decay_ticker == NULL)) + return; + if (unlikely(ticker_ticks(decay_ticker, nticks))) + arena_purge(tsdn, arena, false); +} + +JEMALLOC_ALWAYS_INLINE void +arena_decay_tick(tsdn_t *tsdn, arena_t *arena) +{ + + arena_decay_ticks(tsdn, arena, 1); +} + +JEMALLOC_ALWAYS_INLINE void * +arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero, + tcache_t *tcache, bool slow_path) +{ + + assert(!tsdn_null(tsdn) || tcache == NULL); + assert(size != 0); + + if (likely(tcache != NULL)) { + if (likely(size <= SMALL_MAXCLASS)) { + return (tcache_alloc_small(tsdn_tsd(tsdn), arena, + tcache, size, ind, zero, slow_path)); + } + if (likely(size <= tcache_maxclass)) { + return (tcache_alloc_large(tsdn_tsd(tsdn), arena, + tcache, size, ind, zero, slow_path)); + } + /* (size > tcache_maxclass) case falls through. */ + assert(size > tcache_maxclass); + } + + return (arena_malloc_hard(tsdn, arena, size, ind, zero)); +} + +JEMALLOC_ALWAYS_INLINE arena_t * +arena_aalloc(const void *ptr) +{ + arena_chunk_t *chunk; + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + if (likely(chunk != ptr)) + return (extent_node_arena_get(&chunk->node)); + else + return (huge_aalloc(ptr)); +} + +/* Return the size of the allocation pointed to by ptr. */ +JEMALLOC_ALWAYS_INLINE size_t +arena_salloc(tsdn_t *tsdn, const void *ptr, bool demote) +{ + size_t ret; + arena_chunk_t *chunk; + size_t pageind; + szind_t binind; + + assert(ptr != NULL); + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + if (likely(chunk != ptr)) { + pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; + assert(arena_mapbits_allocated_get(chunk, pageind) != 0); + binind = arena_mapbits_binind_get(chunk, pageind); + if (unlikely(binind == BININD_INVALID || (config_prof && !demote + && arena_mapbits_large_get(chunk, pageind) != 0))) { + /* + * Large allocation. In the common case (demote), and + * as this is an inline function, most callers will only + * end up looking at binind to determine that ptr is a + * small allocation. 
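+ * The mapbits size field includes large_pad, hence the subtraction
+ * below to recover the usable size.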
+ */ + assert(config_cache_oblivious || ((uintptr_t)ptr & + PAGE_MASK) == 0); + ret = arena_mapbits_large_size_get(chunk, pageind) - + large_pad; + assert(ret != 0); + assert(pageind + ((ret+large_pad)>>LG_PAGE) <= + chunk_npages); + assert(arena_mapbits_dirty_get(chunk, pageind) == + arena_mapbits_dirty_get(chunk, + pageind+((ret+large_pad)>>LG_PAGE)-1)); + } else { + /* + * Small allocation (possibly promoted to a large + * object). + */ + assert(arena_mapbits_large_get(chunk, pageind) != 0 || + arena_ptr_small_binind_get(ptr, + arena_mapbits_get(chunk, pageind)) == binind); + ret = index2size(binind); + } + } else + ret = huge_salloc(tsdn, ptr); + + return (ret); +} + +JEMALLOC_ALWAYS_INLINE void +arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path) +{ + arena_chunk_t *chunk; + size_t pageind, mapbits; + + assert(!tsdn_null(tsdn) || tcache == NULL); + assert(ptr != NULL); + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + if (likely(chunk != ptr)) { + pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; + mapbits = arena_mapbits_get(chunk, pageind); + assert(arena_mapbits_allocated_get(chunk, pageind) != 0); + if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) { + /* Small allocation. */ + if (likely(tcache != NULL)) { + szind_t binind = arena_ptr_small_binind_get(ptr, + mapbits); + tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, + binind, slow_path); + } else { + arena_dalloc_small(tsdn, + extent_node_arena_get(&chunk->node), chunk, + ptr, pageind); + } + } else { + size_t size = arena_mapbits_large_size_get(chunk, + pageind); + + assert(config_cache_oblivious || ((uintptr_t)ptr & + PAGE_MASK) == 0); + + if (likely(tcache != NULL) && size - large_pad <= + tcache_maxclass) { + tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, + size - large_pad, slow_path); + } else { + arena_dalloc_large(tsdn, + extent_node_arena_get(&chunk->node), chunk, + ptr); + } + } + } else + huge_dalloc(tsdn, ptr); +} + +JEMALLOC_ALWAYS_INLINE void +arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache, + bool slow_path) +{ + arena_chunk_t *chunk; + + assert(!tsdn_null(tsdn) || tcache == NULL); + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + if (likely(chunk != ptr)) { + if (config_prof && opt_prof) { + size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> + LG_PAGE; + assert(arena_mapbits_allocated_get(chunk, pageind) != + 0); + if (arena_mapbits_large_get(chunk, pageind) != 0) { + /* + * Make sure to use promoted size, not request + * size. + */ + size = arena_mapbits_large_size_get(chunk, + pageind) - large_pad; + } + } + assert(s2u(size) == s2u(arena_salloc(tsdn, ptr, false))); + + if (likely(size <= SMALL_MAXCLASS)) { + /* Small allocation. 
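+ * Reaching here implies a genuinely small run: had a sampled
+ * allocation been promoted to large, the prof branch above would
+ * have rewritten size past SMALL_MAXCLASS.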
*/ + if (likely(tcache != NULL)) { + szind_t binind = size2index(size); + tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, + binind, slow_path); + } else { + size_t pageind = ((uintptr_t)ptr - + (uintptr_t)chunk) >> LG_PAGE; + arena_dalloc_small(tsdn, + extent_node_arena_get(&chunk->node), chunk, + ptr, pageind); + } + } else { + assert(config_cache_oblivious || ((uintptr_t)ptr & + PAGE_MASK) == 0); + + if (likely(tcache != NULL) && size <= tcache_maxclass) { + tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, + size, slow_path); + } else { + arena_dalloc_large(tsdn, + extent_node_arena_get(&chunk->node), chunk, + ptr); + } + } + } else + huge_dalloc(tsdn, ptr); +} +# endif /* JEMALLOC_ARENA_INLINE_B */ +#endif + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ diff --git a/sources/jemalloc/include-linux/jemalloc/internal/assert.h b/sources/jemalloc/include-linux/jemalloc/internal/assert.h new file mode 100755 index 00000000..6f8f7eb9 --- /dev/null +++ b/sources/jemalloc/include-linux/jemalloc/internal/assert.h @@ -0,0 +1,45 @@ +/* + * Define a custom assert() in order to reduce the chances of deadlock during + * assertion failure. + */ +#ifndef assert +#define assert(e) do { \ + if (unlikely(config_debug && !(e))) { \ + malloc_printf( \ + ": %s:%d: Failed assertion: \"%s\"\n", \ + __FILE__, __LINE__, #e); \ + abort(); \ + } \ +} while (0) +#endif + +#ifndef not_reached +#define not_reached() do { \ + if (config_debug) { \ + malloc_printf( \ + ": %s:%d: Unreachable code reached\n", \ + __FILE__, __LINE__); \ + abort(); \ + } \ + unreachable(); \ +} while (0) +#endif + +#ifndef not_implemented +#define not_implemented() do { \ + if (config_debug) { \ + malloc_printf(": %s:%d: Not implemented\n", \ + __FILE__, __LINE__); \ + abort(); \ + } \ +} while (0) +#endif + +#ifndef assert_not_implemented +#define assert_not_implemented(e) do { \ + if (unlikely(config_debug && !(e))) \ + not_implemented(); \ +} while (0) +#endif + + diff --git a/sources/jemalloc/include-linux/jemalloc/internal/atomic.h b/sources/jemalloc/include-linux/jemalloc/internal/atomic.h new file mode 100755 index 00000000..3f15ea14 --- /dev/null +++ b/sources/jemalloc/include-linux/jemalloc/internal/atomic.h @@ -0,0 +1,651 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +#define atomic_read_uint64(p) atomic_add_uint64(p, 0) +#define atomic_read_uint32(p) atomic_add_uint32(p, 0) +#define atomic_read_p(p) atomic_add_p(p, NULL) +#define atomic_read_z(p) atomic_add_z(p, 0) +#define atomic_read_u(p) atomic_add_u(p, 0) + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +/* + * All arithmetic functions return the arithmetic result of the atomic + * operation. Some atomic operation APIs return the value prior to mutation, in + * which case the following functions must redundantly compute the result so + * that it can be returned. These functions are normally inlined, so the extra + * operations can be optimized away if the return values aren't used by the + * callers. 
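+ *
+ * The net semantics, in pseudocode (one variant exists per operand
+ * type: uint64_t, uint32_t, void *, size_t and unsigned; note that
+ * the cas functions return true on *failure*):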
+ * + * atomic_read_( *p) { return (*p); } + * atomic_add_( *p, x) { return (*p += x); } + * atomic_sub_( *p, x) { return (*p -= x); } + * bool atomic_cas_( *p, c, s) + * { + * if (*p != c) + * return (true); + * *p = s; + * return (false); + * } + * void atomic_write_( *p, x) { *p = x; } + */ + +#ifndef JEMALLOC_ENABLE_INLINE +uint64_t atomic_add_uint64(uint64_t *p, uint64_t x); +uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x); +bool atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s); +void atomic_write_uint64(uint64_t *p, uint64_t x); +uint32_t atomic_add_uint32(uint32_t *p, uint32_t x); +uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x); +bool atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s); +void atomic_write_uint32(uint32_t *p, uint32_t x); +void *atomic_add_p(void **p, void *x); +void *atomic_sub_p(void **p, void *x); +bool atomic_cas_p(void **p, void *c, void *s); +void atomic_write_p(void **p, const void *x); +size_t atomic_add_z(size_t *p, size_t x); +size_t atomic_sub_z(size_t *p, size_t x); +bool atomic_cas_z(size_t *p, size_t c, size_t s); +void atomic_write_z(size_t *p, size_t x); +unsigned atomic_add_u(unsigned *p, unsigned x); +unsigned atomic_sub_u(unsigned *p, unsigned x); +bool atomic_cas_u(unsigned *p, unsigned c, unsigned s); +void atomic_write_u(unsigned *p, unsigned x); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ATOMIC_C_)) +/******************************************************************************/ +/* 64-bit operations. */ +#if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3) +# if (defined(__amd64__) || defined(__x86_64__)) +JEMALLOC_INLINE uint64_t +atomic_add_uint64(uint64_t *p, uint64_t x) +{ + uint64_t t = x; + + asm volatile ( + "lock; xaddq %0, %1;" + : "+r" (t), "=m" (*p) /* Outputs. */ + : "m" (*p) /* Inputs. */ + ); + + return (t + x); +} + +JEMALLOC_INLINE uint64_t +atomic_sub_uint64(uint64_t *p, uint64_t x) +{ + uint64_t t; + + x = (uint64_t)(-(int64_t)x); + t = x; + asm volatile ( + "lock; xaddq %0, %1;" + : "+r" (t), "=m" (*p) /* Outputs. */ + : "m" (*p) /* Inputs. */ + ); + + return (t + x); +} + +JEMALLOC_INLINE bool +atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s) +{ + uint8_t success; + + asm volatile ( + "lock; cmpxchgq %4, %0;" + "sete %1;" + : "=m" (*p), "=a" (success) /* Outputs. */ + : "m" (*p), "a" (c), "r" (s) /* Inputs. */ + : "memory" /* Clobbers. */ + ); + + return (!(bool)success); +} + +JEMALLOC_INLINE void +atomic_write_uint64(uint64_t *p, uint64_t x) +{ + + asm volatile ( + "xchgq %1, %0;" /* Lock is implied by xchgq. */ + : "=m" (*p), "+r" (x) /* Outputs. */ + : "m" (*p) /* Inputs. */ + : "memory" /* Clobbers. 
*/ + ); +} +# elif (defined(JEMALLOC_C11ATOMICS)) +JEMALLOC_INLINE uint64_t +atomic_add_uint64(uint64_t *p, uint64_t x) +{ + volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p; + return (atomic_fetch_add(a, x) + x); +} + +JEMALLOC_INLINE uint64_t +atomic_sub_uint64(uint64_t *p, uint64_t x) +{ + volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p; + return (atomic_fetch_sub(a, x) - x); +} + +JEMALLOC_INLINE bool +atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s) +{ + volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p; + return (!atomic_compare_exchange_strong(a, &c, s)); +} + +JEMALLOC_INLINE void +atomic_write_uint64(uint64_t *p, uint64_t x) +{ + volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p; + atomic_store(a, x); +} +# elif (defined(JEMALLOC_ATOMIC9)) +JEMALLOC_INLINE uint64_t +atomic_add_uint64(uint64_t *p, uint64_t x) +{ + + /* + * atomic_fetchadd_64() doesn't exist, but we only ever use this + * function on LP64 systems, so atomic_fetchadd_long() will do. + */ + assert(sizeof(uint64_t) == sizeof(unsigned long)); + + return (atomic_fetchadd_long(p, (unsigned long)x) + x); +} + +JEMALLOC_INLINE uint64_t +atomic_sub_uint64(uint64_t *p, uint64_t x) +{ + + assert(sizeof(uint64_t) == sizeof(unsigned long)); + + return (atomic_fetchadd_long(p, (unsigned long)(-(long)x)) - x); +} + +JEMALLOC_INLINE bool +atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s) +{ + + assert(sizeof(uint64_t) == sizeof(unsigned long)); + + return (!atomic_cmpset_long(p, (unsigned long)c, (unsigned long)s)); +} + +JEMALLOC_INLINE void +atomic_write_uint64(uint64_t *p, uint64_t x) +{ + + assert(sizeof(uint64_t) == sizeof(unsigned long)); + + atomic_store_rel_long(p, x); +} +# elif (defined(JEMALLOC_OSATOMIC)) +JEMALLOC_INLINE uint64_t +atomic_add_uint64(uint64_t *p, uint64_t x) +{ + + return (OSAtomicAdd64((int64_t)x, (int64_t *)p)); +} + +JEMALLOC_INLINE uint64_t +atomic_sub_uint64(uint64_t *p, uint64_t x) +{ + + return (OSAtomicAdd64(-((int64_t)x), (int64_t *)p)); +} + +JEMALLOC_INLINE bool +atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s) +{ + + return (!OSAtomicCompareAndSwap64(c, s, (int64_t *)p)); +} + +JEMALLOC_INLINE void +atomic_write_uint64(uint64_t *p, uint64_t x) +{ + uint64_t o; + + /*The documented OSAtomic*() API does not expose an atomic exchange. 
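+ * Emulate one with a read/CAS retry loop: re-read the current value
+ * and retry until atomic_cas_uint64() reports success (it returns
+ * true on failure).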
*/ + do { + o = atomic_read_uint64(p); + } while (atomic_cas_uint64(p, o, x)); +} +# elif (defined(_MSC_VER)) +JEMALLOC_INLINE uint64_t +atomic_add_uint64(uint64_t *p, uint64_t x) +{ + + return (InterlockedExchangeAdd64(p, x) + x); +} + +JEMALLOC_INLINE uint64_t +atomic_sub_uint64(uint64_t *p, uint64_t x) +{ + + return (InterlockedExchangeAdd64(p, -((int64_t)x)) - x); +} + +JEMALLOC_INLINE bool +atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s) +{ + uint64_t o; + + o = InterlockedCompareExchange64(p, s, c); + return (o != c); +} + +JEMALLOC_INLINE void +atomic_write_uint64(uint64_t *p, uint64_t x) +{ + + InterlockedExchange64(p, x); +} +# elif (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) || \ + defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8)) +JEMALLOC_INLINE uint64_t +atomic_add_uint64(uint64_t *p, uint64_t x) +{ + + return (__sync_add_and_fetch(p, x)); +} + +JEMALLOC_INLINE uint64_t +atomic_sub_uint64(uint64_t *p, uint64_t x) +{ + + return (__sync_sub_and_fetch(p, x)); +} + +JEMALLOC_INLINE bool +atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s) +{ + + return (!__sync_bool_compare_and_swap(p, c, s)); +} + +JEMALLOC_INLINE void +atomic_write_uint64(uint64_t *p, uint64_t x) +{ + + __sync_lock_test_and_set(p, x); +} +# else +# error "Missing implementation for 64-bit atomic operations" +# endif +#endif + +/******************************************************************************/ +/* 32-bit operations. */ +#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__)) +JEMALLOC_INLINE uint32_t +atomic_add_uint32(uint32_t *p, uint32_t x) +{ + uint32_t t = x; + + asm volatile ( + "lock; xaddl %0, %1;" + : "+r" (t), "=m" (*p) /* Outputs. */ + : "m" (*p) /* Inputs. */ + ); + + return (t + x); +} + +JEMALLOC_INLINE uint32_t +atomic_sub_uint32(uint32_t *p, uint32_t x) +{ + uint32_t t; + + x = (uint32_t)(-(int32_t)x); + t = x; + asm volatile ( + "lock; xaddl %0, %1;" + : "+r" (t), "=m" (*p) /* Outputs. */ + : "m" (*p) /* Inputs. */ + ); + + return (t + x); +} + +JEMALLOC_INLINE bool +atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s) +{ + uint8_t success; + + asm volatile ( + "lock; cmpxchgl %4, %0;" + "sete %1;" + : "=m" (*p), "=a" (success) /* Outputs. */ + : "m" (*p), "a" (c), "r" (s) /* Inputs. */ + : "memory" + ); + + return (!(bool)success); +} + +JEMALLOC_INLINE void +atomic_write_uint32(uint32_t *p, uint32_t x) +{ + + asm volatile ( + "xchgl %1, %0;" /* Lock is implied by xchgl. */ + : "=m" (*p), "+r" (x) /* Outputs. */ + : "m" (*p) /* Inputs. */ + : "memory" /* Clobbers. 
*/ + ); +} +# elif (defined(JEMALLOC_C11ATOMICS)) +JEMALLOC_INLINE uint32_t +atomic_add_uint32(uint32_t *p, uint32_t x) +{ + volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p; + return (atomic_fetch_add(a, x) + x); +} + +JEMALLOC_INLINE uint32_t +atomic_sub_uint32(uint32_t *p, uint32_t x) +{ + volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p; + return (atomic_fetch_sub(a, x) - x); +} + +JEMALLOC_INLINE bool +atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s) +{ + volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p; + return (!atomic_compare_exchange_strong(a, &c, s)); +} + +JEMALLOC_INLINE void +atomic_write_uint32(uint32_t *p, uint32_t x) +{ + volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p; + atomic_store(a, x); +} +#elif (defined(JEMALLOC_ATOMIC9)) +JEMALLOC_INLINE uint32_t +atomic_add_uint32(uint32_t *p, uint32_t x) +{ + + return (atomic_fetchadd_32(p, x) + x); +} + +JEMALLOC_INLINE uint32_t +atomic_sub_uint32(uint32_t *p, uint32_t x) +{ + + return (atomic_fetchadd_32(p, (uint32_t)(-(int32_t)x)) - x); +} + +JEMALLOC_INLINE bool +atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s) +{ + + return (!atomic_cmpset_32(p, c, s)); +} + +JEMALLOC_INLINE void +atomic_write_uint32(uint32_t *p, uint32_t x) +{ + + atomic_store_rel_32(p, x); +} +#elif (defined(JEMALLOC_OSATOMIC)) +JEMALLOC_INLINE uint32_t +atomic_add_uint32(uint32_t *p, uint32_t x) +{ + + return (OSAtomicAdd32((int32_t)x, (int32_t *)p)); +} + +JEMALLOC_INLINE uint32_t +atomic_sub_uint32(uint32_t *p, uint32_t x) +{ + + return (OSAtomicAdd32(-((int32_t)x), (int32_t *)p)); +} + +JEMALLOC_INLINE bool +atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s) +{ + + return (!OSAtomicCompareAndSwap32(c, s, (int32_t *)p)); +} + +JEMALLOC_INLINE void +atomic_write_uint32(uint32_t *p, uint32_t x) +{ + uint32_t o; + + /*The documented OSAtomic*() API does not expose an atomic exchange. */ + do { + o = atomic_read_uint32(p); + } while (atomic_cas_uint32(p, o, x)); +} +#elif (defined(_MSC_VER)) +JEMALLOC_INLINE uint32_t +atomic_add_uint32(uint32_t *p, uint32_t x) +{ + + return (InterlockedExchangeAdd(p, x) + x); +} + +JEMALLOC_INLINE uint32_t +atomic_sub_uint32(uint32_t *p, uint32_t x) +{ + + return (InterlockedExchangeAdd(p, -((int32_t)x)) - x); +} + +JEMALLOC_INLINE bool +atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s) +{ + uint32_t o; + + o = InterlockedCompareExchange(p, s, c); + return (o != c); +} + +JEMALLOC_INLINE void +atomic_write_uint32(uint32_t *p, uint32_t x) +{ + + InterlockedExchange(p, x); +} +#elif (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) || \ + defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4)) +JEMALLOC_INLINE uint32_t +atomic_add_uint32(uint32_t *p, uint32_t x) +{ + + return (__sync_add_and_fetch(p, x)); +} + +JEMALLOC_INLINE uint32_t +atomic_sub_uint32(uint32_t *p, uint32_t x) +{ + + return (__sync_sub_and_fetch(p, x)); +} + +JEMALLOC_INLINE bool +atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s) +{ + + return (!__sync_bool_compare_and_swap(p, c, s)); +} + +JEMALLOC_INLINE void +atomic_write_uint32(uint32_t *p, uint32_t x) +{ + + __sync_lock_test_and_set(p, x); +} +#else +# error "Missing implementation for 32-bit atomic operations" +#endif + +/******************************************************************************/ +/* Pointer operations. 
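+ * These (and the size_t/unsigned wrappers below) simply dispatch on
+ * LG_SIZEOF_PTR / LG_SIZEOF_INT (3 for 8-byte operands, 2 for 4-byte
+ * operands) to the sized primitives above. A minimal usage sketch,
+ * illustrative only (nallocs is a hypothetical counter, not part of
+ * this header):
+ *
+ *   static size_t nallocs;
+ *   size_t n = atomic_add_z(&nallocs, 1);      n is the post-add value
+ *   bool failed = atomic_cas_z(&nallocs, n, 0);      true iff *p != n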
*/ +JEMALLOC_INLINE void * +atomic_add_p(void **p, void *x) +{ + +#if (LG_SIZEOF_PTR == 3) + return ((void *)atomic_add_uint64((uint64_t *)p, (uint64_t)x)); +#elif (LG_SIZEOF_PTR == 2) + return ((void *)atomic_add_uint32((uint32_t *)p, (uint32_t)x)); +#endif +} + +JEMALLOC_INLINE void * +atomic_sub_p(void **p, void *x) +{ + +#if (LG_SIZEOF_PTR == 3) + return ((void *)atomic_add_uint64((uint64_t *)p, + (uint64_t)-((int64_t)x))); +#elif (LG_SIZEOF_PTR == 2) + return ((void *)atomic_add_uint32((uint32_t *)p, + (uint32_t)-((int32_t)x))); +#endif +} + +JEMALLOC_INLINE bool +atomic_cas_p(void **p, void *c, void *s) +{ + +#if (LG_SIZEOF_PTR == 3) + return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s)); +#elif (LG_SIZEOF_PTR == 2) + return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s)); +#endif +} + +JEMALLOC_INLINE void +atomic_write_p(void **p, const void *x) +{ + +#if (LG_SIZEOF_PTR == 3) + atomic_write_uint64((uint64_t *)p, (uint64_t)x); +#elif (LG_SIZEOF_PTR == 2) + atomic_write_uint32((uint32_t *)p, (uint32_t)x); +#endif +} + +/******************************************************************************/ +/* size_t operations. */ +JEMALLOC_INLINE size_t +atomic_add_z(size_t *p, size_t x) +{ + +#if (LG_SIZEOF_PTR == 3) + return ((size_t)atomic_add_uint64((uint64_t *)p, (uint64_t)x)); +#elif (LG_SIZEOF_PTR == 2) + return ((size_t)atomic_add_uint32((uint32_t *)p, (uint32_t)x)); +#endif +} + +JEMALLOC_INLINE size_t +atomic_sub_z(size_t *p, size_t x) +{ + +#if (LG_SIZEOF_PTR == 3) + return ((size_t)atomic_add_uint64((uint64_t *)p, + (uint64_t)-((int64_t)x))); +#elif (LG_SIZEOF_PTR == 2) + return ((size_t)atomic_add_uint32((uint32_t *)p, + (uint32_t)-((int32_t)x))); +#endif +} + +JEMALLOC_INLINE bool +atomic_cas_z(size_t *p, size_t c, size_t s) +{ + +#if (LG_SIZEOF_PTR == 3) + return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s)); +#elif (LG_SIZEOF_PTR == 2) + return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s)); +#endif +} + +JEMALLOC_INLINE void +atomic_write_z(size_t *p, size_t x) +{ + +#if (LG_SIZEOF_PTR == 3) + atomic_write_uint64((uint64_t *)p, (uint64_t)x); +#elif (LG_SIZEOF_PTR == 2) + atomic_write_uint32((uint32_t *)p, (uint32_t)x); +#endif +} + +/******************************************************************************/ +/* unsigned operations. 
*/ +JEMALLOC_INLINE unsigned +atomic_add_u(unsigned *p, unsigned x) +{ + +#if (LG_SIZEOF_INT == 3) + return ((unsigned)atomic_add_uint64((uint64_t *)p, (uint64_t)x)); +#elif (LG_SIZEOF_INT == 2) + return ((unsigned)atomic_add_uint32((uint32_t *)p, (uint32_t)x)); +#endif +} + +JEMALLOC_INLINE unsigned +atomic_sub_u(unsigned *p, unsigned x) +{ + +#if (LG_SIZEOF_INT == 3) + return ((unsigned)atomic_add_uint64((uint64_t *)p, + (uint64_t)-((int64_t)x))); +#elif (LG_SIZEOF_INT == 2) + return ((unsigned)atomic_add_uint32((uint32_t *)p, + (uint32_t)-((int32_t)x))); +#endif +} + +JEMALLOC_INLINE bool +atomic_cas_u(unsigned *p, unsigned c, unsigned s) +{ + +#if (LG_SIZEOF_INT == 3) + return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s)); +#elif (LG_SIZEOF_INT == 2) + return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s)); +#endif +} + +JEMALLOC_INLINE void +atomic_write_u(unsigned *p, unsigned x) +{ + +#if (LG_SIZEOF_INT == 3) + atomic_write_uint64((uint64_t *)p, (uint64_t)x); +#elif (LG_SIZEOF_INT == 2) + atomic_write_uint32((uint32_t *)p, (uint32_t)x); +#endif +} + +/******************************************************************************/ +#endif + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ diff --git a/sources/jemalloc/include-linux/jemalloc/internal/base.h b/sources/jemalloc/include-linux/jemalloc/internal/base.h new file mode 100755 index 00000000..d6b81e16 --- /dev/null +++ b/sources/jemalloc/include-linux/jemalloc/internal/base.h @@ -0,0 +1,25 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +void *base_alloc(tsdn_t *tsdn, size_t size); +void base_stats_get(tsdn_t *tsdn, size_t *allocated, size_t *resident, + size_t *mapped); +bool base_boot(void); +void base_prefork(tsdn_t *tsdn); +void base_postfork_parent(tsdn_t *tsdn); +void base_postfork_child(tsdn_t *tsdn); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ diff --git a/sources/jemalloc/include-linux/jemalloc/internal/bitmap.h b/sources/jemalloc/include-linux/jemalloc/internal/bitmap.h new file mode 100755 index 00000000..36f38b59 --- /dev/null +++ b/sources/jemalloc/include-linux/jemalloc/internal/bitmap.h @@ -0,0 +1,274 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +/* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */ +#define LG_BITMAP_MAXBITS LG_RUN_MAXREGS +#define BITMAP_MAXBITS (ZU(1) << LG_BITMAP_MAXBITS) + +typedef struct bitmap_level_s bitmap_level_t; +typedef struct bitmap_info_s bitmap_info_t; +typedef unsigned long bitmap_t; +#define LG_SIZEOF_BITMAP LG_SIZEOF_LONG + +/* Number of bits per group. */ +#define LG_BITMAP_GROUP_NBITS (LG_SIZEOF_BITMAP + 3) +#define BITMAP_GROUP_NBITS (ZU(1) << LG_BITMAP_GROUP_NBITS) +#define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1) + +/* + * Do some analysis on how big the bitmap is before we use a tree. 
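+ * (Concretely, and only as an illustration: with 64-bit groups a group
+ * holds 2^6 bits, so linearly scanning a full 2^10-bit bitmap could
+ * take up to 2^4 == 16 ffs_lu() calls.)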
For a brute + * force linear search, if we would have to call ffs_lu() more than 2^3 times, + * use a tree instead. + */ +#if LG_BITMAP_MAXBITS - LG_BITMAP_GROUP_NBITS > 3 +# define USE_TREE +#endif + +/* Number of groups required to store a given number of bits. */ +#define BITMAP_BITS2GROUPS(nbits) \ + ((nbits + BITMAP_GROUP_NBITS_MASK) >> LG_BITMAP_GROUP_NBITS) + +/* + * Number of groups required at a particular level for a given number of bits. + */ +#define BITMAP_GROUPS_L0(nbits) \ + BITMAP_BITS2GROUPS(nbits) +#define BITMAP_GROUPS_L1(nbits) \ + BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(nbits)) +#define BITMAP_GROUPS_L2(nbits) \ + BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits)))) +#define BITMAP_GROUPS_L3(nbits) \ + BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \ + BITMAP_BITS2GROUPS((nbits))))) + +/* + * Assuming the number of levels, number of groups required for a given number + * of bits. + */ +#define BITMAP_GROUPS_1_LEVEL(nbits) \ + BITMAP_GROUPS_L0(nbits) +#define BITMAP_GROUPS_2_LEVEL(nbits) \ + (BITMAP_GROUPS_1_LEVEL(nbits) + BITMAP_GROUPS_L1(nbits)) +#define BITMAP_GROUPS_3_LEVEL(nbits) \ + (BITMAP_GROUPS_2_LEVEL(nbits) + BITMAP_GROUPS_L2(nbits)) +#define BITMAP_GROUPS_4_LEVEL(nbits) \ + (BITMAP_GROUPS_3_LEVEL(nbits) + BITMAP_GROUPS_L3(nbits)) + +/* + * Maximum number of groups required to support LG_BITMAP_MAXBITS. + */ +#ifdef USE_TREE + +#if LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS +# define BITMAP_GROUPS_MAX BITMAP_GROUPS_1_LEVEL(BITMAP_MAXBITS) +#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 2 +# define BITMAP_GROUPS_MAX BITMAP_GROUPS_2_LEVEL(BITMAP_MAXBITS) +#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 3 +# define BITMAP_GROUPS_MAX BITMAP_GROUPS_3_LEVEL(BITMAP_MAXBITS) +#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 4 +# define BITMAP_GROUPS_MAX BITMAP_GROUPS_4_LEVEL(BITMAP_MAXBITS) +#else +# error "Unsupported bitmap size" +#endif + +/* Maximum number of levels possible. */ +#define BITMAP_MAX_LEVELS \ + (LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \ + + !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP) + +#else /* USE_TREE */ + +#define BITMAP_GROUPS_MAX BITMAP_BITS2GROUPS(BITMAP_MAXBITS) + +#endif /* USE_TREE */ + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +struct bitmap_level_s { + /* Offset of this level's groups within the array of groups. */ + size_t group_offset; +}; + +struct bitmap_info_s { + /* Logical number of bits in bitmap (stored at bottom level). */ + size_t nbits; + +#ifdef USE_TREE + /* Number of levels necessary for nbits. */ + unsigned nlevels; + + /* + * Only the first (nlevels+1) elements are used, and levels are ordered + * bottom to top (e.g. the bottom level is stored in levels[0]). + */ + bitmap_level_t levels[BITMAP_MAX_LEVELS+1]; +#else /* USE_TREE */ + /* Number of groups necessary for nbits. 
*/ + size_t ngroups; +#endif /* USE_TREE */ +}; + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +void bitmap_info_init(bitmap_info_t *binfo, size_t nbits); +void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo); +size_t bitmap_size(const bitmap_info_t *binfo); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +bool bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo); +bool bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit); +void bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit); +size_t bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo); +void bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_BITMAP_C_)) +JEMALLOC_INLINE bool +bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo) +{ +#ifdef USE_TREE + size_t rgoff = binfo->levels[binfo->nlevels].group_offset - 1; + bitmap_t rg = bitmap[rgoff]; + /* The bitmap is full iff the root group is 0. */ + return (rg == 0); +#else + size_t i; + + for (i = 0; i < binfo->ngroups; i++) { + if (bitmap[i] != 0) + return (false); + } + return (true); +#endif +} + +JEMALLOC_INLINE bool +bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) +{ + size_t goff; + bitmap_t g; + + assert(bit < binfo->nbits); + goff = bit >> LG_BITMAP_GROUP_NBITS; + g = bitmap[goff]; + return (!(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)))); +} + +JEMALLOC_INLINE void +bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) +{ + size_t goff; + bitmap_t *gp; + bitmap_t g; + + assert(bit < binfo->nbits); + assert(!bitmap_get(bitmap, binfo, bit)); + goff = bit >> LG_BITMAP_GROUP_NBITS; + gp = &bitmap[goff]; + g = *gp; + assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))); + g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK); + *gp = g; + assert(bitmap_get(bitmap, binfo, bit)); +#ifdef USE_TREE + /* Propagate group state transitions up the tree. */ + if (g == 0) { + unsigned i; + for (i = 1; i < binfo->nlevels; i++) { + bit = goff; + goff = bit >> LG_BITMAP_GROUP_NBITS; + gp = &bitmap[binfo->levels[i].group_offset + goff]; + g = *gp; + assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))); + g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK); + *gp = g; + if (g != 0) + break; + } + } +#endif +} + +/* sfu: set first unset. 
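+*/
+/*
+ * Annotation (added with this import; not part of upstream jemalloc): a
+ * minimal usage sketch of the bitmap API, including bitmap_sfu() below.
+ * Logically "set" bits are stored inverted (as physical 0), which is what
+ * lets bitmap_full() test the root group against 0 and lets bitmap_sfu()
+ * use ffs_lu() directly.  Never compiled:
+ */
+#if 0
+static void
+bitmap_demo(void)
+{
+	bitmap_info_t binfo;
+	bitmap_t bits[BITMAP_GROUPS_MAX];	/* bitmap_size() gives exact bytes. */
+
+	bitmap_info_init(&binfo, 100);	/* Track 100 logical bits. */
+	bitmap_init(bits, &binfo);	/* All bits start unset. */
+	assert(!bitmap_get(bits, &binfo, 7));
+	bitmap_set(bits, &binfo, 7);
+	assert(bitmap_get(bits, &binfo, 7));
+	/* bitmap_sfu() returns the lowest unset bit and sets it. */
+	assert(bitmap_sfu(bits, &binfo) == 0);
+	bitmap_unset(bits, &binfo, 7);
+}
+#endif
+/*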
*/ +JEMALLOC_INLINE size_t +bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) +{ + size_t bit; + bitmap_t g; + unsigned i; + + assert(!bitmap_full(bitmap, binfo)); + +#ifdef USE_TREE + i = binfo->nlevels - 1; + g = bitmap[binfo->levels[i].group_offset]; + bit = ffs_lu(g) - 1; + while (i > 0) { + i--; + g = bitmap[binfo->levels[i].group_offset + bit]; + bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffs_lu(g) - 1); + } +#else + i = 0; + g = bitmap[0]; + while ((bit = ffs_lu(g)) == 0) { + i++; + g = bitmap[i]; + } + bit = (i << LG_BITMAP_GROUP_NBITS) + (bit - 1); +#endif + bitmap_set(bitmap, binfo, bit); + return (bit); +} + +JEMALLOC_INLINE void +bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) +{ + size_t goff; + bitmap_t *gp; + bitmap_t g; + UNUSED bool propagate; + + assert(bit < binfo->nbits); + assert(bitmap_get(bitmap, binfo, bit)); + goff = bit >> LG_BITMAP_GROUP_NBITS; + gp = &bitmap[goff]; + g = *gp; + propagate = (g == 0); + assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))) == 0); + g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK); + *gp = g; + assert(!bitmap_get(bitmap, binfo, bit)); +#ifdef USE_TREE + /* Propagate group state transitions up the tree. */ + if (propagate) { + unsigned i; + for (i = 1; i < binfo->nlevels; i++) { + bit = goff; + goff = bit >> LG_BITMAP_GROUP_NBITS; + gp = &bitmap[binfo->levels[i].group_offset + goff]; + g = *gp; + propagate = (g == 0); + assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))) + == 0); + g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK); + *gp = g; + if (!propagate) + break; + } + } +#endif /* USE_TREE */ +} + +#endif + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ diff --git a/sources/jemalloc/include-linux/jemalloc/internal/chunk.h b/sources/jemalloc/include-linux/jemalloc/internal/chunk.h new file mode 100755 index 00000000..55df9acc --- /dev/null +++ b/sources/jemalloc/include-linux/jemalloc/internal/chunk.h @@ -0,0 +1,97 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +/* + * Size and alignment of memory chunks that are allocated by the OS's virtual + * memory system. + */ +#define LG_CHUNK_DEFAULT 21 + +/* Return the chunk address for allocation address a. */ +#define CHUNK_ADDR2BASE(a) \ + ((void *)((uintptr_t)(a) & ~chunksize_mask)) + +/* Return the chunk offset of address a. */ +#define CHUNK_ADDR2OFFSET(a) \ + ((size_t)((uintptr_t)(a) & chunksize_mask)) + +/* Return the smallest chunk multiple that is >= s. */ +#define CHUNK_CEILING(s) \ + (((s) + chunksize_mask) & ~chunksize_mask) + +#define CHUNK_HOOKS_INITIALIZER { \ + NULL, \ + NULL, \ + NULL, \ + NULL, \ + NULL, \ + NULL, \ + NULL \ +} + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +extern size_t opt_lg_chunk; +extern const char *opt_dss; + +extern rtree_t chunks_rtree; + +extern size_t chunksize; +extern size_t chunksize_mask; /* (chunksize - 1). 
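+*/
+/*
+ * Annotation (added with this import; not part of upstream jemalloc): chunk
+ * address arithmetic, assuming the default lg_chunk of 21 (2 MiB chunks).
+ * Never compiled:
+ */
+#if 0
+static void
+chunk_macro_demo(void)
+{
+	void *p = (void *)0x200ffff;
+
+	assert(CHUNK_ADDR2BASE(p) == (void *)0x2000000);
+	assert(CHUNK_ADDR2OFFSET(p) == 0xffff);
+	/* The smallest chunk multiple >= 1 byte is one full chunk. */
+	assert(CHUNK_CEILING(1) == chunksize);
+}
+#endif
+/*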
*/ +extern size_t chunk_npages; + +extern const chunk_hooks_t chunk_hooks_default; + +chunk_hooks_t chunk_hooks_get(tsdn_t *tsdn, arena_t *arena); +chunk_hooks_t chunk_hooks_set(tsdn_t *tsdn, arena_t *arena, + const chunk_hooks_t *chunk_hooks); + +bool chunk_register(const void *chunk, const extent_node_t *node, + bool *gdump); +void chunk_deregister(const void *chunk, const extent_node_t *node); +void *chunk_alloc_base(size_t size); +void *chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, + chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment, + size_t *sn, bool *zero, bool *commit, bool dalloc_node); +void *chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, + chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment, + size_t *sn, bool *zero, bool *commit); +void chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, + chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t sn, + bool committed); +void chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, + chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t sn, + bool zeroed, bool committed); +bool chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena, + chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t offset, + size_t length); +bool chunk_boot(void); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +extent_node_t *chunk_lookup(const void *chunk, bool dependent); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_CHUNK_C_)) +JEMALLOC_INLINE extent_node_t * +chunk_lookup(const void *ptr, bool dependent) +{ + + return (rtree_get(&chunks_rtree, (uintptr_t)ptr, dependent)); +} +#endif + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ + +#include "jemalloc/internal/chunk_dss.h" +#include "jemalloc/internal/chunk_mmap.h" diff --git a/sources/jemalloc/include-linux/jemalloc/internal/chunk_dss.h b/sources/jemalloc/include-linux/jemalloc/internal/chunk_dss.h new file mode 100755 index 00000000..da8511ba --- /dev/null +++ b/sources/jemalloc/include-linux/jemalloc/internal/chunk_dss.h @@ -0,0 +1,37 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +typedef enum { + dss_prec_disabled = 0, + dss_prec_primary = 1, + dss_prec_secondary = 2, + + dss_prec_limit = 3 +} dss_prec_t; +#define DSS_PREC_DEFAULT dss_prec_secondary +#define DSS_DEFAULT "secondary" + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +extern const char *dss_prec_names[]; + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +dss_prec_t chunk_dss_prec_get(void); +bool chunk_dss_prec_set(dss_prec_t dss_prec); +void *chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, + size_t size, size_t alignment, bool *zero, bool *commit); +bool chunk_in_dss(void *chunk); +bool chunk_dss_mergeable(void *chunk_a, void *chunk_b); +void chunk_dss_boot(void); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ diff --git a/sources/jemalloc/include-linux/jemalloc/internal/chunk_mmap.h 
b/sources/jemalloc/include-linux/jemalloc/internal/chunk_mmap.h new file mode 100755 index 00000000..6f2d0ac2 --- /dev/null +++ b/sources/jemalloc/include-linux/jemalloc/internal/chunk_mmap.h @@ -0,0 +1,21 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +void *chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment, + bool *zero, bool *commit); +bool chunk_dalloc_mmap(void *chunk, size_t size); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ diff --git a/sources/jemalloc/include-linux/jemalloc/internal/ckh.h b/sources/jemalloc/include-linux/jemalloc/internal/ckh.h new file mode 100755 index 00000000..f75ad90b --- /dev/null +++ b/sources/jemalloc/include-linux/jemalloc/internal/ckh.h @@ -0,0 +1,86 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +typedef struct ckh_s ckh_t; +typedef struct ckhc_s ckhc_t; + +/* Typedefs to allow easy function pointer passing. */ +typedef void ckh_hash_t (const void *, size_t[2]); +typedef bool ckh_keycomp_t (const void *, const void *); + +/* Maintain counters used to get an idea of performance. */ +/* #define CKH_COUNT */ +/* Print counter values in ckh_delete() (requires CKH_COUNT). */ +/* #define CKH_VERBOSE */ + +/* + * There are 2^LG_CKH_BUCKET_CELLS cells in each hash table bucket. Try to fit + * one bucket per L1 cache line. + */ +#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1) + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +/* Hash table cell. */ +struct ckhc_s { + const void *key; + const void *data; +}; + +struct ckh_s { +#ifdef CKH_COUNT + /* Counters used to get an idea of performance. */ + uint64_t ngrows; + uint64_t nshrinks; + uint64_t nshrinkfails; + uint64_t ninserts; + uint64_t nrelocs; +#endif + + /* Used for pseudo-random number generation. */ + uint64_t prng_state; + + /* Total number of items. */ + size_t count; + + /* + * Minimum and current number of hash table buckets. There are + * 2^LG_CKH_BUCKET_CELLS cells per bucket. + */ + unsigned lg_minbuckets; + unsigned lg_curbuckets; + + /* Hash and comparison functions. */ + ckh_hash_t *hash; + ckh_keycomp_t *keycomp; + + /* Hash table with 2^lg_curbuckets buckets. 
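+	 * With LG_CKH_BUCKET_CELLS == LG_CACHELINE - LG_SIZEOF_PTR - 1 (2 on
+	 * LP64 with 64-byte cache lines), each bucket holds 4 cells of 16
+	 * bytes apiece, i.e. exactly one cache line.  (Annotation added with
+	 * this import.)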
*/ + ckhc_t *tab; +}; + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +bool ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash, + ckh_keycomp_t *keycomp); +void ckh_delete(tsd_t *tsd, ckh_t *ckh); +size_t ckh_count(ckh_t *ckh); +bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data); +bool ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data); +bool ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key, + void **data); +bool ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data); +void ckh_string_hash(const void *key, size_t r_hash[2]); +bool ckh_string_keycomp(const void *k1, const void *k2); +void ckh_pointer_hash(const void *key, size_t r_hash[2]); +bool ckh_pointer_keycomp(const void *k1, const void *k2); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ diff --git a/sources/jemalloc/include-linux/jemalloc/internal/ctl.h b/sources/jemalloc/include-linux/jemalloc/internal/ctl.h new file mode 100755 index 00000000..af0f6d7c --- /dev/null +++ b/sources/jemalloc/include-linux/jemalloc/internal/ctl.h @@ -0,0 +1,118 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +typedef struct ctl_node_s ctl_node_t; +typedef struct ctl_named_node_s ctl_named_node_t; +typedef struct ctl_indexed_node_s ctl_indexed_node_t; +typedef struct ctl_arena_stats_s ctl_arena_stats_t; +typedef struct ctl_stats_s ctl_stats_t; + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +struct ctl_node_s { + bool named; +}; + +struct ctl_named_node_s { + struct ctl_node_s node; + const char *name; + /* If (nchildren == 0), this is a terminal node. */ + unsigned nchildren; + const ctl_node_t *children; + int (*ctl)(tsd_t *, const size_t *, size_t, void *, + size_t *, void *, size_t); +}; + +struct ctl_indexed_node_s { + struct ctl_node_s node; + const ctl_named_node_t *(*index)(tsdn_t *, const size_t *, size_t, + size_t); +}; + +struct ctl_arena_stats_s { + bool initialized; + unsigned nthreads; + const char *dss; + ssize_t lg_dirty_mult; + ssize_t decay_time; + size_t pactive; + size_t pdirty; + + /* The remainder are only populated if config_stats is true. */ + + arena_stats_t astats; + + /* Aggregate stats for small size classes, based on bin stats. */ + size_t allocated_small; + uint64_t nmalloc_small; + uint64_t ndalloc_small; + uint64_t nrequests_small; + + malloc_bin_stats_t bstats[NBINS]; + malloc_large_stats_t *lstats; /* nlclasses elements. */ + malloc_huge_stats_t *hstats; /* nhclasses elements. */ +}; + +struct ctl_stats_s { + size_t allocated; + size_t active; + size_t metadata; + size_t resident; + size_t mapped; + size_t retained; + unsigned narenas; + ctl_arena_stats_t *arenas; /* (narenas + 1) elements. 
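+	 * The extra element (index narenas) appears to hold totals merged
+	 * across all arenas.  (Annotation added with this import.)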
*/
+};
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+int	ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
+    void *newp, size_t newlen);
+int	ctl_nametomib(tsdn_t *tsdn, const char *name, size_t *mibp,
+    size_t *miblenp);
+
+int	ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+    size_t *oldlenp, void *newp, size_t newlen);
+bool	ctl_boot(void);
+void	ctl_prefork(tsdn_t *tsdn);
+void	ctl_postfork_parent(tsdn_t *tsdn);
+void	ctl_postfork_child(tsdn_t *tsdn);
+
+#define	xmallctl(name, oldp, oldlenp, newp, newlen) do {	\
+	if (je_mallctl(name, oldp, oldlenp, newp, newlen)	\
+	    != 0) {						\
+		malloc_printf(					\
+		    "<jemalloc>: Failure in xmallctl(\"%s\", ...)\n",	\
+		    name);					\
+		abort();					\
+	}							\
+} while (0)
+
+#define	xmallctlnametomib(name, mibp, miblenp) do {		\
+	if (je_mallctlnametomib(name, mibp, miblenp) != 0) {	\
+		malloc_printf("<jemalloc>: Failure in "		\
+		    "xmallctlnametomib(\"%s\", ...)\n", name);	\
+		abort();					\
+	}							\
+} while (0)
+
+#define	xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do {	\
+	if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp,	\
+	    newlen) != 0) {					\
+		malloc_write(					\
+		    "<jemalloc>: Failure in xmallctlbymib()\n");	\
+		abort();					\
+	}							\
+} while (0)
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
+
diff --git a/sources/jemalloc/include-linux/jemalloc/internal/extent.h b/sources/jemalloc/include-linux/jemalloc/internal/extent.h
new file mode 100755
index 00000000..fc77f9f5
--- /dev/null
+++ b/sources/jemalloc/include-linux/jemalloc/internal/extent.h
@@ -0,0 +1,275 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+typedef struct extent_node_s extent_node_t;
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+/* Tree of extents. Use accessor functions for en_* fields. */
+struct extent_node_s {
+	/* Arena from which this extent came, if any. */
+	arena_t *en_arena;
+
+	/* Pointer to the extent that this tree node is responsible for. */
+	void *en_addr;
+
+	/* Total region size. */
+	size_t en_size;
+
+	/*
+	 * Serial number (potentially non-unique).
+	 *
+	 * In principle serial numbers can wrap around on 32-bit systems if
+	 * JEMALLOC_MUNMAP is defined, but as long as comparison functions fall
+	 * back on address comparison for equal serial numbers, stable (if
+	 * imperfect) ordering is maintained.
+	 *
+	 * Serial numbers may not be unique even in the absence of wrap-around,
+	 * e.g. when splitting an extent and assigning the same serial number to
+	 * both resulting adjacent extents.
+	 */
+	size_t en_sn;
+
+	/*
+	 * The zeroed flag is used by chunk recycling code to track whether
+	 * memory is zero-filled.
+	 */
+	bool en_zeroed;
+
+	/*
+	 * True if physical memory is committed to the extent, whether
+	 * explicitly or implicitly as on a system that overcommits and
+	 * satisfies physical memory needs on demand via soft page faults.
+	 */
+	bool en_committed;
+
+	/*
+	 * The achunk flag is used to validate that huge allocation lookups
+	 * don't return arena chunks.
+	 */
+	bool en_achunk;
+
+	/* Profile counters, used for huge objects.
*/ + prof_tctx_t *en_prof_tctx; + + /* Linkage for arena's runs_dirty and chunks_cache rings. */ + arena_runs_dirty_link_t rd; + qr(extent_node_t) cc_link; + + union { + /* Linkage for the size/sn/address-ordered tree. */ + rb_node(extent_node_t) szsnad_link; + + /* Linkage for arena's achunks, huge, and node_cache lists. */ + ql_elm(extent_node_t) ql_link; + }; + + /* Linkage for the address-ordered tree. */ + rb_node(extent_node_t) ad_link; +}; +typedef rb_tree(extent_node_t) extent_tree_t; + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +#ifdef JEMALLOC_JET +size_t extent_size_quantize_floor(size_t size); +#endif +size_t extent_size_quantize_ceil(size_t size); + +rb_proto(, extent_tree_szsnad_, extent_tree_t, extent_node_t) + +rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t) + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +arena_t *extent_node_arena_get(const extent_node_t *node); +void *extent_node_addr_get(const extent_node_t *node); +size_t extent_node_size_get(const extent_node_t *node); +size_t extent_node_sn_get(const extent_node_t *node); +bool extent_node_zeroed_get(const extent_node_t *node); +bool extent_node_committed_get(const extent_node_t *node); +bool extent_node_achunk_get(const extent_node_t *node); +prof_tctx_t *extent_node_prof_tctx_get(const extent_node_t *node); +void extent_node_arena_set(extent_node_t *node, arena_t *arena); +void extent_node_addr_set(extent_node_t *node, void *addr); +void extent_node_size_set(extent_node_t *node, size_t size); +void extent_node_sn_set(extent_node_t *node, size_t sn); +void extent_node_zeroed_set(extent_node_t *node, bool zeroed); +void extent_node_committed_set(extent_node_t *node, bool committed); +void extent_node_achunk_set(extent_node_t *node, bool achunk); +void extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx); +void extent_node_init(extent_node_t *node, arena_t *arena, void *addr, + size_t size, size_t sn, bool zeroed, bool committed); +void extent_node_dirty_linkage_init(extent_node_t *node); +void extent_node_dirty_insert(extent_node_t *node, + arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty); +void extent_node_dirty_remove(extent_node_t *node); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_EXTENT_C_)) +JEMALLOC_INLINE arena_t * +extent_node_arena_get(const extent_node_t *node) +{ + + return (node->en_arena); +} + +JEMALLOC_INLINE void * +extent_node_addr_get(const extent_node_t *node) +{ + + return (node->en_addr); +} + +JEMALLOC_INLINE size_t +extent_node_size_get(const extent_node_t *node) +{ + + return (node->en_size); +} + +JEMALLOC_INLINE size_t +extent_node_sn_get(const extent_node_t *node) +{ + + return (node->en_sn); +} + +JEMALLOC_INLINE bool +extent_node_zeroed_get(const extent_node_t *node) +{ + + return (node->en_zeroed); +} + +JEMALLOC_INLINE bool +extent_node_committed_get(const extent_node_t *node) +{ + + assert(!node->en_achunk); + return (node->en_committed); +} + +JEMALLOC_INLINE bool +extent_node_achunk_get(const extent_node_t *node) +{ + + return (node->en_achunk); +} + +JEMALLOC_INLINE prof_tctx_t * +extent_node_prof_tctx_get(const extent_node_t *node) +{ + + return (node->en_prof_tctx); +} + +JEMALLOC_INLINE void +extent_node_arena_set(extent_node_t *node, arena_t *arena) +{ + + node->en_arena = arena; +} + 
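+/*
+ * Annotation (added with this import; not part of upstream jemalloc): the
+ * getters above and the setters below are the only sanctioned way to touch
+ * the en_* fields.  A minimal initialization sketch (never compiled; arena
+ * and addr are placeholders):
+ */
+#if 0
+static void
+extent_node_demo(arena_t *arena, void *addr)
+{
+	extent_node_t node;
+
+	/* A 4 MiB extent, serial number 0, zeroed and committed. */
+	extent_node_init(&node, arena, addr, ZU(4) << 20, 0, true, true);
+	assert(extent_node_size_get(&node) == (ZU(4) << 20));
+	assert(extent_node_zeroed_get(&node));
+	assert(!extent_node_achunk_get(&node));
+}
+#endif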
+JEMALLOC_INLINE void +extent_node_addr_set(extent_node_t *node, void *addr) +{ + + node->en_addr = addr; +} + +JEMALLOC_INLINE void +extent_node_size_set(extent_node_t *node, size_t size) +{ + + node->en_size = size; +} + +JEMALLOC_INLINE void +extent_node_sn_set(extent_node_t *node, size_t sn) +{ + + node->en_sn = sn; +} + +JEMALLOC_INLINE void +extent_node_zeroed_set(extent_node_t *node, bool zeroed) +{ + + node->en_zeroed = zeroed; +} + +JEMALLOC_INLINE void +extent_node_committed_set(extent_node_t *node, bool committed) +{ + + node->en_committed = committed; +} + +JEMALLOC_INLINE void +extent_node_achunk_set(extent_node_t *node, bool achunk) +{ + + node->en_achunk = achunk; +} + +JEMALLOC_INLINE void +extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx) +{ + + node->en_prof_tctx = tctx; +} + +JEMALLOC_INLINE void +extent_node_init(extent_node_t *node, arena_t *arena, void *addr, size_t size, + size_t sn, bool zeroed, bool committed) +{ + + extent_node_arena_set(node, arena); + extent_node_addr_set(node, addr); + extent_node_size_set(node, size); + extent_node_sn_set(node, sn); + extent_node_zeroed_set(node, zeroed); + extent_node_committed_set(node, committed); + extent_node_achunk_set(node, false); + if (config_prof) + extent_node_prof_tctx_set(node, NULL); +} + +JEMALLOC_INLINE void +extent_node_dirty_linkage_init(extent_node_t *node) +{ + + qr_new(&node->rd, rd_link); + qr_new(node, cc_link); +} + +JEMALLOC_INLINE void +extent_node_dirty_insert(extent_node_t *node, + arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty) +{ + + qr_meld(runs_dirty, &node->rd, rd_link); + qr_meld(chunks_dirty, node, cc_link); +} + +JEMALLOC_INLINE void +extent_node_dirty_remove(extent_node_t *node) +{ + + qr_remove(&node->rd, rd_link); + qr_remove(node, cc_link); +} + +#endif + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ + diff --git a/sources/jemalloc/include-linux/jemalloc/internal/hash.h b/sources/jemalloc/include-linux/jemalloc/internal/hash.h new file mode 100755 index 00000000..1ff2d9a0 --- /dev/null +++ b/sources/jemalloc/include-linux/jemalloc/internal/hash.h @@ -0,0 +1,357 @@ +/* + * The following hash function is based on MurmurHash3, placed into the public + * domain by Austin Appleby. See https://github.com/aappleby/smhasher for + * details. + */ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +uint32_t hash_x86_32(const void *key, int len, uint32_t seed); +void hash_x86_128(const void *key, const int len, uint32_t seed, + uint64_t r_out[2]); +void hash_x64_128(const void *key, const int len, const uint32_t seed, + uint64_t r_out[2]); +void hash(const void *key, size_t len, const uint32_t seed, + size_t r_hash[2]); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_HASH_C_)) +/******************************************************************************/ +/* Internal implementation. 
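+*/
+/*
+ * Annotation (added with this import; not part of upstream jemalloc):
+ * typical use of the hash() entry point declared above; the seed here is
+ * arbitrary.  Never compiled:
+ */
+#if 0
+static void
+hash_demo(void)
+{
+	static const char key[] = "example key";
+	size_t r_hash[2];
+
+	hash(key, sizeof(key) - 1, 1234, r_hash);
+	/* r_hash[0] and r_hash[1] hold the two halves of the 128-bit hash. */
+}
+#endif
+/*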
*/ +JEMALLOC_INLINE uint32_t +hash_rotl_32(uint32_t x, int8_t r) +{ + + return ((x << r) | (x >> (32 - r))); +} + +JEMALLOC_INLINE uint64_t +hash_rotl_64(uint64_t x, int8_t r) +{ + + return ((x << r) | (x >> (64 - r))); +} + +JEMALLOC_INLINE uint32_t +hash_get_block_32(const uint32_t *p, int i) +{ + + /* Handle unaligned read. */ + if (unlikely((uintptr_t)p & (sizeof(uint32_t)-1)) != 0) { + uint32_t ret; + + memcpy(&ret, (uint8_t *)(p + i), sizeof(uint32_t)); + return (ret); + } + + return (p[i]); +} + +JEMALLOC_INLINE uint64_t +hash_get_block_64(const uint64_t *p, int i) +{ + + /* Handle unaligned read. */ + if (unlikely((uintptr_t)p & (sizeof(uint64_t)-1)) != 0) { + uint64_t ret; + + memcpy(&ret, (uint8_t *)(p + i), sizeof(uint64_t)); + return (ret); + } + + return (p[i]); +} + +JEMALLOC_INLINE uint32_t +hash_fmix_32(uint32_t h) +{ + + h ^= h >> 16; + h *= 0x85ebca6b; + h ^= h >> 13; + h *= 0xc2b2ae35; + h ^= h >> 16; + + return (h); +} + +JEMALLOC_INLINE uint64_t +hash_fmix_64(uint64_t k) +{ + + k ^= k >> 33; + k *= KQU(0xff51afd7ed558ccd); + k ^= k >> 33; + k *= KQU(0xc4ceb9fe1a85ec53); + k ^= k >> 33; + + return (k); +} + +JEMALLOC_INLINE uint32_t +hash_x86_32(const void *key, int len, uint32_t seed) +{ + const uint8_t *data = (const uint8_t *) key; + const int nblocks = len / 4; + + uint32_t h1 = seed; + + const uint32_t c1 = 0xcc9e2d51; + const uint32_t c2 = 0x1b873593; + + /* body */ + { + const uint32_t *blocks = (const uint32_t *) (data + nblocks*4); + int i; + + for (i = -nblocks; i; i++) { + uint32_t k1 = hash_get_block_32(blocks, i); + + k1 *= c1; + k1 = hash_rotl_32(k1, 15); + k1 *= c2; + + h1 ^= k1; + h1 = hash_rotl_32(h1, 13); + h1 = h1*5 + 0xe6546b64; + } + } + + /* tail */ + { + const uint8_t *tail = (const uint8_t *) (data + nblocks*4); + + uint32_t k1 = 0; + + switch (len & 3) { + case 3: k1 ^= tail[2] << 16; + case 2: k1 ^= tail[1] << 8; + case 1: k1 ^= tail[0]; k1 *= c1; k1 = hash_rotl_32(k1, 15); + k1 *= c2; h1 ^= k1; + } + } + + /* finalization */ + h1 ^= len; + + h1 = hash_fmix_32(h1); + + return (h1); +} + +UNUSED JEMALLOC_INLINE void +hash_x86_128(const void *key, const int len, uint32_t seed, + uint64_t r_out[2]) +{ + const uint8_t * data = (const uint8_t *) key; + const int nblocks = len / 16; + + uint32_t h1 = seed; + uint32_t h2 = seed; + uint32_t h3 = seed; + uint32_t h4 = seed; + + const uint32_t c1 = 0x239b961b; + const uint32_t c2 = 0xab0e9789; + const uint32_t c3 = 0x38b34ae5; + const uint32_t c4 = 0xa1e38b93; + + /* body */ + { + const uint32_t *blocks = (const uint32_t *) (data + nblocks*16); + int i; + + for (i = -nblocks; i; i++) { + uint32_t k1 = hash_get_block_32(blocks, i*4 + 0); + uint32_t k2 = hash_get_block_32(blocks, i*4 + 1); + uint32_t k3 = hash_get_block_32(blocks, i*4 + 2); + uint32_t k4 = hash_get_block_32(blocks, i*4 + 3); + + k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; + + h1 = hash_rotl_32(h1, 19); h1 += h2; + h1 = h1*5 + 0x561ccd1b; + + k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2; + + h2 = hash_rotl_32(h2, 17); h2 += h3; + h2 = h2*5 + 0x0bcaa747; + + k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3; + + h3 = hash_rotl_32(h3, 15); h3 += h4; + h3 = h3*5 + 0x96cd1c35; + + k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4; + + h4 = hash_rotl_32(h4, 13); h4 += h1; + h4 = h4*5 + 0x32ac3b17; + } + } + + /* tail */ + { + const uint8_t *tail = (const uint8_t *) (data + nblocks*16); + uint32_t k1 = 0; + uint32_t k2 = 0; + uint32_t k3 = 0; + uint32_t k4 = 0; + + switch (len & 15) { + case 15: k4 ^= tail[14] 
<< 16; + case 14: k4 ^= tail[13] << 8; + case 13: k4 ^= tail[12] << 0; + k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4; + + case 12: k3 ^= tail[11] << 24; + case 11: k3 ^= tail[10] << 16; + case 10: k3 ^= tail[ 9] << 8; + case 9: k3 ^= tail[ 8] << 0; + k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3; + + case 8: k2 ^= tail[ 7] << 24; + case 7: k2 ^= tail[ 6] << 16; + case 6: k2 ^= tail[ 5] << 8; + case 5: k2 ^= tail[ 4] << 0; + k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2; + + case 4: k1 ^= tail[ 3] << 24; + case 3: k1 ^= tail[ 2] << 16; + case 2: k1 ^= tail[ 1] << 8; + case 1: k1 ^= tail[ 0] << 0; + k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; + } + } + + /* finalization */ + h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len; + + h1 += h2; h1 += h3; h1 += h4; + h2 += h1; h3 += h1; h4 += h1; + + h1 = hash_fmix_32(h1); + h2 = hash_fmix_32(h2); + h3 = hash_fmix_32(h3); + h4 = hash_fmix_32(h4); + + h1 += h2; h1 += h3; h1 += h4; + h2 += h1; h3 += h1; h4 += h1; + + r_out[0] = (((uint64_t) h2) << 32) | h1; + r_out[1] = (((uint64_t) h4) << 32) | h3; +} + +UNUSED JEMALLOC_INLINE void +hash_x64_128(const void *key, const int len, const uint32_t seed, + uint64_t r_out[2]) +{ + const uint8_t *data = (const uint8_t *) key; + const int nblocks = len / 16; + + uint64_t h1 = seed; + uint64_t h2 = seed; + + const uint64_t c1 = KQU(0x87c37b91114253d5); + const uint64_t c2 = KQU(0x4cf5ad432745937f); + + /* body */ + { + const uint64_t *blocks = (const uint64_t *) (data); + int i; + + for (i = 0; i < nblocks; i++) { + uint64_t k1 = hash_get_block_64(blocks, i*2 + 0); + uint64_t k2 = hash_get_block_64(blocks, i*2 + 1); + + k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1; + + h1 = hash_rotl_64(h1, 27); h1 += h2; + h1 = h1*5 + 0x52dce729; + + k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2; + + h2 = hash_rotl_64(h2, 31); h2 += h1; + h2 = h2*5 + 0x38495ab5; + } + } + + /* tail */ + { + const uint8_t *tail = (const uint8_t*)(data + nblocks*16); + uint64_t k1 = 0; + uint64_t k2 = 0; + + switch (len & 15) { + case 15: k2 ^= ((uint64_t)(tail[14])) << 48; + case 14: k2 ^= ((uint64_t)(tail[13])) << 40; + case 13: k2 ^= ((uint64_t)(tail[12])) << 32; + case 12: k2 ^= ((uint64_t)(tail[11])) << 24; + case 11: k2 ^= ((uint64_t)(tail[10])) << 16; + case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8; + case 9: k2 ^= ((uint64_t)(tail[ 8])) << 0; + k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2; + + case 8: k1 ^= ((uint64_t)(tail[ 7])) << 56; + case 7: k1 ^= ((uint64_t)(tail[ 6])) << 48; + case 6: k1 ^= ((uint64_t)(tail[ 5])) << 40; + case 5: k1 ^= ((uint64_t)(tail[ 4])) << 32; + case 4: k1 ^= ((uint64_t)(tail[ 3])) << 24; + case 3: k1 ^= ((uint64_t)(tail[ 2])) << 16; + case 2: k1 ^= ((uint64_t)(tail[ 1])) << 8; + case 1: k1 ^= ((uint64_t)(tail[ 0])) << 0; + k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1; + } + } + + /* finalization */ + h1 ^= len; h2 ^= len; + + h1 += h2; + h2 += h1; + + h1 = hash_fmix_64(h1); + h2 = hash_fmix_64(h2); + + h1 += h2; + h2 += h1; + + r_out[0] = h1; + r_out[1] = h2; +} + +/******************************************************************************/ +/* API. */ +JEMALLOC_INLINE void +hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2]) +{ + + assert(len <= INT_MAX); /* Unfortunate implementation limitation. 
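+	 * (Annotation added with this import:) the dispatch below selects the
+	 * 64-bit MurmurHash3 variant on little-endian LP64 builds and falls
+	 * back to hash_x86_128() everywhere else, narrowing each 64-bit half
+	 * to size_t.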
*/ + +#if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN)) + hash_x64_128(key, (int)len, seed, (uint64_t *)r_hash); +#else + { + uint64_t hashes[2]; + hash_x86_128(key, (int)len, seed, hashes); + r_hash[0] = (size_t)hashes[0]; + r_hash[1] = (size_t)hashes[1]; + } +#endif +} +#endif + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ diff --git a/sources/jemalloc/include-linux/jemalloc/internal/huge.h b/sources/jemalloc/include-linux/jemalloc/internal/huge.h new file mode 100755 index 00000000..22184d9b --- /dev/null +++ b/sources/jemalloc/include-linux/jemalloc/internal/huge.h @@ -0,0 +1,35 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +void *huge_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero); +void *huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, + size_t alignment, bool zero); +bool huge_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, + size_t usize_min, size_t usize_max, bool zero); +void *huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, + size_t usize, size_t alignment, bool zero, tcache_t *tcache); +#ifdef JEMALLOC_JET +typedef void (huge_dalloc_junk_t)(void *, size_t); +extern huge_dalloc_junk_t *huge_dalloc_junk; +#endif +void huge_dalloc(tsdn_t *tsdn, void *ptr); +arena_t *huge_aalloc(const void *ptr); +size_t huge_salloc(tsdn_t *tsdn, const void *ptr); +prof_tctx_t *huge_prof_tctx_get(tsdn_t *tsdn, const void *ptr); +void huge_prof_tctx_set(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx); +void huge_prof_tctx_reset(tsdn_t *tsdn, const void *ptr); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ diff --git a/sources/jemalloc/include-linux/jemalloc/internal/jemalloc_internal.h b/sources/jemalloc/include-linux/jemalloc/internal/jemalloc_internal.h new file mode 100755 index 00000000..f8df7996 --- /dev/null +++ b/sources/jemalloc/include-linux/jemalloc/internal/jemalloc_internal.h @@ -0,0 +1,1294 @@ +#ifndef JEMALLOC_INTERNAL_H +#define JEMALLOC_INTERNAL_H + +#include "jemalloc_internal_defs.h" +#include "jemalloc/internal/jemalloc_internal_decls.h" + +#ifdef JEMALLOC_UTRACE +#include +#endif + +#define JEMALLOC_NO_DEMANGLE +#ifdef JEMALLOC_JET +# define JEMALLOC_N(n) jet_##n +# include "jemalloc/internal/public_namespace.h" +# define JEMALLOC_NO_RENAME +# include "../jemalloc.h" +# undef JEMALLOC_NO_RENAME +#else +# define JEMALLOC_N(n) je_##n +# include "../jemalloc.h" +#endif +#include "jemalloc/internal/private_namespace.h" + +static const bool config_debug = +#ifdef JEMALLOC_DEBUG + true +#else + false +#endif + ; +static const bool have_dss = +#ifdef JEMALLOC_DSS + true +#else + false +#endif + ; +static const bool config_fill = +#ifdef JEMALLOC_FILL + true +#else + false +#endif + ; +static const bool config_lazy_lock = +#ifdef JEMALLOC_LAZY_LOCK + true +#else + false +#endif + ; +static const char * const config_malloc_conf = JEMALLOC_CONFIG_MALLOC_CONF; +static const bool config_prof = +#ifdef 
JEMALLOC_PROF + true +#else + false +#endif + ; +static const bool config_prof_libgcc = +#ifdef JEMALLOC_PROF_LIBGCC + true +#else + false +#endif + ; +static const bool config_prof_libunwind = +#ifdef JEMALLOC_PROF_LIBUNWIND + true +#else + false +#endif + ; +static const bool maps_coalesce = +#ifdef JEMALLOC_MAPS_COALESCE + true +#else + false +#endif + ; +static const bool config_munmap = +#ifdef JEMALLOC_MUNMAP + true +#else + false +#endif + ; +static const bool config_stats = +#ifdef JEMALLOC_STATS + true +#else + false +#endif + ; +static const bool config_tcache = +#ifdef JEMALLOC_TCACHE + true +#else + false +#endif + ; +static const bool config_thp = +#ifdef JEMALLOC_THP + true +#else + false +#endif + ; +static const bool config_tls = +#ifdef JEMALLOC_TLS + true +#else + false +#endif + ; +static const bool config_utrace = +#ifdef JEMALLOC_UTRACE + true +#else + false +#endif + ; +static const bool config_valgrind = +#ifdef JEMALLOC_VALGRIND + true +#else + false +#endif + ; +static const bool config_xmalloc = +#ifdef JEMALLOC_XMALLOC + true +#else + false +#endif + ; +static const bool config_ivsalloc = +#ifdef JEMALLOC_IVSALLOC + true +#else + false +#endif + ; +static const bool config_cache_oblivious = +#ifdef JEMALLOC_CACHE_OBLIVIOUS + true +#else + false +#endif + ; + +#ifdef JEMALLOC_C11ATOMICS +#include +#endif + +#ifdef JEMALLOC_ATOMIC9 +#include +#endif + +#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN)) +#include +#endif + +#ifdef JEMALLOC_ZONE +#include +#include +#include +#endif + +#include "jemalloc/internal/ph.h" +#ifndef __PGI +#define RB_COMPACT +#endif +#include "jemalloc/internal/rb.h" +#include "jemalloc/internal/qr.h" +#include "jemalloc/internal/ql.h" + +/* + * jemalloc can conceptually be broken into components (arena, tcache, etc.), + * but there are circular dependencies that cannot be broken without + * substantial performance degradation. In order to reduce the effect on + * visual code flow, read the header files in multiple passes, with one of the + * following cpp variables defined during each pass: + * + * JEMALLOC_H_TYPES : Preprocessor-defined constants and psuedo-opaque data + * types. + * JEMALLOC_H_STRUCTS : Data structures. + * JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes. + * JEMALLOC_H_INLINES : Inline functions. + */ +/******************************************************************************/ +#define JEMALLOC_H_TYPES + +#include "jemalloc/internal/jemalloc_internal_macros.h" + +/* Page size index type. */ +typedef unsigned pszind_t; + +/* Size class index type. */ +typedef unsigned szind_t; + +/* + * Flags bits: + * + * a: arena + * t: tcache + * 0: unused + * z: zero + * n: alignment + * + * aaaaaaaa aaaatttt tttttttt 0znnnnnn + */ +#define MALLOCX_ARENA_MASK ((int)~0xfffff) +#define MALLOCX_ARENA_MAX 0xffe +#define MALLOCX_TCACHE_MASK ((int)~0xfff000ffU) +#define MALLOCX_TCACHE_MAX 0xffd +#define MALLOCX_LG_ALIGN_MASK ((int)0x3f) +/* Use MALLOCX_ALIGN_GET() if alignment may not be specified in flags. */ +#define MALLOCX_ALIGN_GET_SPECIFIED(flags) \ + (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)) +#define MALLOCX_ALIGN_GET(flags) \ + (MALLOCX_ALIGN_GET_SPECIFIED(flags) & (SIZE_T_MAX-1)) +#define MALLOCX_ZERO_GET(flags) \ + ((bool)(flags & MALLOCX_ZERO)) + +#define MALLOCX_TCACHE_GET(flags) \ + (((unsigned)((flags & MALLOCX_TCACHE_MASK) >> 8)) - 2) +#define MALLOCX_ARENA_GET(flags) \ + (((unsigned)(((unsigned)flags) >> 20)) - 1) + +/* Smallest size class to support. 
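+*/
+/*
+ * Annotation (added with this import; not part of upstream jemalloc): how
+ * the MALLOCX_*_GET() accessors above undo the encoding performed by the
+ * public MALLOCX_LG_ALIGN()/MALLOCX_ARENA() macros from jemalloc.h.  Never
+ * compiled:
+ */
+#if 0
+static void
+mallocx_flags_demo(void)
+{
+	int flags = MALLOCX_LG_ALIGN(4) | MALLOCX_ARENA(2);
+
+	assert(MALLOCX_ALIGN_GET(flags) == 16);	/* 1 << 4. */
+	assert(MALLOCX_ARENA_GET(flags) == 2);	/* Stored biased by 1. */
+	assert(!MALLOCX_ZERO_GET(flags));
+}
+#endif
+/*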
*/ +#define TINY_MIN (1U << LG_TINY_MIN) + +/* + * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size + * classes). + */ +#ifndef LG_QUANTUM +# if (defined(__i386__) || defined(_M_IX86)) +# define LG_QUANTUM 4 +# endif +# ifdef __ia64__ +# define LG_QUANTUM 4 +# endif +# ifdef __alpha__ +# define LG_QUANTUM 4 +# endif +# if (defined(__sparc64__) || defined(__sparcv9) || defined(__sparc_v9__)) +# define LG_QUANTUM 4 +# endif +# if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64)) +# define LG_QUANTUM 4 +# endif +# ifdef __arm__ +# define LG_QUANTUM 3 +# endif +# ifdef __aarch64__ +# define LG_QUANTUM 4 +# endif +# ifdef __hppa__ +# define LG_QUANTUM 4 +# endif +# ifdef __mips__ +# define LG_QUANTUM 3 +# endif +# ifdef __or1k__ +# define LG_QUANTUM 3 +# endif +# ifdef __powerpc__ +# define LG_QUANTUM 4 +# endif +# ifdef __riscv__ +# define LG_QUANTUM 4 +# endif +# ifdef __s390__ +# define LG_QUANTUM 4 +# endif +# ifdef __SH4__ +# define LG_QUANTUM 4 +# endif +# ifdef __tile__ +# define LG_QUANTUM 4 +# endif +# ifdef __le32__ +# define LG_QUANTUM 4 +# endif +# ifndef LG_QUANTUM +# error "Unknown minimum alignment for architecture; specify via " + "--with-lg-quantum" +# endif +#endif + +#define QUANTUM ((size_t)(1U << LG_QUANTUM)) +#define QUANTUM_MASK (QUANTUM - 1) + +/* Return the smallest quantum multiple that is >= a. */ +#define QUANTUM_CEILING(a) \ + (((a) + QUANTUM_MASK) & ~QUANTUM_MASK) + +#define LONG ((size_t)(1U << LG_SIZEOF_LONG)) +#define LONG_MASK (LONG - 1) + +/* Return the smallest long multiple that is >= a. */ +#define LONG_CEILING(a) \ + (((a) + LONG_MASK) & ~LONG_MASK) + +#define SIZEOF_PTR (1U << LG_SIZEOF_PTR) +#define PTR_MASK (SIZEOF_PTR - 1) + +/* Return the smallest (void *) multiple that is >= a. */ +#define PTR_CEILING(a) \ + (((a) + PTR_MASK) & ~PTR_MASK) + +/* + * Maximum size of L1 cache line. This is used to avoid cache line aliasing. + * In addition, this controls the spacing of cacheline-spaced size classes. + * + * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can + * only handle raw constants. + */ +#define LG_CACHELINE 6 +#define CACHELINE 64 +#define CACHELINE_MASK (CACHELINE - 1) + +/* Return the smallest cacheline multiple that is >= s. */ +#define CACHELINE_CEILING(s) \ + (((s) + CACHELINE_MASK) & ~CACHELINE_MASK) + +/* Page size. LG_PAGE is determined by the configure script. */ +#ifdef PAGE_MASK +# undef PAGE_MASK +#endif +#define PAGE ((size_t)(1U << LG_PAGE)) +#define PAGE_MASK ((size_t)(PAGE - 1)) + +/* Return the page base address for the page containing address a. */ +#define PAGE_ADDR2BASE(a) \ + ((void *)((uintptr_t)(a) & ~PAGE_MASK)) + +/* Return the smallest pagesize multiple that is >= s. */ +#define PAGE_CEILING(s) \ + (((s) + PAGE_MASK) & ~PAGE_MASK) + +/* Return the nearest aligned address at or below a. */ +#define ALIGNMENT_ADDR2BASE(a, alignment) \ + ((void *)((uintptr_t)(a) & ((~(alignment)) + 1))) + +/* Return the offset between a and the nearest aligned address at or below a. */ +#define ALIGNMENT_ADDR2OFFSET(a, alignment) \ + ((size_t)((uintptr_t)(a) & (alignment - 1))) + +/* Return the smallest alignment multiple that is >= s. */ +#define ALIGNMENT_CEILING(s, alignment) \ + (((s) + (alignment - 1)) & ((~(alignment)) + 1)) + +/* Declare a variable-length array. 
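+*/
+/*
+ * Annotation (added with this import; not part of upstream jemalloc):
+ * worked examples of the ceiling/alignment macros above, assuming
+ * LG_QUANTUM == 4 (as on x86-64) and the 64-byte cache line defined above.
+ * Never compiled:
+ */
+#if 0
+static void
+align_macro_demo(void)
+{
+
+	assert(QUANTUM_CEILING(13) == 16);
+	assert(CACHELINE_CEILING(65) == 128);
+	assert(ALIGNMENT_ADDR2BASE((void *)0x1234, 16) == (void *)0x1230);
+	assert(ALIGNMENT_ADDR2OFFSET((void *)0x1234, 16) == 4);
+	assert(ALIGNMENT_CEILING(100, 64) == 128);
+}
+#endif
+/*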
*/
+#if __STDC_VERSION__ < 199901L
+#  ifdef _MSC_VER
+#    include <malloc.h>
+#    define alloca _alloca
+#  else
+#    ifdef JEMALLOC_HAS_ALLOCA_H
+#      include <alloca.h>
+#    else
+#      include <stdlib.h>
+#    endif
+#  endif
+#  define VARIABLE_ARRAY(type, name, count) \
+	type *name = alloca(sizeof(type) * (count))
+#else
+#  define VARIABLE_ARRAY(type, name, count) type name[(count)]
+#endif
+
+#include "jemalloc/internal/nstime.h"
+#include "jemalloc/internal/valgrind.h"
+#include "jemalloc/internal/util.h"
+#include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/spin.h"
+#include "jemalloc/internal/prng.h"
+#include "jemalloc/internal/ticker.h"
+#include "jemalloc/internal/ckh.h"
+#include "jemalloc/internal/size_classes.h"
+#include "jemalloc/internal/smoothstep.h"
+#include "jemalloc/internal/stats.h"
+#include "jemalloc/internal/ctl.h"
+#include "jemalloc/internal/witness.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/tsd.h"
+#include "jemalloc/internal/mb.h"
+#include "jemalloc/internal/extent.h"
+#include "jemalloc/internal/arena.h"
+#include "jemalloc/internal/bitmap.h"
+#include "jemalloc/internal/base.h"
+#include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/pages.h"
+#include "jemalloc/internal/chunk.h"
+#include "jemalloc/internal/huge.h"
+#include "jemalloc/internal/tcache.h"
+#include "jemalloc/internal/hash.h"
+#include "jemalloc/internal/quarantine.h"
+#include "jemalloc/internal/prof.h"
+
+#undef JEMALLOC_H_TYPES
+/******************************************************************************/
+#define JEMALLOC_H_STRUCTS
+
+#include "jemalloc/internal/nstime.h"
+#include "jemalloc/internal/valgrind.h"
+#include "jemalloc/internal/util.h"
+#include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/spin.h"
+#include "jemalloc/internal/prng.h"
+#include "jemalloc/internal/ticker.h"
+#include "jemalloc/internal/ckh.h"
+#include "jemalloc/internal/size_classes.h"
+#include "jemalloc/internal/smoothstep.h"
+#include "jemalloc/internal/stats.h"
+#include "jemalloc/internal/ctl.h"
+#include "jemalloc/internal/witness.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/mb.h"
+#include "jemalloc/internal/bitmap.h"
+#define JEMALLOC_ARENA_STRUCTS_A
+#include "jemalloc/internal/arena.h"
+#undef JEMALLOC_ARENA_STRUCTS_A
+#include "jemalloc/internal/extent.h"
+#define JEMALLOC_ARENA_STRUCTS_B
+#include "jemalloc/internal/arena.h"
+#undef JEMALLOC_ARENA_STRUCTS_B
+#include "jemalloc/internal/base.h"
+#include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/pages.h"
+#include "jemalloc/internal/chunk.h"
+#include "jemalloc/internal/huge.h"
+#include "jemalloc/internal/tcache.h"
+#include "jemalloc/internal/hash.h"
+#include "jemalloc/internal/quarantine.h"
+#include "jemalloc/internal/prof.h"
+
+#include "jemalloc/internal/tsd.h"
+
+#undef JEMALLOC_H_STRUCTS
+/******************************************************************************/
+#define JEMALLOC_H_EXTERNS
+
+extern bool opt_abort;
+extern const char *opt_junk;
+extern bool opt_junk_alloc;
+extern bool opt_junk_free;
+extern size_t opt_quarantine;
+extern bool opt_redzone;
+extern bool opt_utrace;
+extern bool opt_xmalloc;
+extern bool opt_zero;
+extern unsigned opt_narenas;
+
+extern bool in_valgrind;
+
+/* Number of CPUs. */
+extern unsigned ncpus;
+
+/* Number of arenas used for automatic multiplexing of threads and arenas. */
+extern unsigned narenas_auto;
+
+/*
+ * Arenas that are used to service external requests.
Not all elements of the + * arenas array are necessarily used; arenas are created lazily as needed. + */ +extern arena_t **arenas; + +/* + * pind2sz_tab encodes the same information as could be computed by + * pind2sz_compute(). + */ +extern size_t const pind2sz_tab[NPSIZES]; +/* + * index2size_tab encodes the same information as could be computed (at + * unacceptable cost in some code paths) by index2size_compute(). + */ +extern size_t const index2size_tab[NSIZES]; +/* + * size2index_tab is a compact lookup table that rounds request sizes up to + * size classes. In order to reduce cache footprint, the table is compressed, + * and all accesses are via size2index(). + */ +extern uint8_t const size2index_tab[]; + +arena_t *a0get(void); +void *a0malloc(size_t size); +void a0dalloc(void *ptr); +void *bootstrap_malloc(size_t size); +void *bootstrap_calloc(size_t num, size_t size); +void bootstrap_free(void *ptr); +unsigned narenas_total_get(void); +arena_t *arena_init(tsdn_t *tsdn, unsigned ind); +arena_tdata_t *arena_tdata_get_hard(tsd_t *tsd, unsigned ind); +arena_t *arena_choose_hard(tsd_t *tsd, bool internal); +void arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind); +void thread_allocated_cleanup(tsd_t *tsd); +void thread_deallocated_cleanup(tsd_t *tsd); +void iarena_cleanup(tsd_t *tsd); +void arena_cleanup(tsd_t *tsd); +void arenas_tdata_cleanup(tsd_t *tsd); +void narenas_tdata_cleanup(tsd_t *tsd); +void arenas_tdata_bypass_cleanup(tsd_t *tsd); +void jemalloc_prefork(void); +void jemalloc_postfork_parent(void); +void jemalloc_postfork_child(void); + +#include "jemalloc/internal/nstime.h" +#include "jemalloc/internal/valgrind.h" +#include "jemalloc/internal/util.h" +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/spin.h" +#include "jemalloc/internal/prng.h" +#include "jemalloc/internal/ticker.h" +#include "jemalloc/internal/ckh.h" +#include "jemalloc/internal/size_classes.h" +#include "jemalloc/internal/smoothstep.h" +#include "jemalloc/internal/stats.h" +#include "jemalloc/internal/ctl.h" +#include "jemalloc/internal/witness.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/mb.h" +#include "jemalloc/internal/bitmap.h" +#include "jemalloc/internal/extent.h" +#include "jemalloc/internal/arena.h" +#include "jemalloc/internal/base.h" +#include "jemalloc/internal/rtree.h" +#include "jemalloc/internal/pages.h" +#include "jemalloc/internal/chunk.h" +#include "jemalloc/internal/huge.h" +#include "jemalloc/internal/tcache.h" +#include "jemalloc/internal/hash.h" +#include "jemalloc/internal/quarantine.h" +#include "jemalloc/internal/prof.h" +#include "jemalloc/internal/tsd.h" + +#undef JEMALLOC_H_EXTERNS +/******************************************************************************/ +#define JEMALLOC_H_INLINES + +#include "jemalloc/internal/nstime.h" +#include "jemalloc/internal/valgrind.h" +#include "jemalloc/internal/util.h" +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/spin.h" +#include "jemalloc/internal/prng.h" +#include "jemalloc/internal/ticker.h" +#include "jemalloc/internal/ckh.h" +#include "jemalloc/internal/size_classes.h" +#include "jemalloc/internal/smoothstep.h" +#include "jemalloc/internal/stats.h" +#include "jemalloc/internal/ctl.h" +#include "jemalloc/internal/tsd.h" +#include "jemalloc/internal/witness.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/mb.h" +#include "jemalloc/internal/extent.h" +#include "jemalloc/internal/base.h" +#include "jemalloc/internal/rtree.h" +#include 
"jemalloc/internal/pages.h" +#include "jemalloc/internal/chunk.h" +#include "jemalloc/internal/huge.h" + +#ifndef JEMALLOC_ENABLE_INLINE +pszind_t psz2ind(size_t psz); +size_t pind2sz_compute(pszind_t pind); +size_t pind2sz_lookup(pszind_t pind); +size_t pind2sz(pszind_t pind); +size_t psz2u(size_t psz); +szind_t size2index_compute(size_t size); +szind_t size2index_lookup(size_t size); +szind_t size2index(size_t size); +size_t index2size_compute(szind_t index); +size_t index2size_lookup(szind_t index); +size_t index2size(szind_t index); +size_t s2u_compute(size_t size); +size_t s2u_lookup(size_t size); +size_t s2u(size_t size); +size_t sa2u(size_t size, size_t alignment); +arena_t *arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal); +arena_t *arena_choose(tsd_t *tsd, arena_t *arena); +arena_t *arena_ichoose(tsd_t *tsd, arena_t *arena); +arena_tdata_t *arena_tdata_get(tsd_t *tsd, unsigned ind, + bool refresh_if_missing); +arena_t *arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing); +ticker_t *decay_ticker_get(tsd_t *tsd, unsigned ind); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_)) +JEMALLOC_INLINE pszind_t +psz2ind(size_t psz) +{ + + if (unlikely(psz > HUGE_MAXCLASS)) + return (NPSIZES); + { + pszind_t x = lg_floor((psz<<1)-1); + pszind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_PAGE) ? 0 : x - + (LG_SIZE_CLASS_GROUP + LG_PAGE); + pszind_t grp = shift << LG_SIZE_CLASS_GROUP; + + pszind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ? + LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1; + + size_t delta_inverse_mask = ZI(-1) << lg_delta; + pszind_t mod = ((((psz-1) & delta_inverse_mask) >> lg_delta)) & + ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1); + + pszind_t ind = grp + mod; + return (ind); + } +} + +JEMALLOC_INLINE size_t +pind2sz_compute(pszind_t pind) +{ + + { + size_t grp = pind >> LG_SIZE_CLASS_GROUP; + size_t mod = pind & ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1); + + size_t grp_size_mask = ~((!!grp)-1); + size_t grp_size = ((ZU(1) << (LG_PAGE + + (LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask; + + size_t shift = (grp == 0) ? 1 : grp; + size_t lg_delta = shift + (LG_PAGE-1); + size_t mod_size = (mod+1) << lg_delta; + + size_t sz = grp_size + mod_size; + return (sz); + } +} + +JEMALLOC_INLINE size_t +pind2sz_lookup(pszind_t pind) +{ + size_t ret = (size_t)pind2sz_tab[pind]; + assert(ret == pind2sz_compute(pind)); + return (ret); +} + +JEMALLOC_INLINE size_t +pind2sz(pszind_t pind) +{ + + assert(pind < NPSIZES); + return (pind2sz_lookup(pind)); +} + +JEMALLOC_INLINE size_t +psz2u(size_t psz) +{ + + if (unlikely(psz > HUGE_MAXCLASS)) + return (0); + { + size_t x = lg_floor((psz<<1)-1); + size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ? + LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1; + size_t delta = ZU(1) << lg_delta; + size_t delta_mask = delta - 1; + size_t usize = (psz + delta_mask) & ~delta_mask; + return (usize); + } +} + +JEMALLOC_INLINE szind_t +size2index_compute(size_t size) +{ + + if (unlikely(size > HUGE_MAXCLASS)) + return (NSIZES); +#if (NTBINS != 0) + if (size <= (ZU(1) << LG_TINY_MAXCLASS)) { + szind_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1; + szind_t lg_ceil = lg_floor(pow2_ceil_zu(size)); + return (lg_ceil < lg_tmin ? 0 : lg_ceil - lg_tmin); + } +#endif + { + szind_t x = lg_floor((size<<1)-1); + szind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM) ? 0 : + x - (LG_SIZE_CLASS_GROUP + LG_QUANTUM); + szind_t grp = shift << LG_SIZE_CLASS_GROUP; + + szind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1) + ? 
LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1; + + size_t delta_inverse_mask = ZI(-1) << lg_delta; + szind_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) & + ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1); + + szind_t index = NTBINS + grp + mod; + return (index); + } +} + +JEMALLOC_ALWAYS_INLINE szind_t +size2index_lookup(size_t size) +{ + + assert(size <= LOOKUP_MAXCLASS); + { + szind_t ret = (size2index_tab[(size-1) >> LG_TINY_MIN]); + assert(ret == size2index_compute(size)); + return (ret); + } +} + +JEMALLOC_ALWAYS_INLINE szind_t +size2index(size_t size) +{ + + assert(size > 0); + if (likely(size <= LOOKUP_MAXCLASS)) + return (size2index_lookup(size)); + return (size2index_compute(size)); +} + +JEMALLOC_INLINE size_t +index2size_compute(szind_t index) +{ + +#if (NTBINS > 0) + if (index < NTBINS) + return (ZU(1) << (LG_TINY_MAXCLASS - NTBINS + 1 + index)); +#endif + { + size_t reduced_index = index - NTBINS; + size_t grp = reduced_index >> LG_SIZE_CLASS_GROUP; + size_t mod = reduced_index & ((ZU(1) << LG_SIZE_CLASS_GROUP) - + 1); + + size_t grp_size_mask = ~((!!grp)-1); + size_t grp_size = ((ZU(1) << (LG_QUANTUM + + (LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask; + + size_t shift = (grp == 0) ? 1 : grp; + size_t lg_delta = shift + (LG_QUANTUM-1); + size_t mod_size = (mod+1) << lg_delta; + + size_t usize = grp_size + mod_size; + return (usize); + } +} + +JEMALLOC_ALWAYS_INLINE size_t +index2size_lookup(szind_t index) +{ + size_t ret = (size_t)index2size_tab[index]; + assert(ret == index2size_compute(index)); + return (ret); +} + +JEMALLOC_ALWAYS_INLINE size_t +index2size(szind_t index) +{ + + assert(index < NSIZES); + return (index2size_lookup(index)); +} + +JEMALLOC_ALWAYS_INLINE size_t +s2u_compute(size_t size) +{ + + if (unlikely(size > HUGE_MAXCLASS)) + return (0); +#if (NTBINS > 0) + if (size <= (ZU(1) << LG_TINY_MAXCLASS)) { + size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1; + size_t lg_ceil = lg_floor(pow2_ceil_zu(size)); + return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) : + (ZU(1) << lg_ceil)); + } +#endif + { + size_t x = lg_floor((size<<1)-1); + size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1) + ? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1; + size_t delta = ZU(1) << lg_delta; + size_t delta_mask = delta - 1; + size_t usize = (size + delta_mask) & ~delta_mask; + return (usize); + } +} + +JEMALLOC_ALWAYS_INLINE size_t +s2u_lookup(size_t size) +{ + size_t ret = index2size_lookup(size2index_lookup(size)); + + assert(ret == s2u_compute(size)); + return (ret); +} + +/* + * Compute usable size that would result from allocating an object with the + * specified size. + */ +JEMALLOC_ALWAYS_INLINE size_t +s2u(size_t size) +{ + + assert(size > 0); + if (likely(size <= LOOKUP_MAXCLASS)) + return (s2u_lookup(size)); + return (s2u_compute(size)); +} + +/* + * Compute usable size that would result from allocating an object with the + * specified size and alignment. + */ +JEMALLOC_ALWAYS_INLINE size_t +sa2u(size_t size, size_t alignment) +{ + size_t usize; + + assert(alignment != 0 && ((alignment - 1) & alignment) == 0); + + /* Try for a small size class. */ + if (size <= SMALL_MAXCLASS && alignment < PAGE) { + /* + * Round size up to the nearest multiple of alignment. + * + * This done, we can take advantage of the fact that for each + * small size class, every object is aligned at the smallest + * power of two that is non-zero in the base two representation + * of the size. 
For example: + * + * Size | Base 2 | Minimum alignment + * -----+----------+------------------ + * 96 | 1100000 | 32 + * 144 | 10100000 | 32 + * 192 | 11000000 | 64 + */ + usize = s2u(ALIGNMENT_CEILING(size, alignment)); + if (usize < LARGE_MINCLASS) + return (usize); + } + + /* Try for a large size class. */ + if (likely(size <= large_maxclass) && likely(alignment < chunksize)) { + /* + * We can't achieve subpage alignment, so round up alignment + * to the minimum that can actually be supported. + */ + alignment = PAGE_CEILING(alignment); + + /* Make sure result is a large size class. */ + usize = (size <= LARGE_MINCLASS) ? LARGE_MINCLASS : s2u(size); + + /* + * Calculate the size of the over-size run that arena_palloc() + * would need to allocate in order to guarantee the alignment. + */ + if (usize + large_pad + alignment - PAGE <= arena_maxrun) + return (usize); + } + + /* Huge size class. Beware of overflow. */ + + if (unlikely(alignment > HUGE_MAXCLASS)) + return (0); + + /* + * We can't achieve subchunk alignment, so round up alignment to the + * minimum that can actually be supported. + */ + alignment = CHUNK_CEILING(alignment); + + /* Make sure result is a huge size class. */ + if (size <= chunksize) + usize = chunksize; + else { + usize = s2u(size); + if (usize < size) { + /* size_t overflow. */ + return (0); + } + } + + /* + * Calculate the multi-chunk mapping that huge_palloc() would need in + * order to guarantee the alignment. + */ + if (usize + alignment - PAGE < usize) { + /* size_t overflow. */ + return (0); + } + return (usize); +} + +/* Choose an arena based on a per-thread value. */ +JEMALLOC_INLINE arena_t * +arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) +{ + arena_t *ret; + + if (arena != NULL) + return (arena); + + ret = internal ? tsd_iarena_get(tsd) : tsd_arena_get(tsd); + if (unlikely(ret == NULL)) + ret = arena_choose_hard(tsd, internal); + + return (ret); +} + +JEMALLOC_INLINE arena_t * +arena_choose(tsd_t *tsd, arena_t *arena) +{ + + return (arena_choose_impl(tsd, arena, false)); +} + +JEMALLOC_INLINE arena_t * +arena_ichoose(tsd_t *tsd, arena_t *arena) +{ + + return (arena_choose_impl(tsd, arena, true)); +} + +JEMALLOC_INLINE arena_tdata_t * +arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing) +{ + arena_tdata_t *tdata; + arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd); + + if (unlikely(arenas_tdata == NULL)) { + /* arenas_tdata hasn't been initialized yet. */ + return (arena_tdata_get_hard(tsd, ind)); + } + if (unlikely(ind >= tsd_narenas_tdata_get(tsd))) { + /* + * ind is invalid, cache is old (too small), or tdata to be + * initialized. + */ + return (refresh_if_missing ? 
arena_tdata_get_hard(tsd, ind) : + NULL); + } + + tdata = &arenas_tdata[ind]; + if (likely(tdata != NULL) || !refresh_if_missing) + return (tdata); + return (arena_tdata_get_hard(tsd, ind)); +} + +JEMALLOC_INLINE arena_t * +arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing) +{ + arena_t *ret; + + assert(ind <= MALLOCX_ARENA_MAX); + + ret = arenas[ind]; + if (unlikely(ret == NULL)) { + ret = atomic_read_p((void *)&arenas[ind]); + if (init_if_missing && unlikely(ret == NULL)) + ret = arena_init(tsdn, ind); + } + return (ret); +} + +JEMALLOC_INLINE ticker_t * +decay_ticker_get(tsd_t *tsd, unsigned ind) +{ + arena_tdata_t *tdata; + + tdata = arena_tdata_get(tsd, ind, true); + if (unlikely(tdata == NULL)) + return (NULL); + return (&tdata->decay_ticker); +} +#endif + +#include "jemalloc/internal/bitmap.h" +/* + * Include portions of arena.h interleaved with tcache.h in order to resolve + * circular dependencies. + */ +#define JEMALLOC_ARENA_INLINE_A +#include "jemalloc/internal/arena.h" +#undef JEMALLOC_ARENA_INLINE_A +#include "jemalloc/internal/tcache.h" +#define JEMALLOC_ARENA_INLINE_B +#include "jemalloc/internal/arena.h" +#undef JEMALLOC_ARENA_INLINE_B +#include "jemalloc/internal/hash.h" +#include "jemalloc/internal/quarantine.h" + +#ifndef JEMALLOC_ENABLE_INLINE +arena_t *iaalloc(const void *ptr); +size_t isalloc(tsdn_t *tsdn, const void *ptr, bool demote); +void *iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, + tcache_t *tcache, bool is_metadata, arena_t *arena, bool slow_path); +void *ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, + bool slow_path); +void *ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, + tcache_t *tcache, bool is_metadata, arena_t *arena); +void *ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, + tcache_t *tcache, arena_t *arena); +void *ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero); +size_t ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote); +size_t u2rz(size_t usize); +size_t p2rz(tsdn_t *tsdn, const void *ptr); +void idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool is_metadata, + bool slow_path); +void idalloc(tsd_t *tsd, void *ptr); +void iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path); +void isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache, + bool slow_path); +void isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache, + bool slow_path); +void *iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, + size_t extra, size_t alignment, bool zero, tcache_t *tcache, + arena_t *arena); +void *iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, + size_t alignment, bool zero, tcache_t *tcache, arena_t *arena); +void *iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, + size_t alignment, bool zero); +bool ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, + size_t extra, size_t alignment, bool zero); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_)) +JEMALLOC_ALWAYS_INLINE arena_t * +iaalloc(const void *ptr) +{ + + assert(ptr != NULL); + + return (arena_aalloc(ptr)); +} + +/* + * Typical usage: + * tsdn_t *tsdn = [...] + * void *ptr = [...] + * size_t sz = isalloc(tsdn, ptr, config_prof); + */ +JEMALLOC_ALWAYS_INLINE size_t +isalloc(tsdn_t *tsdn, const void *ptr, bool demote) +{ + + assert(ptr != NULL); + /* Demotion only makes sense if config_prof is true. 
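+	 * (With profiling enabled, small requests selected for sampling are
+	 * promoted to a larger size class; demote=true reports the original
+	 * small size class rather than the promoted one.)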
*/ + assert(config_prof || !demote); + + return (arena_salloc(tsdn, ptr, demote)); +} + +JEMALLOC_ALWAYS_INLINE void * +iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache, + bool is_metadata, arena_t *arena, bool slow_path) +{ + void *ret; + + assert(size != 0); + assert(!is_metadata || tcache == NULL); + assert(!is_metadata || arena == NULL || arena->ind < narenas_auto); + + ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path); + if (config_stats && is_metadata && likely(ret != NULL)) { + arena_metadata_allocated_add(iaalloc(ret), + isalloc(tsdn, ret, config_prof)); + } + return (ret); +} + +JEMALLOC_ALWAYS_INLINE void * +ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, bool slow_path) +{ + + return (iallocztm(tsd_tsdn(tsd), size, ind, zero, tcache_get(tsd, true), + false, NULL, slow_path)); +} + +JEMALLOC_ALWAYS_INLINE void * +ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, + tcache_t *tcache, bool is_metadata, arena_t *arena) +{ + void *ret; + + assert(usize != 0); + assert(usize == sa2u(usize, alignment)); + assert(!is_metadata || tcache == NULL); + assert(!is_metadata || arena == NULL || arena->ind < narenas_auto); + + ret = arena_palloc(tsdn, arena, usize, alignment, zero, tcache); + assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret); + if (config_stats && is_metadata && likely(ret != NULL)) { + arena_metadata_allocated_add(iaalloc(ret), isalloc(tsdn, ret, + config_prof)); + } + return (ret); +} + +JEMALLOC_ALWAYS_INLINE void * +ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, + tcache_t *tcache, arena_t *arena) +{ + + return (ipallocztm(tsdn, usize, alignment, zero, tcache, false, arena)); +} + +JEMALLOC_ALWAYS_INLINE void * +ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero) +{ + + return (ipallocztm(tsd_tsdn(tsd), usize, alignment, zero, + tcache_get(tsd, true), false, NULL)); +} + +JEMALLOC_ALWAYS_INLINE size_t +ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote) +{ + extent_node_t *node; + + /* Return 0 if ptr is not within a chunk managed by jemalloc. */ + node = chunk_lookup(ptr, false); + if (node == NULL) + return (0); + /* Only arena chunks should be looked up via interior pointers. 
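+	 * (A huge allocation's node records only its base address, so a
+	 * non-achunk match implies that ptr is exactly that base.)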
*/ + assert(extent_node_addr_get(node) == ptr || + extent_node_achunk_get(node)); + + return (isalloc(tsdn, ptr, demote)); +} + +JEMALLOC_INLINE size_t +u2rz(size_t usize) +{ + size_t ret; + + if (usize <= SMALL_MAXCLASS) { + szind_t binind = size2index(usize); + ret = arena_bin_info[binind].redzone_size; + } else + ret = 0; + + return (ret); +} + +JEMALLOC_INLINE size_t +p2rz(tsdn_t *tsdn, const void *ptr) +{ + size_t usize = isalloc(tsdn, ptr, false); + + return (u2rz(usize)); +} + +JEMALLOC_ALWAYS_INLINE void +idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool is_metadata, + bool slow_path) +{ + + assert(ptr != NULL); + assert(!is_metadata || tcache == NULL); + assert(!is_metadata || iaalloc(ptr)->ind < narenas_auto); + if (config_stats && is_metadata) { + arena_metadata_allocated_sub(iaalloc(ptr), isalloc(tsdn, ptr, + config_prof)); + } + + arena_dalloc(tsdn, ptr, tcache, slow_path); +} + +JEMALLOC_ALWAYS_INLINE void +idalloc(tsd_t *tsd, void *ptr) +{ + + idalloctm(tsd_tsdn(tsd), ptr, tcache_get(tsd, false), false, true); +} + +JEMALLOC_ALWAYS_INLINE void +iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) +{ + + if (slow_path && config_fill && unlikely(opt_quarantine)) + quarantine(tsd, ptr); + else + idalloctm(tsd_tsdn(tsd), ptr, tcache, false, slow_path); +} + +JEMALLOC_ALWAYS_INLINE void +isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache, + bool slow_path) +{ + + arena_sdalloc(tsdn, ptr, size, tcache, slow_path); +} + +JEMALLOC_ALWAYS_INLINE void +isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache, bool slow_path) +{ + + if (slow_path && config_fill && unlikely(opt_quarantine)) + quarantine(tsd, ptr); + else + isdalloct(tsd_tsdn(tsd), ptr, size, tcache, slow_path); +} + +JEMALLOC_ALWAYS_INLINE void * +iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, + size_t extra, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena) +{ + void *p; + size_t usize, copysize; + + usize = sa2u(size + extra, alignment); + if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) + return (NULL); + p = ipalloct(tsd_tsdn(tsd), usize, alignment, zero, tcache, arena); + if (p == NULL) { + if (extra == 0) + return (NULL); + /* Try again, without extra this time. */ + usize = sa2u(size, alignment); + if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) + return (NULL); + p = ipalloct(tsd_tsdn(tsd), usize, alignment, zero, tcache, + arena); + if (p == NULL) + return (NULL); + } + /* + * Copy at most size bytes (not size+extra), since the caller has no + * expectation that the extra bytes will be reliably preserved. + */ + copysize = (size < oldsize) ? size : oldsize; + memcpy(p, ptr, copysize); + isqalloc(tsd, ptr, oldsize, tcache, true); + return (p); +} + +JEMALLOC_ALWAYS_INLINE void * +iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment, + bool zero, tcache_t *tcache, arena_t *arena) +{ + + assert(ptr != NULL); + assert(size != 0); + + if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1)) + != 0) { + /* + * Existing object alignment is inadequate; allocate new space + * and copy. 
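+	 * (The stricter alignment cannot be established in place, so a fresh,
+	 * suitably aligned block is obtained and the contents copied over.)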
+ */ + return (iralloct_realign(tsd, ptr, oldsize, size, 0, alignment, + zero, tcache, arena)); + } + + return (arena_ralloc(tsd, arena, ptr, oldsize, size, alignment, zero, + tcache)); +} + +JEMALLOC_ALWAYS_INLINE void * +iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment, + bool zero) +{ + + return (iralloct(tsd, ptr, oldsize, size, alignment, zero, + tcache_get(tsd, true), NULL)); +} + +JEMALLOC_ALWAYS_INLINE bool +ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra, + size_t alignment, bool zero) +{ + + assert(ptr != NULL); + assert(size != 0); + + if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1)) + != 0) { + /* Existing object alignment is inadequate. */ + return (true); + } + + return (arena_ralloc_no_move(tsdn, ptr, oldsize, size, extra, zero)); +} +#endif + +#include "jemalloc/internal/prof.h" + +#undef JEMALLOC_H_INLINES +/******************************************************************************/ +#endif /* JEMALLOC_INTERNAL_H */ diff --git a/sources/jemalloc/include-linux/jemalloc/internal/jemalloc_internal_decls.h b/sources/jemalloc/include-linux/jemalloc/internal/jemalloc_internal_decls.h new file mode 100755 index 00000000..c907d910 --- /dev/null +++ b/sources/jemalloc/include-linux/jemalloc/internal/jemalloc_internal_decls.h @@ -0,0 +1,75 @@ +#ifndef JEMALLOC_INTERNAL_DECLS_H +#define JEMALLOC_INTERNAL_DECLS_H + +#include +#ifdef _WIN32 +# include +# include "msvc_compat/windows_extra.h" + +#else +# include +# include +# if !defined(__pnacl__) && !defined(__native_client__) +# include +# if !defined(SYS_write) && defined(__NR_write) +# define SYS_write __NR_write +# endif +# include +# endif +# include +# ifdef JEMALLOC_OS_UNFAIR_LOCK +# include +# endif +# ifdef JEMALLOC_GLIBC_MALLOC_HOOK +# include +# endif +# include +# include +# include +# ifdef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME +# include +# endif +#endif +#include + +#include +#ifndef SIZE_T_MAX +# define SIZE_T_MAX SIZE_MAX +#endif +#include +#include +#include +#include +#include +#include +#ifndef offsetof +# define offsetof(type, member) ((size_t)&(((type *)NULL)->member)) +#endif +#include +#include +#include +#ifdef _MSC_VER +# include +typedef intptr_t ssize_t; +# define PATH_MAX 1024 +# define STDERR_FILENO 2 +# define __func__ __FUNCTION__ +# ifdef JEMALLOC_HAS_RESTRICT +# define restrict __restrict +# endif +/* Disable warnings about deprecated system functions. */ +# pragma warning(disable: 4996) +#if _MSC_VER < 1800 +static int +isblank(int c) +{ + + return (c == '\t' || c == ' '); +} +#endif +#else +# include +#endif +#include + +#endif /* JEMALLOC_INTERNAL_H */ diff --git a/sources/jemalloc/include-linux/jemalloc/internal/jemalloc_internal_defs.h b/sources/jemalloc/include-linux/jemalloc/internal/jemalloc_internal_defs.h new file mode 100755 index 00000000..c02e814e --- /dev/null +++ b/sources/jemalloc/include-linux/jemalloc/internal/jemalloc_internal_defs.h @@ -0,0 +1,335 @@ +/* include/jemalloc/internal/jemalloc_internal_defs.h. Generated from jemalloc_internal_defs.h.in by configure. */ +#ifndef JEMALLOC_INTERNAL_DEFS_H_ +#define JEMALLOC_INTERNAL_DEFS_H_ + +//#define PUB_MEMORY_TRACKER + +/* + * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all + * public APIs to be prefixed. This makes it possible, with some care, to use + * multiple allocators simultaneously. 
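+ * For example, with the "je_" prefix configured below, the public entry
+ * points are exported as je_malloc(), je_free(), je_mallctl(), and so on.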
+ */ +#define JEMALLOC_PREFIX "je_" +#define JEMALLOC_CPREFIX "JE_" + +/* + * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs. + * For shared libraries, symbol visibility mechanisms prevent these symbols + * from being exported, but for static libraries, naming collisions are a real + * possibility. + */ +#define JEMALLOC_PRIVATE_NAMESPACE je_ + +/* + * Hyper-threaded CPUs may need a special instruction inside spin loops in + * order to yield to another virtual CPU. + */ +#ifdef __x86_64__ +#define CPU_SPINWAIT __asm__ volatile("pause") +#else +#define CPU_SPINWAIT +#endif + +/* Defined if C11 atomics are available. */ +/* #undef JEMALLOC_C11ATOMICS */ + +/* Defined if the equivalent of FreeBSD's atomic(9) functions are available. */ +/* #undef JEMALLOC_ATOMIC9 */ + +/* + * Defined if OSAtomic*() functions are available, as provided by Darwin, and + * documented in the atomic(3) manual page. + */ +/* #undef JEMALLOC_OSATOMIC */ + +/* + * Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and + * __sync_sub_and_fetch(uint32_t *, uint32_t) are available, despite + * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the + * functions are defined in libgcc instead of being inlines). + */ +/* #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_4 */ + +/* + * Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and + * __sync_sub_and_fetch(uint64_t *, uint64_t) are available, despite + * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the + * functions are defined in libgcc instead of being inlines). + */ +/* #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_8 */ + +/* + * Defined if __builtin_clz() and __builtin_clzl() are available. + */ +#define JEMALLOC_HAVE_BUILTIN_CLZ + +/* + * Defined if os_unfair_lock_*() functions are available, as provided by Darwin. + */ +/* #undef JEMALLOC_OS_UNFAIR_LOCK */ + +/* + * Defined if OSSpin*() functions are available, as provided by Darwin, and + * documented in the spinlock(3) manual page. + */ +/* #undef JEMALLOC_OSSPIN */ + +/* Defined if syscall(2) is usable. */ +#define JEMALLOC_USE_SYSCALL + +/* + * Defined if secure_getenv(3) is available. + */ +/* #undef JEMALLOC_HAVE_SECURE_GETENV */ + +/* + * Defined if issetugid(2) is available. + */ +/* #undef JEMALLOC_HAVE_ISSETUGID */ + +/* Defined if pthread_atfork(3) is available. */ +#define JEMALLOC_HAVE_PTHREAD_ATFORK + +/* + * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available. + */ +/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */ + +/* + * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available. + */ +#define JEMALLOC_HAVE_CLOCK_MONOTONIC 1 + +/* + * Defined if mach_absolute_time() is available. + */ +/* #undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME */ + +/* + * Defined if _malloc_thread_cleanup() exists. At least in the case of + * FreeBSD, pthread_key_create() allocates, which if used during malloc + * bootstrapping will cause recursion into the pthreads library. Therefore, if + * _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in + * malloc_tsd. + */ +/* #undef JEMALLOC_MALLOC_THREAD_CLEANUP */ + +/* + * Defined if threaded initialization is known to be safe on this platform. + * Among other things, it must be possible to initialize a mutex without + * triggering allocation in order for threaded allocation to be safe. 
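+ * (This is the case for glibc, whose pthread_mutex_init() performs no
+ * allocation.)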
+ */ +#define JEMALLOC_THREADED_INIT + +/* + * Defined if the pthreads implementation defines + * _pthread_mutex_init_calloc_cb(), in which case the function is used in order + * to avoid recursive allocation during mutex initialization. + */ +/* #undef JEMALLOC_MUTEX_INIT_CB */ + +/* Non-empty if the tls_model attribute is supported. */ +#define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec"))) + +/* JEMALLOC_CC_SILENCE enables code that silences unuseful compiler warnings. */ +#define JEMALLOC_CC_SILENCE + +/* JEMALLOC_CODE_COVERAGE enables test code coverage analysis. */ +/* #undef JEMALLOC_CODE_COVERAGE */ + +/* + * JEMALLOC_DEBUG enables assertions and other sanity checks, and disables + * inline functions. + */ +#ifdef PUB_MEMORY_TRACKER +#define JEMALLOC_DEBUG +#endif + +/* JEMALLOC_STATS enables statistics calculation. */ +#ifdef PUB_MEMORY_TRACKER +#define JEMALLOC_STATS +#endif + +/* JEMALLOC_PROF enables allocation profiling. */ +/* #undef JEMALLOC_PROF */ + +/* Use libunwind for profile backtracing if defined. */ +/* #undef JEMALLOC_PROF_LIBUNWIND */ + +/* Use libgcc for profile backtracing if defined. */ +/* #undef JEMALLOC_PROF_LIBGCC */ + +/* Use gcc intrinsics for profile backtracing if defined. */ +/* #undef JEMALLOC_PROF_GCC */ + +/* + * JEMALLOC_TCACHE enables a thread-specific caching layer for small objects. + * This makes it possible to allocate/deallocate objects without any locking + * when the cache is in the steady state. + */ +#ifndef PUB_MEMORY_TRACKER +#define JEMALLOC_TCACHE +#endif + +/* + * JEMALLOC_DSS enables use of sbrk(2) to allocate chunks from the data storage + * segment (DSS). + */ +#define JEMALLOC_DSS + +/* Support memory filling (junk/zero/quarantine/redzone). */ +#define JEMALLOC_FILL + +/* Support utrace(2)-based tracing. */ +/* #undef JEMALLOC_UTRACE */ + +/* Support Valgrind. */ +/* #undef JEMALLOC_VALGRIND */ + +/* Support optional abort() on OOM. */ +/* #undef JEMALLOC_XMALLOC */ + +/* Support lazy locking (avoid locking unless a second thread is launched). */ +/* #undef JEMALLOC_LAZY_LOCK */ + +/* Minimum size class to support is 2^LG_TINY_MIN bytes. */ +#define LG_TINY_MIN 3 + +/* + * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size + * classes). + */ +/* #undef LG_QUANTUM */ + +/* One page is 2^LG_PAGE bytes. */ +#define LG_PAGE 12 + +/* + * If defined, adjacent virtual memory mappings with identical attributes + * automatically coalesce, and they fragment when changes are made to subranges. + * This is the normal order of things for mmap()/munmap(), but on Windows + * VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e. + * mappings do *not* coalesce/fragment. + */ +#define JEMALLOC_MAPS_COALESCE + +/* + * If defined, use munmap() to unmap freed chunks, rather than storing them for + * later reuse. This is disabled by default on Linux because common sequences + * of mmap()/munmap() calls will cause virtual memory map holes. + */ +/* #undef JEMALLOC_MUNMAP */ + +/* TLS is used to map arenas and magazine caches to threads. */ +#define JEMALLOC_TLS + +/* + * Used to mark unreachable code to quiet "end of non-void" compiler warnings. + * Don't use this directly; instead use unreachable() from util.h + */ +#define JEMALLOC_INTERNAL_UNREACHABLE abort + +/* + * ffs*() functions to use for bitmapping. Don't use these directly; instead, + * use ffs_*() from util.h. 
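+ * (They follow the __builtin_ffs*() convention wrapped here: the result is
+ * the 1-based index of the least significant set bit, or 0 if no bit is
+ * set.)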
+ */ +#define JEMALLOC_INTERNAL_FFSLL __builtin_ffsll +#define JEMALLOC_INTERNAL_FFSL __builtin_ffsl +#define JEMALLOC_INTERNAL_FFS __builtin_ffs + +/* + * JEMALLOC_IVSALLOC enables ivsalloc(), which verifies that pointers reside + * within jemalloc-owned chunks before dereferencing them. + */ +#ifdef PUB_MEMORY_TRACKER +#define JEMALLOC_IVSALLOC +#endif + +/* + * If defined, explicitly attempt to more uniformly distribute large allocation + * pointer alignments across all cache indices. + */ +#define JEMALLOC_CACHE_OBLIVIOUS + +/* + * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings. + */ +/* #undef JEMALLOC_ZONE */ + +/* + * Methods for determining whether the OS overcommits. + * JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's + * /proc/sys/vm.overcommit_memory file. + * JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl. + */ +/* #undef JEMALLOC_SYSCTL_VM_OVERCOMMIT */ +#define JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY + +/* Defined if madvise(2) is available. */ +#define JEMALLOC_HAVE_MADVISE + +/* + * Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE + * arguments to madvise(2). + */ +/* #undef JEMALLOC_HAVE_MADVISE_HUGE */ + +/* + * Methods for purging unused pages differ between operating systems. + * + * madvise(..., MADV_FREE) : This marks pages as being unused, such that they + * will be discarded rather than swapped out. + * madvise(..., MADV_DONTNEED) : This immediately discards pages, such that + * new pages will be demand-zeroed if the + * address region is later touched. + */ +/* #undef JEMALLOC_PURGE_MADVISE_FREE */ +#define JEMALLOC_PURGE_MADVISE_DONTNEED + +/* Defined if transparent huge page support is enabled. */ +/* #undef JEMALLOC_THP */ + +/* Define if operating system has alloca.h header. */ +#define JEMALLOC_HAS_ALLOCA_H 1 + +/* C99 restrict keyword supported. */ +/*#define JEMALLOC_HAS_RESTRICT 1 */ + +/* For use by hash code. */ +/* #undef JEMALLOC_BIG_ENDIAN */ + +/* sizeof(int) == 2^LG_SIZEOF_INT. */ +#define LG_SIZEOF_INT 2 + +/* sizeof(long) == 2^LG_SIZEOF_LONG. */ +#if (defined(__LP64__) && __LP64__) +#define LG_SIZEOF_LONG 3 +#else +#define LG_SIZEOF_LONG 2 +#endif + +/* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */ +#define LG_SIZEOF_LONG_LONG 3 + +/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */ +#define LG_SIZEOF_INTMAX_T 3 + +/* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */ +#define JEMALLOC_GLIBC_MALLOC_HOOK + +/* glibc memalign hook. */ +#define JEMALLOC_GLIBC_MEMALIGN_HOOK + +/* Adaptive mutex support in pthreads. */ +#define JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP + +/* + * If defined, jemalloc symbols are not exported (doesn't work when + * JEMALLOC_PREFIX is not defined). + */ +#define JEMALLOC_EXPORT /**/ + +/* config.malloc_conf options string. */ +#define JEMALLOC_CONFIG_MALLOC_CONF "" + +#endif /* JEMALLOC_INTERNAL_DEFS_H_ */ diff --git a/sources/jemalloc/include-linux/jemalloc/internal/jemalloc_internal_macros.h b/sources/jemalloc/include-linux/jemalloc/internal/jemalloc_internal_macros.h new file mode 100755 index 00000000..a08ba772 --- /dev/null +++ b/sources/jemalloc/include-linux/jemalloc/internal/jemalloc_internal_macros.h @@ -0,0 +1,57 @@ +/* + * JEMALLOC_ALWAYS_INLINE and JEMALLOC_INLINE are used within header files for + * functions that are static inline functions if inlining is enabled, and + * single-definition library-private functions if inlining is disabled. 
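+ * (With inlining disabled, JEMALLOC_ENABLE_INLINE remains undefined, so each
+ * header emits its function bodies exactly once, in the .c file that defines
+ * the corresponding JEMALLOC_*_C_ macro.)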
+ * + * JEMALLOC_ALWAYS_INLINE_C and JEMALLOC_INLINE_C are for use in .c files, in + * which case the denoted functions are always static, regardless of whether + * inlining is enabled. + */ +#if defined(JEMALLOC_DEBUG) || defined(JEMALLOC_CODE_COVERAGE) + /* Disable inlining to make debugging/profiling easier. */ +# define JEMALLOC_ALWAYS_INLINE +# define JEMALLOC_ALWAYS_INLINE_C static +# define JEMALLOC_INLINE +# define JEMALLOC_INLINE_C static +# define inline +#else +# define JEMALLOC_ENABLE_INLINE +# ifdef JEMALLOC_HAVE_ATTR +# define JEMALLOC_ALWAYS_INLINE \ + static inline JEMALLOC_ATTR(unused) JEMALLOC_ATTR(always_inline) +# define JEMALLOC_ALWAYS_INLINE_C \ + static inline JEMALLOC_ATTR(always_inline) +# else +# define JEMALLOC_ALWAYS_INLINE static inline +# define JEMALLOC_ALWAYS_INLINE_C static inline +# endif +# define JEMALLOC_INLINE static inline +# define JEMALLOC_INLINE_C static inline +# ifdef _MSC_VER +# define inline _inline +# endif +#endif + +#ifdef JEMALLOC_CC_SILENCE +# define UNUSED JEMALLOC_ATTR(unused) +#else +# define UNUSED +#endif + +#define ZU(z) ((size_t)z) +#define ZI(z) ((ssize_t)z) +#define QU(q) ((uint64_t)q) +#define QI(q) ((int64_t)q) + +#define KZU(z) ZU(z##ULL) +#define KZI(z) ZI(z##LL) +#define KQU(q) QU(q##ULL) +#define KQI(q) QI(q##LL) + +#ifndef __DECONST +# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var)) +#endif + +#ifndef JEMALLOC_HAS_RESTRICT +# define restrict +#endif diff --git a/sources/jemalloc/include-linux/jemalloc/internal/mb.h b/sources/jemalloc/include-linux/jemalloc/internal/mb.h new file mode 100755 index 00000000..e58da5c3 --- /dev/null +++ b/sources/jemalloc/include-linux/jemalloc/internal/mb.h @@ -0,0 +1,115 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +void mb_write(void); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MB_C_)) +#ifdef __i386__ +/* + * According to the Intel Architecture Software Developer's Manual, current + * processors execute instructions in order from the perspective of other + * processors in a multiprocessor system, but 1) Intel reserves the right to + * change that, and 2) the compiler's optimizer could re-order instructions if + * there weren't some form of barrier. Therefore, even if running on an + * architecture that does not need memory barriers (everything through at least + * i686), an "optimizer barrier" is necessary. + */ +JEMALLOC_INLINE void +mb_write(void) +{ + +# if 0 + /* This is a true memory barrier. */ + asm volatile ("pusha;" + "xor %%eax,%%eax;" + "cpuid;" + "popa;" + : /* Outputs. */ + : /* Inputs. */ + : "memory" /* Clobbers. */ + ); +# else + /* + * This is hopefully enough to keep the compiler from reordering + * instructions around this one. + */ + asm volatile ("nop;" + : /* Outputs. */ + : /* Inputs. */ + : "memory" /* Clobbers. */ + ); +# endif +} +#elif (defined(__amd64__) || defined(__x86_64__)) +JEMALLOC_INLINE void +mb_write(void) +{ + + asm volatile ("sfence" + : /* Outputs. */ + : /* Inputs. 
*/ + : "memory" /* Clobbers. */ + ); +} +#elif defined(__powerpc__) +JEMALLOC_INLINE void +mb_write(void) +{ + + asm volatile ("eieio" + : /* Outputs. */ + : /* Inputs. */ + : "memory" /* Clobbers. */ + ); +} +#elif defined(__sparc__) && defined(__arch64__) +JEMALLOC_INLINE void +mb_write(void) +{ + + asm volatile ("membar #StoreStore" + : /* Outputs. */ + : /* Inputs. */ + : "memory" /* Clobbers. */ + ); +} +#elif defined(__tile__) +JEMALLOC_INLINE void +mb_write(void) +{ + + __sync_synchronize(); +} +#else +/* + * This is much slower than a simple memory barrier, but the semantics of mutex + * unlock make this work. + */ +JEMALLOC_INLINE void +mb_write(void) +{ + malloc_mutex_t mtx; + + malloc_mutex_init(&mtx, "mb", WITNESS_RANK_OMIT); + malloc_mutex_lock(TSDN_NULL, &mtx); + malloc_mutex_unlock(TSDN_NULL, &mtx); +} +#endif +#endif + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ diff --git a/sources/jemalloc/include-linux/jemalloc/internal/mutex.h b/sources/jemalloc/include-linux/jemalloc/internal/mutex.h new file mode 100755 index 00000000..2b4b1c31 --- /dev/null +++ b/sources/jemalloc/include-linux/jemalloc/internal/mutex.h @@ -0,0 +1,145 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +typedef struct malloc_mutex_s malloc_mutex_t; + +#ifdef _WIN32 +# define MALLOC_MUTEX_INITIALIZER +#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) +# define MALLOC_MUTEX_INITIALIZER \ + {OS_UNFAIR_LOCK_INIT, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)} +#elif (defined(JEMALLOC_OSSPIN)) +# define MALLOC_MUTEX_INITIALIZER {0, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)} +#elif (defined(JEMALLOC_MUTEX_INIT_CB)) +# define MALLOC_MUTEX_INITIALIZER \ + {PTHREAD_MUTEX_INITIALIZER, NULL, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)} +#else +# if (defined(JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP) && \ + defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP)) +# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_ADAPTIVE_NP +# define MALLOC_MUTEX_INITIALIZER \ + {PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP, \ + WITNESS_INITIALIZER(WITNESS_RANK_OMIT)} +# else +# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT +# define MALLOC_MUTEX_INITIALIZER \ + {PTHREAD_MUTEX_INITIALIZER, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)} +# endif +#endif + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +struct malloc_mutex_s { +#ifdef _WIN32 +# if _WIN32_WINNT >= 0x0600 + SRWLOCK lock; +# else + CRITICAL_SECTION lock; +# endif +#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) + os_unfair_lock lock; +#elif (defined(JEMALLOC_OSSPIN)) + OSSpinLock lock; +#elif (defined(JEMALLOC_MUTEX_INIT_CB)) + pthread_mutex_t lock; + malloc_mutex_t *postponed_next; +#else + pthread_mutex_t lock; +#endif + witness_t witness; +}; + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +#ifdef JEMALLOC_LAZY_LOCK +extern bool isthreaded; +#else +# undef isthreaded /* Undo private_namespace.h definition. 
*/ +# define isthreaded true +#endif + +bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name, + witness_rank_t rank); +void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex); +void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex); +void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex); +bool malloc_mutex_boot(void); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +void malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex); +void malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex); +void malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex); +void malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_)) +JEMALLOC_INLINE void +malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex) +{ + + witness_assert_not_owner(tsdn, &mutex->witness); + if (isthreaded) { +#ifdef _WIN32 +# if _WIN32_WINNT >= 0x0600 + AcquireSRWLockExclusive(&mutex->lock); +# else + EnterCriticalSection(&mutex->lock); +# endif +#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) + os_unfair_lock_lock(&mutex->lock); +#elif (defined(JEMALLOC_OSSPIN)) + OSSpinLockLock(&mutex->lock); +#else + pthread_mutex_lock(&mutex->lock); +#endif + } + witness_lock(tsdn, &mutex->witness); +} + +JEMALLOC_INLINE void +malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex) +{ + + witness_unlock(tsdn, &mutex->witness); + if (isthreaded) { +#ifdef _WIN32 +# if _WIN32_WINNT >= 0x0600 + ReleaseSRWLockExclusive(&mutex->lock); +# else + LeaveCriticalSection(&mutex->lock); +# endif +#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) + os_unfair_lock_unlock(&mutex->lock); +#elif (defined(JEMALLOC_OSSPIN)) + OSSpinLockUnlock(&mutex->lock); +#else + pthread_mutex_unlock(&mutex->lock); +#endif + } +} + +JEMALLOC_INLINE void +malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) +{ + + witness_assert_owner(tsdn, &mutex->witness); +} + +JEMALLOC_INLINE void +malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) +{ + + witness_assert_not_owner(tsdn, &mutex->witness); +} +#endif + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ diff --git a/sources/jemalloc/include-linux/jemalloc/internal/nstime.h b/sources/jemalloc/include-linux/jemalloc/internal/nstime.h new file mode 100755 index 00000000..93b27dc8 --- /dev/null +++ b/sources/jemalloc/include-linux/jemalloc/internal/nstime.h @@ -0,0 +1,48 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +typedef struct nstime_s nstime_t; + +/* Maximum supported number of seconds (~584 years). 
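+ * (nstime_t below keeps the whole timestamp in a single uint64_t nanosecond
+ * count, and 2^64 ns is roughly 1.8 * 10^10 s.)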
*/ +#define NSTIME_SEC_MAX KQU(18446744072) + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +struct nstime_s { + uint64_t ns; +}; + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +void nstime_init(nstime_t *time, uint64_t ns); +void nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec); +uint64_t nstime_ns(const nstime_t *time); +uint64_t nstime_sec(const nstime_t *time); +uint64_t nstime_nsec(const nstime_t *time); +void nstime_copy(nstime_t *time, const nstime_t *source); +int nstime_compare(const nstime_t *a, const nstime_t *b); +void nstime_add(nstime_t *time, const nstime_t *addend); +void nstime_subtract(nstime_t *time, const nstime_t *subtrahend); +void nstime_imultiply(nstime_t *time, uint64_t multiplier); +void nstime_idivide(nstime_t *time, uint64_t divisor); +uint64_t nstime_divide(const nstime_t *time, const nstime_t *divisor); +#ifdef JEMALLOC_JET +typedef bool (nstime_monotonic_t)(void); +extern nstime_monotonic_t *nstime_monotonic; +typedef bool (nstime_update_t)(nstime_t *); +extern nstime_update_t *nstime_update; +#else +bool nstime_monotonic(void); +bool nstime_update(nstime_t *time); +#endif + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ diff --git a/sources/jemalloc/include-linux/jemalloc/internal/pages.h b/sources/jemalloc/include-linux/jemalloc/internal/pages.h new file mode 100755 index 00000000..4ae9f156 --- /dev/null +++ b/sources/jemalloc/include-linux/jemalloc/internal/pages.h @@ -0,0 +1,29 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +void *pages_map(void *addr, size_t size, bool *commit); +void pages_unmap(void *addr, size_t size); +void *pages_trim(void *addr, size_t alloc_size, size_t leadsize, + size_t size, bool *commit); +bool pages_commit(void *addr, size_t size); +bool pages_decommit(void *addr, size_t size); +bool pages_purge(void *addr, size_t size); +bool pages_huge(void *addr, size_t size); +bool pages_nohuge(void *addr, size_t size); +void pages_boot(void); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ + diff --git a/sources/jemalloc/include-linux/jemalloc/internal/ph.h b/sources/jemalloc/include-linux/jemalloc/internal/ph.h new file mode 100755 index 00000000..4f91c333 --- /dev/null +++ b/sources/jemalloc/include-linux/jemalloc/internal/ph.h @@ -0,0 +1,345 @@ +/* + * A Pairing Heap implementation. + * + * "The Pairing Heap: A New Form of Self-Adjusting Heap" + * https://www.cs.cmu.edu/~sleator/papers/pairing-heaps.pdf + * + * With auxiliary twopass list, described in a follow on paper. 
+ * + * "Pairing Heaps: Experiments and Analysis" + * http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.106.2988&rep=rep1&type=pdf + * + ******************************************************************************* + */ + +#ifndef PH_H_ +#define PH_H_ + +/* Node structure. */ +#define phn(a_type) \ +struct { \ + a_type *phn_prev; \ + a_type *phn_next; \ + a_type *phn_lchild; \ +} + +/* Root structure. */ +#define ph(a_type) \ +struct { \ + a_type *ph_root; \ +} + +/* Internal utility macros. */ +#define phn_lchild_get(a_type, a_field, a_phn) \ + (a_phn->a_field.phn_lchild) +#define phn_lchild_set(a_type, a_field, a_phn, a_lchild) do { \ + a_phn->a_field.phn_lchild = a_lchild; \ +} while (0) + +#define phn_next_get(a_type, a_field, a_phn) \ + (a_phn->a_field.phn_next) +#define phn_prev_set(a_type, a_field, a_phn, a_prev) do { \ + a_phn->a_field.phn_prev = a_prev; \ +} while (0) + +#define phn_prev_get(a_type, a_field, a_phn) \ + (a_phn->a_field.phn_prev) +#define phn_next_set(a_type, a_field, a_phn, a_next) do { \ + a_phn->a_field.phn_next = a_next; \ +} while (0) + +#define phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, a_cmp) do { \ + a_type *phn0child; \ + \ + assert(a_phn0 != NULL); \ + assert(a_phn1 != NULL); \ + assert(a_cmp(a_phn0, a_phn1) <= 0); \ + \ + phn_prev_set(a_type, a_field, a_phn1, a_phn0); \ + phn0child = phn_lchild_get(a_type, a_field, a_phn0); \ + phn_next_set(a_type, a_field, a_phn1, phn0child); \ + if (phn0child != NULL) \ + phn_prev_set(a_type, a_field, phn0child, a_phn1); \ + phn_lchild_set(a_type, a_field, a_phn0, a_phn1); \ +} while (0) + +#define phn_merge(a_type, a_field, a_phn0, a_phn1, a_cmp, r_phn) do { \ + if (a_phn0 == NULL) \ + r_phn = a_phn1; \ + else if (a_phn1 == NULL) \ + r_phn = a_phn0; \ + else if (a_cmp(a_phn0, a_phn1) < 0) { \ + phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, \ + a_cmp); \ + r_phn = a_phn0; \ + } else { \ + phn_merge_ordered(a_type, a_field, a_phn1, a_phn0, \ + a_cmp); \ + r_phn = a_phn1; \ + } \ +} while (0) + +#define ph_merge_siblings(a_type, a_field, a_phn, a_cmp, r_phn) do { \ + a_type *head = NULL; \ + a_type *tail = NULL; \ + a_type *phn0 = a_phn; \ + a_type *phn1 = phn_next_get(a_type, a_field, phn0); \ + \ + /* \ + * Multipass merge, wherein the first two elements of a FIFO \ + * are repeatedly merged, and each result is appended to the \ + * singly linked FIFO, until the FIFO contains only a single \ + * element. We start with a sibling list but no reference to \ + * its tail, so we do a single pass over the sibling list to \ + * populate the FIFO. 
\ + */ \ + if (phn1 != NULL) { \ + a_type *phnrest = phn_next_get(a_type, a_field, phn1); \ + if (phnrest != NULL) \ + phn_prev_set(a_type, a_field, phnrest, NULL); \ + phn_prev_set(a_type, a_field, phn0, NULL); \ + phn_next_set(a_type, a_field, phn0, NULL); \ + phn_prev_set(a_type, a_field, phn1, NULL); \ + phn_next_set(a_type, a_field, phn1, NULL); \ + phn_merge(a_type, a_field, phn0, phn1, a_cmp, phn0); \ + head = tail = phn0; \ + phn0 = phnrest; \ + while (phn0 != NULL) { \ + phn1 = phn_next_get(a_type, a_field, phn0); \ + if (phn1 != NULL) { \ + phnrest = phn_next_get(a_type, a_field, \ + phn1); \ + if (phnrest != NULL) { \ + phn_prev_set(a_type, a_field, \ + phnrest, NULL); \ + } \ + phn_prev_set(a_type, a_field, phn0, \ + NULL); \ + phn_next_set(a_type, a_field, phn0, \ + NULL); \ + phn_prev_set(a_type, a_field, phn1, \ + NULL); \ + phn_next_set(a_type, a_field, phn1, \ + NULL); \ + phn_merge(a_type, a_field, phn0, phn1, \ + a_cmp, phn0); \ + phn_next_set(a_type, a_field, tail, \ + phn0); \ + tail = phn0; \ + phn0 = phnrest; \ + } else { \ + phn_next_set(a_type, a_field, tail, \ + phn0); \ + tail = phn0; \ + phn0 = NULL; \ + } \ + } \ + phn0 = head; \ + phn1 = phn_next_get(a_type, a_field, phn0); \ + if (phn1 != NULL) { \ + while (true) { \ + head = phn_next_get(a_type, a_field, \ + phn1); \ + assert(phn_prev_get(a_type, a_field, \ + phn0) == NULL); \ + phn_next_set(a_type, a_field, phn0, \ + NULL); \ + assert(phn_prev_get(a_type, a_field, \ + phn1) == NULL); \ + phn_next_set(a_type, a_field, phn1, \ + NULL); \ + phn_merge(a_type, a_field, phn0, phn1, \ + a_cmp, phn0); \ + if (head == NULL) \ + break; \ + phn_next_set(a_type, a_field, tail, \ + phn0); \ + tail = phn0; \ + phn0 = head; \ + phn1 = phn_next_get(a_type, a_field, \ + phn0); \ + } \ + } \ + } \ + r_phn = phn0; \ +} while (0) + +#define ph_merge_aux(a_type, a_field, a_ph, a_cmp) do { \ + a_type *phn = phn_next_get(a_type, a_field, a_ph->ph_root); \ + if (phn != NULL) { \ + phn_prev_set(a_type, a_field, a_ph->ph_root, NULL); \ + phn_next_set(a_type, a_field, a_ph->ph_root, NULL); \ + phn_prev_set(a_type, a_field, phn, NULL); \ + ph_merge_siblings(a_type, a_field, phn, a_cmp, phn); \ + assert(phn_next_get(a_type, a_field, phn) == NULL); \ + phn_merge(a_type, a_field, a_ph->ph_root, phn, a_cmp, \ + a_ph->ph_root); \ + } \ +} while (0) + +#define ph_merge_children(a_type, a_field, a_phn, a_cmp, r_phn) do { \ + a_type *lchild = phn_lchild_get(a_type, a_field, a_phn); \ + if (lchild == NULL) \ + r_phn = NULL; \ + else { \ + ph_merge_siblings(a_type, a_field, lchild, a_cmp, \ + r_phn); \ + } \ +} while (0) + +/* + * The ph_proto() macro generates function prototypes that correspond to the + * functions generated by an equivalently parameterized call to ph_gen(). + */ +#define ph_proto(a_attr, a_prefix, a_ph_type, a_type) \ +a_attr void a_prefix##new(a_ph_type *ph); \ +a_attr bool a_prefix##empty(a_ph_type *ph); \ +a_attr a_type *a_prefix##first(a_ph_type *ph); \ +a_attr void a_prefix##insert(a_ph_type *ph, a_type *phn); \ +a_attr a_type *a_prefix##remove_first(a_ph_type *ph); \ +a_attr void a_prefix##remove(a_ph_type *ph, a_type *phn); + +/* + * The ph_gen() macro generates a type-specific pairing heap implementation, + * based on the above cpp macros. 
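+ *
+ * A hypothetical usage sketch (node_t, the "link" field, and node_cmp are
+ * assumed example names, not part of this header):
+ *
+ *   typedef struct node_s node_t;
+ *   struct node_s {
+ *       uint64_t key;
+ *       phn(node_t) link;
+ *   };
+ *   static int
+ *   node_cmp(const node_t *a, const node_t *b)
+ *   {
+ *       return ((a->key > b->key) - (a->key < b->key));
+ *   }
+ *   typedef ph(node_t) node_heap_t;
+ *   ph_gen(static, node_heap_, node_heap_t, node_t, link, node_cmp)
+ *
+ * after which node_heap_new(), node_heap_empty(), node_heap_first(),
+ * node_heap_insert(), node_heap_remove_first() and node_heap_remove() are
+ * available.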
+ */ +#define ph_gen(a_attr, a_prefix, a_ph_type, a_type, a_field, a_cmp) \ +a_attr void \ +a_prefix##new(a_ph_type *ph) \ +{ \ + \ + memset(ph, 0, sizeof(ph(a_type))); \ +} \ +a_attr bool \ +a_prefix##empty(a_ph_type *ph) \ +{ \ + \ + return (ph->ph_root == NULL); \ +} \ +a_attr a_type * \ +a_prefix##first(a_ph_type *ph) \ +{ \ + \ + if (ph->ph_root == NULL) \ + return (NULL); \ + ph_merge_aux(a_type, a_field, ph, a_cmp); \ + return (ph->ph_root); \ +} \ +a_attr void \ +a_prefix##insert(a_ph_type *ph, a_type *phn) \ +{ \ + \ + memset(&phn->a_field, 0, sizeof(phn(a_type))); \ + \ + /* \ + * Treat the root as an aux list during insertion, and lazily \ + * merge during a_prefix##remove_first(). For elements that \ + * are inserted, then removed via a_prefix##remove() before the \ + * aux list is ever processed, this makes insert/remove \ + * constant-time, whereas eager merging would make insert \ + * O(log n). \ + */ \ + if (ph->ph_root == NULL) \ + ph->ph_root = phn; \ + else { \ + phn_next_set(a_type, a_field, phn, phn_next_get(a_type, \ + a_field, ph->ph_root)); \ + if (phn_next_get(a_type, a_field, ph->ph_root) != \ + NULL) { \ + phn_prev_set(a_type, a_field, \ + phn_next_get(a_type, a_field, ph->ph_root), \ + phn); \ + } \ + phn_prev_set(a_type, a_field, phn, ph->ph_root); \ + phn_next_set(a_type, a_field, ph->ph_root, phn); \ + } \ +} \ +a_attr a_type * \ +a_prefix##remove_first(a_ph_type *ph) \ +{ \ + a_type *ret; \ + \ + if (ph->ph_root == NULL) \ + return (NULL); \ + ph_merge_aux(a_type, a_field, ph, a_cmp); \ + \ + ret = ph->ph_root; \ + \ + ph_merge_children(a_type, a_field, ph->ph_root, a_cmp, \ + ph->ph_root); \ + \ + return (ret); \ +} \ +a_attr void \ +a_prefix##remove(a_ph_type *ph, a_type *phn) \ +{ \ + a_type *replace, *parent; \ + \ + /* \ + * We can delete from aux list without merging it, but we need \ + * to merge if we are dealing with the root node. \ + */ \ + if (ph->ph_root == phn) { \ + ph_merge_aux(a_type, a_field, ph, a_cmp); \ + if (ph->ph_root == phn) { \ + ph_merge_children(a_type, a_field, ph->ph_root, \ + a_cmp, ph->ph_root); \ + return; \ + } \ + } \ + \ + /* Get parent (if phn is leftmost child) before mutating. */ \ + if ((parent = phn_prev_get(a_type, a_field, phn)) != NULL) { \ + if (phn_lchild_get(a_type, a_field, parent) != phn) \ + parent = NULL; \ + } \ + /* Find a possible replacement node, and link to parent. */ \ + ph_merge_children(a_type, a_field, phn, a_cmp, replace); \ + /* Set next/prev for sibling linked list. 
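+	 * (The replacement, if any, is spliced into phn's former position \
+	 * below; when phn has no children, it is simply unlinked.) \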
*/ \ + if (replace != NULL) { \ + if (parent != NULL) { \ + phn_prev_set(a_type, a_field, replace, parent); \ + phn_lchild_set(a_type, a_field, parent, \ + replace); \ + } else { \ + phn_prev_set(a_type, a_field, replace, \ + phn_prev_get(a_type, a_field, phn)); \ + if (phn_prev_get(a_type, a_field, phn) != \ + NULL) { \ + phn_next_set(a_type, a_field, \ + phn_prev_get(a_type, a_field, phn), \ + replace); \ + } \ + } \ + phn_next_set(a_type, a_field, replace, \ + phn_next_get(a_type, a_field, phn)); \ + if (phn_next_get(a_type, a_field, phn) != NULL) { \ + phn_prev_set(a_type, a_field, \ + phn_next_get(a_type, a_field, phn), \ + replace); \ + } \ + } else { \ + if (parent != NULL) { \ + a_type *next = phn_next_get(a_type, a_field, \ + phn); \ + phn_lchild_set(a_type, a_field, parent, next); \ + if (next != NULL) { \ + phn_prev_set(a_type, a_field, next, \ + parent); \ + } \ + } else { \ + assert(phn_prev_get(a_type, a_field, phn) != \ + NULL); \ + phn_next_set(a_type, a_field, \ + phn_prev_get(a_type, a_field, phn), \ + phn_next_get(a_type, a_field, phn)); \ + } \ + if (phn_next_get(a_type, a_field, phn) != NULL) { \ + phn_prev_set(a_type, a_field, \ + phn_next_get(a_type, a_field, phn), \ + phn_prev_get(a_type, a_field, phn)); \ + } \ + } \ +} + +#endif /* PH_H_ */ diff --git a/sources/jemalloc/include-linux/jemalloc/internal/private_namespace.h b/sources/jemalloc/include-linux/jemalloc/internal/private_namespace.h new file mode 100755 index 00000000..7ebeeba8 --- /dev/null +++ b/sources/jemalloc/include-linux/jemalloc/internal/private_namespace.h @@ -0,0 +1,639 @@ +#define a0dalloc JEMALLOC_N(a0dalloc) +#define a0get JEMALLOC_N(a0get) +#define a0malloc JEMALLOC_N(a0malloc) +#define arena_aalloc JEMALLOC_N(arena_aalloc) +#define arena_alloc_junk_small JEMALLOC_N(arena_alloc_junk_small) +#define arena_basic_stats_merge JEMALLOC_N(arena_basic_stats_merge) +#define arena_bin_index JEMALLOC_N(arena_bin_index) +#define arena_bin_info JEMALLOC_N(arena_bin_info) +#define arena_bitselm_get_const JEMALLOC_N(arena_bitselm_get_const) +#define arena_bitselm_get_mutable JEMALLOC_N(arena_bitselm_get_mutable) +#define arena_boot JEMALLOC_N(arena_boot) +#define arena_choose JEMALLOC_N(arena_choose) +#define arena_choose_hard JEMALLOC_N(arena_choose_hard) +#define arena_choose_impl JEMALLOC_N(arena_choose_impl) +#define arena_chunk_alloc_huge JEMALLOC_N(arena_chunk_alloc_huge) +#define arena_chunk_cache_maybe_insert JEMALLOC_N(arena_chunk_cache_maybe_insert) +#define arena_chunk_cache_maybe_remove JEMALLOC_N(arena_chunk_cache_maybe_remove) +#define arena_chunk_dalloc_huge JEMALLOC_N(arena_chunk_dalloc_huge) +#define arena_chunk_ralloc_huge_expand JEMALLOC_N(arena_chunk_ralloc_huge_expand) +#define arena_chunk_ralloc_huge_shrink JEMALLOC_N(arena_chunk_ralloc_huge_shrink) +#define arena_chunk_ralloc_huge_similar JEMALLOC_N(arena_chunk_ralloc_huge_similar) +#define arena_cleanup JEMALLOC_N(arena_cleanup) +#define arena_dalloc JEMALLOC_N(arena_dalloc) +#define arena_dalloc_bin JEMALLOC_N(arena_dalloc_bin) +#define arena_dalloc_bin_junked_locked JEMALLOC_N(arena_dalloc_bin_junked_locked) +#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large) +#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small) +#define arena_dalloc_large JEMALLOC_N(arena_dalloc_large) +#define arena_dalloc_large_junked_locked JEMALLOC_N(arena_dalloc_large_junked_locked) +#define arena_dalloc_small JEMALLOC_N(arena_dalloc_small) +#define arena_decay_tick JEMALLOC_N(arena_decay_tick) +#define arena_decay_ticks 
JEMALLOC_N(arena_decay_ticks) +#define arena_decay_time_default_get JEMALLOC_N(arena_decay_time_default_get) +#define arena_decay_time_default_set JEMALLOC_N(arena_decay_time_default_set) +#define arena_decay_time_get JEMALLOC_N(arena_decay_time_get) +#define arena_decay_time_set JEMALLOC_N(arena_decay_time_set) +#define arena_dss_prec_get JEMALLOC_N(arena_dss_prec_get) +#define arena_dss_prec_set JEMALLOC_N(arena_dss_prec_set) +#define arena_extent_sn_next JEMALLOC_N(arena_extent_sn_next) +#define arena_get JEMALLOC_N(arena_get) +#define arena_ichoose JEMALLOC_N(arena_ichoose) +#define arena_init JEMALLOC_N(arena_init) +#define arena_lg_dirty_mult_default_get JEMALLOC_N(arena_lg_dirty_mult_default_get) +#define arena_lg_dirty_mult_default_set JEMALLOC_N(arena_lg_dirty_mult_default_set) +#define arena_lg_dirty_mult_get JEMALLOC_N(arena_lg_dirty_mult_get) +#define arena_lg_dirty_mult_set JEMALLOC_N(arena_lg_dirty_mult_set) +#define arena_malloc JEMALLOC_N(arena_malloc) +#define arena_malloc_hard JEMALLOC_N(arena_malloc_hard) +#define arena_malloc_large JEMALLOC_N(arena_malloc_large) +#define arena_mapbits_allocated_get JEMALLOC_N(arena_mapbits_allocated_get) +#define arena_mapbits_binind_get JEMALLOC_N(arena_mapbits_binind_get) +#define arena_mapbits_decommitted_get JEMALLOC_N(arena_mapbits_decommitted_get) +#define arena_mapbits_dirty_get JEMALLOC_N(arena_mapbits_dirty_get) +#define arena_mapbits_get JEMALLOC_N(arena_mapbits_get) +#define arena_mapbits_internal_set JEMALLOC_N(arena_mapbits_internal_set) +#define arena_mapbits_large_binind_set JEMALLOC_N(arena_mapbits_large_binind_set) +#define arena_mapbits_large_get JEMALLOC_N(arena_mapbits_large_get) +#define arena_mapbits_large_set JEMALLOC_N(arena_mapbits_large_set) +#define arena_mapbits_large_size_get JEMALLOC_N(arena_mapbits_large_size_get) +#define arena_mapbits_size_decode JEMALLOC_N(arena_mapbits_size_decode) +#define arena_mapbits_size_encode JEMALLOC_N(arena_mapbits_size_encode) +#define arena_mapbits_small_runind_get JEMALLOC_N(arena_mapbits_small_runind_get) +#define arena_mapbits_small_set JEMALLOC_N(arena_mapbits_small_set) +#define arena_mapbits_unallocated_set JEMALLOC_N(arena_mapbits_unallocated_set) +#define arena_mapbits_unallocated_size_get JEMALLOC_N(arena_mapbits_unallocated_size_get) +#define arena_mapbits_unallocated_size_set JEMALLOC_N(arena_mapbits_unallocated_size_set) +#define arena_mapbits_unzeroed_get JEMALLOC_N(arena_mapbits_unzeroed_get) +#define arena_mapbitsp_get_const JEMALLOC_N(arena_mapbitsp_get_const) +#define arena_mapbitsp_get_mutable JEMALLOC_N(arena_mapbitsp_get_mutable) +#define arena_mapbitsp_read JEMALLOC_N(arena_mapbitsp_read) +#define arena_mapbitsp_write JEMALLOC_N(arena_mapbitsp_write) +#define arena_maxrun JEMALLOC_N(arena_maxrun) +#define arena_maybe_purge JEMALLOC_N(arena_maybe_purge) +#define arena_metadata_allocated_add JEMALLOC_N(arena_metadata_allocated_add) +#define arena_metadata_allocated_get JEMALLOC_N(arena_metadata_allocated_get) +#define arena_metadata_allocated_sub JEMALLOC_N(arena_metadata_allocated_sub) +#define arena_migrate JEMALLOC_N(arena_migrate) +#define arena_miscelm_get_const JEMALLOC_N(arena_miscelm_get_const) +#define arena_miscelm_get_mutable JEMALLOC_N(arena_miscelm_get_mutable) +#define arena_miscelm_to_pageind JEMALLOC_N(arena_miscelm_to_pageind) +#define arena_miscelm_to_rpages JEMALLOC_N(arena_miscelm_to_rpages) +#define arena_new JEMALLOC_N(arena_new) +#define arena_node_alloc JEMALLOC_N(arena_node_alloc) +#define arena_node_dalloc 
JEMALLOC_N(arena_node_dalloc) +#define arena_nthreads_dec JEMALLOC_N(arena_nthreads_dec) +#define arena_nthreads_get JEMALLOC_N(arena_nthreads_get) +#define arena_nthreads_inc JEMALLOC_N(arena_nthreads_inc) +#define arena_palloc JEMALLOC_N(arena_palloc) +#define arena_postfork_child JEMALLOC_N(arena_postfork_child) +#define arena_postfork_parent JEMALLOC_N(arena_postfork_parent) +#define arena_prefork0 JEMALLOC_N(arena_prefork0) +#define arena_prefork1 JEMALLOC_N(arena_prefork1) +#define arena_prefork2 JEMALLOC_N(arena_prefork2) +#define arena_prefork3 JEMALLOC_N(arena_prefork3) +#define arena_prof_accum JEMALLOC_N(arena_prof_accum) +#define arena_prof_accum_impl JEMALLOC_N(arena_prof_accum_impl) +#define arena_prof_accum_locked JEMALLOC_N(arena_prof_accum_locked) +#define arena_prof_promoted JEMALLOC_N(arena_prof_promoted) +#define arena_prof_tctx_get JEMALLOC_N(arena_prof_tctx_get) +#define arena_prof_tctx_reset JEMALLOC_N(arena_prof_tctx_reset) +#define arena_prof_tctx_set JEMALLOC_N(arena_prof_tctx_set) +#define arena_ptr_small_binind_get JEMALLOC_N(arena_ptr_small_binind_get) +#define arena_purge JEMALLOC_N(arena_purge) +#define arena_quarantine_junk_small JEMALLOC_N(arena_quarantine_junk_small) +#define arena_ralloc JEMALLOC_N(arena_ralloc) +#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large) +#define arena_ralloc_no_move JEMALLOC_N(arena_ralloc_no_move) +#define arena_rd_to_miscelm JEMALLOC_N(arena_rd_to_miscelm) +#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption) +#define arena_reset JEMALLOC_N(arena_reset) +#define arena_run_regind JEMALLOC_N(arena_run_regind) +#define arena_run_to_miscelm JEMALLOC_N(arena_run_to_miscelm) +#define arena_salloc JEMALLOC_N(arena_salloc) +#define arena_sdalloc JEMALLOC_N(arena_sdalloc) +#define arena_stats_merge JEMALLOC_N(arena_stats_merge) +#define arena_tcache_fill_small JEMALLOC_N(arena_tcache_fill_small) +#define arena_tdata_get JEMALLOC_N(arena_tdata_get) +#define arena_tdata_get_hard JEMALLOC_N(arena_tdata_get_hard) +#define arenas JEMALLOC_N(arenas) +#define arenas_tdata_bypass_cleanup JEMALLOC_N(arenas_tdata_bypass_cleanup) +#define arenas_tdata_cleanup JEMALLOC_N(arenas_tdata_cleanup) +#define atomic_add_p JEMALLOC_N(atomic_add_p) +#define atomic_add_u JEMALLOC_N(atomic_add_u) +#define atomic_add_uint32 JEMALLOC_N(atomic_add_uint32) +#define atomic_add_uint64 JEMALLOC_N(atomic_add_uint64) +#define atomic_add_z JEMALLOC_N(atomic_add_z) +#define atomic_cas_p JEMALLOC_N(atomic_cas_p) +#define atomic_cas_u JEMALLOC_N(atomic_cas_u) +#define atomic_cas_uint32 JEMALLOC_N(atomic_cas_uint32) +#define atomic_cas_uint64 JEMALLOC_N(atomic_cas_uint64) +#define atomic_cas_z JEMALLOC_N(atomic_cas_z) +#define atomic_sub_p JEMALLOC_N(atomic_sub_p) +#define atomic_sub_u JEMALLOC_N(atomic_sub_u) +#define atomic_sub_uint32 JEMALLOC_N(atomic_sub_uint32) +#define atomic_sub_uint64 JEMALLOC_N(atomic_sub_uint64) +#define atomic_sub_z JEMALLOC_N(atomic_sub_z) +#define atomic_write_p JEMALLOC_N(atomic_write_p) +#define atomic_write_u JEMALLOC_N(atomic_write_u) +#define atomic_write_uint32 JEMALLOC_N(atomic_write_uint32) +#define atomic_write_uint64 JEMALLOC_N(atomic_write_uint64) +#define atomic_write_z JEMALLOC_N(atomic_write_z) +#define base_alloc JEMALLOC_N(base_alloc) +#define base_boot JEMALLOC_N(base_boot) +#define base_postfork_child JEMALLOC_N(base_postfork_child) +#define base_postfork_parent JEMALLOC_N(base_postfork_parent) +#define base_prefork JEMALLOC_N(base_prefork) +#define base_stats_get JEMALLOC_N(base_stats_get) 
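+/*
+ * Note that JEMALLOC_N() prepends the configured private namespace prefix
+ * (JEMALLOC_PRIVATE_NAMESPACE, "je_" in this build), so e.g. bitmap_init is
+ * actually compiled as je_bitmap_init, which keeps statically linked copies
+ * from colliding with other allocators' internals.
+ */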
+#define bitmap_full JEMALLOC_N(bitmap_full) +#define bitmap_get JEMALLOC_N(bitmap_get) +#define bitmap_info_init JEMALLOC_N(bitmap_info_init) +#define bitmap_init JEMALLOC_N(bitmap_init) +#define bitmap_set JEMALLOC_N(bitmap_set) +#define bitmap_sfu JEMALLOC_N(bitmap_sfu) +#define bitmap_size JEMALLOC_N(bitmap_size) +#define bitmap_unset JEMALLOC_N(bitmap_unset) +#define bootstrap_calloc JEMALLOC_N(bootstrap_calloc) +#define bootstrap_free JEMALLOC_N(bootstrap_free) +#define bootstrap_malloc JEMALLOC_N(bootstrap_malloc) +#define bt_init JEMALLOC_N(bt_init) +#define buferror JEMALLOC_N(buferror) +#define chunk_alloc_base JEMALLOC_N(chunk_alloc_base) +#define chunk_alloc_cache JEMALLOC_N(chunk_alloc_cache) +#define chunk_alloc_dss JEMALLOC_N(chunk_alloc_dss) +#define chunk_alloc_mmap JEMALLOC_N(chunk_alloc_mmap) +#define chunk_alloc_wrapper JEMALLOC_N(chunk_alloc_wrapper) +#define chunk_boot JEMALLOC_N(chunk_boot) +#define chunk_dalloc_cache JEMALLOC_N(chunk_dalloc_cache) +#define chunk_dalloc_mmap JEMALLOC_N(chunk_dalloc_mmap) +#define chunk_dalloc_wrapper JEMALLOC_N(chunk_dalloc_wrapper) +#define chunk_deregister JEMALLOC_N(chunk_deregister) +#define chunk_dss_boot JEMALLOC_N(chunk_dss_boot) +#define chunk_dss_mergeable JEMALLOC_N(chunk_dss_mergeable) +#define chunk_dss_prec_get JEMALLOC_N(chunk_dss_prec_get) +#define chunk_dss_prec_set JEMALLOC_N(chunk_dss_prec_set) +#define chunk_hooks_default JEMALLOC_N(chunk_hooks_default) +#define chunk_hooks_get JEMALLOC_N(chunk_hooks_get) +#define chunk_hooks_set JEMALLOC_N(chunk_hooks_set) +#define chunk_in_dss JEMALLOC_N(chunk_in_dss) +#define chunk_lookup JEMALLOC_N(chunk_lookup) +#define chunk_npages JEMALLOC_N(chunk_npages) +#define chunk_purge_wrapper JEMALLOC_N(chunk_purge_wrapper) +#define chunk_register JEMALLOC_N(chunk_register) +#define chunks_rtree JEMALLOC_N(chunks_rtree) +#define chunksize JEMALLOC_N(chunksize) +#define chunksize_mask JEMALLOC_N(chunksize_mask) +#define ckh_count JEMALLOC_N(ckh_count) +#define ckh_delete JEMALLOC_N(ckh_delete) +#define ckh_insert JEMALLOC_N(ckh_insert) +#define ckh_iter JEMALLOC_N(ckh_iter) +#define ckh_new JEMALLOC_N(ckh_new) +#define ckh_pointer_hash JEMALLOC_N(ckh_pointer_hash) +#define ckh_pointer_keycomp JEMALLOC_N(ckh_pointer_keycomp) +#define ckh_remove JEMALLOC_N(ckh_remove) +#define ckh_search JEMALLOC_N(ckh_search) +#define ckh_string_hash JEMALLOC_N(ckh_string_hash) +#define ckh_string_keycomp JEMALLOC_N(ckh_string_keycomp) +#define ctl_boot JEMALLOC_N(ctl_boot) +#define ctl_bymib JEMALLOC_N(ctl_bymib) +#define ctl_byname JEMALLOC_N(ctl_byname) +#define ctl_nametomib JEMALLOC_N(ctl_nametomib) +#define ctl_postfork_child JEMALLOC_N(ctl_postfork_child) +#define ctl_postfork_parent JEMALLOC_N(ctl_postfork_parent) +#define ctl_prefork JEMALLOC_N(ctl_prefork) +#define decay_ticker_get JEMALLOC_N(decay_ticker_get) +#define dss_prec_names JEMALLOC_N(dss_prec_names) +#define extent_node_achunk_get JEMALLOC_N(extent_node_achunk_get) +#define extent_node_achunk_set JEMALLOC_N(extent_node_achunk_set) +#define extent_node_addr_get JEMALLOC_N(extent_node_addr_get) +#define extent_node_addr_set JEMALLOC_N(extent_node_addr_set) +#define extent_node_arena_get JEMALLOC_N(extent_node_arena_get) +#define extent_node_arena_set JEMALLOC_N(extent_node_arena_set) +#define extent_node_committed_get JEMALLOC_N(extent_node_committed_get) +#define extent_node_committed_set JEMALLOC_N(extent_node_committed_set) +#define extent_node_dirty_insert JEMALLOC_N(extent_node_dirty_insert) +#define 
extent_node_dirty_linkage_init JEMALLOC_N(extent_node_dirty_linkage_init) +#define extent_node_dirty_remove JEMALLOC_N(extent_node_dirty_remove) +#define extent_node_init JEMALLOC_N(extent_node_init) +#define extent_node_prof_tctx_get JEMALLOC_N(extent_node_prof_tctx_get) +#define extent_node_prof_tctx_set JEMALLOC_N(extent_node_prof_tctx_set) +#define extent_node_size_get JEMALLOC_N(extent_node_size_get) +#define extent_node_size_set JEMALLOC_N(extent_node_size_set) +#define extent_node_sn_get JEMALLOC_N(extent_node_sn_get) +#define extent_node_sn_set JEMALLOC_N(extent_node_sn_set) +#define extent_node_zeroed_get JEMALLOC_N(extent_node_zeroed_get) +#define extent_node_zeroed_set JEMALLOC_N(extent_node_zeroed_set) +#define extent_size_quantize_ceil JEMALLOC_N(extent_size_quantize_ceil) +#define extent_size_quantize_floor JEMALLOC_N(extent_size_quantize_floor) +#define extent_tree_ad_destroy JEMALLOC_N(extent_tree_ad_destroy) +#define extent_tree_ad_destroy_recurse JEMALLOC_N(extent_tree_ad_destroy_recurse) +#define extent_tree_ad_empty JEMALLOC_N(extent_tree_ad_empty) +#define extent_tree_ad_first JEMALLOC_N(extent_tree_ad_first) +#define extent_tree_ad_insert JEMALLOC_N(extent_tree_ad_insert) +#define extent_tree_ad_iter JEMALLOC_N(extent_tree_ad_iter) +#define extent_tree_ad_iter_recurse JEMALLOC_N(extent_tree_ad_iter_recurse) +#define extent_tree_ad_iter_start JEMALLOC_N(extent_tree_ad_iter_start) +#define extent_tree_ad_last JEMALLOC_N(extent_tree_ad_last) +#define extent_tree_ad_new JEMALLOC_N(extent_tree_ad_new) +#define extent_tree_ad_next JEMALLOC_N(extent_tree_ad_next) +#define extent_tree_ad_nsearch JEMALLOC_N(extent_tree_ad_nsearch) +#define extent_tree_ad_prev JEMALLOC_N(extent_tree_ad_prev) +#define extent_tree_ad_psearch JEMALLOC_N(extent_tree_ad_psearch) +#define extent_tree_ad_remove JEMALLOC_N(extent_tree_ad_remove) +#define extent_tree_ad_reverse_iter JEMALLOC_N(extent_tree_ad_reverse_iter) +#define extent_tree_ad_reverse_iter_recurse JEMALLOC_N(extent_tree_ad_reverse_iter_recurse) +#define extent_tree_ad_reverse_iter_start JEMALLOC_N(extent_tree_ad_reverse_iter_start) +#define extent_tree_ad_search JEMALLOC_N(extent_tree_ad_search) +#define extent_tree_szsnad_destroy JEMALLOC_N(extent_tree_szsnad_destroy) +#define extent_tree_szsnad_destroy_recurse JEMALLOC_N(extent_tree_szsnad_destroy_recurse) +#define extent_tree_szsnad_empty JEMALLOC_N(extent_tree_szsnad_empty) +#define extent_tree_szsnad_first JEMALLOC_N(extent_tree_szsnad_first) +#define extent_tree_szsnad_insert JEMALLOC_N(extent_tree_szsnad_insert) +#define extent_tree_szsnad_iter JEMALLOC_N(extent_tree_szsnad_iter) +#define extent_tree_szsnad_iter_recurse JEMALLOC_N(extent_tree_szsnad_iter_recurse) +#define extent_tree_szsnad_iter_start JEMALLOC_N(extent_tree_szsnad_iter_start) +#define extent_tree_szsnad_last JEMALLOC_N(extent_tree_szsnad_last) +#define extent_tree_szsnad_new JEMALLOC_N(extent_tree_szsnad_new) +#define extent_tree_szsnad_next JEMALLOC_N(extent_tree_szsnad_next) +#define extent_tree_szsnad_nsearch JEMALLOC_N(extent_tree_szsnad_nsearch) +#define extent_tree_szsnad_prev JEMALLOC_N(extent_tree_szsnad_prev) +#define extent_tree_szsnad_psearch JEMALLOC_N(extent_tree_szsnad_psearch) +#define extent_tree_szsnad_remove JEMALLOC_N(extent_tree_szsnad_remove) +#define extent_tree_szsnad_reverse_iter JEMALLOC_N(extent_tree_szsnad_reverse_iter) +#define extent_tree_szsnad_reverse_iter_recurse JEMALLOC_N(extent_tree_szsnad_reverse_iter_recurse) +#define extent_tree_szsnad_reverse_iter_start 
JEMALLOC_N(extent_tree_szsnad_reverse_iter_start) +#define extent_tree_szsnad_search JEMALLOC_N(extent_tree_szsnad_search) +#define ffs_llu JEMALLOC_N(ffs_llu) +#define ffs_lu JEMALLOC_N(ffs_lu) +#define ffs_u JEMALLOC_N(ffs_u) +#define ffs_u32 JEMALLOC_N(ffs_u32) +#define ffs_u64 JEMALLOC_N(ffs_u64) +#define ffs_zu JEMALLOC_N(ffs_zu) +#define get_errno JEMALLOC_N(get_errno) +#define hash JEMALLOC_N(hash) +#define hash_fmix_32 JEMALLOC_N(hash_fmix_32) +#define hash_fmix_64 JEMALLOC_N(hash_fmix_64) +#define hash_get_block_32 JEMALLOC_N(hash_get_block_32) +#define hash_get_block_64 JEMALLOC_N(hash_get_block_64) +#define hash_rotl_32 JEMALLOC_N(hash_rotl_32) +#define hash_rotl_64 JEMALLOC_N(hash_rotl_64) +#define hash_x64_128 JEMALLOC_N(hash_x64_128) +#define hash_x86_128 JEMALLOC_N(hash_x86_128) +#define hash_x86_32 JEMALLOC_N(hash_x86_32) +#define huge_aalloc JEMALLOC_N(huge_aalloc) +#define huge_dalloc JEMALLOC_N(huge_dalloc) +#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk) +#define huge_malloc JEMALLOC_N(huge_malloc) +#define huge_palloc JEMALLOC_N(huge_palloc) +#define huge_prof_tctx_get JEMALLOC_N(huge_prof_tctx_get) +#define huge_prof_tctx_reset JEMALLOC_N(huge_prof_tctx_reset) +#define huge_prof_tctx_set JEMALLOC_N(huge_prof_tctx_set) +#define huge_ralloc JEMALLOC_N(huge_ralloc) +#define huge_ralloc_no_move JEMALLOC_N(huge_ralloc_no_move) +#define huge_salloc JEMALLOC_N(huge_salloc) +#define iaalloc JEMALLOC_N(iaalloc) +#define ialloc JEMALLOC_N(ialloc) +#define iallocztm JEMALLOC_N(iallocztm) +#define iarena_cleanup JEMALLOC_N(iarena_cleanup) +#define idalloc JEMALLOC_N(idalloc) +#define idalloctm JEMALLOC_N(idalloctm) +#define in_valgrind JEMALLOC_N(in_valgrind) +#define index2size JEMALLOC_N(index2size) +#define index2size_compute JEMALLOC_N(index2size_compute) +#define index2size_lookup JEMALLOC_N(index2size_lookup) +#define index2size_tab JEMALLOC_N(index2size_tab) +#define ipalloc JEMALLOC_N(ipalloc) +#define ipalloct JEMALLOC_N(ipalloct) +#define ipallocztm JEMALLOC_N(ipallocztm) +#define iqalloc JEMALLOC_N(iqalloc) +#define iralloc JEMALLOC_N(iralloc) +#define iralloct JEMALLOC_N(iralloct) +#define iralloct_realign JEMALLOC_N(iralloct_realign) +#define isalloc JEMALLOC_N(isalloc) +#define isdalloct JEMALLOC_N(isdalloct) +#define isqalloc JEMALLOC_N(isqalloc) +#define isthreaded JEMALLOC_N(isthreaded) +#define ivsalloc JEMALLOC_N(ivsalloc) +#define ixalloc JEMALLOC_N(ixalloc) +#define jemalloc_postfork_child JEMALLOC_N(jemalloc_postfork_child) +#define jemalloc_postfork_parent JEMALLOC_N(jemalloc_postfork_parent) +#define jemalloc_prefork JEMALLOC_N(jemalloc_prefork) +#define large_maxclass JEMALLOC_N(large_maxclass) +#define lg_floor JEMALLOC_N(lg_floor) +#define lg_prof_sample JEMALLOC_N(lg_prof_sample) +#define malloc_cprintf JEMALLOC_N(malloc_cprintf) +#define malloc_mutex_assert_not_owner JEMALLOC_N(malloc_mutex_assert_not_owner) +#define malloc_mutex_assert_owner JEMALLOC_N(malloc_mutex_assert_owner) +#define malloc_mutex_boot JEMALLOC_N(malloc_mutex_boot) +#define malloc_mutex_init JEMALLOC_N(malloc_mutex_init) +#define malloc_mutex_lock JEMALLOC_N(malloc_mutex_lock) +#define malloc_mutex_postfork_child JEMALLOC_N(malloc_mutex_postfork_child) +#define malloc_mutex_postfork_parent JEMALLOC_N(malloc_mutex_postfork_parent) +#define malloc_mutex_prefork JEMALLOC_N(malloc_mutex_prefork) +#define malloc_mutex_unlock JEMALLOC_N(malloc_mutex_unlock) +#define malloc_printf JEMALLOC_N(malloc_printf) +#define malloc_snprintf JEMALLOC_N(malloc_snprintf) +#define 
malloc_strtoumax JEMALLOC_N(malloc_strtoumax) +#define malloc_tsd_boot0 JEMALLOC_N(malloc_tsd_boot0) +#define malloc_tsd_boot1 JEMALLOC_N(malloc_tsd_boot1) +#define malloc_tsd_cleanup_register JEMALLOC_N(malloc_tsd_cleanup_register) +#define malloc_tsd_dalloc JEMALLOC_N(malloc_tsd_dalloc) +#define malloc_tsd_malloc JEMALLOC_N(malloc_tsd_malloc) +#define malloc_tsd_no_cleanup JEMALLOC_N(malloc_tsd_no_cleanup) +#define malloc_vcprintf JEMALLOC_N(malloc_vcprintf) +#define malloc_vsnprintf JEMALLOC_N(malloc_vsnprintf) +#define malloc_write JEMALLOC_N(malloc_write) +#define map_bias JEMALLOC_N(map_bias) +#define map_misc_offset JEMALLOC_N(map_misc_offset) +#define mb_write JEMALLOC_N(mb_write) +#define narenas_auto JEMALLOC_N(narenas_auto) +#define narenas_tdata_cleanup JEMALLOC_N(narenas_tdata_cleanup) +#define narenas_total_get JEMALLOC_N(narenas_total_get) +#define ncpus JEMALLOC_N(ncpus) +#define nhbins JEMALLOC_N(nhbins) +#define nhclasses JEMALLOC_N(nhclasses) +#define nlclasses JEMALLOC_N(nlclasses) +#define nstime_add JEMALLOC_N(nstime_add) +#define nstime_compare JEMALLOC_N(nstime_compare) +#define nstime_copy JEMALLOC_N(nstime_copy) +#define nstime_divide JEMALLOC_N(nstime_divide) +#define nstime_idivide JEMALLOC_N(nstime_idivide) +#define nstime_imultiply JEMALLOC_N(nstime_imultiply) +#define nstime_init JEMALLOC_N(nstime_init) +#define nstime_init2 JEMALLOC_N(nstime_init2) +#define nstime_monotonic JEMALLOC_N(nstime_monotonic) +#define nstime_ns JEMALLOC_N(nstime_ns) +#define nstime_nsec JEMALLOC_N(nstime_nsec) +#define nstime_sec JEMALLOC_N(nstime_sec) +#define nstime_subtract JEMALLOC_N(nstime_subtract) +#define nstime_update JEMALLOC_N(nstime_update) +#define opt_abort JEMALLOC_N(opt_abort) +#define opt_decay_time JEMALLOC_N(opt_decay_time) +#define opt_dss JEMALLOC_N(opt_dss) +#define opt_junk JEMALLOC_N(opt_junk) +#define opt_junk_alloc JEMALLOC_N(opt_junk_alloc) +#define opt_junk_free JEMALLOC_N(opt_junk_free) +#define opt_lg_chunk JEMALLOC_N(opt_lg_chunk) +#define opt_lg_dirty_mult JEMALLOC_N(opt_lg_dirty_mult) +#define opt_lg_prof_interval JEMALLOC_N(opt_lg_prof_interval) +#define opt_lg_prof_sample JEMALLOC_N(opt_lg_prof_sample) +#define opt_lg_tcache_max JEMALLOC_N(opt_lg_tcache_max) +#define opt_narenas JEMALLOC_N(opt_narenas) +#define opt_prof JEMALLOC_N(opt_prof) +#define opt_prof_accum JEMALLOC_N(opt_prof_accum) +#define opt_prof_active JEMALLOC_N(opt_prof_active) +#define opt_prof_final JEMALLOC_N(opt_prof_final) +#define opt_prof_gdump JEMALLOC_N(opt_prof_gdump) +#define opt_prof_leak JEMALLOC_N(opt_prof_leak) +#define opt_prof_prefix JEMALLOC_N(opt_prof_prefix) +#define opt_prof_thread_active_init JEMALLOC_N(opt_prof_thread_active_init) +#define opt_purge JEMALLOC_N(opt_purge) +#define opt_quarantine JEMALLOC_N(opt_quarantine) +#define opt_redzone JEMALLOC_N(opt_redzone) +#define opt_stats_print JEMALLOC_N(opt_stats_print) +#define opt_tcache JEMALLOC_N(opt_tcache) +#define opt_thp JEMALLOC_N(opt_thp) +#define opt_utrace JEMALLOC_N(opt_utrace) +#define opt_xmalloc JEMALLOC_N(opt_xmalloc) +#define opt_zero JEMALLOC_N(opt_zero) +#define p2rz JEMALLOC_N(p2rz) +#define pages_boot JEMALLOC_N(pages_boot) +#define pages_commit JEMALLOC_N(pages_commit) +#define pages_decommit JEMALLOC_N(pages_decommit) +#define pages_huge JEMALLOC_N(pages_huge) +#define pages_map JEMALLOC_N(pages_map) +#define pages_nohuge JEMALLOC_N(pages_nohuge) +#define pages_purge JEMALLOC_N(pages_purge) +#define pages_trim JEMALLOC_N(pages_trim) +#define pages_unmap JEMALLOC_N(pages_unmap) +#define 
pind2sz JEMALLOC_N(pind2sz) +#define pind2sz_compute JEMALLOC_N(pind2sz_compute) +#define pind2sz_lookup JEMALLOC_N(pind2sz_lookup) +#define pind2sz_tab JEMALLOC_N(pind2sz_tab) +#define pow2_ceil_u32 JEMALLOC_N(pow2_ceil_u32) +#define pow2_ceil_u64 JEMALLOC_N(pow2_ceil_u64) +#define pow2_ceil_zu JEMALLOC_N(pow2_ceil_zu) +#define prng_lg_range_u32 JEMALLOC_N(prng_lg_range_u32) +#define prng_lg_range_u64 JEMALLOC_N(prng_lg_range_u64) +#define prng_lg_range_zu JEMALLOC_N(prng_lg_range_zu) +#define prng_range_u32 JEMALLOC_N(prng_range_u32) +#define prng_range_u64 JEMALLOC_N(prng_range_u64) +#define prng_range_zu JEMALLOC_N(prng_range_zu) +#define prng_state_next_u32 JEMALLOC_N(prng_state_next_u32) +#define prng_state_next_u64 JEMALLOC_N(prng_state_next_u64) +#define prng_state_next_zu JEMALLOC_N(prng_state_next_zu) +#define prof_active JEMALLOC_N(prof_active) +#define prof_active_get JEMALLOC_N(prof_active_get) +#define prof_active_get_unlocked JEMALLOC_N(prof_active_get_unlocked) +#define prof_active_set JEMALLOC_N(prof_active_set) +#define prof_alloc_prep JEMALLOC_N(prof_alloc_prep) +#define prof_alloc_rollback JEMALLOC_N(prof_alloc_rollback) +#define prof_backtrace JEMALLOC_N(prof_backtrace) +#define prof_boot0 JEMALLOC_N(prof_boot0) +#define prof_boot1 JEMALLOC_N(prof_boot1) +#define prof_boot2 JEMALLOC_N(prof_boot2) +#define prof_bt_count JEMALLOC_N(prof_bt_count) +#define prof_dump_header JEMALLOC_N(prof_dump_header) +#define prof_dump_open JEMALLOC_N(prof_dump_open) +#define prof_free JEMALLOC_N(prof_free) +#define prof_free_sampled_object JEMALLOC_N(prof_free_sampled_object) +#define prof_gdump JEMALLOC_N(prof_gdump) +#define prof_gdump_get JEMALLOC_N(prof_gdump_get) +#define prof_gdump_get_unlocked JEMALLOC_N(prof_gdump_get_unlocked) +#define prof_gdump_set JEMALLOC_N(prof_gdump_set) +#define prof_gdump_val JEMALLOC_N(prof_gdump_val) +#define prof_idump JEMALLOC_N(prof_idump) +#define prof_interval JEMALLOC_N(prof_interval) +#define prof_lookup JEMALLOC_N(prof_lookup) +#define prof_malloc JEMALLOC_N(prof_malloc) +#define prof_malloc_sample_object JEMALLOC_N(prof_malloc_sample_object) +#define prof_mdump JEMALLOC_N(prof_mdump) +#define prof_postfork_child JEMALLOC_N(prof_postfork_child) +#define prof_postfork_parent JEMALLOC_N(prof_postfork_parent) +#define prof_prefork0 JEMALLOC_N(prof_prefork0) +#define prof_prefork1 JEMALLOC_N(prof_prefork1) +#define prof_realloc JEMALLOC_N(prof_realloc) +#define prof_reset JEMALLOC_N(prof_reset) +#define prof_sample_accum_update JEMALLOC_N(prof_sample_accum_update) +#define prof_sample_threshold_update JEMALLOC_N(prof_sample_threshold_update) +#define prof_tctx_get JEMALLOC_N(prof_tctx_get) +#define prof_tctx_reset JEMALLOC_N(prof_tctx_reset) +#define prof_tctx_set JEMALLOC_N(prof_tctx_set) +#define prof_tdata_cleanup JEMALLOC_N(prof_tdata_cleanup) +#define prof_tdata_count JEMALLOC_N(prof_tdata_count) +#define prof_tdata_get JEMALLOC_N(prof_tdata_get) +#define prof_tdata_init JEMALLOC_N(prof_tdata_init) +#define prof_tdata_reinit JEMALLOC_N(prof_tdata_reinit) +#define prof_thread_active_get JEMALLOC_N(prof_thread_active_get) +#define prof_thread_active_init_get JEMALLOC_N(prof_thread_active_init_get) +#define prof_thread_active_init_set JEMALLOC_N(prof_thread_active_init_set) +#define prof_thread_active_set JEMALLOC_N(prof_thread_active_set) +#define prof_thread_name_get JEMALLOC_N(prof_thread_name_get) +#define prof_thread_name_set JEMALLOC_N(prof_thread_name_set) +#define psz2ind JEMALLOC_N(psz2ind) +#define psz2u JEMALLOC_N(psz2u) +#define 
purge_mode_names JEMALLOC_N(purge_mode_names) +#define quarantine JEMALLOC_N(quarantine) +#define quarantine_alloc_hook JEMALLOC_N(quarantine_alloc_hook) +#define quarantine_alloc_hook_work JEMALLOC_N(quarantine_alloc_hook_work) +#define quarantine_cleanup JEMALLOC_N(quarantine_cleanup) +#define rtree_child_read JEMALLOC_N(rtree_child_read) +#define rtree_child_read_hard JEMALLOC_N(rtree_child_read_hard) +#define rtree_child_tryread JEMALLOC_N(rtree_child_tryread) +#define rtree_delete JEMALLOC_N(rtree_delete) +#define rtree_get JEMALLOC_N(rtree_get) +#define rtree_new JEMALLOC_N(rtree_new) +#define rtree_node_valid JEMALLOC_N(rtree_node_valid) +#define rtree_set JEMALLOC_N(rtree_set) +#define rtree_start_level JEMALLOC_N(rtree_start_level) +#define rtree_subkey JEMALLOC_N(rtree_subkey) +#define rtree_subtree_read JEMALLOC_N(rtree_subtree_read) +#define rtree_subtree_read_hard JEMALLOC_N(rtree_subtree_read_hard) +#define rtree_subtree_tryread JEMALLOC_N(rtree_subtree_tryread) +#define rtree_val_read JEMALLOC_N(rtree_val_read) +#define rtree_val_write JEMALLOC_N(rtree_val_write) +#define run_quantize_ceil JEMALLOC_N(run_quantize_ceil) +#define run_quantize_floor JEMALLOC_N(run_quantize_floor) +#define s2u JEMALLOC_N(s2u) +#define s2u_compute JEMALLOC_N(s2u_compute) +#define s2u_lookup JEMALLOC_N(s2u_lookup) +#define sa2u JEMALLOC_N(sa2u) +#define set_errno JEMALLOC_N(set_errno) +#define size2index JEMALLOC_N(size2index) +#define size2index_compute JEMALLOC_N(size2index_compute) +#define size2index_lookup JEMALLOC_N(size2index_lookup) +#define size2index_tab JEMALLOC_N(size2index_tab) +#define spin_adaptive JEMALLOC_N(spin_adaptive) +#define spin_init JEMALLOC_N(spin_init) +#define stats_cactive JEMALLOC_N(stats_cactive) +#define stats_cactive_add JEMALLOC_N(stats_cactive_add) +#define stats_cactive_get JEMALLOC_N(stats_cactive_get) +#define stats_cactive_sub JEMALLOC_N(stats_cactive_sub) +#define stats_print JEMALLOC_N(stats_print) +#define tcache_alloc_easy JEMALLOC_N(tcache_alloc_easy) +#define tcache_alloc_large JEMALLOC_N(tcache_alloc_large) +#define tcache_alloc_small JEMALLOC_N(tcache_alloc_small) +#define tcache_alloc_small_hard JEMALLOC_N(tcache_alloc_small_hard) +#define tcache_arena_reassociate JEMALLOC_N(tcache_arena_reassociate) +#define tcache_bin_flush_large JEMALLOC_N(tcache_bin_flush_large) +#define tcache_bin_flush_small JEMALLOC_N(tcache_bin_flush_small) +#define tcache_bin_info JEMALLOC_N(tcache_bin_info) +#define tcache_boot JEMALLOC_N(tcache_boot) +#define tcache_cleanup JEMALLOC_N(tcache_cleanup) +#define tcache_create JEMALLOC_N(tcache_create) +#define tcache_dalloc_large JEMALLOC_N(tcache_dalloc_large) +#define tcache_dalloc_small JEMALLOC_N(tcache_dalloc_small) +#define tcache_enabled_cleanup JEMALLOC_N(tcache_enabled_cleanup) +#define tcache_enabled_get JEMALLOC_N(tcache_enabled_get) +#define tcache_enabled_set JEMALLOC_N(tcache_enabled_set) +#define tcache_event JEMALLOC_N(tcache_event) +#define tcache_event_hard JEMALLOC_N(tcache_event_hard) +#define tcache_flush JEMALLOC_N(tcache_flush) +#define tcache_get JEMALLOC_N(tcache_get) +#define tcache_get_hard JEMALLOC_N(tcache_get_hard) +#define tcache_maxclass JEMALLOC_N(tcache_maxclass) +#define tcache_postfork_child JEMALLOC_N(tcache_postfork_child) +#define tcache_postfork_parent JEMALLOC_N(tcache_postfork_parent) +#define tcache_prefork JEMALLOC_N(tcache_prefork) +#define tcache_salloc JEMALLOC_N(tcache_salloc) +#define tcache_stats_merge JEMALLOC_N(tcache_stats_merge) +#define tcaches JEMALLOC_N(tcaches) 
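+/*
+ * tcaches is the global registry of explicitly created thread caches; the
+ * tcaches_* functions below create, look up, flush, and destroy entries by
+ * index (surfaced to applications through the tcache.create, tcache.flush,
+ * and tcache.destroy mallctls).
+ */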
+#define tcaches_create JEMALLOC_N(tcaches_create) +#define tcaches_destroy JEMALLOC_N(tcaches_destroy) +#define tcaches_flush JEMALLOC_N(tcaches_flush) +#define tcaches_get JEMALLOC_N(tcaches_get) +#define thread_allocated_cleanup JEMALLOC_N(thread_allocated_cleanup) +#define thread_deallocated_cleanup JEMALLOC_N(thread_deallocated_cleanup) +#define ticker_copy JEMALLOC_N(ticker_copy) +#define ticker_init JEMALLOC_N(ticker_init) +#define ticker_read JEMALLOC_N(ticker_read) +#define ticker_tick JEMALLOC_N(ticker_tick) +#define ticker_ticks JEMALLOC_N(ticker_ticks) +#define tsd_arena_get JEMALLOC_N(tsd_arena_get) +#define tsd_arena_set JEMALLOC_N(tsd_arena_set) +#define tsd_arenap_get JEMALLOC_N(tsd_arenap_get) +#define tsd_arenas_tdata_bypass_get JEMALLOC_N(tsd_arenas_tdata_bypass_get) +#define tsd_arenas_tdata_bypass_set JEMALLOC_N(tsd_arenas_tdata_bypass_set) +#define tsd_arenas_tdata_bypassp_get JEMALLOC_N(tsd_arenas_tdata_bypassp_get) +#define tsd_arenas_tdata_get JEMALLOC_N(tsd_arenas_tdata_get) +#define tsd_arenas_tdata_set JEMALLOC_N(tsd_arenas_tdata_set) +#define tsd_arenas_tdatap_get JEMALLOC_N(tsd_arenas_tdatap_get) +#define tsd_boot JEMALLOC_N(tsd_boot) +#define tsd_boot0 JEMALLOC_N(tsd_boot0) +#define tsd_boot1 JEMALLOC_N(tsd_boot1) +#define tsd_booted JEMALLOC_N(tsd_booted) +#define tsd_booted_get JEMALLOC_N(tsd_booted_get) +#define tsd_cleanup JEMALLOC_N(tsd_cleanup) +#define tsd_cleanup_wrapper JEMALLOC_N(tsd_cleanup_wrapper) +#define tsd_fetch JEMALLOC_N(tsd_fetch) +#define tsd_fetch_impl JEMALLOC_N(tsd_fetch_impl) +#define tsd_get JEMALLOC_N(tsd_get) +#define tsd_get_allocates JEMALLOC_N(tsd_get_allocates) +#define tsd_iarena_get JEMALLOC_N(tsd_iarena_get) +#define tsd_iarena_set JEMALLOC_N(tsd_iarena_set) +#define tsd_iarenap_get JEMALLOC_N(tsd_iarenap_get) +#define tsd_initialized JEMALLOC_N(tsd_initialized) +#define tsd_init_check_recursion JEMALLOC_N(tsd_init_check_recursion) +#define tsd_init_finish JEMALLOC_N(tsd_init_finish) +#define tsd_init_head JEMALLOC_N(tsd_init_head) +#define tsd_narenas_tdata_get JEMALLOC_N(tsd_narenas_tdata_get) +#define tsd_narenas_tdata_set JEMALLOC_N(tsd_narenas_tdata_set) +#define tsd_narenas_tdatap_get JEMALLOC_N(tsd_narenas_tdatap_get) +#define tsd_wrapper_get JEMALLOC_N(tsd_wrapper_get) +#define tsd_wrapper_set JEMALLOC_N(tsd_wrapper_set) +#define tsd_nominal JEMALLOC_N(tsd_nominal) +#define tsd_prof_tdata_get JEMALLOC_N(tsd_prof_tdata_get) +#define tsd_prof_tdata_set JEMALLOC_N(tsd_prof_tdata_set) +#define tsd_prof_tdatap_get JEMALLOC_N(tsd_prof_tdatap_get) +#define tsd_quarantine_get JEMALLOC_N(tsd_quarantine_get) +#define tsd_quarantine_set JEMALLOC_N(tsd_quarantine_set) +#define tsd_quarantinep_get JEMALLOC_N(tsd_quarantinep_get) +#define tsd_set JEMALLOC_N(tsd_set) +#define tsd_tcache_enabled_get JEMALLOC_N(tsd_tcache_enabled_get) +#define tsd_tcache_enabled_set JEMALLOC_N(tsd_tcache_enabled_set) +#define tsd_tcache_enabledp_get JEMALLOC_N(tsd_tcache_enabledp_get) +#define tsd_tcache_get JEMALLOC_N(tsd_tcache_get) +#define tsd_tcache_set JEMALLOC_N(tsd_tcache_set) +#define tsd_tcachep_get JEMALLOC_N(tsd_tcachep_get) +#define tsd_thread_allocated_get JEMALLOC_N(tsd_thread_allocated_get) +#define tsd_thread_allocated_set JEMALLOC_N(tsd_thread_allocated_set) +#define tsd_thread_allocatedp_get JEMALLOC_N(tsd_thread_allocatedp_get) +#define tsd_thread_deallocated_get JEMALLOC_N(tsd_thread_deallocated_get) +#define tsd_thread_deallocated_set JEMALLOC_N(tsd_thread_deallocated_set) +#define tsd_thread_deallocatedp_get 
JEMALLOC_N(tsd_thread_deallocatedp_get) +#define tsd_tls JEMALLOC_N(tsd_tls) +#define tsd_tsd JEMALLOC_N(tsd_tsd) +#define tsd_tsdn JEMALLOC_N(tsd_tsdn) +#define tsd_witness_fork_get JEMALLOC_N(tsd_witness_fork_get) +#define tsd_witness_fork_set JEMALLOC_N(tsd_witness_fork_set) +#define tsd_witness_forkp_get JEMALLOC_N(tsd_witness_forkp_get) +#define tsd_witnesses_get JEMALLOC_N(tsd_witnesses_get) +#define tsd_witnesses_set JEMALLOC_N(tsd_witnesses_set) +#define tsd_witnessesp_get JEMALLOC_N(tsd_witnessesp_get) +#define tsdn_fetch JEMALLOC_N(tsdn_fetch) +#define tsdn_null JEMALLOC_N(tsdn_null) +#define tsdn_tsd JEMALLOC_N(tsdn_tsd) +#define u2rz JEMALLOC_N(u2rz) +#define valgrind_freelike_block JEMALLOC_N(valgrind_freelike_block) +#define valgrind_make_mem_defined JEMALLOC_N(valgrind_make_mem_defined) +#define valgrind_make_mem_noaccess JEMALLOC_N(valgrind_make_mem_noaccess) +#define valgrind_make_mem_undefined JEMALLOC_N(valgrind_make_mem_undefined) +#define witness_assert_depth JEMALLOC_N(witness_assert_depth) +#define witness_assert_depth_to_rank JEMALLOC_N(witness_assert_depth_to_rank) +#define witness_assert_lockless JEMALLOC_N(witness_assert_lockless) +#define witness_assert_not_owner JEMALLOC_N(witness_assert_not_owner) +#define witness_assert_owner JEMALLOC_N(witness_assert_owner) +#define witness_depth_error JEMALLOC_N(witness_depth_error) +#define witness_fork_cleanup JEMALLOC_N(witness_fork_cleanup) +#define witness_init JEMALLOC_N(witness_init) +#define witness_lock JEMALLOC_N(witness_lock) +#define witness_lock_error JEMALLOC_N(witness_lock_error) +#define witness_not_owner_error JEMALLOC_N(witness_not_owner_error) +#define witness_owner JEMALLOC_N(witness_owner) +#define witness_owner_error JEMALLOC_N(witness_owner_error) +#define witness_postfork_child JEMALLOC_N(witness_postfork_child) +#define witness_postfork_parent JEMALLOC_N(witness_postfork_parent) +#define witness_prefork JEMALLOC_N(witness_prefork) +#define witness_unlock JEMALLOC_N(witness_unlock) +#define witnesses_cleanup JEMALLOC_N(witnesses_cleanup) +#define zone_register JEMALLOC_N(zone_register) diff --git a/sources/jemalloc/include-linux/jemalloc/internal/private_symbols.txt b/sources/jemalloc/include-linux/jemalloc/internal/private_symbols.txt new file mode 100755 index 00000000..60b57e5a --- /dev/null +++ b/sources/jemalloc/include-linux/jemalloc/internal/private_symbols.txt @@ -0,0 +1,639 @@ +a0dalloc +a0get +a0malloc +arena_aalloc +arena_alloc_junk_small +arena_basic_stats_merge +arena_bin_index +arena_bin_info +arena_bitselm_get_const +arena_bitselm_get_mutable +arena_boot +arena_choose +arena_choose_hard +arena_choose_impl +arena_chunk_alloc_huge +arena_chunk_cache_maybe_insert +arena_chunk_cache_maybe_remove +arena_chunk_dalloc_huge +arena_chunk_ralloc_huge_expand +arena_chunk_ralloc_huge_shrink +arena_chunk_ralloc_huge_similar +arena_cleanup +arena_dalloc +arena_dalloc_bin +arena_dalloc_bin_junked_locked +arena_dalloc_junk_large +arena_dalloc_junk_small +arena_dalloc_large +arena_dalloc_large_junked_locked +arena_dalloc_small +arena_decay_tick +arena_decay_ticks +arena_decay_time_default_get +arena_decay_time_default_set +arena_decay_time_get +arena_decay_time_set +arena_dss_prec_get +arena_dss_prec_set +arena_extent_sn_next +arena_get +arena_ichoose +arena_init +arena_lg_dirty_mult_default_get +arena_lg_dirty_mult_default_set +arena_lg_dirty_mult_get +arena_lg_dirty_mult_set +arena_malloc +arena_malloc_hard +arena_malloc_large +arena_mapbits_allocated_get +arena_mapbits_binind_get 
+arena_mapbits_decommitted_get +arena_mapbits_dirty_get +arena_mapbits_get +arena_mapbits_internal_set +arena_mapbits_large_binind_set +arena_mapbits_large_get +arena_mapbits_large_set +arena_mapbits_large_size_get +arena_mapbits_size_decode +arena_mapbits_size_encode +arena_mapbits_small_runind_get +arena_mapbits_small_set +arena_mapbits_unallocated_set +arena_mapbits_unallocated_size_get +arena_mapbits_unallocated_size_set +arena_mapbits_unzeroed_get +arena_mapbitsp_get_const +arena_mapbitsp_get_mutable +arena_mapbitsp_read +arena_mapbitsp_write +arena_maxrun +arena_maybe_purge +arena_metadata_allocated_add +arena_metadata_allocated_get +arena_metadata_allocated_sub +arena_migrate +arena_miscelm_get_const +arena_miscelm_get_mutable +arena_miscelm_to_pageind +arena_miscelm_to_rpages +arena_new +arena_node_alloc +arena_node_dalloc +arena_nthreads_dec +arena_nthreads_get +arena_nthreads_inc +arena_palloc +arena_postfork_child +arena_postfork_parent +arena_prefork0 +arena_prefork1 +arena_prefork2 +arena_prefork3 +arena_prof_accum +arena_prof_accum_impl +arena_prof_accum_locked +arena_prof_promoted +arena_prof_tctx_get +arena_prof_tctx_reset +arena_prof_tctx_set +arena_ptr_small_binind_get +arena_purge +arena_quarantine_junk_small +arena_ralloc +arena_ralloc_junk_large +arena_ralloc_no_move +arena_rd_to_miscelm +arena_redzone_corruption +arena_reset +arena_run_regind +arena_run_to_miscelm +arena_salloc +arena_sdalloc +arena_stats_merge +arena_tcache_fill_small +arena_tdata_get +arena_tdata_get_hard +arenas +arenas_tdata_bypass_cleanup +arenas_tdata_cleanup +atomic_add_p +atomic_add_u +atomic_add_uint32 +atomic_add_uint64 +atomic_add_z +atomic_cas_p +atomic_cas_u +atomic_cas_uint32 +atomic_cas_uint64 +atomic_cas_z +atomic_sub_p +atomic_sub_u +atomic_sub_uint32 +atomic_sub_uint64 +atomic_sub_z +atomic_write_p +atomic_write_u +atomic_write_uint32 +atomic_write_uint64 +atomic_write_z +base_alloc +base_boot +base_postfork_child +base_postfork_parent +base_prefork +base_stats_get +bitmap_full +bitmap_get +bitmap_info_init +bitmap_init +bitmap_set +bitmap_sfu +bitmap_size +bitmap_unset +bootstrap_calloc +bootstrap_free +bootstrap_malloc +bt_init +buferror +chunk_alloc_base +chunk_alloc_cache +chunk_alloc_dss +chunk_alloc_mmap +chunk_alloc_wrapper +chunk_boot +chunk_dalloc_cache +chunk_dalloc_mmap +chunk_dalloc_wrapper +chunk_deregister +chunk_dss_boot +chunk_dss_mergeable +chunk_dss_prec_get +chunk_dss_prec_set +chunk_hooks_default +chunk_hooks_get +chunk_hooks_set +chunk_in_dss +chunk_lookup +chunk_npages +chunk_purge_wrapper +chunk_register +chunks_rtree +chunksize +chunksize_mask +ckh_count +ckh_delete +ckh_insert +ckh_iter +ckh_new +ckh_pointer_hash +ckh_pointer_keycomp +ckh_remove +ckh_search +ckh_string_hash +ckh_string_keycomp +ctl_boot +ctl_bymib +ctl_byname +ctl_nametomib +ctl_postfork_child +ctl_postfork_parent +ctl_prefork +decay_ticker_get +dss_prec_names +extent_node_achunk_get +extent_node_achunk_set +extent_node_addr_get +extent_node_addr_set +extent_node_arena_get +extent_node_arena_set +extent_node_committed_get +extent_node_committed_set +extent_node_dirty_insert +extent_node_dirty_linkage_init +extent_node_dirty_remove +extent_node_init +extent_node_prof_tctx_get +extent_node_prof_tctx_set +extent_node_size_get +extent_node_size_set +extent_node_sn_get +extent_node_sn_set +extent_node_zeroed_get +extent_node_zeroed_set +extent_size_quantize_ceil +extent_size_quantize_floor +extent_tree_ad_destroy +extent_tree_ad_destroy_recurse +extent_tree_ad_empty +extent_tree_ad_first 
+extent_tree_ad_insert +extent_tree_ad_iter +extent_tree_ad_iter_recurse +extent_tree_ad_iter_start +extent_tree_ad_last +extent_tree_ad_new +extent_tree_ad_next +extent_tree_ad_nsearch +extent_tree_ad_prev +extent_tree_ad_psearch +extent_tree_ad_remove +extent_tree_ad_reverse_iter +extent_tree_ad_reverse_iter_recurse +extent_tree_ad_reverse_iter_start +extent_tree_ad_search +extent_tree_szsnad_destroy +extent_tree_szsnad_destroy_recurse +extent_tree_szsnad_empty +extent_tree_szsnad_first +extent_tree_szsnad_insert +extent_tree_szsnad_iter +extent_tree_szsnad_iter_recurse +extent_tree_szsnad_iter_start +extent_tree_szsnad_last +extent_tree_szsnad_new +extent_tree_szsnad_next +extent_tree_szsnad_nsearch +extent_tree_szsnad_prev +extent_tree_szsnad_psearch +extent_tree_szsnad_remove +extent_tree_szsnad_reverse_iter +extent_tree_szsnad_reverse_iter_recurse +extent_tree_szsnad_reverse_iter_start +extent_tree_szsnad_search +ffs_llu +ffs_lu +ffs_u +ffs_u32 +ffs_u64 +ffs_zu +get_errno +hash +hash_fmix_32 +hash_fmix_64 +hash_get_block_32 +hash_get_block_64 +hash_rotl_32 +hash_rotl_64 +hash_x64_128 +hash_x86_128 +hash_x86_32 +huge_aalloc +huge_dalloc +huge_dalloc_junk +huge_malloc +huge_palloc +huge_prof_tctx_get +huge_prof_tctx_reset +huge_prof_tctx_set +huge_ralloc +huge_ralloc_no_move +huge_salloc +iaalloc +ialloc +iallocztm +iarena_cleanup +idalloc +idalloctm +in_valgrind +index2size +index2size_compute +index2size_lookup +index2size_tab +ipalloc +ipalloct +ipallocztm +iqalloc +iralloc +iralloct +iralloct_realign +isalloc +isdalloct +isqalloc +isthreaded +ivsalloc +ixalloc +jemalloc_postfork_child +jemalloc_postfork_parent +jemalloc_prefork +large_maxclass +lg_floor +lg_prof_sample +malloc_cprintf +malloc_mutex_assert_not_owner +malloc_mutex_assert_owner +malloc_mutex_boot +malloc_mutex_init +malloc_mutex_lock +malloc_mutex_postfork_child +malloc_mutex_postfork_parent +malloc_mutex_prefork +malloc_mutex_unlock +malloc_printf +malloc_snprintf +malloc_strtoumax +malloc_tsd_boot0 +malloc_tsd_boot1 +malloc_tsd_cleanup_register +malloc_tsd_dalloc +malloc_tsd_malloc +malloc_tsd_no_cleanup +malloc_vcprintf +malloc_vsnprintf +malloc_write +map_bias +map_misc_offset +mb_write +narenas_auto +narenas_tdata_cleanup +narenas_total_get +ncpus +nhbins +nhclasses +nlclasses +nstime_add +nstime_compare +nstime_copy +nstime_divide +nstime_idivide +nstime_imultiply +nstime_init +nstime_init2 +nstime_monotonic +nstime_ns +nstime_nsec +nstime_sec +nstime_subtract +nstime_update +opt_abort +opt_decay_time +opt_dss +opt_junk +opt_junk_alloc +opt_junk_free +opt_lg_chunk +opt_lg_dirty_mult +opt_lg_prof_interval +opt_lg_prof_sample +opt_lg_tcache_max +opt_narenas +opt_prof +opt_prof_accum +opt_prof_active +opt_prof_final +opt_prof_gdump +opt_prof_leak +opt_prof_prefix +opt_prof_thread_active_init +opt_purge +opt_quarantine +opt_redzone +opt_stats_print +opt_tcache +opt_thp +opt_utrace +opt_xmalloc +opt_zero +p2rz +pages_boot +pages_commit +pages_decommit +pages_huge +pages_map +pages_nohuge +pages_purge +pages_trim +pages_unmap +pind2sz +pind2sz_compute +pind2sz_lookup +pind2sz_tab +pow2_ceil_u32 +pow2_ceil_u64 +pow2_ceil_zu +prng_lg_range_u32 +prng_lg_range_u64 +prng_lg_range_zu +prng_range_u32 +prng_range_u64 +prng_range_zu +prng_state_next_u32 +prng_state_next_u64 +prng_state_next_zu +prof_active +prof_active_get +prof_active_get_unlocked +prof_active_set +prof_alloc_prep +prof_alloc_rollback +prof_backtrace +prof_boot0 +prof_boot1 +prof_boot2 +prof_bt_count +prof_dump_header +prof_dump_open +prof_free 
+prof_free_sampled_object +prof_gdump +prof_gdump_get +prof_gdump_get_unlocked +prof_gdump_set +prof_gdump_val +prof_idump +prof_interval +prof_lookup +prof_malloc +prof_malloc_sample_object +prof_mdump +prof_postfork_child +prof_postfork_parent +prof_prefork0 +prof_prefork1 +prof_realloc +prof_reset +prof_sample_accum_update +prof_sample_threshold_update +prof_tctx_get +prof_tctx_reset +prof_tctx_set +prof_tdata_cleanup +prof_tdata_count +prof_tdata_get +prof_tdata_init +prof_tdata_reinit +prof_thread_active_get +prof_thread_active_init_get +prof_thread_active_init_set +prof_thread_active_set +prof_thread_name_get +prof_thread_name_set +psz2ind +psz2u +purge_mode_names +quarantine +quarantine_alloc_hook +quarantine_alloc_hook_work +quarantine_cleanup +rtree_child_read +rtree_child_read_hard +rtree_child_tryread +rtree_delete +rtree_get +rtree_new +rtree_node_valid +rtree_set +rtree_start_level +rtree_subkey +rtree_subtree_read +rtree_subtree_read_hard +rtree_subtree_tryread +rtree_val_read +rtree_val_write +run_quantize_ceil +run_quantize_floor +s2u +s2u_compute +s2u_lookup +sa2u +set_errno +size2index +size2index_compute +size2index_lookup +size2index_tab +spin_adaptive +spin_init +stats_cactive +stats_cactive_add +stats_cactive_get +stats_cactive_sub +stats_print +tcache_alloc_easy +tcache_alloc_large +tcache_alloc_small +tcache_alloc_small_hard +tcache_arena_reassociate +tcache_bin_flush_large +tcache_bin_flush_small +tcache_bin_info +tcache_boot +tcache_cleanup +tcache_create +tcache_dalloc_large +tcache_dalloc_small +tcache_enabled_cleanup +tcache_enabled_get +tcache_enabled_set +tcache_event +tcache_event_hard +tcache_flush +tcache_get +tcache_get_hard +tcache_maxclass +tcache_postfork_child +tcache_postfork_parent +tcache_prefork +tcache_salloc +tcache_stats_merge +tcaches +tcaches_create +tcaches_destroy +tcaches_flush +tcaches_get +thread_allocated_cleanup +thread_deallocated_cleanup +ticker_copy +ticker_init +ticker_read +ticker_tick +ticker_ticks +tsd_arena_get +tsd_arena_set +tsd_arenap_get +tsd_arenas_tdata_bypass_get +tsd_arenas_tdata_bypass_set +tsd_arenas_tdata_bypassp_get +tsd_arenas_tdata_get +tsd_arenas_tdata_set +tsd_arenas_tdatap_get +tsd_boot +tsd_boot0 +tsd_boot1 +tsd_booted +tsd_booted_get +tsd_cleanup +tsd_cleanup_wrapper +tsd_fetch +tsd_fetch_impl +tsd_get +tsd_get_allocates +tsd_iarena_get +tsd_iarena_set +tsd_iarenap_get +tsd_initialized +tsd_init_check_recursion +tsd_init_finish +tsd_init_head +tsd_narenas_tdata_get +tsd_narenas_tdata_set +tsd_narenas_tdatap_get +tsd_wrapper_get +tsd_wrapper_set +tsd_nominal +tsd_prof_tdata_get +tsd_prof_tdata_set +tsd_prof_tdatap_get +tsd_quarantine_get +tsd_quarantine_set +tsd_quarantinep_get +tsd_set +tsd_tcache_enabled_get +tsd_tcache_enabled_set +tsd_tcache_enabledp_get +tsd_tcache_get +tsd_tcache_set +tsd_tcachep_get +tsd_thread_allocated_get +tsd_thread_allocated_set +tsd_thread_allocatedp_get +tsd_thread_deallocated_get +tsd_thread_deallocated_set +tsd_thread_deallocatedp_get +tsd_tls +tsd_tsd +tsd_tsdn +tsd_witness_fork_get +tsd_witness_fork_set +tsd_witness_forkp_get +tsd_witnesses_get +tsd_witnesses_set +tsd_witnessesp_get +tsdn_fetch +tsdn_null +tsdn_tsd +u2rz +valgrind_freelike_block +valgrind_make_mem_defined +valgrind_make_mem_noaccess +valgrind_make_mem_undefined +witness_assert_depth +witness_assert_depth_to_rank +witness_assert_lockless +witness_assert_not_owner +witness_assert_owner +witness_depth_error +witness_fork_cleanup +witness_init +witness_lock +witness_lock_error +witness_not_owner_error 
+witness_owner +witness_owner_error +witness_postfork_child +witness_postfork_parent +witness_prefork +witness_unlock +witnesses_cleanup +zone_register diff --git a/sources/jemalloc/include-linux/jemalloc/internal/private_unnamespace.h b/sources/jemalloc/include-linux/jemalloc/internal/private_unnamespace.h new file mode 100755 index 00000000..27038c63 --- /dev/null +++ b/sources/jemalloc/include-linux/jemalloc/internal/private_unnamespace.h @@ -0,0 +1,639 @@ +#undef a0dalloc +#undef a0get +#undef a0malloc +#undef arena_aalloc +#undef arena_alloc_junk_small +#undef arena_basic_stats_merge +#undef arena_bin_index +#undef arena_bin_info +#undef arena_bitselm_get_const +#undef arena_bitselm_get_mutable +#undef arena_boot +#undef arena_choose +#undef arena_choose_hard +#undef arena_choose_impl +#undef arena_chunk_alloc_huge +#undef arena_chunk_cache_maybe_insert +#undef arena_chunk_cache_maybe_remove +#undef arena_chunk_dalloc_huge +#undef arena_chunk_ralloc_huge_expand +#undef arena_chunk_ralloc_huge_shrink +#undef arena_chunk_ralloc_huge_similar +#undef arena_cleanup +#undef arena_dalloc +#undef arena_dalloc_bin +#undef arena_dalloc_bin_junked_locked +#undef arena_dalloc_junk_large +#undef arena_dalloc_junk_small +#undef arena_dalloc_large +#undef arena_dalloc_large_junked_locked +#undef arena_dalloc_small +#undef arena_decay_tick +#undef arena_decay_ticks +#undef arena_decay_time_default_get +#undef arena_decay_time_default_set +#undef arena_decay_time_get +#undef arena_decay_time_set +#undef arena_dss_prec_get +#undef arena_dss_prec_set +#undef arena_extent_sn_next +#undef arena_get +#undef arena_ichoose +#undef arena_init +#undef arena_lg_dirty_mult_default_get +#undef arena_lg_dirty_mult_default_set +#undef arena_lg_dirty_mult_get +#undef arena_lg_dirty_mult_set +#undef arena_malloc +#undef arena_malloc_hard +#undef arena_malloc_large +#undef arena_mapbits_allocated_get +#undef arena_mapbits_binind_get +#undef arena_mapbits_decommitted_get +#undef arena_mapbits_dirty_get +#undef arena_mapbits_get +#undef arena_mapbits_internal_set +#undef arena_mapbits_large_binind_set +#undef arena_mapbits_large_get +#undef arena_mapbits_large_set +#undef arena_mapbits_large_size_get +#undef arena_mapbits_size_decode +#undef arena_mapbits_size_encode +#undef arena_mapbits_small_runind_get +#undef arena_mapbits_small_set +#undef arena_mapbits_unallocated_set +#undef arena_mapbits_unallocated_size_get +#undef arena_mapbits_unallocated_size_set +#undef arena_mapbits_unzeroed_get +#undef arena_mapbitsp_get_const +#undef arena_mapbitsp_get_mutable +#undef arena_mapbitsp_read +#undef arena_mapbitsp_write +#undef arena_maxrun +#undef arena_maybe_purge +#undef arena_metadata_allocated_add +#undef arena_metadata_allocated_get +#undef arena_metadata_allocated_sub +#undef arena_migrate +#undef arena_miscelm_get_const +#undef arena_miscelm_get_mutable +#undef arena_miscelm_to_pageind +#undef arena_miscelm_to_rpages +#undef arena_new +#undef arena_node_alloc +#undef arena_node_dalloc +#undef arena_nthreads_dec +#undef arena_nthreads_get +#undef arena_nthreads_inc +#undef arena_palloc +#undef arena_postfork_child +#undef arena_postfork_parent +#undef arena_prefork0 +#undef arena_prefork1 +#undef arena_prefork2 +#undef arena_prefork3 +#undef arena_prof_accum +#undef arena_prof_accum_impl +#undef arena_prof_accum_locked +#undef arena_prof_promoted +#undef arena_prof_tctx_get +#undef arena_prof_tctx_reset +#undef arena_prof_tctx_set +#undef arena_ptr_small_binind_get +#undef arena_purge +#undef 
arena_quarantine_junk_small +#undef arena_ralloc +#undef arena_ralloc_junk_large +#undef arena_ralloc_no_move +#undef arena_rd_to_miscelm +#undef arena_redzone_corruption +#undef arena_reset +#undef arena_run_regind +#undef arena_run_to_miscelm +#undef arena_salloc +#undef arena_sdalloc +#undef arena_stats_merge +#undef arena_tcache_fill_small +#undef arena_tdata_get +#undef arena_tdata_get_hard +#undef arenas +#undef arenas_tdata_bypass_cleanup +#undef arenas_tdata_cleanup +#undef atomic_add_p +#undef atomic_add_u +#undef atomic_add_uint32 +#undef atomic_add_uint64 +#undef atomic_add_z +#undef atomic_cas_p +#undef atomic_cas_u +#undef atomic_cas_uint32 +#undef atomic_cas_uint64 +#undef atomic_cas_z +#undef atomic_sub_p +#undef atomic_sub_u +#undef atomic_sub_uint32 +#undef atomic_sub_uint64 +#undef atomic_sub_z +#undef atomic_write_p +#undef atomic_write_u +#undef atomic_write_uint32 +#undef atomic_write_uint64 +#undef atomic_write_z +#undef base_alloc +#undef base_boot +#undef base_postfork_child +#undef base_postfork_parent +#undef base_prefork +#undef base_stats_get +#undef bitmap_full +#undef bitmap_get +#undef bitmap_info_init +#undef bitmap_init +#undef bitmap_set +#undef bitmap_sfu +#undef bitmap_size +#undef bitmap_unset +#undef bootstrap_calloc +#undef bootstrap_free +#undef bootstrap_malloc +#undef bt_init +#undef buferror +#undef chunk_alloc_base +#undef chunk_alloc_cache +#undef chunk_alloc_dss +#undef chunk_alloc_mmap +#undef chunk_alloc_wrapper +#undef chunk_boot +#undef chunk_dalloc_cache +#undef chunk_dalloc_mmap +#undef chunk_dalloc_wrapper +#undef chunk_deregister +#undef chunk_dss_boot +#undef chunk_dss_mergeable +#undef chunk_dss_prec_get +#undef chunk_dss_prec_set +#undef chunk_hooks_default +#undef chunk_hooks_get +#undef chunk_hooks_set +#undef chunk_in_dss +#undef chunk_lookup +#undef chunk_npages +#undef chunk_purge_wrapper +#undef chunk_register +#undef chunks_rtree +#undef chunksize +#undef chunksize_mask +#undef ckh_count +#undef ckh_delete +#undef ckh_insert +#undef ckh_iter +#undef ckh_new +#undef ckh_pointer_hash +#undef ckh_pointer_keycomp +#undef ckh_remove +#undef ckh_search +#undef ckh_string_hash +#undef ckh_string_keycomp +#undef ctl_boot +#undef ctl_bymib +#undef ctl_byname +#undef ctl_nametomib +#undef ctl_postfork_child +#undef ctl_postfork_parent +#undef ctl_prefork +#undef decay_ticker_get +#undef dss_prec_names +#undef extent_node_achunk_get +#undef extent_node_achunk_set +#undef extent_node_addr_get +#undef extent_node_addr_set +#undef extent_node_arena_get +#undef extent_node_arena_set +#undef extent_node_committed_get +#undef extent_node_committed_set +#undef extent_node_dirty_insert +#undef extent_node_dirty_linkage_init +#undef extent_node_dirty_remove +#undef extent_node_init +#undef extent_node_prof_tctx_get +#undef extent_node_prof_tctx_set +#undef extent_node_size_get +#undef extent_node_size_set +#undef extent_node_sn_get +#undef extent_node_sn_set +#undef extent_node_zeroed_get +#undef extent_node_zeroed_set +#undef extent_size_quantize_ceil +#undef extent_size_quantize_floor +#undef extent_tree_ad_destroy +#undef extent_tree_ad_destroy_recurse +#undef extent_tree_ad_empty +#undef extent_tree_ad_first +#undef extent_tree_ad_insert +#undef extent_tree_ad_iter +#undef extent_tree_ad_iter_recurse +#undef extent_tree_ad_iter_start +#undef extent_tree_ad_last +#undef extent_tree_ad_new +#undef extent_tree_ad_next +#undef extent_tree_ad_nsearch +#undef extent_tree_ad_prev +#undef extent_tree_ad_psearch +#undef extent_tree_ad_remove +#undef 
extent_tree_ad_reverse_iter +#undef extent_tree_ad_reverse_iter_recurse +#undef extent_tree_ad_reverse_iter_start +#undef extent_tree_ad_search +#undef extent_tree_szsnad_destroy +#undef extent_tree_szsnad_destroy_recurse +#undef extent_tree_szsnad_empty +#undef extent_tree_szsnad_first +#undef extent_tree_szsnad_insert +#undef extent_tree_szsnad_iter +#undef extent_tree_szsnad_iter_recurse +#undef extent_tree_szsnad_iter_start +#undef extent_tree_szsnad_last +#undef extent_tree_szsnad_new +#undef extent_tree_szsnad_next +#undef extent_tree_szsnad_nsearch +#undef extent_tree_szsnad_prev +#undef extent_tree_szsnad_psearch +#undef extent_tree_szsnad_remove +#undef extent_tree_szsnad_reverse_iter +#undef extent_tree_szsnad_reverse_iter_recurse +#undef extent_tree_szsnad_reverse_iter_start +#undef extent_tree_szsnad_search +#undef ffs_llu +#undef ffs_lu +#undef ffs_u +#undef ffs_u32 +#undef ffs_u64 +#undef ffs_zu +#undef get_errno +#undef hash +#undef hash_fmix_32 +#undef hash_fmix_64 +#undef hash_get_block_32 +#undef hash_get_block_64 +#undef hash_rotl_32 +#undef hash_rotl_64 +#undef hash_x64_128 +#undef hash_x86_128 +#undef hash_x86_32 +#undef huge_aalloc +#undef huge_dalloc +#undef huge_dalloc_junk +#undef huge_malloc +#undef huge_palloc +#undef huge_prof_tctx_get +#undef huge_prof_tctx_reset +#undef huge_prof_tctx_set +#undef huge_ralloc +#undef huge_ralloc_no_move +#undef huge_salloc +#undef iaalloc +#undef ialloc +#undef iallocztm +#undef iarena_cleanup +#undef idalloc +#undef idalloctm +#undef in_valgrind +#undef index2size +#undef index2size_compute +#undef index2size_lookup +#undef index2size_tab +#undef ipalloc +#undef ipalloct +#undef ipallocztm +#undef iqalloc +#undef iralloc +#undef iralloct +#undef iralloct_realign +#undef isalloc +#undef isdalloct +#undef isqalloc +#undef isthreaded +#undef ivsalloc +#undef ixalloc +#undef jemalloc_postfork_child +#undef jemalloc_postfork_parent +#undef jemalloc_prefork +#undef large_maxclass +#undef lg_floor +#undef lg_prof_sample +#undef malloc_cprintf +#undef malloc_mutex_assert_not_owner +#undef malloc_mutex_assert_owner +#undef malloc_mutex_boot +#undef malloc_mutex_init +#undef malloc_mutex_lock +#undef malloc_mutex_postfork_child +#undef malloc_mutex_postfork_parent +#undef malloc_mutex_prefork +#undef malloc_mutex_unlock +#undef malloc_printf +#undef malloc_snprintf +#undef malloc_strtoumax +#undef malloc_tsd_boot0 +#undef malloc_tsd_boot1 +#undef malloc_tsd_cleanup_register +#undef malloc_tsd_dalloc +#undef malloc_tsd_malloc +#undef malloc_tsd_no_cleanup +#undef malloc_vcprintf +#undef malloc_vsnprintf +#undef malloc_write +#undef map_bias +#undef map_misc_offset +#undef mb_write +#undef narenas_auto +#undef narenas_tdata_cleanup +#undef narenas_total_get +#undef ncpus +#undef nhbins +#undef nhclasses +#undef nlclasses +#undef nstime_add +#undef nstime_compare +#undef nstime_copy +#undef nstime_divide +#undef nstime_idivide +#undef nstime_imultiply +#undef nstime_init +#undef nstime_init2 +#undef nstime_monotonic +#undef nstime_ns +#undef nstime_nsec +#undef nstime_sec +#undef nstime_subtract +#undef nstime_update +#undef opt_abort +#undef opt_decay_time +#undef opt_dss +#undef opt_junk +#undef opt_junk_alloc +#undef opt_junk_free +#undef opt_lg_chunk +#undef opt_lg_dirty_mult +#undef opt_lg_prof_interval +#undef opt_lg_prof_sample +#undef opt_lg_tcache_max +#undef opt_narenas +#undef opt_prof +#undef opt_prof_accum +#undef opt_prof_active +#undef opt_prof_final +#undef opt_prof_gdump +#undef opt_prof_leak +#undef opt_prof_prefix 
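+/*
+ * private_unnamespace.h is the inverse of private_namespace.h: it strips the
+ * JEMALLOC_N() mangling again for translation units that need the canonical,
+ * unprefixed names back (e.g. test builds).
+ */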
+#undef opt_prof_thread_active_init +#undef opt_purge +#undef opt_quarantine +#undef opt_redzone +#undef opt_stats_print +#undef opt_tcache +#undef opt_thp +#undef opt_utrace +#undef opt_xmalloc +#undef opt_zero +#undef p2rz +#undef pages_boot +#undef pages_commit +#undef pages_decommit +#undef pages_huge +#undef pages_map +#undef pages_nohuge +#undef pages_purge +#undef pages_trim +#undef pages_unmap +#undef pind2sz +#undef pind2sz_compute +#undef pind2sz_lookup +#undef pind2sz_tab +#undef pow2_ceil_u32 +#undef pow2_ceil_u64 +#undef pow2_ceil_zu +#undef prng_lg_range_u32 +#undef prng_lg_range_u64 +#undef prng_lg_range_zu +#undef prng_range_u32 +#undef prng_range_u64 +#undef prng_range_zu +#undef prng_state_next_u32 +#undef prng_state_next_u64 +#undef prng_state_next_zu +#undef prof_active +#undef prof_active_get +#undef prof_active_get_unlocked +#undef prof_active_set +#undef prof_alloc_prep +#undef prof_alloc_rollback +#undef prof_backtrace +#undef prof_boot0 +#undef prof_boot1 +#undef prof_boot2 +#undef prof_bt_count +#undef prof_dump_header +#undef prof_dump_open +#undef prof_free +#undef prof_free_sampled_object +#undef prof_gdump +#undef prof_gdump_get +#undef prof_gdump_get_unlocked +#undef prof_gdump_set +#undef prof_gdump_val +#undef prof_idump +#undef prof_interval +#undef prof_lookup +#undef prof_malloc +#undef prof_malloc_sample_object +#undef prof_mdump +#undef prof_postfork_child +#undef prof_postfork_parent +#undef prof_prefork0 +#undef prof_prefork1 +#undef prof_realloc +#undef prof_reset +#undef prof_sample_accum_update +#undef prof_sample_threshold_update +#undef prof_tctx_get +#undef prof_tctx_reset +#undef prof_tctx_set +#undef prof_tdata_cleanup +#undef prof_tdata_count +#undef prof_tdata_get +#undef prof_tdata_init +#undef prof_tdata_reinit +#undef prof_thread_active_get +#undef prof_thread_active_init_get +#undef prof_thread_active_init_set +#undef prof_thread_active_set +#undef prof_thread_name_get +#undef prof_thread_name_set +#undef psz2ind +#undef psz2u +#undef purge_mode_names +#undef quarantine +#undef quarantine_alloc_hook +#undef quarantine_alloc_hook_work +#undef quarantine_cleanup +#undef rtree_child_read +#undef rtree_child_read_hard +#undef rtree_child_tryread +#undef rtree_delete +#undef rtree_get +#undef rtree_new +#undef rtree_node_valid +#undef rtree_set +#undef rtree_start_level +#undef rtree_subkey +#undef rtree_subtree_read +#undef rtree_subtree_read_hard +#undef rtree_subtree_tryread +#undef rtree_val_read +#undef rtree_val_write +#undef run_quantize_ceil +#undef run_quantize_floor +#undef s2u +#undef s2u_compute +#undef s2u_lookup +#undef sa2u +#undef set_errno +#undef size2index +#undef size2index_compute +#undef size2index_lookup +#undef size2index_tab +#undef spin_adaptive +#undef spin_init +#undef stats_cactive +#undef stats_cactive_add +#undef stats_cactive_get +#undef stats_cactive_sub +#undef stats_print +#undef tcache_alloc_easy +#undef tcache_alloc_large +#undef tcache_alloc_small +#undef tcache_alloc_small_hard +#undef tcache_arena_reassociate +#undef tcache_bin_flush_large +#undef tcache_bin_flush_small +#undef tcache_bin_info +#undef tcache_boot +#undef tcache_cleanup +#undef tcache_create +#undef tcache_dalloc_large +#undef tcache_dalloc_small +#undef tcache_enabled_cleanup +#undef tcache_enabled_get +#undef tcache_enabled_set +#undef tcache_event +#undef tcache_event_hard +#undef tcache_flush +#undef tcache_get +#undef tcache_get_hard +#undef tcache_maxclass +#undef tcache_postfork_child +#undef tcache_postfork_parent +#undef 
tcache_prefork +#undef tcache_salloc +#undef tcache_stats_merge +#undef tcaches +#undef tcaches_create +#undef tcaches_destroy +#undef tcaches_flush +#undef tcaches_get +#undef thread_allocated_cleanup +#undef thread_deallocated_cleanup +#undef ticker_copy +#undef ticker_init +#undef ticker_read +#undef ticker_tick +#undef ticker_ticks +#undef tsd_arena_get +#undef tsd_arena_set +#undef tsd_arenap_get +#undef tsd_arenas_tdata_bypass_get +#undef tsd_arenas_tdata_bypass_set +#undef tsd_arenas_tdata_bypassp_get +#undef tsd_arenas_tdata_get +#undef tsd_arenas_tdata_set +#undef tsd_arenas_tdatap_get +#undef tsd_boot +#undef tsd_boot0 +#undef tsd_boot1 +#undef tsd_booted +#undef tsd_booted_get +#undef tsd_cleanup +#undef tsd_cleanup_wrapper +#undef tsd_fetch +#undef tsd_fetch_impl +#undef tsd_get +#undef tsd_get_allocates +#undef tsd_iarena_get +#undef tsd_iarena_set +#undef tsd_iarenap_get +#undef tsd_initialized +#undef tsd_init_check_recursion +#undef tsd_init_finish +#undef tsd_init_head +#undef tsd_narenas_tdata_get +#undef tsd_narenas_tdata_set +#undef tsd_narenas_tdatap_get +#undef tsd_wrapper_get +#undef tsd_wrapper_set +#undef tsd_nominal +#undef tsd_prof_tdata_get +#undef tsd_prof_tdata_set +#undef tsd_prof_tdatap_get +#undef tsd_quarantine_get +#undef tsd_quarantine_set +#undef tsd_quarantinep_get +#undef tsd_set +#undef tsd_tcache_enabled_get +#undef tsd_tcache_enabled_set +#undef tsd_tcache_enabledp_get +#undef tsd_tcache_get +#undef tsd_tcache_set +#undef tsd_tcachep_get +#undef tsd_thread_allocated_get +#undef tsd_thread_allocated_set +#undef tsd_thread_allocatedp_get +#undef tsd_thread_deallocated_get +#undef tsd_thread_deallocated_set +#undef tsd_thread_deallocatedp_get +#undef tsd_tls +#undef tsd_tsd +#undef tsd_tsdn +#undef tsd_witness_fork_get +#undef tsd_witness_fork_set +#undef tsd_witness_forkp_get +#undef tsd_witnesses_get +#undef tsd_witnesses_set +#undef tsd_witnessesp_get +#undef tsdn_fetch +#undef tsdn_null +#undef tsdn_tsd +#undef u2rz +#undef valgrind_freelike_block +#undef valgrind_make_mem_defined +#undef valgrind_make_mem_noaccess +#undef valgrind_make_mem_undefined +#undef witness_assert_depth +#undef witness_assert_depth_to_rank +#undef witness_assert_lockless +#undef witness_assert_not_owner +#undef witness_assert_owner +#undef witness_depth_error +#undef witness_fork_cleanup +#undef witness_init +#undef witness_lock +#undef witness_lock_error +#undef witness_not_owner_error +#undef witness_owner +#undef witness_owner_error +#undef witness_postfork_child +#undef witness_postfork_parent +#undef witness_prefork +#undef witness_unlock +#undef witnesses_cleanup +#undef zone_register diff --git a/sources/jemalloc/include-linux/jemalloc/internal/prng.h b/sources/jemalloc/include-linux/jemalloc/internal/prng.h new file mode 100755 index 00000000..c2bda19c --- /dev/null +++ b/sources/jemalloc/include-linux/jemalloc/internal/prng.h @@ -0,0 +1,207 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +/* + * Simple linear congruential pseudo-random number generator: + * + * prng(y) = (a*x + c) % m + * + * where the following constants ensure maximal period: + * + * a == Odd number (relatively prime to 2^n), and (a-1) is a multiple of 4. + * c == Odd number (relatively prime to 2^n). + * m == 2^32 + * + * See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints. + * + * This choice of m has the disadvantage that the quality of the bits is + * proportional to bit position. 
For example, the lowest bit has a cycle of 2, + * the next has a cycle of 4, etc. For this reason, we prefer to use the upper + * bits. + */ + +#define PRNG_A_32 UINT32_C(1103515241) +#define PRNG_C_32 UINT32_C(12347) + +#define PRNG_A_64 UINT64_C(6364136223846793005) +#define PRNG_C_64 UINT64_C(1442695040888963407) + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +uint32_t prng_state_next_u32(uint32_t state); +uint64_t prng_state_next_u64(uint64_t state); +size_t prng_state_next_zu(size_t state); + +uint32_t prng_lg_range_u32(uint32_t *state, unsigned lg_range, + bool atomic); +uint64_t prng_lg_range_u64(uint64_t *state, unsigned lg_range); +size_t prng_lg_range_zu(size_t *state, unsigned lg_range, bool atomic); + +uint32_t prng_range_u32(uint32_t *state, uint32_t range, bool atomic); +uint64_t prng_range_u64(uint64_t *state, uint64_t range); +size_t prng_range_zu(size_t *state, size_t range, bool atomic); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PRNG_C_)) +JEMALLOC_ALWAYS_INLINE uint32_t +prng_state_next_u32(uint32_t state) +{ + + return ((state * PRNG_A_32) + PRNG_C_32); +} + +JEMALLOC_ALWAYS_INLINE uint64_t +prng_state_next_u64(uint64_t state) +{ + + return ((state * PRNG_A_64) + PRNG_C_64); +} + +JEMALLOC_ALWAYS_INLINE size_t +prng_state_next_zu(size_t state) +{ + +#if LG_SIZEOF_PTR == 2 + return ((state * PRNG_A_32) + PRNG_C_32); +#elif LG_SIZEOF_PTR == 3 + return ((state * PRNG_A_64) + PRNG_C_64); +#else +#error Unsupported pointer size +#endif +} + +JEMALLOC_ALWAYS_INLINE uint32_t +prng_lg_range_u32(uint32_t *state, unsigned lg_range, bool atomic) +{ + uint32_t ret, state1; + + assert(lg_range > 0); + assert(lg_range <= 32); + + if (atomic) { + uint32_t state0; + + do { + state0 = atomic_read_uint32(state); + state1 = prng_state_next_u32(state0); + } while (atomic_cas_uint32(state, state0, state1)); + } else { + state1 = prng_state_next_u32(*state); + *state = state1; + } + ret = state1 >> (32 - lg_range); + + return (ret); +} + +/* 64-bit atomic operations cannot be supported on all relevant platforms. */ +JEMALLOC_ALWAYS_INLINE uint64_t +prng_lg_range_u64(uint64_t *state, unsigned lg_range) +{ + uint64_t ret, state1; + + assert(lg_range > 0); + assert(lg_range <= 64); + + state1 = prng_state_next_u64(*state); + *state = state1; + ret = state1 >> (64 - lg_range); + + return (ret); +} + +JEMALLOC_ALWAYS_INLINE size_t +prng_lg_range_zu(size_t *state, unsigned lg_range, bool atomic) +{ + size_t ret, state1; + + assert(lg_range > 0); + assert(lg_range <= ZU(1) << (3 + LG_SIZEOF_PTR)); + + if (atomic) { + size_t state0; + + do { + state0 = atomic_read_z(state); + state1 = prng_state_next_zu(state0); + } while (atomic_cas_z(state, state0, state1)); + } else { + state1 = prng_state_next_zu(*state); + *state = state1; + } + ret = state1 >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - lg_range); + + return (ret); +} + +JEMALLOC_ALWAYS_INLINE uint32_t +prng_range_u32(uint32_t *state, uint32_t range, bool atomic) +{ + uint32_t ret; + unsigned lg_range; + + assert(range > 1); + + /* Compute the ceiling of lg(range). 
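For example, with range == 10, pow2_ceil_u32(10) == 16 and ffs_u32(16) == 5, so lg_range == 4; prng_lg_range_u32() then draws 4 bits, a value in [0..16), and the loop below discards anything >= range.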
*/ + lg_range = ffs_u32(pow2_ceil_u32(range)) - 1; + + /* Generate a result in [0..range) via repeated trial. */ + do { + ret = prng_lg_range_u32(state, lg_range, atomic); + } while (ret >= range); + + return (ret); +} + +JEMALLOC_ALWAYS_INLINE uint64_t +prng_range_u64(uint64_t *state, uint64_t range) +{ + uint64_t ret; + unsigned lg_range; + + assert(range > 1); + + /* Compute the ceiling of lg(range). */ + lg_range = ffs_u64(pow2_ceil_u64(range)) - 1; + + /* Generate a result in [0..range) via repeated trial. */ + do { + ret = prng_lg_range_u64(state, lg_range); + } while (ret >= range); + + return (ret); +} + +JEMALLOC_ALWAYS_INLINE size_t +prng_range_zu(size_t *state, size_t range, bool atomic) +{ + size_t ret; + unsigned lg_range; + + assert(range > 1); + + /* Compute the ceiling of lg(range). */ + lg_range = ffs_u64(pow2_ceil_u64(range)) - 1; + + /* Generate a result in [0..range) via repeated trial. */ + do { + ret = prng_lg_range_zu(state, lg_range, atomic); + } while (ret >= range); + + return (ret); +} +#endif + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ diff --git a/sources/jemalloc/include-linux/jemalloc/internal/prof.h b/sources/jemalloc/include-linux/jemalloc/internal/prof.h new file mode 100755 index 00000000..8293b71e --- /dev/null +++ b/sources/jemalloc/include-linux/jemalloc/internal/prof.h @@ -0,0 +1,547 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +typedef struct prof_bt_s prof_bt_t; +typedef struct prof_cnt_s prof_cnt_t; +typedef struct prof_tctx_s prof_tctx_t; +typedef struct prof_gctx_s prof_gctx_t; +typedef struct prof_tdata_s prof_tdata_t; + +/* Option defaults. */ +#ifdef JEMALLOC_PROF +# define PROF_PREFIX_DEFAULT "jeprof" +#else +# define PROF_PREFIX_DEFAULT "" +#endif +#define LG_PROF_SAMPLE_DEFAULT 19 +#define LG_PROF_INTERVAL_DEFAULT -1 + +/* + * Hard limit on stack backtrace depth. The version of prof_backtrace() that + * is based on __builtin_return_address() necessarily has a hard-coded number + * of backtrace frame handlers, and should be kept in sync with this setting. + */ +#define PROF_BT_MAX 128 + +/* Initial hash table size. */ +#define PROF_CKH_MINITEMS 64 + +/* Size of memory buffer to use when writing dump files. */ +#define PROF_DUMP_BUFSIZE 65536 + +/* Size of stack-allocated buffer used by prof_printf(). */ +#define PROF_PRINTF_BUFSIZE 128 + +/* + * Number of mutexes shared among all gctx's. No space is allocated for these + * unless profiling is enabled, so it's okay to over-provision. + */ +#define PROF_NCTX_LOCKS 1024 + +/* + * Number of mutexes shared among all tdata's. No space is allocated for these + * unless profiling is enabled, so it's okay to over-provision. + */ +#define PROF_NTDATA_LOCKS 256 + +/* + * prof_tdata pointers close to NULL are used to encode state information that + * is used for cleaning up during thread shutdown. + */ +#define PROF_TDATA_STATE_REINCARNATED ((prof_tdata_t *)(uintptr_t)1) +#define PROF_TDATA_STATE_PURGATORY ((prof_tdata_t *)(uintptr_t)2) +#define PROF_TDATA_STATE_MAX PROF_TDATA_STATE_PURGATORY + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +struct prof_bt_s { + /* Backtrace, stored as len program counters. */ + void **vec; + unsigned len; +}; + +#ifdef JEMALLOC_PROF_LIBGCC +/* Data structure passed to libgcc _Unwind_Backtrace() callback functions. 
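For context, the usual shape of such a callback (a hedged sketch of the libgcc unwind API, not a quotation of jemalloc's prof.c; prof_unwind_data_t is the structure defined just below) is:

    #include <unwind.h>

    /* Collect up to data->max return addresses into data->bt. */
    static _Unwind_Reason_Code
    unwind_callback(struct _Unwind_Context *context, void *arg)
    {
        prof_unwind_data_t *data = (prof_unwind_data_t *)arg;
        void *ip = (void *)_Unwind_GetIP(context);

        if (ip == NULL)
            return (_URC_END_OF_STACK);
        data->bt->vec[data->bt->len] = ip;
        if (++data->bt->len == data->max)
            return (_URC_END_OF_STACK);
        return (_URC_NO_REASON);
    }

_Unwind_Backtrace(unwind_callback, &data) then walks the stack, invoking the callback once per frame until it returns _URC_END_OF_STACK.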
*/ +typedef struct { + prof_bt_t *bt; + unsigned max; +} prof_unwind_data_t; +#endif + +struct prof_cnt_s { + /* Profiling counters. */ + uint64_t curobjs; + uint64_t curbytes; + uint64_t accumobjs; + uint64_t accumbytes; +}; + +typedef enum { + prof_tctx_state_initializing, + prof_tctx_state_nominal, + prof_tctx_state_dumping, + prof_tctx_state_purgatory /* Dumper must finish destroying. */ +} prof_tctx_state_t; + +struct prof_tctx_s { + /* Thread data for thread that performed the allocation. */ + prof_tdata_t *tdata; + + /* + * Copy of tdata->thr_{uid,discrim}, necessary because tdata may be + * defunct during teardown. + */ + uint64_t thr_uid; + uint64_t thr_discrim; + + /* Profiling counters, protected by tdata->lock. */ + prof_cnt_t cnts; + + /* Associated global context. */ + prof_gctx_t *gctx; + + /* + * UID that distinguishes multiple tctx's created by the same thread, + * but coexisting in gctx->tctxs. There are two ways that such + * coexistence can occur: + * - A dumper thread can cause a tctx to be retained in the purgatory + * state. + * - Although a single "producer" thread must create all tctx's which + * share the same thr_uid, multiple "consumers" can each concurrently + * execute portions of prof_tctx_destroy(). prof_tctx_destroy() only + * gets called once each time cnts.cur{objs,bytes} drop to 0, but this + * threshold can be hit again before the first consumer finishes + * executing prof_tctx_destroy(). + */ + uint64_t tctx_uid; + + /* Linkage into gctx's tctxs. */ + rb_node(prof_tctx_t) tctx_link; + + /* + * True during prof_alloc_prep()..prof_malloc_sample_object(), prevents + * sample vs destroy race. + */ + bool prepared; + + /* Current dump-related state, protected by gctx->lock. */ + prof_tctx_state_t state; + + /* + * Copy of cnts snapshotted during early dump phase, protected by + * dump_mtx. + */ + prof_cnt_t dump_cnts; +}; +typedef rb_tree(prof_tctx_t) prof_tctx_tree_t; + +struct prof_gctx_s { + /* Protects nlimbo, cnt_summed, and tctxs. */ + malloc_mutex_t *lock; + + /* + * Number of threads that currently cause this gctx to be in a state of + * limbo due to one of: + * - Initializing this gctx. + * - Initializing per thread counters associated with this gctx. + * - Preparing to destroy this gctx. + * - Dumping a heap profile that includes this gctx. + * nlimbo must be 1 (single destroyer) in order to safely destroy the + * gctx. + */ + unsigned nlimbo; + + /* + * Tree of profile counters, one for each thread that has allocated in + * this context. + */ + prof_tctx_tree_t tctxs; + + /* Linkage for tree of contexts to be dumped. */ + rb_node(prof_gctx_t) dump_link; + + /* Temporary storage for summation during dump. */ + prof_cnt_t cnt_summed; + + /* Associated backtrace. */ + prof_bt_t bt; + + /* Backtrace vector, variable size, referred to by bt. */ + void *vec[1]; +}; +typedef rb_tree(prof_gctx_t) prof_gctx_tree_t; + +struct prof_tdata_s { + malloc_mutex_t *lock; + + /* Monotonically increasing unique thread identifier. */ + uint64_t thr_uid; + + /* + * Monotonically increasing discriminator among tdata structures + * associated with the same thr_uid. + */ + uint64_t thr_discrim; + + /* Included in heap profile dumps if non-NULL. */ + char *thread_name; + + bool attached; + bool expired; + + rb_node(prof_tdata_t) tdata_link; + + /* + * Counter used to initialize prof_tctx_t's tctx_uid. No locking is + * necessary when incrementing this field, because only one thread ever + * does so. 
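Concretely, the single-writer property means UID assignment can be a plain post-increment; a minimal sketch of the idiom (field names taken from the structures above):

    /* Only the thread owning tdata runs this, so no atomics or locks
     * are needed for the increment. */
    tctx->tctx_uid = tdata->tctx_uid_next++;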
+ */ + uint64_t tctx_uid_next; + + /* + * Hash of (prof_bt_t *)-->(prof_tctx_t *). Each thread tracks + * backtraces for which it has non-zero allocation/deallocation counters + * associated with thread-specific prof_tctx_t objects. Other threads + * may write to prof_tctx_t contents when freeing associated objects. + */ + ckh_t bt2tctx; + + /* Sampling state. */ + uint64_t prng_state; + uint64_t bytes_until_sample; + + /* State used to avoid dumping while operating on prof internals. */ + bool enq; + bool enq_idump; + bool enq_gdump; + + /* + * Set to true during an early dump phase for tdata's which are + * currently being dumped. New threads' tdata's have this initialized + * to false so that they aren't accidentally included in later dump + * phases. + */ + bool dumping; + + /* + * True if profiling is active for this tdata's thread + * (thread.prof.active mallctl). + */ + bool active; + + /* Temporary storage for summation during dump. */ + prof_cnt_t cnt_summed; + + /* Backtrace vector, used for calls to prof_backtrace(). */ + void *vec[PROF_BT_MAX]; +}; +typedef rb_tree(prof_tdata_t) prof_tdata_tree_t; + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +extern bool opt_prof; +extern bool opt_prof_active; +extern bool opt_prof_thread_active_init; +extern size_t opt_lg_prof_sample; /* Mean bytes between samples. */ +extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */ +extern bool opt_prof_gdump; /* High-water memory dumping. */ +extern bool opt_prof_final; /* Final profile dumping. */ +extern bool opt_prof_leak; /* Dump leak summary at exit. */ +extern bool opt_prof_accum; /* Report cumulative bytes. */ +extern char opt_prof_prefix[ + /* Minimize memory bloat for non-prof builds. */ +#ifdef JEMALLOC_PROF + PATH_MAX + +#endif + 1]; + +/* Accessed via prof_active_[gs]et{_unlocked,}(). */ +extern bool prof_active; + +/* Accessed via prof_gdump_[gs]et{_unlocked,}(). */ +extern bool prof_gdump_val; + +/* + * Profile dump interval, measured in bytes allocated. Each arena triggers a + * profile dump when it reaches this threshold. The effect is that the + * interval between profile dumps averages prof_interval, though the actual + * interval between dumps will tend to be sporadic, and the interval will be a + * maximum of approximately (prof_interval * narenas). + */ +extern uint64_t prof_interval; + +/* + * Initialized as opt_lg_prof_sample, and potentially modified during profiling + * resets. 
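With the default LG_PROF_SAMPLE_DEFAULT of 19 (see above), the mean distance between samples is 2^19 bytes, i.e. one sampled allocation per 512 KiB allocated on average. Schematically, the fast path decrements a per-thread byte budget and re-draws it when crossed; the following is a sketch of the pattern that prof_sample_accum_update(), defined later in this header, implements:

    if (tdata->bytes_until_sample >= usize) {
        tdata->bytes_until_sample -= usize;    /* Fast path: not sampled. */
    } else {
        prof_sample_threshold_update(tdata);   /* Draw the next threshold. */
        /* ...capture a backtrace and account for this allocation... */
    }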
+ */ +extern size_t lg_prof_sample; + +void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated); +void prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize, + prof_tctx_t *tctx); +void prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx); +void bt_init(prof_bt_t *bt, void **vec); +void prof_backtrace(prof_bt_t *bt); +prof_tctx_t *prof_lookup(tsd_t *tsd, prof_bt_t *bt); +#ifdef JEMALLOC_JET +size_t prof_tdata_count(void); +size_t prof_bt_count(void); +const prof_cnt_t *prof_cnt_all(void); +typedef int (prof_dump_open_t)(bool, const char *); +extern prof_dump_open_t *prof_dump_open; +typedef bool (prof_dump_header_t)(tsdn_t *, bool, const prof_cnt_t *); +extern prof_dump_header_t *prof_dump_header; +#endif +void prof_idump(tsdn_t *tsdn); +bool prof_mdump(tsd_t *tsd, const char *filename); +void prof_gdump(tsdn_t *tsdn); +prof_tdata_t *prof_tdata_init(tsd_t *tsd); +prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata); +void prof_reset(tsd_t *tsd, size_t lg_sample); +void prof_tdata_cleanup(tsd_t *tsd); +bool prof_active_get(tsdn_t *tsdn); +bool prof_active_set(tsdn_t *tsdn, bool active); +const char *prof_thread_name_get(tsd_t *tsd); +int prof_thread_name_set(tsd_t *tsd, const char *thread_name); +bool prof_thread_active_get(tsd_t *tsd); +bool prof_thread_active_set(tsd_t *tsd, bool active); +bool prof_thread_active_init_get(tsdn_t *tsdn); +bool prof_thread_active_init_set(tsdn_t *tsdn, bool active_init); +bool prof_gdump_get(tsdn_t *tsdn); +bool prof_gdump_set(tsdn_t *tsdn, bool active); +void prof_boot0(void); +void prof_boot1(void); +bool prof_boot2(tsd_t *tsd); +void prof_prefork0(tsdn_t *tsdn); +void prof_prefork1(tsdn_t *tsdn); +void prof_postfork_parent(tsdn_t *tsdn); +void prof_postfork_child(tsdn_t *tsdn); +void prof_sample_threshold_update(prof_tdata_t *tdata); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +bool prof_active_get_unlocked(void); +bool prof_gdump_get_unlocked(void); +prof_tdata_t *prof_tdata_get(tsd_t *tsd, bool create); +prof_tctx_t *prof_tctx_get(tsdn_t *tsdn, const void *ptr); +void prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize, + prof_tctx_t *tctx); +void prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize, + const void *old_ptr, prof_tctx_t *tctx); +bool prof_sample_accum_update(tsd_t *tsd, size_t usize, bool commit, + prof_tdata_t **tdata_out); +prof_tctx_t *prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, + bool update); +void prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize, + prof_tctx_t *tctx); +void prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, + prof_tctx_t *tctx, bool prof_active, bool updated, const void *old_ptr, + size_t old_usize, prof_tctx_t *old_tctx); +void prof_free(tsd_t *tsd, const void *ptr, size_t usize); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PROF_C_)) +JEMALLOC_ALWAYS_INLINE bool +prof_active_get_unlocked(void) +{ + + /* + * Even if opt_prof is true, sampling can be temporarily disabled by + * setting prof_active to false. No locking is used when reading + * prof_active in the fast path, so there are no guarantees regarding + * how long it will take for all threads to notice state changes. 
+ */ + return (prof_active); +} + +JEMALLOC_ALWAYS_INLINE bool +prof_gdump_get_unlocked(void) +{ + + /* + * No locking is used when reading prof_gdump_val in the fast path, so + * there are no guarantees regarding how long it will take for all + * threads to notice state changes. + */ + return (prof_gdump_val); +} + +JEMALLOC_ALWAYS_INLINE prof_tdata_t * +prof_tdata_get(tsd_t *tsd, bool create) +{ + prof_tdata_t *tdata; + + cassert(config_prof); + + tdata = tsd_prof_tdata_get(tsd); + if (create) { + if (unlikely(tdata == NULL)) { + if (tsd_nominal(tsd)) { + tdata = prof_tdata_init(tsd); + tsd_prof_tdata_set(tsd, tdata); + } + } else if (unlikely(tdata->expired)) { + tdata = prof_tdata_reinit(tsd, tdata); + tsd_prof_tdata_set(tsd, tdata); + } + assert(tdata == NULL || tdata->attached); + } + + return (tdata); +} + +JEMALLOC_ALWAYS_INLINE prof_tctx_t * +prof_tctx_get(tsdn_t *tsdn, const void *ptr) +{ + + cassert(config_prof); + assert(ptr != NULL); + + return (arena_prof_tctx_get(tsdn, ptr)); +} + +JEMALLOC_ALWAYS_INLINE void +prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx) +{ + + cassert(config_prof); + assert(ptr != NULL); + + arena_prof_tctx_set(tsdn, ptr, usize, tctx); +} + +JEMALLOC_ALWAYS_INLINE void +prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize, const void *old_ptr, + prof_tctx_t *old_tctx) +{ + + cassert(config_prof); + assert(ptr != NULL); + + arena_prof_tctx_reset(tsdn, ptr, usize, old_ptr, old_tctx); +} + +JEMALLOC_ALWAYS_INLINE bool +prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update, + prof_tdata_t **tdata_out) +{ + prof_tdata_t *tdata; + + cassert(config_prof); + + tdata = prof_tdata_get(tsd, true); + if (unlikely((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)) + tdata = NULL; + + if (tdata_out != NULL) + *tdata_out = tdata; + + if (unlikely(tdata == NULL)) + return (true); + + if (likely(tdata->bytes_until_sample >= usize)) { + if (update) + tdata->bytes_until_sample -= usize; + return (true); + } else { + /* Compute new sample threshold. */ + if (update) + prof_sample_threshold_update(tdata); + return (!tdata->active); + } +} + +JEMALLOC_ALWAYS_INLINE prof_tctx_t * +prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update) +{ + prof_tctx_t *ret; + prof_tdata_t *tdata; + prof_bt_t bt; + + assert(usize == s2u(usize)); + + if (!prof_active || likely(prof_sample_accum_update(tsd, usize, update, + &tdata))) + ret = (prof_tctx_t *)(uintptr_t)1U; + else { + bt_init(&bt, tdata->vec); + prof_backtrace(&bt); + ret = prof_lookup(tsd, &bt); + } + + return (ret); +} + +JEMALLOC_ALWAYS_INLINE void +prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx) +{ + + cassert(config_prof); + assert(ptr != NULL); + assert(usize == isalloc(tsdn, ptr, true)); + + if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) + prof_malloc_sample_object(tsdn, ptr, usize, tctx); + else + prof_tctx_set(tsdn, ptr, usize, (prof_tctx_t *)(uintptr_t)1U); +} + +JEMALLOC_ALWAYS_INLINE void +prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx, + bool prof_active, bool updated, const void *old_ptr, size_t old_usize, + prof_tctx_t *old_tctx) +{ + bool sampled, old_sampled; + + cassert(config_prof); + assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U); + + if (prof_active && !updated && ptr != NULL) { + assert(usize == isalloc(tsd_tsdn(tsd), ptr, true)); + if (prof_sample_accum_update(tsd, usize, true, NULL)) { + /* + * Don't sample. 
The usize passed to prof_alloc_prep() + * was larger than what actually got allocated, so a + * backtrace was captured for this allocation, even + * though its actual usize was insufficient to cross the + * sample threshold. + */ + prof_alloc_rollback(tsd, tctx, true); + tctx = (prof_tctx_t *)(uintptr_t)1U; + } + } + + sampled = ((uintptr_t)tctx > (uintptr_t)1U); + old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U); + + if (unlikely(sampled)) + prof_malloc_sample_object(tsd_tsdn(tsd), ptr, usize, tctx); + else + prof_tctx_reset(tsd_tsdn(tsd), ptr, usize, old_ptr, old_tctx); + + if (unlikely(old_sampled)) + prof_free_sampled_object(tsd, old_usize, old_tctx); +} + +JEMALLOC_ALWAYS_INLINE void +prof_free(tsd_t *tsd, const void *ptr, size_t usize) +{ + prof_tctx_t *tctx = prof_tctx_get(tsd_tsdn(tsd), ptr); + + cassert(config_prof); + assert(usize == isalloc(tsd_tsdn(tsd), ptr, true)); + + if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) + prof_free_sampled_object(tsd, usize, tctx); +} +#endif + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ diff --git a/sources/jemalloc/include-linux/jemalloc/internal/public_namespace.h b/sources/jemalloc/include-linux/jemalloc/internal/public_namespace.h new file mode 100755 index 00000000..c43cb615 --- /dev/null +++ b/sources/jemalloc/include-linux/jemalloc/internal/public_namespace.h @@ -0,0 +1,22 @@ +#define je_malloc_conf JEMALLOC_N(malloc_conf) +#define je_malloc_message JEMALLOC_N(malloc_message) +#define je_malloc JEMALLOC_N(malloc) +#define je_calloc JEMALLOC_N(calloc) +#define je_posix_memalign JEMALLOC_N(posix_memalign) +#define je_aligned_alloc JEMALLOC_N(aligned_alloc) +#define je_realloc JEMALLOC_N(realloc) +#define je_free JEMALLOC_N(free) +#define je_mallocx JEMALLOC_N(mallocx) +#define je_rallocx JEMALLOC_N(rallocx) +#define je_xallocx JEMALLOC_N(xallocx) +#define je_sallocx JEMALLOC_N(sallocx) +#define je_dallocx JEMALLOC_N(dallocx) +#define je_sdallocx JEMALLOC_N(sdallocx) +#define je_nallocx JEMALLOC_N(nallocx) +#define je_mallctl JEMALLOC_N(mallctl) +#define je_mallctlnametomib JEMALLOC_N(mallctlnametomib) +#define je_mallctlbymib JEMALLOC_N(mallctlbymib) +#define je_malloc_stats_print JEMALLOC_N(malloc_stats_print) +#define je_malloc_usable_size JEMALLOC_N(malloc_usable_size) +#define je_memalign JEMALLOC_N(memalign) +#define je_valloc JEMALLOC_N(valloc) diff --git a/sources/jemalloc/include-linux/jemalloc/internal/public_symbols.txt b/sources/jemalloc/include-linux/jemalloc/internal/public_symbols.txt new file mode 100755 index 00000000..d2c566d4 --- /dev/null +++ b/sources/jemalloc/include-linux/jemalloc/internal/public_symbols.txt @@ -0,0 +1,22 @@ +malloc_conf:je_malloc_conf +malloc_message:je_malloc_message +malloc:je_malloc +calloc:je_calloc +posix_memalign:je_posix_memalign +aligned_alloc:je_aligned_alloc +realloc:je_realloc +free:je_free +mallocx:je_mallocx +rallocx:je_rallocx +xallocx:je_xallocx +sallocx:je_sallocx +dallocx:je_dallocx +sdallocx:je_sdallocx +nallocx:je_nallocx +mallctl:je_mallctl +mallctlnametomib:je_mallctlnametomib +mallctlbymib:je_mallctlbymib +malloc_stats_print:je_malloc_stats_print +malloc_usable_size:je_malloc_usable_size +memalign:je_memalign +valloc:je_valloc diff --git a/sources/jemalloc/include-linux/jemalloc/internal/public_unnamespace.h b/sources/jemalloc/include-linux/jemalloc/internal/public_unnamespace.h new file mode 100755 index 00000000..46819485 --- /dev/null +++ 
b/sources/jemalloc/include-linux/jemalloc/internal/public_unnamespace.h @@ -0,0 +1,22 @@ +#undef je_malloc_conf +#undef je_malloc_message +#undef je_malloc +#undef je_calloc +#undef je_posix_memalign +#undef je_aligned_alloc +#undef je_realloc +#undef je_free +#undef je_mallocx +#undef je_rallocx +#undef je_xallocx +#undef je_sallocx +#undef je_dallocx +#undef je_sdallocx +#undef je_nallocx +#undef je_mallctl +#undef je_mallctlnametomib +#undef je_mallctlbymib +#undef je_malloc_stats_print +#undef je_malloc_usable_size +#undef je_memalign +#undef je_valloc diff --git a/sources/jemalloc/include-linux/jemalloc/internal/ql.h b/sources/jemalloc/include-linux/jemalloc/internal/ql.h new file mode 100755 index 00000000..1834bb85 --- /dev/null +++ b/sources/jemalloc/include-linux/jemalloc/internal/ql.h @@ -0,0 +1,81 @@ +/* List definitions. */ +#define ql_head(a_type) \ +struct { \ + a_type *qlh_first; \ +} + +#define ql_head_initializer(a_head) {NULL} + +#define ql_elm(a_type) qr(a_type) + +/* List functions. */ +#define ql_new(a_head) do { \ + (a_head)->qlh_first = NULL; \ +} while (0) + +#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field) + +#define ql_first(a_head) ((a_head)->qlh_first) + +#define ql_last(a_head, a_field) \ + ((ql_first(a_head) != NULL) \ + ? qr_prev(ql_first(a_head), a_field) : NULL) + +#define ql_next(a_head, a_elm, a_field) \ + ((ql_last(a_head, a_field) != (a_elm)) \ + ? qr_next((a_elm), a_field) : NULL) + +#define ql_prev(a_head, a_elm, a_field) \ + ((ql_first(a_head) != (a_elm)) ? qr_prev((a_elm), a_field) \ + : NULL) + +#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \ + qr_before_insert((a_qlelm), (a_elm), a_field); \ + if (ql_first(a_head) == (a_qlelm)) { \ + ql_first(a_head) = (a_elm); \ + } \ +} while (0) + +#define ql_after_insert(a_qlelm, a_elm, a_field) \ + qr_after_insert((a_qlelm), (a_elm), a_field) + +#define ql_head_insert(a_head, a_elm, a_field) do { \ + if (ql_first(a_head) != NULL) { \ + qr_before_insert(ql_first(a_head), (a_elm), a_field); \ + } \ + ql_first(a_head) = (a_elm); \ +} while (0) + +#define ql_tail_insert(a_head, a_elm, a_field) do { \ + if (ql_first(a_head) != NULL) { \ + qr_before_insert(ql_first(a_head), (a_elm), a_field); \ + } \ + ql_first(a_head) = qr_next((a_elm), a_field); \ +} while (0) + +#define ql_remove(a_head, a_elm, a_field) do { \ + if (ql_first(a_head) == (a_elm)) { \ + ql_first(a_head) = qr_next(ql_first(a_head), a_field); \ + } \ + if (ql_first(a_head) != (a_elm)) { \ + qr_remove((a_elm), a_field); \ + } else { \ + ql_first(a_head) = NULL; \ + } \ +} while (0) + +#define ql_head_remove(a_head, a_type, a_field) do { \ + a_type *t = ql_first(a_head); \ + ql_remove((a_head), t, a_field); \ +} while (0) + +#define ql_tail_remove(a_head, a_type, a_field) do { \ + a_type *t = ql_last(a_head, a_field); \ + ql_remove((a_head), t, a_field); \ +} while (0) + +#define ql_foreach(a_var, a_head, a_field) \ + qr_foreach((a_var), ql_first(a_head), a_field) + +#define ql_reverse_foreach(a_var, a_head, a_field) \ + qr_reverse_foreach((a_var), ql_first(a_head), a_field) diff --git a/sources/jemalloc/include-linux/jemalloc/internal/qr.h b/sources/jemalloc/include-linux/jemalloc/internal/qr.h new file mode 100755 index 00000000..0fbaec25 --- /dev/null +++ b/sources/jemalloc/include-linux/jemalloc/internal/qr.h @@ -0,0 +1,69 @@ +/* Ring definitions. */ +#define qr(a_type) \ +struct { \ + a_type *qre_next; \ + a_type *qre_prev; \ +} + +/* Ring functions. 
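Before the ring primitives, a brief usage sketch of the ql_ list layer defined above (ql_ is a thin wrapper over the qr_ macros that follow; the widget type is invented for illustration):

    typedef struct widget_s widget_t;
    struct widget_s {
        int id;
        ql_elm(widget_t) link;    /* Intrusive linkage. */
    };

    static void
    widget_list_example(void)
    {
        ql_head(widget_t) head;
        widget_t a, b, *w;

        a.id = 1;
        b.id = 2;
        ql_new(&head);
        ql_elm_new(&a, link);
        ql_elm_new(&b, link);
        ql_tail_insert(&head, &a, link);
        ql_tail_insert(&head, &b, link);
        ql_foreach(w, &head, link) {
            /* Visits a, then b. */
        }
        ql_remove(&head, &a, link);
    }

No per-node heap allocation occurs: linkage lives inside the element itself, which is why each macro takes the field name as an argument.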
*/ +#define qr_new(a_qr, a_field) do { \ + (a_qr)->a_field.qre_next = (a_qr); \ + (a_qr)->a_field.qre_prev = (a_qr); \ +} while (0) + +#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next) + +#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev) + +#define qr_before_insert(a_qrelm, a_qr, a_field) do { \ + (a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev; \ + (a_qr)->a_field.qre_next = (a_qrelm); \ + (a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr); \ + (a_qrelm)->a_field.qre_prev = (a_qr); \ +} while (0) + +#define qr_after_insert(a_qrelm, a_qr, a_field) \ + do \ + { \ + (a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \ + (a_qr)->a_field.qre_prev = (a_qrelm); \ + (a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr); \ + (a_qrelm)->a_field.qre_next = (a_qr); \ + } while (0) + +#define qr_meld(a_qr_a, a_qr_b, a_field) do { \ + void *t; \ + (a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \ + (a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \ + t = (a_qr_a)->a_field.qre_prev; \ + (a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev; \ + (a_qr_b)->a_field.qre_prev = t; \ +} while (0) + +/* + * qr_meld() and qr_split() are functionally equivalent, so there's no need to + * have two copies of the code. + */ +#define qr_split(a_qr_a, a_qr_b, a_field) \ + qr_meld((a_qr_a), (a_qr_b), a_field) + +#define qr_remove(a_qr, a_field) do { \ + (a_qr)->a_field.qre_prev->a_field.qre_next \ + = (a_qr)->a_field.qre_next; \ + (a_qr)->a_field.qre_next->a_field.qre_prev \ + = (a_qr)->a_field.qre_prev; \ + (a_qr)->a_field.qre_next = (a_qr); \ + (a_qr)->a_field.qre_prev = (a_qr); \ +} while (0) + +#define qr_foreach(var, a_qr, a_field) \ + for ((var) = (a_qr); \ + (var) != NULL; \ + (var) = (((var)->a_field.qre_next != (a_qr)) \ + ? (var)->a_field.qre_next : NULL)) + +#define qr_reverse_foreach(var, a_qr, a_field) \ + for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \ + (var) != NULL; \ + (var) = (((var) != (a_qr)) \ + ? (var)->a_field.qre_prev : NULL)) diff --git a/sources/jemalloc/include-linux/jemalloc/internal/quarantine.h b/sources/jemalloc/include-linux/jemalloc/internal/quarantine.h new file mode 100755 index 00000000..ae607399 --- /dev/null +++ b/sources/jemalloc/include-linux/jemalloc/internal/quarantine.h @@ -0,0 +1,60 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +typedef struct quarantine_obj_s quarantine_obj_t; +typedef struct quarantine_s quarantine_t; + +/* Default per thread quarantine size if valgrind is enabled. */ +#define JEMALLOC_VALGRIND_QUARANTINE_DEFAULT (ZU(1) << 24) + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +struct quarantine_obj_s { + void *ptr; + size_t usize; +}; + +struct quarantine_s { + size_t curbytes; + size_t curobjs; + size_t first; +#define LG_MAXOBJS_INIT 10 + size_t lg_maxobjs; + quarantine_obj_t objs[1]; /* Dynamically sized ring buffer. 
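objs[1] is the classic C89 trailing-array idiom: the struct is over-allocated so the array can hold 2^lg_maxobjs slots. A sketch of the sizing and wrap-around indexing this enables (assumptions about quarantine.c, not quotations from it; offsetof() is from <stddef.h>):

    /* Allocation size for a ring with 2^lg_maxobjs slots. */
    size_t size = offsetof(quarantine_t, objs) +
        ((ZU(1) << lg_maxobjs) * sizeof(quarantine_obj_t));

    /* Power-of-two capacity lets wrap-around be a mask instead of a mod. */
    size_t slot = (quarantine->first + quarantine->curobjs) &
        ((ZU(1) << quarantine->lg_maxobjs) - 1);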
*/ +}; + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +void quarantine_alloc_hook_work(tsd_t *tsd); +void quarantine(tsd_t *tsd, void *ptr); +void quarantine_cleanup(tsd_t *tsd); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +void quarantine_alloc_hook(void); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_QUARANTINE_C_)) +JEMALLOC_ALWAYS_INLINE void +quarantine_alloc_hook(void) +{ + tsd_t *tsd; + + assert(config_fill && opt_quarantine); + + tsd = tsd_fetch(); + if (tsd_quarantine_get(tsd) == NULL) + quarantine_alloc_hook_work(tsd); +} +#endif + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ + diff --git a/sources/jemalloc/include-linux/jemalloc/internal/rb.h b/sources/jemalloc/include-linux/jemalloc/internal/rb.h new file mode 100755 index 00000000..3770342f --- /dev/null +++ b/sources/jemalloc/include-linux/jemalloc/internal/rb.h @@ -0,0 +1,1003 @@ +/*- + ******************************************************************************* + * + * cpp macro implementation of left-leaning 2-3 red-black trees. Parent + * pointers are not used, and color bits are stored in the least significant + * bit of right-child pointers (if RB_COMPACT is defined), thus making node + * linkage as compact as is possible for red-black trees. + * + * Usage: + * + * #include + * #include + * #define NDEBUG // (Optional, see assert(3).) + * #include + * #define RB_COMPACT // (Optional, embed color bits in right-child pointers.) + * #include + * ... + * + ******************************************************************************* + */ + +#ifndef RB_H_ +#define RB_H_ + +#ifdef RB_COMPACT +/* Node structure. */ +#define rb_node(a_type) \ +struct { \ + a_type *rbn_left; \ + a_type *rbn_right_red; \ +} +#else +#define rb_node(a_type) \ +struct { \ + a_type *rbn_left; \ + a_type *rbn_right; \ + bool rbn_red; \ +} +#endif + +/* Root structure. */ +#define rb_tree(a_type) \ +struct { \ + a_type *rbt_root; \ +} + +/* Left accessors. */ +#define rbtn_left_get(a_type, a_field, a_node) \ + ((a_node)->a_field.rbn_left) +#define rbtn_left_set(a_type, a_field, a_node, a_left) do { \ + (a_node)->a_field.rbn_left = a_left; \ +} while (0) + +#ifdef RB_COMPACT +/* Right accessors. */ +#define rbtn_right_get(a_type, a_field, a_node) \ + ((a_type *) (((intptr_t) (a_node)->a_field.rbn_right_red) \ + & ((ssize_t)-2))) +#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \ + (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) a_right) \ + | (((uintptr_t) (a_node)->a_field.rbn_right_red) & ((size_t)1))); \ +} while (0) + +/* Color accessors. 
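In isolation, the color-in-pointer trick used by the accessors below looks like this generic sketch (not jemalloc code): because nodes are at least 2-byte aligned, the least significant pointer bit is always zero and can carry the red/black flag.

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    static void *
    ptr_tag(void *p, bool red)
    {
        assert(((uintptr_t)p & 0x1) == 0);    /* LSB must be free. */
        return ((void *)((uintptr_t)p | (uintptr_t)red));
    }

    static void *
    ptr_untag(void *p)
    {
        return ((void *)((uintptr_t)p & ~(uintptr_t)0x1));
    }

    static bool
    ptr_tag_get(void *p)
    {
        return ((bool)((uintptr_t)p & 0x1));
    }

The cost is that every right-child read must mask off the tag, which is exactly what rbtn_right_get() does below.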
*/ +#define rbtn_red_get(a_type, a_field, a_node) \ + ((bool) (((uintptr_t) (a_node)->a_field.rbn_right_red) \ + & ((size_t)1))) +#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \ + (a_node)->a_field.rbn_right_red = (a_type *) ((((intptr_t) \ + (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)) \ + | ((ssize_t)a_red)); \ +} while (0) +#define rbtn_red_set(a_type, a_field, a_node) do { \ + (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) \ + (a_node)->a_field.rbn_right_red) | ((size_t)1)); \ +} while (0) +#define rbtn_black_set(a_type, a_field, a_node) do { \ + (a_node)->a_field.rbn_right_red = (a_type *) (((intptr_t) \ + (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)); \ +} while (0) + +/* Node initializer. */ +#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \ + /* Bookkeeping bit cannot be used by node pointer. */ \ + assert(((uintptr_t)(a_node) & 0x1) == 0); \ + rbtn_left_set(a_type, a_field, (a_node), NULL); \ + rbtn_right_set(a_type, a_field, (a_node), NULL); \ + rbtn_red_set(a_type, a_field, (a_node)); \ +} while (0) +#else +/* Right accessors. */ +#define rbtn_right_get(a_type, a_field, a_node) \ + ((a_node)->a_field.rbn_right) +#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \ + (a_node)->a_field.rbn_right = a_right; \ +} while (0) + +/* Color accessors. */ +#define rbtn_red_get(a_type, a_field, a_node) \ + ((a_node)->a_field.rbn_red) +#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \ + (a_node)->a_field.rbn_red = (a_red); \ +} while (0) +#define rbtn_red_set(a_type, a_field, a_node) do { \ + (a_node)->a_field.rbn_red = true; \ +} while (0) +#define rbtn_black_set(a_type, a_field, a_node) do { \ + (a_node)->a_field.rbn_red = false; \ +} while (0) + +/* Node initializer. */ +#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \ + rbtn_left_set(a_type, a_field, (a_node), NULL); \ + rbtn_right_set(a_type, a_field, (a_node), NULL); \ + rbtn_red_set(a_type, a_field, (a_node)); \ +} while (0) +#endif + +/* Tree initializer. */ +#define rb_new(a_type, a_field, a_rbt) do { \ + (a_rbt)->rbt_root = NULL; \ +} while (0) + +/* Internal utility macros. */ +#define rbtn_first(a_type, a_field, a_rbt, a_root, r_node) do { \ + (r_node) = (a_root); \ + if ((r_node) != NULL) { \ + for (; \ + rbtn_left_get(a_type, a_field, (r_node)) != NULL; \ + (r_node) = rbtn_left_get(a_type, a_field, (r_node))) { \ + } \ + } \ +} while (0) + +#define rbtn_last(a_type, a_field, a_rbt, a_root, r_node) do { \ + (r_node) = (a_root); \ + if ((r_node) != NULL) { \ + for (; rbtn_right_get(a_type, a_field, (r_node)) != NULL; \ + (r_node) = rbtn_right_get(a_type, a_field, (r_node))) { \ + } \ + } \ +} while (0) + +#define rbtn_rotate_left(a_type, a_field, a_node, r_node) do { \ + (r_node) = rbtn_right_get(a_type, a_field, (a_node)); \ + rbtn_right_set(a_type, a_field, (a_node), \ + rbtn_left_get(a_type, a_field, (r_node))); \ + rbtn_left_set(a_type, a_field, (r_node), (a_node)); \ +} while (0) + +#define rbtn_rotate_right(a_type, a_field, a_node, r_node) do { \ + (r_node) = rbtn_left_get(a_type, a_field, (a_node)); \ + rbtn_left_set(a_type, a_field, (a_node), \ + rbtn_right_get(a_type, a_field, (r_node))); \ + rbtn_right_set(a_type, a_field, (r_node), (a_node)); \ +} while (0) + +/* + * The rb_proto() macro generates function prototypes that correspond to the + * functions generated by an equivalently parameterized call to rb_gen(). 
+ */ + +#define rb_proto(a_attr, a_prefix, a_rbt_type, a_type) \ +a_attr void \ +a_prefix##new(a_rbt_type *rbtree); \ +a_attr bool \ +a_prefix##empty(a_rbt_type *rbtree); \ +a_attr a_type * \ +a_prefix##first(a_rbt_type *rbtree); \ +a_attr a_type * \ +a_prefix##last(a_rbt_type *rbtree); \ +a_attr a_type * \ +a_prefix##next(a_rbt_type *rbtree, a_type *node); \ +a_attr a_type * \ +a_prefix##prev(a_rbt_type *rbtree, a_type *node); \ +a_attr a_type * \ +a_prefix##search(a_rbt_type *rbtree, const a_type *key); \ +a_attr a_type * \ +a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key); \ +a_attr a_type * \ +a_prefix##psearch(a_rbt_type *rbtree, const a_type *key); \ +a_attr void \ +a_prefix##insert(a_rbt_type *rbtree, a_type *node); \ +a_attr void \ +a_prefix##remove(a_rbt_type *rbtree, a_type *node); \ +a_attr a_type * \ +a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \ + a_rbt_type *, a_type *, void *), void *arg); \ +a_attr a_type * \ +a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ + a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg); \ +a_attr void \ +a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \ + void *arg); + +/* + * The rb_gen() macro generates a type-specific red-black tree implementation, + * based on the above cpp macros. + * + * Arguments: + * + * a_attr : Function attribute for generated functions (ex: static). + * a_prefix : Prefix for generated functions (ex: ex_). + * a_rb_type : Type for red-black tree data structure (ex: ex_t). + * a_type : Type for red-black tree node data structure (ex: ex_node_t). + * a_field : Name of red-black tree node linkage (ex: ex_link). + * a_cmp : Node comparison function name, with the following prototype: + * int (a_cmp *)(a_type *a_node, a_type *a_other); + * ^^^^^^ + * or a_key + * Interpretation of comparison function return values: + * -1 : a_node < a_other + * 0 : a_node == a_other + * 1 : a_node > a_other + * In all cases, the a_node or a_key macro argument is the first + * argument to the comparison function, which makes it possible + * to write comparison functions that treat the first argument + * specially. + * + * Assuming the following setup: + * + * typedef struct ex_node_s ex_node_t; + * struct ex_node_s { + * rb_node(ex_node_t) ex_link; + * }; + * typedef rb_tree(ex_node_t) ex_t; + * rb_gen(static, ex_, ex_t, ex_node_t, ex_link, ex_cmp) + * + * The following API is generated: + * + * static void + * ex_new(ex_t *tree); + * Description: Initialize a red-black tree structure. + * Args: + * tree: Pointer to an uninitialized red-black tree object. + * + * static bool + * ex_empty(ex_t *tree); + * Description: Determine whether tree is empty. + * Args: + * tree: Pointer to an initialized red-black tree object. + * Ret: True if tree is empty, false otherwise. + * + * static ex_node_t * + * ex_first(ex_t *tree); + * static ex_node_t * + * ex_last(ex_t *tree); + * Description: Get the first/last node in tree. + * Args: + * tree: Pointer to an initialized red-black tree object. + * Ret: First/last node in tree, or NULL if tree is empty. + * + * static ex_node_t * + * ex_next(ex_t *tree, ex_node_t *node); + * static ex_node_t * + * ex_prev(ex_t *tree, ex_node_t *node); + * Description: Get node's successor/predecessor. + * Args: + * tree: Pointer to an initialized red-black tree object. + * node: A node in tree. + * Ret: node's successor/predecessor in tree, or NULL if node is + * last/first. 
+ * + * static ex_node_t * + * ex_search(ex_t *tree, const ex_node_t *key); + * Description: Search for node that matches key. + * Args: + * tree: Pointer to an initialized red-black tree object. + * key : Search key. + * Ret: Node in tree that matches key, or NULL if no match. + * + * static ex_node_t * + * ex_nsearch(ex_t *tree, const ex_node_t *key); + * static ex_node_t * + * ex_psearch(ex_t *tree, const ex_node_t *key); + * Description: Search for node that matches key. If no match is found, + * return what would be key's successor/predecessor, were + * key in tree. + * Args: + * tree: Pointer to an initialized red-black tree object. + * key : Search key. + * Ret: Node in tree that matches key, or if no match, hypothetical node's + * successor/predecessor (NULL if no successor/predecessor). + * + * static void + * ex_insert(ex_t *tree, ex_node_t *node); + * Description: Insert node into tree. + * Args: + * tree: Pointer to an initialized red-black tree object. + * node: Node to be inserted into tree. + * + * static void + * ex_remove(ex_t *tree, ex_node_t *node); + * Description: Remove node from tree. + * Args: + * tree: Pointer to an initialized red-black tree object. + * node: Node in tree to be removed. + * + * static ex_node_t * + * ex_iter(ex_t *tree, ex_node_t *start, ex_node_t *(*cb)(ex_t *, + * ex_node_t *, void *), void *arg); + * static ex_node_t * + * ex_reverse_iter(ex_t *tree, ex_node_t *start, ex_node *(*cb)(ex_t *, + * ex_node_t *, void *), void *arg); + * Description: Iterate forward/backward over tree, starting at node. If + * tree is modified, iteration must be immediately + * terminated by the callback function that causes the + * modification. + * Args: + * tree : Pointer to an initialized red-black tree object. + * start: Node at which to start iteration, or NULL to start at + * first/last node. + * cb : Callback function, which is called for each node during + * iteration. Under normal circumstances the callback function + * should return NULL, which causes iteration to continue. If a + * callback function returns non-NULL, iteration is immediately + * terminated and the non-NULL return value is returned by the + * iterator. This is useful for re-starting iteration after + * modifying tree. + * arg : Opaque pointer passed to cb(). + * Ret: NULL if iteration completed, or the non-NULL callback return value + * that caused termination of the iteration. + * + * static void + * ex_destroy(ex_t *tree, void (*cb)(ex_node_t *, void *), void *arg); + * Description: Iterate over the tree with post-order traversal, remove + * each node, and run the callback if non-null. This is + * used for destroying a tree without paying the cost to + * rebalance it. The tree must not be otherwise altered + * during traversal. + * Args: + * tree: Pointer to an initialized red-black tree object. + * cb : Callback function, which, if non-null, is called for each node + * during iteration. There is no way to stop iteration once it + * has begun. + * arg : Opaque pointer passed to cb(). 
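Assembling the documented pieces, a compilable instantiation might look as follows (a sketch using the ex_ names from the description above; the integer key and trivial comparator are invented for the example):

    #include <assert.h>

    typedef struct ex_node_s ex_node_t;
    struct ex_node_s {
        int key;
        rb_node(ex_node_t) ex_link;
    };
    typedef rb_tree(ex_node_t) ex_t;

    static int
    ex_cmp(ex_node_t *a, ex_node_t *b)
    {
        /* Returns -1, 0, or 1, as the comparison contract requires. */
        return ((a->key > b->key) - (a->key < b->key));
    }

    rb_gen(static, ex_, ex_t, ex_node_t, ex_link, ex_cmp)

    static void
    ex_example(void)
    {
        ex_t tree;
        ex_node_t node, key;

        ex_new(&tree);
        node.key = 42;
        ex_insert(&tree, &node);
        key.key = 42;
        assert(ex_search(&tree, &key) == &node);
        ex_remove(&tree, &node);
    }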
+ */ +#define rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp) \ +a_attr void \ +a_prefix##new(a_rbt_type *rbtree) { \ + rb_new(a_type, a_field, rbtree); \ +} \ +a_attr bool \ +a_prefix##empty(a_rbt_type *rbtree) { \ + return (rbtree->rbt_root == NULL); \ +} \ +a_attr a_type * \ +a_prefix##first(a_rbt_type *rbtree) { \ + a_type *ret; \ + rbtn_first(a_type, a_field, rbtree, rbtree->rbt_root, ret); \ + return (ret); \ +} \ +a_attr a_type * \ +a_prefix##last(a_rbt_type *rbtree) { \ + a_type *ret; \ + rbtn_last(a_type, a_field, rbtree, rbtree->rbt_root, ret); \ + return (ret); \ +} \ +a_attr a_type * \ +a_prefix##next(a_rbt_type *rbtree, a_type *node) { \ + a_type *ret; \ + if (rbtn_right_get(a_type, a_field, node) != NULL) { \ + rbtn_first(a_type, a_field, rbtree, rbtn_right_get(a_type, \ + a_field, node), ret); \ + } else { \ + a_type *tnode = rbtree->rbt_root; \ + assert(tnode != NULL); \ + ret = NULL; \ + while (true) { \ + int cmp = (a_cmp)(node, tnode); \ + if (cmp < 0) { \ + ret = tnode; \ + tnode = rbtn_left_get(a_type, a_field, tnode); \ + } else if (cmp > 0) { \ + tnode = rbtn_right_get(a_type, a_field, tnode); \ + } else { \ + break; \ + } \ + assert(tnode != NULL); \ + } \ + } \ + return (ret); \ +} \ +a_attr a_type * \ +a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \ + a_type *ret; \ + if (rbtn_left_get(a_type, a_field, node) != NULL) { \ + rbtn_last(a_type, a_field, rbtree, rbtn_left_get(a_type, \ + a_field, node), ret); \ + } else { \ + a_type *tnode = rbtree->rbt_root; \ + assert(tnode != NULL); \ + ret = NULL; \ + while (true) { \ + int cmp = (a_cmp)(node, tnode); \ + if (cmp < 0) { \ + tnode = rbtn_left_get(a_type, a_field, tnode); \ + } else if (cmp > 0) { \ + ret = tnode; \ + tnode = rbtn_right_get(a_type, a_field, tnode); \ + } else { \ + break; \ + } \ + assert(tnode != NULL); \ + } \ + } \ + return (ret); \ +} \ +a_attr a_type * \ +a_prefix##search(a_rbt_type *rbtree, const a_type *key) { \ + a_type *ret; \ + int cmp; \ + ret = rbtree->rbt_root; \ + while (ret != NULL \ + && (cmp = (a_cmp)(key, ret)) != 0) { \ + if (cmp < 0) { \ + ret = rbtn_left_get(a_type, a_field, ret); \ + } else { \ + ret = rbtn_right_get(a_type, a_field, ret); \ + } \ + } \ + return (ret); \ +} \ +a_attr a_type * \ +a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key) { \ + a_type *ret; \ + a_type *tnode = rbtree->rbt_root; \ + ret = NULL; \ + while (tnode != NULL) { \ + int cmp = (a_cmp)(key, tnode); \ + if (cmp < 0) { \ + ret = tnode; \ + tnode = rbtn_left_get(a_type, a_field, tnode); \ + } else if (cmp > 0) { \ + tnode = rbtn_right_get(a_type, a_field, tnode); \ + } else { \ + ret = tnode; \ + break; \ + } \ + } \ + return (ret); \ +} \ +a_attr a_type * \ +a_prefix##psearch(a_rbt_type *rbtree, const a_type *key) { \ + a_type *ret; \ + a_type *tnode = rbtree->rbt_root; \ + ret = NULL; \ + while (tnode != NULL) { \ + int cmp = (a_cmp)(key, tnode); \ + if (cmp < 0) { \ + tnode = rbtn_left_get(a_type, a_field, tnode); \ + } else if (cmp > 0) { \ + ret = tnode; \ + tnode = rbtn_right_get(a_type, a_field, tnode); \ + } else { \ + ret = tnode; \ + break; \ + } \ + } \ + return (ret); \ +} \ +a_attr void \ +a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \ + struct { \ + a_type *node; \ + int cmp; \ + } path[sizeof(void *) << 4], *pathp; \ + rbt_node_new(a_type, a_field, rbtree, node); \ + /* Wind. 
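(Sizing note, an observation rather than part of the original source: path holds sizeof(void *) << 4 entries, i.e. twice the pointer width in bits, 128 on LP64. A red-black tree over at most 2^64 nodes has height at most 2 * lg(2^64) = 128, so the wind phase below cannot overflow this stack-allocated array.)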
*/ \ + path->node = rbtree->rbt_root; \ + for (pathp = path; pathp->node != NULL; pathp++) { \ + int cmp = pathp->cmp = a_cmp(node, pathp->node); \ + assert(cmp != 0); \ + if (cmp < 0) { \ + pathp[1].node = rbtn_left_get(a_type, a_field, \ + pathp->node); \ + } else { \ + pathp[1].node = rbtn_right_get(a_type, a_field, \ + pathp->node); \ + } \ + } \ + pathp->node = node; \ + /* Unwind. */ \ + for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \ + a_type *cnode = pathp->node; \ + if (pathp->cmp < 0) { \ + a_type *left = pathp[1].node; \ + rbtn_left_set(a_type, a_field, cnode, left); \ + if (rbtn_red_get(a_type, a_field, left)) { \ + a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ + if (leftleft != NULL && rbtn_red_get(a_type, a_field, \ + leftleft)) { \ + /* Fix up 4-node. */ \ + a_type *tnode; \ + rbtn_black_set(a_type, a_field, leftleft); \ + rbtn_rotate_right(a_type, a_field, cnode, tnode); \ + cnode = tnode; \ + } \ + } else { \ + return; \ + } \ + } else { \ + a_type *right = pathp[1].node; \ + rbtn_right_set(a_type, a_field, cnode, right); \ + if (rbtn_red_get(a_type, a_field, right)) { \ + a_type *left = rbtn_left_get(a_type, a_field, cnode); \ + if (left != NULL && rbtn_red_get(a_type, a_field, \ + left)) { \ + /* Split 4-node. */ \ + rbtn_black_set(a_type, a_field, left); \ + rbtn_black_set(a_type, a_field, right); \ + rbtn_red_set(a_type, a_field, cnode); \ + } else { \ + /* Lean left. */ \ + a_type *tnode; \ + bool tred = rbtn_red_get(a_type, a_field, cnode); \ + rbtn_rotate_left(a_type, a_field, cnode, tnode); \ + rbtn_color_set(a_type, a_field, tnode, tred); \ + rbtn_red_set(a_type, a_field, cnode); \ + cnode = tnode; \ + } \ + } else { \ + return; \ + } \ + } \ + pathp->node = cnode; \ + } \ + /* Set root, and make it black. */ \ + rbtree->rbt_root = path->node; \ + rbtn_black_set(a_type, a_field, rbtree->rbt_root); \ +} \ +a_attr void \ +a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ + struct { \ + a_type *node; \ + int cmp; \ + } *pathp, *nodep, path[sizeof(void *) << 4]; \ + /* Wind. */ \ + nodep = NULL; /* Silence compiler warning. */ \ + path->node = rbtree->rbt_root; \ + for (pathp = path; pathp->node != NULL; pathp++) { \ + int cmp = pathp->cmp = a_cmp(node, pathp->node); \ + if (cmp < 0) { \ + pathp[1].node = rbtn_left_get(a_type, a_field, \ + pathp->node); \ + } else { \ + pathp[1].node = rbtn_right_get(a_type, a_field, \ + pathp->node); \ + if (cmp == 0) { \ + /* Find node's successor, in preparation for swap. */ \ + pathp->cmp = 1; \ + nodep = pathp; \ + for (pathp++; pathp->node != NULL; \ + pathp++) { \ + pathp->cmp = -1; \ + pathp[1].node = rbtn_left_get(a_type, a_field, \ + pathp->node); \ + } \ + break; \ + } \ + } \ + } \ + assert(nodep->node == node); \ + pathp--; \ + if (pathp->node != node) { \ + /* Swap node with its successor. */ \ + bool tred = rbtn_red_get(a_type, a_field, pathp->node); \ + rbtn_color_set(a_type, a_field, pathp->node, \ + rbtn_red_get(a_type, a_field, node)); \ + rbtn_left_set(a_type, a_field, pathp->node, \ + rbtn_left_get(a_type, a_field, node)); \ + /* If node's successor is its right child, the following code */\ + /* will do the wrong thing for the right child pointer. */\ + /* However, it doesn't matter, because the pointer will be */\ + /* properly set when the successor is pruned. 
*/\ + rbtn_right_set(a_type, a_field, pathp->node, \ + rbtn_right_get(a_type, a_field, node)); \ + rbtn_color_set(a_type, a_field, node, tred); \ + /* The pruned leaf node's child pointers are never accessed */\ + /* again, so don't bother setting them to nil. */\ + nodep->node = pathp->node; \ + pathp->node = node; \ + if (nodep == path) { \ + rbtree->rbt_root = nodep->node; \ + } else { \ + if (nodep[-1].cmp < 0) { \ + rbtn_left_set(a_type, a_field, nodep[-1].node, \ + nodep->node); \ + } else { \ + rbtn_right_set(a_type, a_field, nodep[-1].node, \ + nodep->node); \ + } \ + } \ + } else { \ + a_type *left = rbtn_left_get(a_type, a_field, node); \ + if (left != NULL) { \ + /* node has no successor, but it has a left child. */\ + /* Splice node out, without losing the left child. */\ + assert(!rbtn_red_get(a_type, a_field, node)); \ + assert(rbtn_red_get(a_type, a_field, left)); \ + rbtn_black_set(a_type, a_field, left); \ + if (pathp == path) { \ + rbtree->rbt_root = left; \ + } else { \ + if (pathp[-1].cmp < 0) { \ + rbtn_left_set(a_type, a_field, pathp[-1].node, \ + left); \ + } else { \ + rbtn_right_set(a_type, a_field, pathp[-1].node, \ + left); \ + } \ + } \ + return; \ + } else if (pathp == path) { \ + /* The tree only contained one node. */ \ + rbtree->rbt_root = NULL; \ + return; \ + } \ + } \ + if (rbtn_red_get(a_type, a_field, pathp->node)) { \ + /* Prune red node, which requires no fixup. */ \ + assert(pathp[-1].cmp < 0); \ + rbtn_left_set(a_type, a_field, pathp[-1].node, NULL); \ + return; \ + } \ + /* The node to be pruned is black, so unwind until balance is */\ + /* restored. */\ + pathp->node = NULL; \ + for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \ + assert(pathp->cmp != 0); \ + if (pathp->cmp < 0) { \ + rbtn_left_set(a_type, a_field, pathp->node, \ + pathp[1].node); \ + if (rbtn_red_get(a_type, a_field, pathp->node)) { \ + a_type *right = rbtn_right_get(a_type, a_field, \ + pathp->node); \ + a_type *rightleft = rbtn_left_get(a_type, a_field, \ + right); \ + a_type *tnode; \ + if (rightleft != NULL && rbtn_red_get(a_type, a_field, \ + rightleft)) { \ + /* In the following diagrams, ||, //, and \\ */\ + /* indicate the path to the removed node. */\ + /* */\ + /* || */\ + /* pathp(r) */\ + /* // \ */\ + /* (b) (b) */\ + /* / */\ + /* (r) */\ + /* */\ + rbtn_black_set(a_type, a_field, pathp->node); \ + rbtn_rotate_right(a_type, a_field, right, tnode); \ + rbtn_right_set(a_type, a_field, pathp->node, tnode);\ + rbtn_rotate_left(a_type, a_field, pathp->node, \ + tnode); \ + } else { \ + /* || */\ + /* pathp(r) */\ + /* // \ */\ + /* (b) (b) */\ + /* / */\ + /* (b) */\ + /* */\ + rbtn_rotate_left(a_type, a_field, pathp->node, \ + tnode); \ + } \ + /* Balance restored, but rotation modified subtree */\ + /* root. 
*/\ + assert((uintptr_t)pathp > (uintptr_t)path); \ + if (pathp[-1].cmp < 0) { \ + rbtn_left_set(a_type, a_field, pathp[-1].node, \ + tnode); \ + } else { \ + rbtn_right_set(a_type, a_field, pathp[-1].node, \ + tnode); \ + } \ + return; \ + } else { \ + a_type *right = rbtn_right_get(a_type, a_field, \ + pathp->node); \ + a_type *rightleft = rbtn_left_get(a_type, a_field, \ + right); \ + if (rightleft != NULL && rbtn_red_get(a_type, a_field, \ + rightleft)) { \ + /* || */\ + /* pathp(b) */\ + /* // \ */\ + /* (b) (b) */\ + /* / */\ + /* (r) */\ + a_type *tnode; \ + rbtn_black_set(a_type, a_field, rightleft); \ + rbtn_rotate_right(a_type, a_field, right, tnode); \ + rbtn_right_set(a_type, a_field, pathp->node, tnode);\ + rbtn_rotate_left(a_type, a_field, pathp->node, \ + tnode); \ + /* Balance restored, but rotation modified */\ + /* subtree root, which may actually be the tree */\ + /* root. */\ + if (pathp == path) { \ + /* Set root. */ \ + rbtree->rbt_root = tnode; \ + } else { \ + if (pathp[-1].cmp < 0) { \ + rbtn_left_set(a_type, a_field, \ + pathp[-1].node, tnode); \ + } else { \ + rbtn_right_set(a_type, a_field, \ + pathp[-1].node, tnode); \ + } \ + } \ + return; \ + } else { \ + /* || */\ + /* pathp(b) */\ + /* // \ */\ + /* (b) (b) */\ + /* / */\ + /* (b) */\ + a_type *tnode; \ + rbtn_red_set(a_type, a_field, pathp->node); \ + rbtn_rotate_left(a_type, a_field, pathp->node, \ + tnode); \ + pathp->node = tnode; \ + } \ + } \ + } else { \ + a_type *left; \ + rbtn_right_set(a_type, a_field, pathp->node, \ + pathp[1].node); \ + left = rbtn_left_get(a_type, a_field, pathp->node); \ + if (rbtn_red_get(a_type, a_field, left)) { \ + a_type *tnode; \ + a_type *leftright = rbtn_right_get(a_type, a_field, \ + left); \ + a_type *leftrightleft = rbtn_left_get(a_type, a_field, \ + leftright); \ + if (leftrightleft != NULL && rbtn_red_get(a_type, \ + a_field, leftrightleft)) { \ + /* || */\ + /* pathp(b) */\ + /* / \\ */\ + /* (r) (b) */\ + /* \ */\ + /* (b) */\ + /* / */\ + /* (r) */\ + a_type *unode; \ + rbtn_black_set(a_type, a_field, leftrightleft); \ + rbtn_rotate_right(a_type, a_field, pathp->node, \ + unode); \ + rbtn_rotate_right(a_type, a_field, pathp->node, \ + tnode); \ + rbtn_right_set(a_type, a_field, unode, tnode); \ + rbtn_rotate_left(a_type, a_field, unode, tnode); \ + } else { \ + /* || */\ + /* pathp(b) */\ + /* / \\ */\ + /* (r) (b) */\ + /* \ */\ + /* (b) */\ + /* / */\ + /* (b) */\ + assert(leftright != NULL); \ + rbtn_red_set(a_type, a_field, leftright); \ + rbtn_rotate_right(a_type, a_field, pathp->node, \ + tnode); \ + rbtn_black_set(a_type, a_field, tnode); \ + } \ + /* Balance restored, but rotation modified subtree */\ + /* root, which may actually be the tree root. */\ + if (pathp == path) { \ + /* Set root. 
*/ \ + rbtree->rbt_root = tnode; \ + } else { \ + if (pathp[-1].cmp < 0) { \ + rbtn_left_set(a_type, a_field, pathp[-1].node, \ + tnode); \ + } else { \ + rbtn_right_set(a_type, a_field, pathp[-1].node, \ + tnode); \ + } \ + } \ + return; \ + } else if (rbtn_red_get(a_type, a_field, pathp->node)) { \ + a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ + if (leftleft != NULL && rbtn_red_get(a_type, a_field, \ + leftleft)) { \ + /* || */\ + /* pathp(r) */\ + /* / \\ */\ + /* (b) (b) */\ + /* / */\ + /* (r) */\ + a_type *tnode; \ + rbtn_black_set(a_type, a_field, pathp->node); \ + rbtn_red_set(a_type, a_field, left); \ + rbtn_black_set(a_type, a_field, leftleft); \ + rbtn_rotate_right(a_type, a_field, pathp->node, \ + tnode); \ + /* Balance restored, but rotation modified */\ + /* subtree root. */\ + assert((uintptr_t)pathp > (uintptr_t)path); \ + if (pathp[-1].cmp < 0) { \ + rbtn_left_set(a_type, a_field, pathp[-1].node, \ + tnode); \ + } else { \ + rbtn_right_set(a_type, a_field, pathp[-1].node, \ + tnode); \ + } \ + return; \ + } else { \ + /* || */\ + /* pathp(r) */\ + /* / \\ */\ + /* (b) (b) */\ + /* / */\ + /* (b) */\ + rbtn_red_set(a_type, a_field, left); \ + rbtn_black_set(a_type, a_field, pathp->node); \ + /* Balance restored. */ \ + return; \ + } \ + } else { \ + a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ + if (leftleft != NULL && rbtn_red_get(a_type, a_field, \ + leftleft)) { \ + /* || */\ + /* pathp(b) */\ + /* / \\ */\ + /* (b) (b) */\ + /* / */\ + /* (r) */\ + a_type *tnode; \ + rbtn_black_set(a_type, a_field, leftleft); \ + rbtn_rotate_right(a_type, a_field, pathp->node, \ + tnode); \ + /* Balance restored, but rotation modified */\ + /* subtree root, which may actually be the tree */\ + /* root. */\ + if (pathp == path) { \ + /* Set root. */ \ + rbtree->rbt_root = tnode; \ + } else { \ + if (pathp[-1].cmp < 0) { \ + rbtn_left_set(a_type, a_field, \ + pathp[-1].node, tnode); \ + } else { \ + rbtn_right_set(a_type, a_field, \ + pathp[-1].node, tnode); \ + } \ + } \ + return; \ + } else { \ + /* || */\ + /* pathp(b) */\ + /* / \\ */\ + /* (b) (b) */\ + /* / */\ + /* (b) */\ + rbtn_red_set(a_type, a_field, left); \ + } \ + } \ + } \ + } \ + /* Set root. 
*/ \ + rbtree->rbt_root = path->node; \ + assert(!rbtn_red_get(a_type, a_field, rbtree->rbt_root)); \ +} \ +a_attr a_type * \ +a_prefix##iter_recurse(a_rbt_type *rbtree, a_type *node, \ + a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ + if (node == NULL) { \ + return (NULL); \ + } else { \ + a_type *ret; \ + if ((ret = a_prefix##iter_recurse(rbtree, rbtn_left_get(a_type, \ + a_field, node), cb, arg)) != NULL || (ret = cb(rbtree, node, \ + arg)) != NULL) { \ + return (ret); \ + } \ + return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ + a_field, node), cb, arg)); \ + } \ +} \ +a_attr a_type * \ +a_prefix##iter_start(a_rbt_type *rbtree, a_type *start, a_type *node, \ + a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ + int cmp = a_cmp(start, node); \ + if (cmp < 0) { \ + a_type *ret; \ + if ((ret = a_prefix##iter_start(rbtree, start, \ + rbtn_left_get(a_type, a_field, node), cb, arg)) != NULL || \ + (ret = cb(rbtree, node, arg)) != NULL) { \ + return (ret); \ + } \ + return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ + a_field, node), cb, arg)); \ + } else if (cmp > 0) { \ + return (a_prefix##iter_start(rbtree, start, \ + rbtn_right_get(a_type, a_field, node), cb, arg)); \ + } else { \ + a_type *ret; \ + if ((ret = cb(rbtree, node, arg)) != NULL) { \ + return (ret); \ + } \ + return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ + a_field, node), cb, arg)); \ + } \ +} \ +a_attr a_type * \ +a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \ + a_rbt_type *, a_type *, void *), void *arg) { \ + a_type *ret; \ + if (start != NULL) { \ + ret = a_prefix##iter_start(rbtree, start, rbtree->rbt_root, \ + cb, arg); \ + } else { \ + ret = a_prefix##iter_recurse(rbtree, rbtree->rbt_root, cb, arg);\ + } \ + return (ret); \ +} \ +a_attr a_type * \ +a_prefix##reverse_iter_recurse(a_rbt_type *rbtree, a_type *node, \ + a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ + if (node == NULL) { \ + return (NULL); \ + } else { \ + a_type *ret; \ + if ((ret = a_prefix##reverse_iter_recurse(rbtree, \ + rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \ + (ret = cb(rbtree, node, arg)) != NULL) { \ + return (ret); \ + } \ + return (a_prefix##reverse_iter_recurse(rbtree, \ + rbtn_left_get(a_type, a_field, node), cb, arg)); \ + } \ +} \ +a_attr a_type * \ +a_prefix##reverse_iter_start(a_rbt_type *rbtree, a_type *start, \ + a_type *node, a_type *(*cb)(a_rbt_type *, a_type *, void *), \ + void *arg) { \ + int cmp = a_cmp(start, node); \ + if (cmp > 0) { \ + a_type *ret; \ + if ((ret = a_prefix##reverse_iter_start(rbtree, start, \ + rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \ + (ret = cb(rbtree, node, arg)) != NULL) { \ + return (ret); \ + } \ + return (a_prefix##reverse_iter_recurse(rbtree, \ + rbtn_left_get(a_type, a_field, node), cb, arg)); \ + } else if (cmp < 0) { \ + return (a_prefix##reverse_iter_start(rbtree, start, \ + rbtn_left_get(a_type, a_field, node), cb, arg)); \ + } else { \ + a_type *ret; \ + if ((ret = cb(rbtree, node, arg)) != NULL) { \ + return (ret); \ + } \ + return (a_prefix##reverse_iter_recurse(rbtree, \ + rbtn_left_get(a_type, a_field, node), cb, arg)); \ + } \ +} \ +a_attr a_type * \ +a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ + a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ + a_type *ret; \ + if (start != NULL) { \ + ret = a_prefix##reverse_iter_start(rbtree, start, \ + rbtree->rbt_root, cb, arg); \ + } else { \ + ret = 
a_prefix##reverse_iter_recurse(rbtree, rbtree->rbt_root, \ + cb, arg); \ + } \ + return (ret); \ +} \ +a_attr void \ +a_prefix##destroy_recurse(a_rbt_type *rbtree, a_type *node, void (*cb)( \ + a_type *, void *), void *arg) { \ + if (node == NULL) { \ + return; \ + } \ + a_prefix##destroy_recurse(rbtree, rbtn_left_get(a_type, a_field, \ + node), cb, arg); \ + rbtn_left_set(a_type, a_field, (node), NULL); \ + a_prefix##destroy_recurse(rbtree, rbtn_right_get(a_type, a_field, \ + node), cb, arg); \ + rbtn_right_set(a_type, a_field, (node), NULL); \ + if (cb) { \ + cb(node, arg); \ + } \ +} \ +a_attr void \ +a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \ + void *arg) { \ + a_prefix##destroy_recurse(rbtree, rbtree->rbt_root, cb, arg); \ + rbtree->rbt_root = NULL; \ +} + +#endif /* RB_H_ */ diff --git a/sources/jemalloc/include-linux/jemalloc/internal/rtree.h b/sources/jemalloc/include-linux/jemalloc/internal/rtree.h new file mode 100755 index 00000000..8d0c584d --- /dev/null +++ b/sources/jemalloc/include-linux/jemalloc/internal/rtree.h @@ -0,0 +1,366 @@ +/* + * This radix tree implementation is tailored to the singular purpose of + * associating metadata with chunks that are currently owned by jemalloc. + * + ******************************************************************************* + */ +#ifdef JEMALLOC_H_TYPES + +typedef struct rtree_node_elm_s rtree_node_elm_t; +typedef struct rtree_level_s rtree_level_t; +typedef struct rtree_s rtree_t; + +/* + * RTREE_BITS_PER_LEVEL must be a power of two that is no larger than the + * machine address width. + */ +#define LG_RTREE_BITS_PER_LEVEL 4 +#define RTREE_BITS_PER_LEVEL (1U << LG_RTREE_BITS_PER_LEVEL) +/* Maximum rtree height. */ +#define RTREE_HEIGHT_MAX \ + ((1U << (LG_SIZEOF_PTR+3)) / RTREE_BITS_PER_LEVEL) + +/* Used for two-stage lock-free node initialization. */ +#define RTREE_NODE_INITIALIZING ((rtree_node_elm_t *)0x1) + +/* + * The node allocation callback function's argument is the number of contiguous + * rtree_node_elm_t structures to allocate, and the resulting memory must be + * zeroed. + */ +typedef rtree_node_elm_t *(rtree_node_alloc_t)(size_t); +typedef void (rtree_node_dalloc_t)(rtree_node_elm_t *); + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +struct rtree_node_elm_s { + union { + void *pun; + rtree_node_elm_t *child; + extent_node_t *val; + }; +}; + +struct rtree_level_s { + /* + * A non-NULL subtree points to a subtree rooted along the hypothetical + * path to the leaf node corresponding to key 0. Depending on what keys + * have been used to store to the tree, an arbitrary combination of + * subtree pointers may remain NULL. + * + * Suppose keys comprise 48 bits, and LG_RTREE_BITS_PER_LEVEL is 4. + * This results in a 3-level tree, and the leftmost leaf can be directly + * accessed via subtrees[2], the subtree prefixed by 0x0000 (excluding + * 0x00000000) can be accessed via subtrees[1], and the remainder of the + * tree can be accessed via subtrees[0]. + * + * levels[0] : [ | 0x0001******** | 0x0002******** | ...] + * + * levels[1] : [ | 0x00000001**** | 0x00000002**** | ... ] + * + * levels[2] : [val(0x000000000000) | val(0x000000000001) | ...] + * + * This has practical implications on x64, which currently uses only the + * lower 47 bits of virtual address space in userland, thus leaving + * subtrees[0] unused and avoiding a level of tree traversal. 
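+ * A lookup therefore starts below the top level for realistic keys;
+ * rtree_start_level() picks the starting level from the key's leading
+ * zero bits.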
+ */ + union { + void *subtree_pun; + rtree_node_elm_t *subtree; + }; + /* Number of key bits distinguished by this level. */ + unsigned bits; + /* + * Cumulative number of key bits distinguished by traversing to + * corresponding tree level. + */ + unsigned cumbits; +}; + +struct rtree_s { + rtree_node_alloc_t *alloc; + rtree_node_dalloc_t *dalloc; + unsigned height; + /* + * Precomputed table used to convert from the number of leading 0 key + * bits to which subtree level to start at. + */ + unsigned start_level[RTREE_HEIGHT_MAX]; + rtree_level_t levels[RTREE_HEIGHT_MAX]; +}; + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +bool rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc, + rtree_node_dalloc_t *dalloc); +void rtree_delete(rtree_t *rtree); +rtree_node_elm_t *rtree_subtree_read_hard(rtree_t *rtree, + unsigned level); +rtree_node_elm_t *rtree_child_read_hard(rtree_t *rtree, + rtree_node_elm_t *elm, unsigned level); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +unsigned rtree_start_level(rtree_t *rtree, uintptr_t key); +uintptr_t rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level); + +bool rtree_node_valid(rtree_node_elm_t *node); +rtree_node_elm_t *rtree_child_tryread(rtree_node_elm_t *elm, + bool dependent); +rtree_node_elm_t *rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm, + unsigned level, bool dependent); +extent_node_t *rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm, + bool dependent); +void rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm, + const extent_node_t *val); +rtree_node_elm_t *rtree_subtree_tryread(rtree_t *rtree, unsigned level, + bool dependent); +rtree_node_elm_t *rtree_subtree_read(rtree_t *rtree, unsigned level, + bool dependent); + +extent_node_t *rtree_get(rtree_t *rtree, uintptr_t key, bool dependent); +bool rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_)) +JEMALLOC_ALWAYS_INLINE unsigned +rtree_start_level(rtree_t *rtree, uintptr_t key) +{ + unsigned start_level; + + if (unlikely(key == 0)) + return (rtree->height - 1); + + start_level = rtree->start_level[lg_floor(key) >> + LG_RTREE_BITS_PER_LEVEL]; + assert(start_level < rtree->height); + return (start_level); +} + +JEMALLOC_ALWAYS_INLINE uintptr_t +rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level) +{ + + return ((key >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - + rtree->levels[level].cumbits)) & ((ZU(1) << + rtree->levels[level].bits) - 1)); +} + +JEMALLOC_ALWAYS_INLINE bool +rtree_node_valid(rtree_node_elm_t *node) +{ + + return ((uintptr_t)node > (uintptr_t)RTREE_NODE_INITIALIZING); +} + +JEMALLOC_ALWAYS_INLINE rtree_node_elm_t * +rtree_child_tryread(rtree_node_elm_t *elm, bool dependent) +{ + rtree_node_elm_t *child; + + /* Double-checked read (first read may be stale. 
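If it is, and the read is not dependent on a previous write, fall back to the synchronized atomic_read_p() below.)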
*/ + child = elm->child; + if (!dependent && !rtree_node_valid(child)) + child = atomic_read_p(&elm->pun); + assert(!dependent || child != NULL); + return (child); +} + +JEMALLOC_ALWAYS_INLINE rtree_node_elm_t * +rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level, + bool dependent) +{ + rtree_node_elm_t *child; + + child = rtree_child_tryread(elm, dependent); + if (!dependent && unlikely(!rtree_node_valid(child))) + child = rtree_child_read_hard(rtree, elm, level); + assert(!dependent || child != NULL); + return (child); +} + +JEMALLOC_ALWAYS_INLINE extent_node_t * +rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm, bool dependent) +{ + + if (dependent) { + /* + * Reading a val on behalf of a pointer to a valid allocation is + * guaranteed to be a clean read even without synchronization, + * because the rtree update became visible in memory before the + * pointer came into existence. + */ + return (elm->val); + } else { + /* + * An arbitrary read, e.g. on behalf of ivsalloc(), may not be + * dependent on a previous rtree write, which means a stale read + * could result if synchronization were omitted here. + */ + return (atomic_read_p(&elm->pun)); + } +} + +JEMALLOC_INLINE void +rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm, const extent_node_t *val) +{ + + atomic_write_p(&elm->pun, val); +} + +JEMALLOC_ALWAYS_INLINE rtree_node_elm_t * +rtree_subtree_tryread(rtree_t *rtree, unsigned level, bool dependent) +{ + rtree_node_elm_t *subtree; + + /* Double-checked read (first read may be stale. */ + subtree = rtree->levels[level].subtree; + if (!dependent && unlikely(!rtree_node_valid(subtree))) + subtree = atomic_read_p(&rtree->levels[level].subtree_pun); + assert(!dependent || subtree != NULL); + return (subtree); +} + +JEMALLOC_ALWAYS_INLINE rtree_node_elm_t * +rtree_subtree_read(rtree_t *rtree, unsigned level, bool dependent) +{ + rtree_node_elm_t *subtree; + + subtree = rtree_subtree_tryread(rtree, level, dependent); + if (!dependent && unlikely(!rtree_node_valid(subtree))) + subtree = rtree_subtree_read_hard(rtree, level); + assert(!dependent || subtree != NULL); + return (subtree); +} + +JEMALLOC_ALWAYS_INLINE extent_node_t * +rtree_get(rtree_t *rtree, uintptr_t key, bool dependent) +{ + uintptr_t subkey; + unsigned start_level; + rtree_node_elm_t *node; + + start_level = rtree_start_level(rtree, key); + + node = rtree_subtree_tryread(rtree, start_level, dependent); +#define RTREE_GET_BIAS (RTREE_HEIGHT_MAX - rtree->height) + switch (start_level + RTREE_GET_BIAS) { +#define RTREE_GET_SUBTREE(level) \ + case level: \ + assert(level < (RTREE_HEIGHT_MAX-1)); \ + if (!dependent && unlikely(!rtree_node_valid(node))) \ + return (NULL); \ + subkey = rtree_subkey(rtree, key, level - \ + RTREE_GET_BIAS); \ + node = rtree_child_tryread(&node[subkey], dependent); \ + /* Fall through. */ +#define RTREE_GET_LEAF(level) \ + case level: \ + assert(level == (RTREE_HEIGHT_MAX-1)); \ + if (!dependent && unlikely(!rtree_node_valid(node))) \ + return (NULL); \ + subkey = rtree_subkey(rtree, key, level - \ + RTREE_GET_BIAS); \ + /* \ + * node is a leaf, so it contains values rather than \ + * child pointers. 
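The final subkey thus indexes the element whose val field holds the extent_node_t for this chunk.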
\ + */ \ + return (rtree_val_read(rtree, &node[subkey], \ + dependent)); +#if RTREE_HEIGHT_MAX > 1 + RTREE_GET_SUBTREE(0) +#endif +#if RTREE_HEIGHT_MAX > 2 + RTREE_GET_SUBTREE(1) +#endif +#if RTREE_HEIGHT_MAX > 3 + RTREE_GET_SUBTREE(2) +#endif +#if RTREE_HEIGHT_MAX > 4 + RTREE_GET_SUBTREE(3) +#endif +#if RTREE_HEIGHT_MAX > 5 + RTREE_GET_SUBTREE(4) +#endif +#if RTREE_HEIGHT_MAX > 6 + RTREE_GET_SUBTREE(5) +#endif +#if RTREE_HEIGHT_MAX > 7 + RTREE_GET_SUBTREE(6) +#endif +#if RTREE_HEIGHT_MAX > 8 + RTREE_GET_SUBTREE(7) +#endif +#if RTREE_HEIGHT_MAX > 9 + RTREE_GET_SUBTREE(8) +#endif +#if RTREE_HEIGHT_MAX > 10 + RTREE_GET_SUBTREE(9) +#endif +#if RTREE_HEIGHT_MAX > 11 + RTREE_GET_SUBTREE(10) +#endif +#if RTREE_HEIGHT_MAX > 12 + RTREE_GET_SUBTREE(11) +#endif +#if RTREE_HEIGHT_MAX > 13 + RTREE_GET_SUBTREE(12) +#endif +#if RTREE_HEIGHT_MAX > 14 + RTREE_GET_SUBTREE(13) +#endif +#if RTREE_HEIGHT_MAX > 15 + RTREE_GET_SUBTREE(14) +#endif +#if RTREE_HEIGHT_MAX > 16 +# error Unsupported RTREE_HEIGHT_MAX +#endif + RTREE_GET_LEAF(RTREE_HEIGHT_MAX-1) +#undef RTREE_GET_SUBTREE +#undef RTREE_GET_LEAF + default: not_reached(); + } +#undef RTREE_GET_BIAS + not_reached(); +} + +JEMALLOC_INLINE bool +rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val) +{ + uintptr_t subkey; + unsigned i, start_level; + rtree_node_elm_t *node, *child; + + start_level = rtree_start_level(rtree, key); + + node = rtree_subtree_read(rtree, start_level, false); + if (node == NULL) + return (true); + for (i = start_level; /**/; i++, node = child) { + subkey = rtree_subkey(rtree, key, i); + if (i == rtree->height - 1) { + /* + * node is a leaf, so it contains values rather than + * child pointers. + */ + rtree_val_write(rtree, &node[subkey], val); + return (false); + } + assert(i + 1 < rtree->height); + child = rtree_child_read(rtree, &node[subkey], i, false); + if (child == NULL) + return (true); + } + not_reached(); +} +#endif + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ diff --git a/sources/jemalloc/include-linux/jemalloc/internal/size_classes.h b/sources/jemalloc/include-linux/jemalloc/internal/size_classes.h new file mode 100755 index 00000000..e4edc4bc --- /dev/null +++ b/sources/jemalloc/include-linux/jemalloc/internal/size_classes.h @@ -0,0 +1,1428 @@ +/* This file was automatically generated by size_classes.sh. */ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +/* + * This header requires LG_SIZEOF_PTR, LG_TINY_MIN, LG_QUANTUM, and LG_PAGE to + * be defined prior to inclusion, and it in turn defines: + * + * LG_SIZE_CLASS_GROUP: Lg of size class count for each size doubling. + * SIZE_CLASSES: Complete table of SC(index, lg_grp, lg_delta, ndelta, psz, + * bin, lg_delta_lookup) tuples. + * index: Size class index. + * lg_grp: Lg group base size (no deltas added). + * lg_delta: Lg delta to previous size class. + * ndelta: Delta multiplier. 
size == 1< 255) +# error "Too many small size classes" +#endif + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ diff --git a/sources/jemalloc/include-linux/jemalloc/internal/smoothstep.h b/sources/jemalloc/include-linux/jemalloc/internal/smoothstep.h new file mode 100755 index 00000000..c5333cca --- /dev/null +++ b/sources/jemalloc/include-linux/jemalloc/internal/smoothstep.h @@ -0,0 +1,246 @@ +/* + * This file was generated by the following command: + * sh smoothstep.sh smoother 200 24 3 15 + */ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +/* + * This header defines a precomputed table based on the smoothstep family of + * sigmoidal curves (https://en.wikipedia.org/wiki/Smoothstep) that grow from 0 + * to 1 in 0 <= x <= 1. The table is stored as integer fixed point values so + * that floating point math can be avoided. + * + * 3 2 + * smoothstep(x) = -2x + 3x + * + * 5 4 3 + * smootherstep(x) = 6x - 15x + 10x + * + * 7 6 5 4 + * smootheststep(x) = -20x + 70x - 84x + 35x + */ + +#define SMOOTHSTEP_VARIANT "smoother" +#define SMOOTHSTEP_NSTEPS 200 +#define SMOOTHSTEP_BFP 24 +#define SMOOTHSTEP \ + /* STEP(step, h, x, y) */ \ + STEP( 1, UINT64_C(0x0000000000000014), 0.005, 0.000001240643750) \ + STEP( 2, UINT64_C(0x00000000000000a5), 0.010, 0.000009850600000) \ + STEP( 3, UINT64_C(0x0000000000000229), 0.015, 0.000032995181250) \ + STEP( 4, UINT64_C(0x0000000000000516), 0.020, 0.000077619200000) \ + STEP( 5, UINT64_C(0x00000000000009dc), 0.025, 0.000150449218750) \ + STEP( 6, UINT64_C(0x00000000000010e8), 0.030, 0.000257995800000) \ + STEP( 7, UINT64_C(0x0000000000001aa4), 0.035, 0.000406555756250) \ + STEP( 8, UINT64_C(0x0000000000002777), 0.040, 0.000602214400000) \ + STEP( 9, UINT64_C(0x00000000000037c2), 0.045, 0.000850847793750) \ + STEP( 10, UINT64_C(0x0000000000004be6), 0.050, 0.001158125000000) \ + STEP( 11, UINT64_C(0x000000000000643c), 0.055, 0.001529510331250) \ + STEP( 12, UINT64_C(0x000000000000811f), 0.060, 0.001970265600000) \ + STEP( 13, UINT64_C(0x000000000000a2e2), 0.065, 0.002485452368750) \ + STEP( 14, UINT64_C(0x000000000000c9d8), 0.070, 0.003079934200000) \ + STEP( 15, UINT64_C(0x000000000000f64f), 0.075, 0.003758378906250) \ + STEP( 16, UINT64_C(0x0000000000012891), 0.080, 0.004525260800000) \ + STEP( 17, UINT64_C(0x00000000000160e7), 0.085, 0.005384862943750) \ + STEP( 18, UINT64_C(0x0000000000019f95), 0.090, 0.006341279400000) \ + STEP( 19, UINT64_C(0x000000000001e4dc), 0.095, 0.007398417481250) \ + STEP( 20, UINT64_C(0x00000000000230fc), 0.100, 0.008560000000000) \ + STEP( 21, UINT64_C(0x0000000000028430), 0.105, 0.009829567518750) \ + STEP( 22, UINT64_C(0x000000000002deb0), 0.110, 0.011210480600000) \ + STEP( 23, UINT64_C(0x00000000000340b1), 0.115, 0.012705922056250) \ + STEP( 24, UINT64_C(0x000000000003aa67), 0.120, 0.014318899200000) \ + STEP( 25, UINT64_C(0x0000000000041c00), 0.125, 0.016052246093750) \ + STEP( 26, UINT64_C(0x00000000000495a8), 0.130, 0.017908625800000) \ + STEP( 27, 
UINT64_C(0x000000000005178b), 0.135, 0.019890532631250) \ + STEP( 28, UINT64_C(0x000000000005a1cf), 0.140, 0.022000294400000) \ + STEP( 29, UINT64_C(0x0000000000063498), 0.145, 0.024240074668750) \ + STEP( 30, UINT64_C(0x000000000006d009), 0.150, 0.026611875000000) \ + STEP( 31, UINT64_C(0x000000000007743f), 0.155, 0.029117537206250) \ + STEP( 32, UINT64_C(0x0000000000082157), 0.160, 0.031758745600000) \ + STEP( 33, UINT64_C(0x000000000008d76b), 0.165, 0.034537029243750) \ + STEP( 34, UINT64_C(0x0000000000099691), 0.170, 0.037453764200000) \ + STEP( 35, UINT64_C(0x00000000000a5edf), 0.175, 0.040510175781250) \ + STEP( 36, UINT64_C(0x00000000000b3067), 0.180, 0.043707340800000) \ + STEP( 37, UINT64_C(0x00000000000c0b38), 0.185, 0.047046189818750) \ + STEP( 38, UINT64_C(0x00000000000cef5e), 0.190, 0.050527509400000) \ + STEP( 39, UINT64_C(0x00000000000ddce6), 0.195, 0.054151944356250) \ + STEP( 40, UINT64_C(0x00000000000ed3d8), 0.200, 0.057920000000000) \ + STEP( 41, UINT64_C(0x00000000000fd439), 0.205, 0.061832044393750) \ + STEP( 42, UINT64_C(0x000000000010de0e), 0.210, 0.065888310600000) \ + STEP( 43, UINT64_C(0x000000000011f158), 0.215, 0.070088898931250) \ + STEP( 44, UINT64_C(0x0000000000130e17), 0.220, 0.074433779200000) \ + STEP( 45, UINT64_C(0x0000000000143448), 0.225, 0.078922792968750) \ + STEP( 46, UINT64_C(0x00000000001563e7), 0.230, 0.083555655800000) \ + STEP( 47, UINT64_C(0x0000000000169cec), 0.235, 0.088331959506250) \ + STEP( 48, UINT64_C(0x000000000017df4f), 0.240, 0.093251174400000) \ + STEP( 49, UINT64_C(0x0000000000192b04), 0.245, 0.098312651543750) \ + STEP( 50, UINT64_C(0x00000000001a8000), 0.250, 0.103515625000000) \ + STEP( 51, UINT64_C(0x00000000001bde32), 0.255, 0.108859214081250) \ + STEP( 52, UINT64_C(0x00000000001d458b), 0.260, 0.114342425600000) \ + STEP( 53, UINT64_C(0x00000000001eb5f8), 0.265, 0.119964156118750) \ + STEP( 54, UINT64_C(0x0000000000202f65), 0.270, 0.125723194200000) \ + STEP( 55, UINT64_C(0x000000000021b1bb), 0.275, 0.131618222656250) \ + STEP( 56, UINT64_C(0x0000000000233ce3), 0.280, 0.137647820800000) \ + STEP( 57, UINT64_C(0x000000000024d0c3), 0.285, 0.143810466693750) \ + STEP( 58, UINT64_C(0x0000000000266d40), 0.290, 0.150104539400000) \ + STEP( 59, UINT64_C(0x000000000028123d), 0.295, 0.156528321231250) \ + STEP( 60, UINT64_C(0x000000000029bf9c), 0.300, 0.163080000000000) \ + STEP( 61, UINT64_C(0x00000000002b753d), 0.305, 0.169757671268750) \ + STEP( 62, UINT64_C(0x00000000002d32fe), 0.310, 0.176559340600000) \ + STEP( 63, UINT64_C(0x00000000002ef8bc), 0.315, 0.183482925806250) \ + STEP( 64, UINT64_C(0x000000000030c654), 0.320, 0.190526259200000) \ + STEP( 65, UINT64_C(0x0000000000329b9f), 0.325, 0.197687089843750) \ + STEP( 66, UINT64_C(0x0000000000347875), 0.330, 0.204963085800000) \ + STEP( 67, UINT64_C(0x0000000000365cb0), 0.335, 0.212351836381250) \ + STEP( 68, UINT64_C(0x0000000000384825), 0.340, 0.219850854400000) \ + STEP( 69, UINT64_C(0x00000000003a3aa8), 0.345, 0.227457578418750) \ + STEP( 70, UINT64_C(0x00000000003c340f), 0.350, 0.235169375000000) \ + STEP( 71, UINT64_C(0x00000000003e342b), 0.355, 0.242983540956250) \ + STEP( 72, UINT64_C(0x0000000000403ace), 0.360, 0.250897305600000) \ + STEP( 73, UINT64_C(0x00000000004247c8), 0.365, 0.258907832993750) \ + STEP( 74, UINT64_C(0x0000000000445ae9), 0.370, 0.267012224200000) \ + STEP( 75, UINT64_C(0x0000000000467400), 0.375, 0.275207519531250) \ + STEP( 76, UINT64_C(0x00000000004892d8), 0.380, 0.283490700800000) \ + STEP( 77, UINT64_C(0x00000000004ab740), 0.385, 
0.291858693568750) \ + STEP( 78, UINT64_C(0x00000000004ce102), 0.390, 0.300308369400000) \ + STEP( 79, UINT64_C(0x00000000004f0fe9), 0.395, 0.308836548106250) \ + STEP( 80, UINT64_C(0x00000000005143bf), 0.400, 0.317440000000000) \ + STEP( 81, UINT64_C(0x0000000000537c4d), 0.405, 0.326115448143750) \ + STEP( 82, UINT64_C(0x000000000055b95b), 0.410, 0.334859570600000) \ + STEP( 83, UINT64_C(0x000000000057fab1), 0.415, 0.343669002681250) \ + STEP( 84, UINT64_C(0x00000000005a4015), 0.420, 0.352540339200000) \ + STEP( 85, UINT64_C(0x00000000005c894e), 0.425, 0.361470136718750) \ + STEP( 86, UINT64_C(0x00000000005ed622), 0.430, 0.370454915800000) \ + STEP( 87, UINT64_C(0x0000000000612655), 0.435, 0.379491163256250) \ + STEP( 88, UINT64_C(0x00000000006379ac), 0.440, 0.388575334400000) \ + STEP( 89, UINT64_C(0x000000000065cfeb), 0.445, 0.397703855293750) \ + STEP( 90, UINT64_C(0x00000000006828d6), 0.450, 0.406873125000000) \ + STEP( 91, UINT64_C(0x00000000006a842f), 0.455, 0.416079517831250) \ + STEP( 92, UINT64_C(0x00000000006ce1bb), 0.460, 0.425319385600000) \ + STEP( 93, UINT64_C(0x00000000006f413a), 0.465, 0.434589059868750) \ + STEP( 94, UINT64_C(0x000000000071a270), 0.470, 0.443884854200000) \ + STEP( 95, UINT64_C(0x000000000074051d), 0.475, 0.453203066406250) \ + STEP( 96, UINT64_C(0x0000000000766905), 0.480, 0.462539980800000) \ + STEP( 97, UINT64_C(0x000000000078cde7), 0.485, 0.471891870443750) \ + STEP( 98, UINT64_C(0x00000000007b3387), 0.490, 0.481254999400000) \ + STEP( 99, UINT64_C(0x00000000007d99a4), 0.495, 0.490625624981250) \ + STEP( 100, UINT64_C(0x0000000000800000), 0.500, 0.500000000000000) \ + STEP( 101, UINT64_C(0x000000000082665b), 0.505, 0.509374375018750) \ + STEP( 102, UINT64_C(0x000000000084cc78), 0.510, 0.518745000600000) \ + STEP( 103, UINT64_C(0x0000000000873218), 0.515, 0.528108129556250) \ + STEP( 104, UINT64_C(0x00000000008996fa), 0.520, 0.537460019200000) \ + STEP( 105, UINT64_C(0x00000000008bfae2), 0.525, 0.546796933593750) \ + STEP( 106, UINT64_C(0x00000000008e5d8f), 0.530, 0.556115145800000) \ + STEP( 107, UINT64_C(0x000000000090bec5), 0.535, 0.565410940131250) \ + STEP( 108, UINT64_C(0x0000000000931e44), 0.540, 0.574680614400000) \ + STEP( 109, UINT64_C(0x0000000000957bd0), 0.545, 0.583920482168750) \ + STEP( 110, UINT64_C(0x000000000097d729), 0.550, 0.593126875000000) \ + STEP( 111, UINT64_C(0x00000000009a3014), 0.555, 0.602296144706250) \ + STEP( 112, UINT64_C(0x00000000009c8653), 0.560, 0.611424665600000) \ + STEP( 113, UINT64_C(0x00000000009ed9aa), 0.565, 0.620508836743750) \ + STEP( 114, UINT64_C(0x0000000000a129dd), 0.570, 0.629545084200000) \ + STEP( 115, UINT64_C(0x0000000000a376b1), 0.575, 0.638529863281250) \ + STEP( 116, UINT64_C(0x0000000000a5bfea), 0.580, 0.647459660800000) \ + STEP( 117, UINT64_C(0x0000000000a8054e), 0.585, 0.656330997318750) \ + STEP( 118, UINT64_C(0x0000000000aa46a4), 0.590, 0.665140429400000) \ + STEP( 119, UINT64_C(0x0000000000ac83b2), 0.595, 0.673884551856250) \ + STEP( 120, UINT64_C(0x0000000000aebc40), 0.600, 0.682560000000000) \ + STEP( 121, UINT64_C(0x0000000000b0f016), 0.605, 0.691163451893750) \ + STEP( 122, UINT64_C(0x0000000000b31efd), 0.610, 0.699691630600000) \ + STEP( 123, UINT64_C(0x0000000000b548bf), 0.615, 0.708141306431250) \ + STEP( 124, UINT64_C(0x0000000000b76d27), 0.620, 0.716509299200000) \ + STEP( 125, UINT64_C(0x0000000000b98c00), 0.625, 0.724792480468750) \ + STEP( 126, UINT64_C(0x0000000000bba516), 0.630, 0.732987775800000) \ + STEP( 127, UINT64_C(0x0000000000bdb837), 0.635, 0.741092167006250) \ + 
STEP( 128, UINT64_C(0x0000000000bfc531), 0.640, 0.749102694400000) \ + STEP( 129, UINT64_C(0x0000000000c1cbd4), 0.645, 0.757016459043750) \ + STEP( 130, UINT64_C(0x0000000000c3cbf0), 0.650, 0.764830625000000) \ + STEP( 131, UINT64_C(0x0000000000c5c557), 0.655, 0.772542421581250) \ + STEP( 132, UINT64_C(0x0000000000c7b7da), 0.660, 0.780149145600000) \ + STEP( 133, UINT64_C(0x0000000000c9a34f), 0.665, 0.787648163618750) \ + STEP( 134, UINT64_C(0x0000000000cb878a), 0.670, 0.795036914200000) \ + STEP( 135, UINT64_C(0x0000000000cd6460), 0.675, 0.802312910156250) \ + STEP( 136, UINT64_C(0x0000000000cf39ab), 0.680, 0.809473740800000) \ + STEP( 137, UINT64_C(0x0000000000d10743), 0.685, 0.816517074193750) \ + STEP( 138, UINT64_C(0x0000000000d2cd01), 0.690, 0.823440659400000) \ + STEP( 139, UINT64_C(0x0000000000d48ac2), 0.695, 0.830242328731250) \ + STEP( 140, UINT64_C(0x0000000000d64063), 0.700, 0.836920000000000) \ + STEP( 141, UINT64_C(0x0000000000d7edc2), 0.705, 0.843471678768750) \ + STEP( 142, UINT64_C(0x0000000000d992bf), 0.710, 0.849895460600000) \ + STEP( 143, UINT64_C(0x0000000000db2f3c), 0.715, 0.856189533306250) \ + STEP( 144, UINT64_C(0x0000000000dcc31c), 0.720, 0.862352179200000) \ + STEP( 145, UINT64_C(0x0000000000de4e44), 0.725, 0.868381777343750) \ + STEP( 146, UINT64_C(0x0000000000dfd09a), 0.730, 0.874276805800000) \ + STEP( 147, UINT64_C(0x0000000000e14a07), 0.735, 0.880035843881250) \ + STEP( 148, UINT64_C(0x0000000000e2ba74), 0.740, 0.885657574400000) \ + STEP( 149, UINT64_C(0x0000000000e421cd), 0.745, 0.891140785918750) \ + STEP( 150, UINT64_C(0x0000000000e58000), 0.750, 0.896484375000000) \ + STEP( 151, UINT64_C(0x0000000000e6d4fb), 0.755, 0.901687348456250) \ + STEP( 152, UINT64_C(0x0000000000e820b0), 0.760, 0.906748825600000) \ + STEP( 153, UINT64_C(0x0000000000e96313), 0.765, 0.911668040493750) \ + STEP( 154, UINT64_C(0x0000000000ea9c18), 0.770, 0.916444344200000) \ + STEP( 155, UINT64_C(0x0000000000ebcbb7), 0.775, 0.921077207031250) \ + STEP( 156, UINT64_C(0x0000000000ecf1e8), 0.780, 0.925566220800000) \ + STEP( 157, UINT64_C(0x0000000000ee0ea7), 0.785, 0.929911101068750) \ + STEP( 158, UINT64_C(0x0000000000ef21f1), 0.790, 0.934111689400000) \ + STEP( 159, UINT64_C(0x0000000000f02bc6), 0.795, 0.938167955606250) \ + STEP( 160, UINT64_C(0x0000000000f12c27), 0.800, 0.942080000000000) \ + STEP( 161, UINT64_C(0x0000000000f22319), 0.805, 0.945848055643750) \ + STEP( 162, UINT64_C(0x0000000000f310a1), 0.810, 0.949472490600000) \ + STEP( 163, UINT64_C(0x0000000000f3f4c7), 0.815, 0.952953810181250) \ + STEP( 164, UINT64_C(0x0000000000f4cf98), 0.820, 0.956292659200000) \ + STEP( 165, UINT64_C(0x0000000000f5a120), 0.825, 0.959489824218750) \ + STEP( 166, UINT64_C(0x0000000000f6696e), 0.830, 0.962546235800000) \ + STEP( 167, UINT64_C(0x0000000000f72894), 0.835, 0.965462970756250) \ + STEP( 168, UINT64_C(0x0000000000f7dea8), 0.840, 0.968241254400000) \ + STEP( 169, UINT64_C(0x0000000000f88bc0), 0.845, 0.970882462793750) \ + STEP( 170, UINT64_C(0x0000000000f92ff6), 0.850, 0.973388125000000) \ + STEP( 171, UINT64_C(0x0000000000f9cb67), 0.855, 0.975759925331250) \ + STEP( 172, UINT64_C(0x0000000000fa5e30), 0.860, 0.977999705600000) \ + STEP( 173, UINT64_C(0x0000000000fae874), 0.865, 0.980109467368750) \ + STEP( 174, UINT64_C(0x0000000000fb6a57), 0.870, 0.982091374200000) \ + STEP( 175, UINT64_C(0x0000000000fbe400), 0.875, 0.983947753906250) \ + STEP( 176, UINT64_C(0x0000000000fc5598), 0.880, 0.985681100800000) \ + STEP( 177, UINT64_C(0x0000000000fcbf4e), 0.885, 0.987294077943750) \ + 
STEP( 178, UINT64_C(0x0000000000fd214f), 0.890, 0.988789519400000) \ + STEP( 179, UINT64_C(0x0000000000fd7bcf), 0.895, 0.990170432481250) \ + STEP( 180, UINT64_C(0x0000000000fdcf03), 0.900, 0.991440000000000) \ + STEP( 181, UINT64_C(0x0000000000fe1b23), 0.905, 0.992601582518750) \ + STEP( 182, UINT64_C(0x0000000000fe606a), 0.910, 0.993658720600000) \ + STEP( 183, UINT64_C(0x0000000000fe9f18), 0.915, 0.994615137056250) \ + STEP( 184, UINT64_C(0x0000000000fed76e), 0.920, 0.995474739200000) \ + STEP( 185, UINT64_C(0x0000000000ff09b0), 0.925, 0.996241621093750) \ + STEP( 186, UINT64_C(0x0000000000ff3627), 0.930, 0.996920065800000) \ + STEP( 187, UINT64_C(0x0000000000ff5d1d), 0.935, 0.997514547631250) \ + STEP( 188, UINT64_C(0x0000000000ff7ee0), 0.940, 0.998029734400000) \ + STEP( 189, UINT64_C(0x0000000000ff9bc3), 0.945, 0.998470489668750) \ + STEP( 190, UINT64_C(0x0000000000ffb419), 0.950, 0.998841875000000) \ + STEP( 191, UINT64_C(0x0000000000ffc83d), 0.955, 0.999149152206250) \ + STEP( 192, UINT64_C(0x0000000000ffd888), 0.960, 0.999397785600000) \ + STEP( 193, UINT64_C(0x0000000000ffe55b), 0.965, 0.999593444243750) \ + STEP( 194, UINT64_C(0x0000000000ffef17), 0.970, 0.999742004200000) \ + STEP( 195, UINT64_C(0x0000000000fff623), 0.975, 0.999849550781250) \ + STEP( 196, UINT64_C(0x0000000000fffae9), 0.980, 0.999922380800000) \ + STEP( 197, UINT64_C(0x0000000000fffdd6), 0.985, 0.999967004818750) \ + STEP( 198, UINT64_C(0x0000000000ffff5a), 0.990, 0.999990149400000) \ + STEP( 199, UINT64_C(0x0000000000ffffeb), 0.995, 0.999998759356250) \ + STEP( 200, UINT64_C(0x0000000001000000), 1.000, 1.000000000000000) \ + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ diff --git a/sources/jemalloc/include-linux/jemalloc/internal/spin.h b/sources/jemalloc/include-linux/jemalloc/internal/spin.h new file mode 100755 index 00000000..9ef5ceb9 --- /dev/null +++ b/sources/jemalloc/include-linux/jemalloc/internal/spin.h @@ -0,0 +1,51 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +typedef struct spin_s spin_t; + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +struct spin_s { + unsigned iteration; +}; + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +void spin_init(spin_t *spin); +void spin_adaptive(spin_t *spin); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_SPIN_C_)) +JEMALLOC_INLINE void +spin_init(spin_t *spin) +{ + + spin->iteration = 0; +} + +JEMALLOC_INLINE void +spin_adaptive(spin_t *spin) +{ + volatile uint64_t i; + + for (i = 0; i < (KQU(1) << spin->iteration); i++) + CPU_SPINWAIT; + + if (spin->iteration < 63) + spin->iteration++; +} + +#endif + +#endif /* 
JEMALLOC_H_INLINES */ +/******************************************************************************/ + diff --git a/sources/jemalloc/include-linux/jemalloc/internal/stats.h b/sources/jemalloc/include-linux/jemalloc/internal/stats.h new file mode 100755 index 00000000..04e7dae1 --- /dev/null +++ b/sources/jemalloc/include-linux/jemalloc/internal/stats.h @@ -0,0 +1,197 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +typedef struct tcache_bin_stats_s tcache_bin_stats_t; +typedef struct malloc_bin_stats_s malloc_bin_stats_t; +typedef struct malloc_large_stats_s malloc_large_stats_t; +typedef struct malloc_huge_stats_s malloc_huge_stats_t; +typedef struct arena_stats_s arena_stats_t; +typedef struct chunk_stats_s chunk_stats_t; + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +struct tcache_bin_stats_s { + /* + * Number of allocation requests that corresponded to the size of this + * bin. + */ + uint64_t nrequests; +}; + +struct malloc_bin_stats_s { + /* + * Total number of allocation/deallocation requests served directly by + * the bin. Note that tcache may allocate an object, then recycle it + * many times, resulting many increments to nrequests, but only one + * each to nmalloc and ndalloc. + */ + uint64_t nmalloc; + uint64_t ndalloc; + + /* + * Number of allocation requests that correspond to the size of this + * bin. This includes requests served by tcache, though tcache only + * periodically merges into this counter. + */ + uint64_t nrequests; + + /* + * Current number of regions of this size class, including regions + * currently cached by tcache. + */ + size_t curregs; + + /* Number of tcache fills from this bin. */ + uint64_t nfills; + + /* Number of tcache flushes to this bin. */ + uint64_t nflushes; + + /* Total number of runs created for this bin's size class. */ + uint64_t nruns; + + /* + * Total number of runs reused by extracting them from the runs tree for + * this bin's size class. + */ + uint64_t reruns; + + /* Current number of runs in this bin. */ + size_t curruns; +}; + +struct malloc_large_stats_s { + /* + * Total number of allocation/deallocation requests served directly by + * the arena. Note that tcache may allocate an object, then recycle it + * many times, resulting many increments to nrequests, but only one + * each to nmalloc and ndalloc. + */ + uint64_t nmalloc; + uint64_t ndalloc; + + /* + * Number of allocation requests that correspond to this size class. + * This includes requests served by tcache, though tcache only + * periodically merges into this counter. + */ + uint64_t nrequests; + + /* + * Current number of runs of this size class, including runs currently + * cached by tcache. + */ + size_t curruns; +}; + +struct malloc_huge_stats_s { + /* + * Total number of allocation/deallocation requests served directly by + * the arena. + */ + uint64_t nmalloc; + uint64_t ndalloc; + + /* Current number of (multi-)chunk allocations of this size class. */ + size_t curhchunks; +}; + +struct arena_stats_s { + /* Number of bytes currently mapped. */ + size_t mapped; + + /* + * Number of bytes currently retained as a side effect of munmap() being + * disabled/bypassed. Retained bytes are technically mapped (though + * always decommitted or purged), but they are excluded from the mapped + * statistic (above). 
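+ * Retained ranges can be recycled for later allocations without new mmap() calls.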
+ */ + size_t retained; + + /* + * Total number of purge sweeps, total number of madvise calls made, + * and total pages purged in order to keep dirty unused memory under + * control. + */ + uint64_t npurge; + uint64_t nmadvise; + uint64_t purged; + + /* + * Number of bytes currently mapped purely for metadata purposes, and + * number of bytes currently allocated for internal metadata. + */ + size_t metadata_mapped; + size_t metadata_allocated; /* Protected via atomic_*_z(). */ + + /* Per-size-category statistics. */ + size_t allocated_large; + uint64_t nmalloc_large; + uint64_t ndalloc_large; + uint64_t nrequests_large; + + size_t allocated_huge; + uint64_t nmalloc_huge; + uint64_t ndalloc_huge; + + /* One element for each large size class. */ + malloc_large_stats_t *lstats; + + /* One element for each huge size class. */ + malloc_huge_stats_t *hstats; +}; + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +extern bool opt_stats_print; + +extern size_t stats_cactive; + +void stats_print(void (*write)(void *, const char *), void *cbopaque, + const char *opts); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +size_t stats_cactive_get(void); +void stats_cactive_add(size_t size); +void stats_cactive_sub(size_t size); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_STATS_C_)) +JEMALLOC_INLINE size_t +stats_cactive_get(void) +{ + + return (atomic_read_z(&stats_cactive)); +} + +JEMALLOC_INLINE void +stats_cactive_add(size_t size) +{ + + assert(size > 0); + assert((size & chunksize_mask) == 0); + + atomic_add_z(&stats_cactive, size); +} + +JEMALLOC_INLINE void +stats_cactive_sub(size_t size) +{ + + assert(size > 0); + assert((size & chunksize_mask) == 0); + + atomic_sub_z(&stats_cactive, size); +} +#endif + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ diff --git a/sources/jemalloc/include-linux/jemalloc/internal/tcache.h b/sources/jemalloc/include-linux/jemalloc/internal/tcache.h new file mode 100755 index 00000000..5fe5ebfa --- /dev/null +++ b/sources/jemalloc/include-linux/jemalloc/internal/tcache.h @@ -0,0 +1,472 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +typedef struct tcache_bin_info_s tcache_bin_info_t; +typedef struct tcache_bin_s tcache_bin_t; +typedef struct tcache_s tcache_t; +typedef struct tcaches_s tcaches_t; + +/* + * tcache pointers close to NULL are used to encode state information that is + * used for two purposes: preventing thread caching on a per thread basis and + * cleaning up during thread shutdown. + */ +#define TCACHE_STATE_DISABLED ((tcache_t *)(uintptr_t)1) +#define TCACHE_STATE_REINCARNATED ((tcache_t *)(uintptr_t)2) +#define TCACHE_STATE_PURGATORY ((tcache_t *)(uintptr_t)3) +#define TCACHE_STATE_MAX TCACHE_STATE_PURGATORY + +/* + * Absolute minimum number of cache slots for each small bin. + */ +#define TCACHE_NSLOTS_SMALL_MIN 20 + +/* + * Absolute maximum number of cache slots for each small bin in the thread + * cache. This is an additional constraint beyond that imposed as: twice the + * number of regions per run for this size class. + * + * This constant must be an even number. + */ +#define TCACHE_NSLOTS_SMALL_MAX 200 + +/* Number of cache slots for large size classes. 
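Unlike the small-bin bounds above, this is a single fixed value for every large size class.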
*/ +#define TCACHE_NSLOTS_LARGE 20 + +/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */ +#define LG_TCACHE_MAXCLASS_DEFAULT 15 + +/* + * TCACHE_GC_SWEEP is the approximate number of allocation events between + * full GC sweeps. Integer rounding may cause the actual number to be + * slightly higher, since GC is performed incrementally. + */ +#define TCACHE_GC_SWEEP 8192 + +/* Number of tcache allocation/deallocation events between incremental GCs. */ +#define TCACHE_GC_INCR \ + ((TCACHE_GC_SWEEP / NBINS) + ((TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1)) + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +typedef enum { + tcache_enabled_false = 0, /* Enable cast to/from bool. */ + tcache_enabled_true = 1, + tcache_enabled_default = 2 +} tcache_enabled_t; + +/* + * Read-only information associated with each element of tcache_t's tbins array + * is stored separately, mainly to reduce memory usage. + */ +struct tcache_bin_info_s { + unsigned ncached_max; /* Upper limit on ncached. */ +}; + +struct tcache_bin_s { + tcache_bin_stats_t tstats; + int low_water; /* Min # cached since last GC. */ + unsigned lg_fill_div; /* Fill (ncached_max >> lg_fill_div). */ + unsigned ncached; /* # of cached objects. */ + /* + * To make use of adjacent cacheline prefetch, the items in the avail + * stack goes to higher address for newer allocations. avail points + * just above the available space, which means that + * avail[-ncached, ... -1] are available items and the lowest item will + * be allocated first. + */ + void **avail; /* Stack of available objects. */ +}; + +struct tcache_s { + ql_elm(tcache_t) link; /* Used for aggregating stats. */ + uint64_t prof_accumbytes;/* Cleared after arena_prof_accum(). */ + ticker_t gc_ticker; /* Drives incremental GC. */ + szind_t next_gc_bin; /* Next bin to GC. */ + tcache_bin_t tbins[1]; /* Dynamically sized. */ + /* + * The pointer stacks associated with tbins follow as a contiguous + * array. During tcache initialization, the avail pointer in each + * element of tbins is initialized to point to the proper offset within + * this array. + */ +}; + +/* Linkage for list of available (previously used) explicit tcache IDs. */ +struct tcaches_s { + union { + tcache_t *tcache; + tcaches_t *next; + }; +}; + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +extern bool opt_tcache; +extern ssize_t opt_lg_tcache_max; + +extern tcache_bin_info_t *tcache_bin_info; + +/* + * Number of tcache bins. There are NBINS small-object bins, plus 0 or more + * large-object bins. + */ +extern unsigned nhbins; + +/* Maximum cached size class. */ +extern size_t tcache_maxclass; + +/* + * Explicit tcaches, managed via the tcache.{create,flush,destroy} mallctls and + * usable via the MALLOCX_TCACHE() flag. The automatic per thread tcaches are + * completely disjoint from this data structure. tcaches starts off as a sparse + * array, so it has no physical memory footprint until individual pages are + * touched. This allows the entire array to be allocated the first time an + * explicit tcache is created without a disproportionate impact on memory usage. 
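+ * Unused entries are threaded onto a free list through the next field of
+ * tcaches_t so that explicit tcache IDs can be recycled.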
+ */ +extern tcaches_t *tcaches; + +size_t tcache_salloc(tsdn_t *tsdn, const void *ptr); +void tcache_event_hard(tsd_t *tsd, tcache_t *tcache); +void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, + tcache_bin_t *tbin, szind_t binind, bool *tcache_success); +void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin, + szind_t binind, unsigned rem); +void tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind, + unsigned rem, tcache_t *tcache); +void tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, + arena_t *oldarena, arena_t *newarena); +tcache_t *tcache_get_hard(tsd_t *tsd); +tcache_t *tcache_create(tsdn_t *tsdn, arena_t *arena); +void tcache_cleanup(tsd_t *tsd); +void tcache_enabled_cleanup(tsd_t *tsd); +void tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena); +bool tcaches_create(tsd_t *tsd, unsigned *r_ind); +void tcaches_flush(tsd_t *tsd, unsigned ind); +void tcaches_destroy(tsd_t *tsd, unsigned ind); +bool tcache_boot(tsdn_t *tsdn); +void tcache_prefork(tsdn_t *tsdn); +void tcache_postfork_parent(tsdn_t *tsdn); +void tcache_postfork_child(tsdn_t *tsdn); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +void tcache_event(tsd_t *tsd, tcache_t *tcache); +void tcache_flush(void); +bool tcache_enabled_get(void); +tcache_t *tcache_get(tsd_t *tsd, bool create); +void tcache_enabled_set(bool enabled); +void *tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success); +void *tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, + size_t size, szind_t ind, bool zero, bool slow_path); +void *tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, + size_t size, szind_t ind, bool zero, bool slow_path); +void tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, + szind_t binind, bool slow_path); +void tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, + size_t size, bool slow_path); +tcache_t *tcaches_get(tsd_t *tsd, unsigned ind); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_)) +JEMALLOC_INLINE void +tcache_flush(void) +{ + tsd_t *tsd; + + cassert(config_tcache); + + tsd = tsd_fetch(); + tcache_cleanup(tsd); +} + +JEMALLOC_INLINE bool +tcache_enabled_get(void) +{ + tsd_t *tsd; + tcache_enabled_t tcache_enabled; + + cassert(config_tcache); + + tsd = tsd_fetch(); + tcache_enabled = tsd_tcache_enabled_get(tsd); + if (tcache_enabled == tcache_enabled_default) { + tcache_enabled = (tcache_enabled_t)opt_tcache; + tsd_tcache_enabled_set(tsd, tcache_enabled); + } + + return ((bool)tcache_enabled); +} + +JEMALLOC_INLINE void +tcache_enabled_set(bool enabled) +{ + tsd_t *tsd; + tcache_enabled_t tcache_enabled; + + cassert(config_tcache); + + tsd = tsd_fetch(); + + tcache_enabled = (tcache_enabled_t)enabled; + tsd_tcache_enabled_set(tsd, tcache_enabled); + + if (!enabled) + tcache_cleanup(tsd); +} + +JEMALLOC_ALWAYS_INLINE tcache_t * +tcache_get(tsd_t *tsd, bool create) +{ + tcache_t *tcache; + + if (!config_tcache) + return (NULL); + + tcache = tsd_tcache_get(tsd); + if (!create) + return (tcache); + if (unlikely(tcache == NULL) && tsd_nominal(tsd)) { + tcache = tcache_get_hard(tsd); + tsd_tcache_set(tsd, tcache); + } + + return (tcache); +} + +JEMALLOC_ALWAYS_INLINE void +tcache_event(tsd_t *tsd, tcache_t *tcache) +{ + + if (TCACHE_GC_INCR == 0) + return; + + if (unlikely(ticker_tick(&tcache->gc_ticker))) + 
tcache_event_hard(tsd, tcache); +} + +JEMALLOC_ALWAYS_INLINE void * +tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success) +{ + void *ret; + + if (unlikely(tbin->ncached == 0)) { + tbin->low_water = -1; + *tcache_success = false; + return (NULL); + } + /* + * tcache_success (instead of ret) should be checked upon the return of + * this function. We avoid checking (ret == NULL) because there is + * never a null stored on the avail stack (which is unknown to the + * compiler), and eagerly checking ret would cause pipeline stall + * (waiting for the cacheline). + */ + *tcache_success = true; + ret = *(tbin->avail - tbin->ncached); + tbin->ncached--; + + if (unlikely((int)tbin->ncached < tbin->low_water)) + tbin->low_water = tbin->ncached; + + return (ret); +} + +JEMALLOC_ALWAYS_INLINE void * +tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size, + szind_t binind, bool zero, bool slow_path) +{ + void *ret; + tcache_bin_t *tbin; + bool tcache_success; + size_t usize JEMALLOC_CC_SILENCE_INIT(0); + + assert(binind < NBINS); + tbin = &tcache->tbins[binind]; + ret = tcache_alloc_easy(tbin, &tcache_success); + assert(tcache_success == (ret != NULL)); + if (unlikely(!tcache_success)) { + bool tcache_hard_success; + arena = arena_choose(tsd, arena); + if (unlikely(arena == NULL)) + return (NULL); + + ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache, + tbin, binind, &tcache_hard_success); + if (tcache_hard_success == false) + return (NULL); + } + + assert(ret); + /* + * Only compute usize if required. The checks in the following if + * statement are all static. + */ + if (config_prof || (slow_path && config_fill) || unlikely(zero)) { + usize = index2size(binind); + assert(tcache_salloc(tsd_tsdn(tsd), ret) == usize); + } + + if (likely(!zero)) { + if (slow_path && config_fill) { + if (unlikely(opt_junk_alloc)) { + arena_alloc_junk_small(ret, + &arena_bin_info[binind], false); + } else if (unlikely(opt_zero)) + memset(ret, 0, usize); + } + } else { + if (slow_path && config_fill && unlikely(opt_junk_alloc)) { + arena_alloc_junk_small(ret, &arena_bin_info[binind], + true); + } + memset(ret, 0, usize); + } + + if (config_stats) + tbin->tstats.nrequests++; + if (config_prof) + tcache->prof_accumbytes += usize; + tcache_event(tsd, tcache); + return (ret); +} + +JEMALLOC_ALWAYS_INLINE void * +tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size, + szind_t binind, bool zero, bool slow_path) +{ + void *ret; + tcache_bin_t *tbin; + bool tcache_success; + + assert(binind < nhbins); + tbin = &tcache->tbins[binind]; + ret = tcache_alloc_easy(tbin, &tcache_success); + assert(tcache_success == (ret != NULL)); + if (unlikely(!tcache_success)) { + /* + * Only allocate one large object at a time, because it's quite + * expensive to create one and not use it. 
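+ * On a miss the object therefore comes straight from the arena rather
+ * than from a bin refill.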
+ */ + arena = arena_choose(tsd, arena); + if (unlikely(arena == NULL)) + return (NULL); + + ret = arena_malloc_large(tsd_tsdn(tsd), arena, binind, zero); + if (ret == NULL) + return (NULL); + } else { + size_t usize JEMALLOC_CC_SILENCE_INIT(0); + + /* Only compute usize on demand */ + if (config_prof || (slow_path && config_fill) || + unlikely(zero)) { + usize = index2size(binind); + assert(usize <= tcache_maxclass); + } + + if (config_prof && usize == LARGE_MINCLASS) { + arena_chunk_t *chunk = + (arena_chunk_t *)CHUNK_ADDR2BASE(ret); + size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >> + LG_PAGE); + arena_mapbits_large_binind_set(chunk, pageind, + BININD_INVALID); + } + if (likely(!zero)) { + if (slow_path && config_fill) { + if (unlikely(opt_junk_alloc)) { + memset(ret, JEMALLOC_ALLOC_JUNK, + usize); + } else if (unlikely(opt_zero)) + memset(ret, 0, usize); + } + } else + memset(ret, 0, usize); + + if (config_stats) + tbin->tstats.nrequests++; + if (config_prof) + tcache->prof_accumbytes += usize; + } + + tcache_event(tsd, tcache); + return (ret); +} + +JEMALLOC_ALWAYS_INLINE void +tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind, + bool slow_path) +{ + tcache_bin_t *tbin; + tcache_bin_info_t *tbin_info; + + assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= SMALL_MAXCLASS); + + if (slow_path && config_fill && unlikely(opt_junk_free)) + arena_dalloc_junk_small(ptr, &arena_bin_info[binind]); + + tbin = &tcache->tbins[binind]; + tbin_info = &tcache_bin_info[binind]; + if (unlikely(tbin->ncached == tbin_info->ncached_max)) { + tcache_bin_flush_small(tsd, tcache, tbin, binind, + (tbin_info->ncached_max >> 1)); + } + assert(tbin->ncached < tbin_info->ncached_max); + tbin->ncached++; + *(tbin->avail - tbin->ncached) = ptr; + + tcache_event(tsd, tcache); +} + +JEMALLOC_ALWAYS_INLINE void +tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size, + bool slow_path) +{ + szind_t binind; + tcache_bin_t *tbin; + tcache_bin_info_t *tbin_info; + + assert((size & PAGE_MASK) == 0); + assert(tcache_salloc(tsd_tsdn(tsd), ptr) > SMALL_MAXCLASS); + assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass); + + binind = size2index(size); + + if (slow_path && config_fill && unlikely(opt_junk_free)) + arena_dalloc_junk_large(ptr, size); + + tbin = &tcache->tbins[binind]; + tbin_info = &tcache_bin_info[binind]; + if (unlikely(tbin->ncached == tbin_info->ncached_max)) { + tcache_bin_flush_large(tsd, tbin, binind, + (tbin_info->ncached_max >> 1), tcache); + } + assert(tbin->ncached < tbin_info->ncached_max); + tbin->ncached++; + *(tbin->avail - tbin->ncached) = ptr; + + tcache_event(tsd, tcache); +} + +JEMALLOC_ALWAYS_INLINE tcache_t * +tcaches_get(tsd_t *tsd, unsigned ind) +{ + tcaches_t *elm = &tcaches[ind]; + if (unlikely(elm->tcache == NULL)) { + elm->tcache = tcache_create(tsd_tsdn(tsd), arena_choose(tsd, + NULL)); + } + return (elm->tcache); +} +#endif + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ diff --git a/sources/jemalloc/include-linux/jemalloc/internal/ticker.h b/sources/jemalloc/include-linux/jemalloc/internal/ticker.h new file mode 100755 index 00000000..4696e56d --- /dev/null +++ b/sources/jemalloc/include-linux/jemalloc/internal/ticker.h @@ -0,0 +1,75 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +typedef struct ticker_s ticker_t; + +#endif /* JEMALLOC_H_TYPES */ 
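For orientation, a minimal sketch (not part of the diff) of the intended usage pattern, mirroring how tcache_event() in tcache.h above drives incremental GC. The subsystem_* and incremental_maintenance() names are hypothetical; the ticker functions reproduce the semantics of the inline definitions that follow.

#include <stdbool.h>
#include <stdint.h>

typedef struct { int32_t tick; int32_t nticks; } ticker_t; /* As defined below. */

static void
ticker_init(ticker_t *t, int32_t nticks)
{
	t->tick = nticks;
	t->nticks = nticks;
}

static bool
ticker_tick(ticker_t *t)
{
	if (t->tick < 1) {
		t->tick = t->nticks; /* Rewind for the next period. */
		return (true);
	}
	t->tick--; /* Fast path: one branch plus a decrement. */
	return (false);
}

/* Hypothetical client: run maintenance roughly every 8192 events. */
static ticker_t gc_ticker;

void
subsystem_boot(void)
{
	ticker_init(&gc_ticker, 8192);
}

void
subsystem_event(void)
{
	if (ticker_tick(&gc_ticker)) {
		/* incremental_maintenance(); -- hypothetical hook, in the
		 * role tcache_event_hard() plays for tcache. */
	}
}

With nticks = N the tick counter runs down from N through 0, so ticker_tick() returns true once every N + 1 calls; the whole mechanism is plain arithmetic on per-thread state, with no atomics.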
+/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +struct ticker_s { + int32_t tick; + int32_t nticks; +}; + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +void ticker_init(ticker_t *ticker, int32_t nticks); +void ticker_copy(ticker_t *ticker, const ticker_t *other); +int32_t ticker_read(const ticker_t *ticker); +bool ticker_ticks(ticker_t *ticker, int32_t nticks); +bool ticker_tick(ticker_t *ticker); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TICKER_C_)) +JEMALLOC_INLINE void +ticker_init(ticker_t *ticker, int32_t nticks) +{ + + ticker->tick = nticks; + ticker->nticks = nticks; +} + +JEMALLOC_INLINE void +ticker_copy(ticker_t *ticker, const ticker_t *other) +{ + + *ticker = *other; +} + +JEMALLOC_INLINE int32_t +ticker_read(const ticker_t *ticker) +{ + + return (ticker->tick); +} + +JEMALLOC_INLINE bool +ticker_ticks(ticker_t *ticker, int32_t nticks) +{ + + if (unlikely(ticker->tick < nticks)) { + ticker->tick = ticker->nticks; + return (true); + } + ticker->tick -= nticks; + return(false); +} + +JEMALLOC_INLINE bool +ticker_tick(ticker_t *ticker) +{ + + return (ticker_ticks(ticker, 1)); +} +#endif + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ diff --git a/sources/jemalloc/include-linux/jemalloc/internal/tsd.h b/sources/jemalloc/include-linux/jemalloc/internal/tsd.h new file mode 100755 index 00000000..9f374335 --- /dev/null +++ b/sources/jemalloc/include-linux/jemalloc/internal/tsd.h @@ -0,0 +1,788 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +/* Maximum number of malloc_tsd users with cleanup functions. */ +#define MALLOC_TSD_CLEANUPS_MAX 2 + +typedef bool (*malloc_tsd_cleanup_t)(void); + +#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ + !defined(_WIN32)) +typedef struct tsd_init_block_s tsd_init_block_t; +typedef struct tsd_init_head_s tsd_init_head_t; +#endif + +typedef struct tsd_s tsd_t; +typedef struct tsdn_s tsdn_t; + +#define TSDN_NULL ((tsdn_t *)0) + +typedef enum { + tsd_state_uninitialized, + tsd_state_nominal, + tsd_state_purgatory, + tsd_state_reincarnated +} tsd_state_t; + +/* + * TLS/TSD-agnostic macro-based implementation of thread-specific data. There + * are five macros that support (at least) three use cases: file-private, + * library-private, and library-private inlined. 
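The five are malloc_tsd_types(), malloc_tsd_protos(), malloc_tsd_externs(), malloc_tsd_data(), and malloc_tsd_funcs().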
Following is an example + * library-private tsd variable: + * + * In example.h: + * typedef struct { + * int x; + * int y; + * } example_t; + * #define EX_INITIALIZER JEMALLOC_CONCAT({0, 0}) + * malloc_tsd_types(example_, example_t) + * malloc_tsd_protos(, example_, example_t) + * malloc_tsd_externs(example_, example_t) + * In example.c: + * malloc_tsd_data(, example_, example_t, EX_INITIALIZER) + * malloc_tsd_funcs(, example_, example_t, EX_INITIALIZER, + * example_tsd_cleanup) + * + * The result is a set of generated functions, e.g.: + * + * bool example_tsd_boot(void) {...} + * bool example_tsd_booted_get(void) {...} + * example_t *example_tsd_get(bool init) {...} + * void example_tsd_set(example_t *val) {...} + * + * Note that all of the functions deal in terms of (a_type *) rather than + * (a_type) so that it is possible to support non-pointer types (unlike + * pthreads TSD). example_tsd_cleanup() is passed an (a_type *) pointer that is + * cast to (void *). This means that the cleanup function needs to cast the + * function argument to (a_type *), then dereference the resulting pointer to + * access fields, e.g. + * + * void + * example_tsd_cleanup(void *arg) + * { + * example_t *example = (example_t *)arg; + * + * example->x = 42; + * [...] + * if ([want the cleanup function to be called again]) + * example_tsd_set(example); + * } + * + * If example_tsd_set() is called within example_tsd_cleanup(), it will be + * called again. This is similar to how pthreads TSD destruction works, except + * that pthreads only calls the cleanup function again if the value was set to + * non-NULL. + */ + +/* malloc_tsd_types(). */ +#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP +#define malloc_tsd_types(a_name, a_type) +#elif (defined(JEMALLOC_TLS)) +#define malloc_tsd_types(a_name, a_type) +#elif (defined(_WIN32)) +#define malloc_tsd_types(a_name, a_type) \ +typedef struct { \ + bool initialized; \ + a_type val; \ +} a_name##tsd_wrapper_t; +#else +#define malloc_tsd_types(a_name, a_type) \ +typedef struct { \ + bool initialized; \ + a_type val; \ +} a_name##tsd_wrapper_t; +#endif + +/* malloc_tsd_protos(). */ +#define malloc_tsd_protos(a_attr, a_name, a_type) \ +a_attr bool \ +a_name##tsd_boot0(void); \ +a_attr void \ +a_name##tsd_boot1(void); \ +a_attr bool \ +a_name##tsd_boot(void); \ +a_attr bool \ +a_name##tsd_booted_get(void); \ +a_attr a_type * \ +a_name##tsd_get(bool init); \ +a_attr void \ +a_name##tsd_set(a_type *val); + +/* malloc_tsd_externs(). */ +#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP +#define malloc_tsd_externs(a_name, a_type) \ +extern __thread a_type a_name##tsd_tls; \ +extern __thread bool a_name##tsd_initialized; \ +extern bool a_name##tsd_booted; +#elif (defined(JEMALLOC_TLS)) +#define malloc_tsd_externs(a_name, a_type) \ +extern __thread a_type a_name##tsd_tls; \ +extern pthread_key_t a_name##tsd_tsd; \ +extern bool a_name##tsd_booted; +#elif (defined(_WIN32)) +#define malloc_tsd_externs(a_name, a_type) \ +extern DWORD a_name##tsd_tsd; \ +extern a_name##tsd_wrapper_t a_name##tsd_boot_wrapper; \ +extern bool a_name##tsd_booted; +#else +#define malloc_tsd_externs(a_name, a_type) \ +extern pthread_key_t a_name##tsd_tsd; \ +extern tsd_init_head_t a_name##tsd_init_head; \ +extern a_name##tsd_wrapper_t a_name##tsd_boot_wrapper; \ +extern bool a_name##tsd_booted; +#endif + +/* malloc_tsd_data(). 
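Defines the variable's backing storage, one definition per TLS mechanism.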
*/ +#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP +#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ +a_attr __thread a_type JEMALLOC_TLS_MODEL \ + a_name##tsd_tls = a_initializer; \ +a_attr __thread bool JEMALLOC_TLS_MODEL \ + a_name##tsd_initialized = false; \ +a_attr bool a_name##tsd_booted = false; +#elif (defined(JEMALLOC_TLS)) +#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ +a_attr __thread a_type JEMALLOC_TLS_MODEL \ + a_name##tsd_tls = a_initializer; \ +a_attr pthread_key_t a_name##tsd_tsd; \ +a_attr bool a_name##tsd_booted = false; +#elif (defined(_WIN32)) +#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ +a_attr DWORD a_name##tsd_tsd; \ +a_attr a_name##tsd_wrapper_t a_name##tsd_boot_wrapper = { \ + false, \ + a_initializer \ +}; \ +a_attr bool a_name##tsd_booted = false; +#else +#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ +a_attr pthread_key_t a_name##tsd_tsd; \ +a_attr tsd_init_head_t a_name##tsd_init_head = { \ + ql_head_initializer(blocks), \ + MALLOC_MUTEX_INITIALIZER \ +}; \ +a_attr a_name##tsd_wrapper_t a_name##tsd_boot_wrapper = { \ + false, \ + a_initializer \ +}; \ +a_attr bool a_name##tsd_booted = false; +#endif + +/* malloc_tsd_funcs(). */ +#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP +#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ + a_cleanup) \ +/* Initialization/cleanup. */ \ +a_attr bool \ +a_name##tsd_cleanup_wrapper(void) \ +{ \ + \ + if (a_name##tsd_initialized) { \ + a_name##tsd_initialized = false; \ + a_cleanup(&a_name##tsd_tls); \ + } \ + return (a_name##tsd_initialized); \ +} \ +a_attr bool \ +a_name##tsd_boot0(void) \ +{ \ + \ + if (a_cleanup != malloc_tsd_no_cleanup) { \ + malloc_tsd_cleanup_register( \ + &a_name##tsd_cleanup_wrapper); \ + } \ + a_name##tsd_booted = true; \ + return (false); \ +} \ +a_attr void \ +a_name##tsd_boot1(void) \ +{ \ + \ + /* Do nothing. */ \ +} \ +a_attr bool \ +a_name##tsd_boot(void) \ +{ \ + \ + return (a_name##tsd_boot0()); \ +} \ +a_attr bool \ +a_name##tsd_booted_get(void) \ +{ \ + \ + return (a_name##tsd_booted); \ +} \ +a_attr bool \ +a_name##tsd_get_allocates(void) \ +{ \ + \ + return (false); \ +} \ +/* Get/set. */ \ +a_attr a_type * \ +a_name##tsd_get(bool init) \ +{ \ + \ + assert(a_name##tsd_booted); \ + return (&a_name##tsd_tls); \ +} \ +a_attr void \ +a_name##tsd_set(a_type *val) \ +{ \ + \ + assert(a_name##tsd_booted); \ + a_name##tsd_tls = (*val); \ + if (a_cleanup != malloc_tsd_no_cleanup) \ + a_name##tsd_initialized = true; \ +} +#elif (defined(JEMALLOC_TLS)) +#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ + a_cleanup) \ +/* Initialization/cleanup. */ \ +a_attr bool \ +a_name##tsd_boot0(void) \ +{ \ + \ + if (a_cleanup != malloc_tsd_no_cleanup) { \ + if (pthread_key_create(&a_name##tsd_tsd, a_cleanup) != \ + 0) \ + return (true); \ + } \ + a_name##tsd_booted = true; \ + return (false); \ +} \ +a_attr void \ +a_name##tsd_boot1(void) \ +{ \ + \ + /* Do nothing. */ \ +} \ +a_attr bool \ +a_name##tsd_boot(void) \ +{ \ + \ + return (a_name##tsd_boot0()); \ +} \ +a_attr bool \ +a_name##tsd_booted_get(void) \ +{ \ + \ + return (a_name##tsd_booted); \ +} \ +a_attr bool \ +a_name##tsd_get_allocates(void) \ +{ \ + \ + return (false); \ +} \ +/* Get/set. 
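Both operate directly on the __thread variable.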
*/ \
+a_attr a_type * \
+a_name##tsd_get(bool init) \
+{ \
+ \
+	assert(a_name##tsd_booted); \
+	return (&a_name##tsd_tls); \
+} \
+a_attr void \
+a_name##tsd_set(a_type *val) \
+{ \
+ \
+	assert(a_name##tsd_booted); \
+	a_name##tsd_tls = (*val); \
+	if (a_cleanup != malloc_tsd_no_cleanup) { \
+		if (pthread_setspecific(a_name##tsd_tsd, \
+		    (void *)(&a_name##tsd_tls))) { \
+			malloc_write("<jemalloc>: Error" \
+			    " setting TSD for "#a_name"\n"); \
+			if (opt_abort) \
+				abort(); \
+		} \
+	} \
+}
+#elif (defined(_WIN32))
+#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
+    a_cleanup) \
+/* Initialization/cleanup. */ \
+a_attr bool \
+a_name##tsd_cleanup_wrapper(void) \
+{ \
+	DWORD error = GetLastError(); \
+	a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \
+	    TlsGetValue(a_name##tsd_tsd); \
+	SetLastError(error); \
+ \
+	if (wrapper == NULL) \
+		return (false); \
+	if (a_cleanup != malloc_tsd_no_cleanup && \
+	    wrapper->initialized) { \
+		wrapper->initialized = false; \
+		a_cleanup(&wrapper->val); \
+		if (wrapper->initialized) { \
+			/* Trigger another cleanup round. */ \
+			return (true); \
+		} \
+	} \
+	malloc_tsd_dalloc(wrapper); \
+	return (false); \
+} \
+a_attr void \
+a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) \
+{ \
+ \
+	if (!TlsSetValue(a_name##tsd_tsd, (void *)wrapper)) { \
+		malloc_write("<jemalloc>: Error setting" \
+		    " TSD for "#a_name"\n"); \
+		abort(); \
+	} \
+} \
+a_attr a_name##tsd_wrapper_t * \
+a_name##tsd_wrapper_get(bool init) \
+{ \
+	DWORD error = GetLastError(); \
+	a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \
+	    TlsGetValue(a_name##tsd_tsd); \
+	SetLastError(error); \
+ \
+	if (init && unlikely(wrapper == NULL)) { \
+		wrapper = (a_name##tsd_wrapper_t *) \
+		    malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
+		if (wrapper == NULL) { \
+			malloc_write("<jemalloc>: Error allocating" \
+			    " TSD for "#a_name"\n"); \
+			abort(); \
+		} else { \
+			wrapper->initialized = false; \
+			wrapper->val = a_initializer; \
+		} \
+		a_name##tsd_wrapper_set(wrapper); \
+	} \
+	return (wrapper); \
+} \
+a_attr bool \
+a_name##tsd_boot0(void) \
+{ \
+ \
+	a_name##tsd_tsd = TlsAlloc(); \
+	if (a_name##tsd_tsd == TLS_OUT_OF_INDEXES) \
+		return (true); \
+	if (a_cleanup != malloc_tsd_no_cleanup) { \
+		malloc_tsd_cleanup_register( \
+		    &a_name##tsd_cleanup_wrapper); \
+	} \
+	a_name##tsd_wrapper_set(&a_name##tsd_boot_wrapper); \
+	a_name##tsd_booted = true; \
+	return (false); \
+} \
+a_attr void \
+a_name##tsd_boot1(void) \
+{ \
+	a_name##tsd_wrapper_t *wrapper; \
+	wrapper = (a_name##tsd_wrapper_t *) \
+	    malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
+	if (wrapper == NULL) { \
+		malloc_write("<jemalloc>: Error allocating" \
+		    " TSD for "#a_name"\n"); \
+		abort(); \
+	} \
+	memcpy(wrapper, &a_name##tsd_boot_wrapper, \
+	    sizeof(a_name##tsd_wrapper_t)); \
+	a_name##tsd_wrapper_set(wrapper); \
+} \
+a_attr bool \
+a_name##tsd_boot(void) \
+{ \
+ \
+	if (a_name##tsd_boot0()) \
+		return (true); \
+	a_name##tsd_boot1(); \
+	return (false); \
+} \
+a_attr bool \
+a_name##tsd_booted_get(void) \
+{ \
+ \
+	return (a_name##tsd_booted); \
+} \
+a_attr bool \
+a_name##tsd_get_allocates(void) \
+{ \
+ \
+	return (true); \
+} \
+/* Get/set.
*/ \
+a_attr a_type * \
+a_name##tsd_get(bool init) \
+{ \
+	a_name##tsd_wrapper_t *wrapper; \
+ \
+	assert(a_name##tsd_booted); \
+	wrapper = a_name##tsd_wrapper_get(init); \
+	if (a_name##tsd_get_allocates() && !init && wrapper == NULL) \
+		return (NULL); \
+	return (&wrapper->val); \
+} \
+a_attr void \
+a_name##tsd_set(a_type *val) \
+{ \
+	a_name##tsd_wrapper_t *wrapper; \
+ \
+	assert(a_name##tsd_booted); \
+	wrapper = a_name##tsd_wrapper_get(true); \
+	wrapper->val = *(val); \
+	if (a_cleanup != malloc_tsd_no_cleanup) \
+		wrapper->initialized = true; \
+}
+#else
+#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
+    a_cleanup) \
+/* Initialization/cleanup. */ \
+a_attr void \
+a_name##tsd_cleanup_wrapper(void *arg) \
+{ \
+	a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *)arg; \
+ \
+	if (a_cleanup != malloc_tsd_no_cleanup && \
+	    wrapper->initialized) { \
+		wrapper->initialized = false; \
+		a_cleanup(&wrapper->val); \
+		if (wrapper->initialized) { \
+			/* Trigger another cleanup round. */ \
+			if (pthread_setspecific(a_name##tsd_tsd, \
+			    (void *)wrapper)) { \
+				malloc_write("<jemalloc>: Error" \
+				    " setting TSD for "#a_name"\n"); \
+				if (opt_abort) \
+					abort(); \
+			} \
+			return; \
+		} \
+	} \
+	malloc_tsd_dalloc(wrapper); \
+} \
+a_attr void \
+a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) \
+{ \
+ \
+	if (pthread_setspecific(a_name##tsd_tsd, \
+	    (void *)wrapper)) { \
+		malloc_write("<jemalloc>: Error setting" \
+		    " TSD for "#a_name"\n"); \
+		abort(); \
+	} \
+} \
+a_attr a_name##tsd_wrapper_t * \
+a_name##tsd_wrapper_get(bool init) \
+{ \
+	a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \
+	    pthread_getspecific(a_name##tsd_tsd); \
+ \
+	if (init && unlikely(wrapper == NULL)) { \
+		tsd_init_block_t block; \
+		wrapper = (a_name##tsd_wrapper_t *) \
+		    tsd_init_check_recursion(&a_name##tsd_init_head, \
+		    &block); \
+		if (wrapper) \
+			return (wrapper); \
+		wrapper = (a_name##tsd_wrapper_t *) \
+		    malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
+		block.data = (void *)wrapper; \
+		if (wrapper == NULL) { \
+			malloc_write("<jemalloc>: Error allocating" \
+			    " TSD for "#a_name"\n"); \
+			abort(); \
+		} else { \
+			wrapper->initialized = false; \
+			wrapper->val = a_initializer; \
+		} \
+		a_name##tsd_wrapper_set(wrapper); \
+		tsd_init_finish(&a_name##tsd_init_head, &block); \
+	} \
+	return (wrapper); \
+} \
+a_attr bool \
+a_name##tsd_boot0(void) \
+{ \
+ \
+	if (pthread_key_create(&a_name##tsd_tsd, \
+	    a_name##tsd_cleanup_wrapper) != 0) \
+		return (true); \
+	a_name##tsd_wrapper_set(&a_name##tsd_boot_wrapper); \
+	a_name##tsd_booted = true; \
+	return (false); \
+} \
+a_attr void \
+a_name##tsd_boot1(void) \
+{ \
+	a_name##tsd_wrapper_t *wrapper; \
+	wrapper = (a_name##tsd_wrapper_t *) \
+	    malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
+	if (wrapper == NULL) { \
+		malloc_write("<jemalloc>: Error allocating" \
+		    " TSD for "#a_name"\n"); \
+		abort(); \
+	} \
+	memcpy(wrapper, &a_name##tsd_boot_wrapper, \
+	    sizeof(a_name##tsd_wrapper_t)); \
+	a_name##tsd_wrapper_set(wrapper); \
+} \
+a_attr bool \
+a_name##tsd_boot(void) \
+{ \
+ \
+	if (a_name##tsd_boot0()) \
+		return (true); \
+	a_name##tsd_boot1(); \
+	return (false); \
+} \
+a_attr bool \
+a_name##tsd_booted_get(void) \
+{ \
+ \
+	return (a_name##tsd_booted); \
+} \
+a_attr bool \
+a_name##tsd_get_allocates(void) \
+{ \
+ \
+	return (true); \
+} \
+/* Get/set.
*/ \ +a_attr a_type * \ +a_name##tsd_get(bool init) \ +{ \ + a_name##tsd_wrapper_t *wrapper; \ + \ + assert(a_name##tsd_booted); \ + wrapper = a_name##tsd_wrapper_get(init); \ + if (a_name##tsd_get_allocates() && !init && wrapper == NULL) \ + return (NULL); \ + return (&wrapper->val); \ +} \ +a_attr void \ +a_name##tsd_set(a_type *val) \ +{ \ + a_name##tsd_wrapper_t *wrapper; \ + \ + assert(a_name##tsd_booted); \ + wrapper = a_name##tsd_wrapper_get(true); \ + wrapper->val = *(val); \ + if (a_cleanup != malloc_tsd_no_cleanup) \ + wrapper->initialized = true; \ +} +#endif + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ + !defined(_WIN32)) +struct tsd_init_block_s { + ql_elm(tsd_init_block_t) link; + pthread_t thread; + void *data; +}; +struct tsd_init_head_s { + ql_head(tsd_init_block_t) blocks; + malloc_mutex_t lock; +}; +#endif + +#define MALLOC_TSD \ +/* O(name, type) */ \ + O(tcache, tcache_t *) \ + O(thread_allocated, uint64_t) \ + O(thread_deallocated, uint64_t) \ + O(prof_tdata, prof_tdata_t *) \ + O(iarena, arena_t *) \ + O(arena, arena_t *) \ + O(arenas_tdata, arena_tdata_t *) \ + O(narenas_tdata, unsigned) \ + O(arenas_tdata_bypass, bool) \ + O(tcache_enabled, tcache_enabled_t) \ + O(quarantine, quarantine_t *) \ + O(witnesses, witness_list_t) \ + O(witness_fork, bool) \ + +#define TSD_INITIALIZER { \ + tsd_state_uninitialized, \ + NULL, \ + 0, \ + 0, \ + NULL, \ + NULL, \ + NULL, \ + NULL, \ + 0, \ + false, \ + tcache_enabled_default, \ + NULL, \ + ql_head_initializer(witnesses), \ + false \ +} + +struct tsd_s { + tsd_state_t state; +#define O(n, t) \ + t n; +MALLOC_TSD +#undef O +}; + +/* + * Wrapper around tsd_t that makes it possible to avoid implicit conversion + * between tsd_t and tsdn_t, where tsdn_t is "nullable" and has to be + * explicitly converted to tsd_t, which is non-nullable. 
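+ *
+ * A minimal caller-side sketch (tsdn_fetch(), tsdn_null(), and tsdn_tsd()
+ * are declared in the inlines section below):
+ *
+ *   tsdn_t *tsdn = tsdn_fetch();
+ *   if (!tsdn_null(tsdn)) {
+ *           tsd_t *tsd = tsdn_tsd(tsdn);
+ *           ... use tsd ...
+ *   }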
+ */ +struct tsdn_s { + tsd_t tsd; +}; + +static const tsd_t tsd_initializer = TSD_INITIALIZER; + +malloc_tsd_types(, tsd_t) + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +void *malloc_tsd_malloc(size_t size); +void malloc_tsd_dalloc(void *wrapper); +void malloc_tsd_no_cleanup(void *arg); +void malloc_tsd_cleanup_register(bool (*f)(void)); +tsd_t *malloc_tsd_boot0(void); +void malloc_tsd_boot1(void); +#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ + !defined(_WIN32)) +void *tsd_init_check_recursion(tsd_init_head_t *head, + tsd_init_block_t *block); +void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block); +#endif +void tsd_cleanup(void *arg); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +malloc_tsd_protos(JEMALLOC_ATTR(unused), , tsd_t) + +tsd_t *tsd_fetch_impl(bool init); +tsd_t *tsd_fetch(void); +tsdn_t *tsd_tsdn(tsd_t *tsd); +bool tsd_nominal(tsd_t *tsd); +#define O(n, t) \ +t *tsd_##n##p_get(tsd_t *tsd); \ +t tsd_##n##_get(tsd_t *tsd); \ +void tsd_##n##_set(tsd_t *tsd, t n); +MALLOC_TSD +#undef O +tsdn_t *tsdn_fetch(void); +bool tsdn_null(const tsdn_t *tsdn); +tsd_t *tsdn_tsd(tsdn_t *tsdn); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TSD_C_)) +malloc_tsd_externs(, tsd_t) +malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, , tsd_t, tsd_initializer, tsd_cleanup) + +JEMALLOC_ALWAYS_INLINE tsd_t * +tsd_fetch_impl(bool init) +{ + tsd_t *tsd = tsd_get(init); + + if (!init && tsd_get_allocates() && tsd == NULL) + return (NULL); + assert(tsd != NULL); + + if (unlikely(tsd->state != tsd_state_nominal)) { + if (tsd->state == tsd_state_uninitialized) { + tsd->state = tsd_state_nominal; + /* Trigger cleanup handler registration. 
*/
+			tsd_set(tsd);
+		} else if (tsd->state == tsd_state_purgatory) {
+			tsd->state = tsd_state_reincarnated;
+			tsd_set(tsd);
+		} else
+			assert(tsd->state == tsd_state_reincarnated);
+	}
+
+	return (tsd);
+}
+
+JEMALLOC_ALWAYS_INLINE tsd_t *
+tsd_fetch(void)
+{
+
+	return (tsd_fetch_impl(true));
+}
+
+JEMALLOC_ALWAYS_INLINE tsdn_t *
+tsd_tsdn(tsd_t *tsd)
+{
+
+	return ((tsdn_t *)tsd);
+}
+
+JEMALLOC_INLINE bool
+tsd_nominal(tsd_t *tsd)
+{
+
+	return (tsd->state == tsd_state_nominal);
+}
+
+#define O(n, t) \
+JEMALLOC_ALWAYS_INLINE t * \
+tsd_##n##p_get(tsd_t *tsd) \
+{ \
+ \
+	return (&tsd->n); \
+} \
+ \
+JEMALLOC_ALWAYS_INLINE t \
+tsd_##n##_get(tsd_t *tsd) \
+{ \
+ \
+	return (*tsd_##n##p_get(tsd)); \
+} \
+ \
+JEMALLOC_ALWAYS_INLINE void \
+tsd_##n##_set(tsd_t *tsd, t n) \
+{ \
+ \
+	assert(tsd->state == tsd_state_nominal); \
+	tsd->n = n; \
+}
+MALLOC_TSD
+#undef O
+
+JEMALLOC_ALWAYS_INLINE tsdn_t *
+tsdn_fetch(void)
+{
+
+	if (!tsd_booted_get())
+		return (NULL);
+
+	return (tsd_tsdn(tsd_fetch_impl(false)));
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+tsdn_null(const tsdn_t *tsdn)
+{
+
+	return (tsdn == NULL);
+}
+
+JEMALLOC_ALWAYS_INLINE tsd_t *
+tsdn_tsd(tsdn_t *tsdn)
+{
+
+	assert(!tsdn_null(tsdn));
+
+	return (&tsdn->tsd);
+}
+#endif
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
diff --git a/sources/jemalloc/include-linux/jemalloc/internal/util.h b/sources/jemalloc/include-linux/jemalloc/internal/util.h
new file mode 100755
index 00000000..4b56d652
--- /dev/null
+++ b/sources/jemalloc/include-linux/jemalloc/internal/util.h
@@ -0,0 +1,342 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+#ifdef _WIN32
+# ifdef _WIN64
+# define FMT64_PREFIX "ll"
+# define FMTPTR_PREFIX "ll"
+# else
+# define FMT64_PREFIX "ll"
+# define FMTPTR_PREFIX ""
+# endif
+# define FMTd32 "d"
+# define FMTu32 "u"
+# define FMTx32 "x"
+# define FMTd64 FMT64_PREFIX "d"
+# define FMTu64 FMT64_PREFIX "u"
+# define FMTx64 FMT64_PREFIX "x"
+# define FMTdPTR FMTPTR_PREFIX "d"
+# define FMTuPTR FMTPTR_PREFIX "u"
+# define FMTxPTR FMTPTR_PREFIX "x"
+#else
+# include <inttypes.h>
+# define FMTd32 PRId32
+# define FMTu32 PRIu32
+# define FMTx32 PRIx32
+# define FMTd64 PRId64
+# define FMTu64 PRIu64
+# define FMTx64 PRIx64
+# define FMTdPTR PRIdPTR
+# define FMTuPTR PRIuPTR
+# define FMTxPTR PRIxPTR
+#endif
+
+/* Size of stack-allocated buffer passed to buferror(). */
+#define BUFERROR_BUF 64
+
+/*
+ * Size of stack-allocated buffer used by malloc_{,v,vc}printf().  This must be
+ * large enough for all possible uses within jemalloc.
+ */
+#define MALLOC_PRINTF_BUFSIZE 4096
+
+/* Junk fill patterns. */
+#ifndef JEMALLOC_ALLOC_JUNK
+# define JEMALLOC_ALLOC_JUNK ((uint8_t)0xa5)
+#endif
+#ifndef JEMALLOC_FREE_JUNK
+# define JEMALLOC_FREE_JUNK ((uint8_t)0x5a)
+#endif
+
+/*
+ * Wrap a cpp argument that contains commas such that it isn't broken up into
+ * multiple arguments.
+ */
+#define JEMALLOC_ARG_CONCAT(...) __VA_ARGS__
+
+/*
+ * Silence compiler warnings due to uninitialized values.  This is used
+ * wherever the compiler fails to recognize that the variable is never used
+ * uninitialized.
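+ *
+ * For example, a declaration such as
+ *
+ *   int ret JEMALLOC_CC_SILENCE_INIT(0);
+ *
+ * expands to "int ret = 0;" when JEMALLOC_CC_SILENCE is defined, and to a
+ * plain "int ret;" otherwise ("ret" is a hypothetical local variable).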
+ */ +#ifdef JEMALLOC_CC_SILENCE +# define JEMALLOC_CC_SILENCE_INIT(v) = v +#else +# define JEMALLOC_CC_SILENCE_INIT(v) +#endif + +#ifdef __GNUC__ +# define likely(x) __builtin_expect(!!(x), 1) +# define unlikely(x) __builtin_expect(!!(x), 0) +#else +# define likely(x) !!(x) +# define unlikely(x) !!(x) +#endif + +#if !defined(JEMALLOC_INTERNAL_UNREACHABLE) +# error JEMALLOC_INTERNAL_UNREACHABLE should have been defined by configure +#endif + +#define unreachable() JEMALLOC_INTERNAL_UNREACHABLE() + +#include "jemalloc/internal/assert.h" + +/* Use to assert a particular configuration, e.g., cassert(config_debug). */ +#define cassert(c) do { \ + if (unlikely(!(c))) \ + not_reached(); \ +} while (0) + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +int buferror(int err, char *buf, size_t buflen); +uintmax_t malloc_strtoumax(const char *restrict nptr, + char **restrict endptr, int base); +void malloc_write(const char *s); + +/* + * malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating + * point math. + */ +size_t malloc_vsnprintf(char *str, size_t size, const char *format, + va_list ap); +size_t malloc_snprintf(char *str, size_t size, const char *format, ...) + JEMALLOC_FORMAT_PRINTF(3, 4); +void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque, + const char *format, va_list ap); +void malloc_cprintf(void (*write)(void *, const char *), void *cbopaque, + const char *format, ...) JEMALLOC_FORMAT_PRINTF(3, 4); +void malloc_printf(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +unsigned ffs_llu(unsigned long long bitmap); +unsigned ffs_lu(unsigned long bitmap); +unsigned ffs_u(unsigned bitmap); +unsigned ffs_zu(size_t bitmap); +unsigned ffs_u64(uint64_t bitmap); +unsigned ffs_u32(uint32_t bitmap); +uint64_t pow2_ceil_u64(uint64_t x); +uint32_t pow2_ceil_u32(uint32_t x); +size_t pow2_ceil_zu(size_t x); +unsigned lg_floor(size_t x); +void set_errno(int errnum); +int get_errno(void); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_UTIL_C_)) + +/* Sanity check. 
*/ +#if !defined(JEMALLOC_INTERNAL_FFSLL) || !defined(JEMALLOC_INTERNAL_FFSL) \ + || !defined(JEMALLOC_INTERNAL_FFS) +# error JEMALLOC_INTERNAL_FFS{,L,LL} should have been defined by configure +#endif + +JEMALLOC_ALWAYS_INLINE unsigned +ffs_llu(unsigned long long bitmap) +{ + + return (JEMALLOC_INTERNAL_FFSLL(bitmap)); +} + +JEMALLOC_ALWAYS_INLINE unsigned +ffs_lu(unsigned long bitmap) +{ + + return (JEMALLOC_INTERNAL_FFSL(bitmap)); +} + +JEMALLOC_ALWAYS_INLINE unsigned +ffs_u(unsigned bitmap) +{ + + return (JEMALLOC_INTERNAL_FFS(bitmap)); +} + +JEMALLOC_ALWAYS_INLINE unsigned +ffs_zu(size_t bitmap) +{ + +#if LG_SIZEOF_PTR == LG_SIZEOF_INT + return (ffs_u(bitmap)); +#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG + return (ffs_lu(bitmap)); +#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG_LONG + return (ffs_llu(bitmap)); +#else +#error No implementation for size_t ffs() +#endif +} + +JEMALLOC_ALWAYS_INLINE unsigned +ffs_u64(uint64_t bitmap) +{ + +#if LG_SIZEOF_LONG == 3 + return (ffs_lu(bitmap)); +#elif LG_SIZEOF_LONG_LONG == 3 + return (ffs_llu(bitmap)); +#else +#error No implementation for 64-bit ffs() +#endif +} + +JEMALLOC_ALWAYS_INLINE unsigned +ffs_u32(uint32_t bitmap) +{ + +#if LG_SIZEOF_INT == 2 + return (ffs_u(bitmap)); +#else +#error No implementation for 32-bit ffs() +#endif + return (ffs_u(bitmap)); +} + +JEMALLOC_INLINE uint64_t +pow2_ceil_u64(uint64_t x) +{ + + x--; + x |= x >> 1; + x |= x >> 2; + x |= x >> 4; + x |= x >> 8; + x |= x >> 16; + x |= x >> 32; + x++; + return (x); +} + +JEMALLOC_INLINE uint32_t +pow2_ceil_u32(uint32_t x) +{ + + x--; + x |= x >> 1; + x |= x >> 2; + x |= x >> 4; + x |= x >> 8; + x |= x >> 16; + x++; + return (x); +} + +/* Compute the smallest power of 2 that is >= x. */ +JEMALLOC_INLINE size_t +pow2_ceil_zu(size_t x) +{ + +#if (LG_SIZEOF_PTR == 3) + return (pow2_ceil_u64(x)); +#else + return (pow2_ceil_u32(x)); +#endif +} + +#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__)) +JEMALLOC_INLINE unsigned +lg_floor(size_t x) +{ + size_t ret; + + assert(x != 0); + + asm ("bsr %1, %0" + : "=r"(ret) // Outputs. + : "r"(x) // Inputs. + ); + assert(ret < UINT_MAX); + return ((unsigned)ret); +} +#elif (defined(_MSC_VER)) +JEMALLOC_INLINE unsigned +lg_floor(size_t x) +{ + unsigned long ret; + + assert(x != 0); + +#if (LG_SIZEOF_PTR == 3) + _BitScanReverse64(&ret, x); +#elif (LG_SIZEOF_PTR == 2) + _BitScanReverse(&ret, x); +#else +# error "Unsupported type size for lg_floor()" +#endif + assert(ret < UINT_MAX); + return ((unsigned)ret); +} +#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ)) +JEMALLOC_INLINE unsigned +lg_floor(size_t x) +{ + + assert(x != 0); + +#if (LG_SIZEOF_PTR == LG_SIZEOF_INT) + return (((8 << LG_SIZEOF_PTR) - 1) - __builtin_clz(x)); +#elif (LG_SIZEOF_PTR == LG_SIZEOF_LONG) + return (((8 << LG_SIZEOF_PTR) - 1) - __builtin_clzl(x)); +#else +# error "Unsupported type size for lg_floor()" +#endif +} +#else +JEMALLOC_INLINE unsigned +lg_floor(size_t x) +{ + + assert(x != 0); + + x |= (x >> 1); + x |= (x >> 2); + x |= (x >> 4); + x |= (x >> 8); + x |= (x >> 16); +#if (LG_SIZEOF_PTR == 3) + x |= (x >> 32); +#endif + if (x == SIZE_T_MAX) + return ((8 << LG_SIZEOF_PTR) - 1); + x++; + return (ffs_zu(x) - 2); +} +#endif + +/* Set error code. */ +JEMALLOC_INLINE void +set_errno(int errnum) +{ + +#ifdef _WIN32 + SetLastError(errnum); +#else + errno = errnum; +#endif +} + +/* Get last error code. 
*/
+JEMALLOC_INLINE int
+get_errno(void)
+{
+
+#ifdef _WIN32
+	return (GetLastError());
+#else
+	return (errno);
+#endif
+}
+#endif
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
diff --git a/sources/jemalloc/include-linux/jemalloc/internal/valgrind.h b/sources/jemalloc/include-linux/jemalloc/internal/valgrind.h
new file mode 100755
index 00000000..877a142b
--- /dev/null
+++ b/sources/jemalloc/include-linux/jemalloc/internal/valgrind.h
@@ -0,0 +1,128 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+#ifdef JEMALLOC_VALGRIND
+#include <valgrind/valgrind.h>
+
+/*
+ * The size that is reported to Valgrind must be consistent through a chain of
+ * malloc..realloc..realloc calls.  Request size isn't recorded anywhere in
+ * jemalloc, so it is critical that all callers of these macros provide usize
+ * rather than request size.  As a result, buffer overflow detection is
+ * technically weakened for the standard API, though it is generally accepted
+ * practice to consider any extra bytes reported by malloc_usable_size() as
+ * usable space.
+ */
+#define JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(ptr, usize) do { \
+	if (unlikely(in_valgrind)) \
+		valgrind_make_mem_noaccess(ptr, usize); \
+} while (0)
+#define JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize) do { \
+	if (unlikely(in_valgrind)) \
+		valgrind_make_mem_undefined(ptr, usize); \
+} while (0)
+#define JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ptr, usize) do { \
+	if (unlikely(in_valgrind)) \
+		valgrind_make_mem_defined(ptr, usize); \
+} while (0)
+/*
+ * The VALGRIND_MALLOCLIKE_BLOCK() and VALGRIND_RESIZEINPLACE_BLOCK() macro
+ * calls must be embedded in macros rather than in functions so that when
+ * Valgrind reports errors, there are no extra stack frames in the backtraces.
+ */
+#define JEMALLOC_VALGRIND_MALLOC(cond, tsdn, ptr, usize, zero) do { \
+	if (unlikely(in_valgrind && cond)) { \
+		VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(tsdn, ptr), \
+		    zero); \
+	} \
+} while (0)
+#define JEMALLOC_VALGRIND_REALLOC_MOVED_no(ptr, old_ptr) \
+	(false)
+#define JEMALLOC_VALGRIND_REALLOC_MOVED_maybe(ptr, old_ptr) \
+	((ptr) != (old_ptr))
+#define JEMALLOC_VALGRIND_REALLOC_PTR_NULL_no(ptr) \
+	(false)
+#define JEMALLOC_VALGRIND_REALLOC_PTR_NULL_maybe(ptr) \
+	(ptr == NULL)
+#define JEMALLOC_VALGRIND_REALLOC_OLD_PTR_NULL_no(old_ptr) \
+	(false)
+#define JEMALLOC_VALGRIND_REALLOC_OLD_PTR_NULL_maybe(old_ptr) \
+	(old_ptr == NULL)
+#define JEMALLOC_VALGRIND_REALLOC(moved, tsdn, ptr, usize, ptr_null, \
+    old_ptr, old_usize, old_rzsize, old_ptr_null, zero) do { \
+	if (unlikely(in_valgrind)) { \
+		size_t rzsize = p2rz(tsdn, ptr); \
+ \
+		if (!JEMALLOC_VALGRIND_REALLOC_MOVED_##moved(ptr, \
+		    old_ptr)) { \
+			VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize, \
+			    usize, rzsize); \
+			if (zero && old_usize < usize) { \
+				valgrind_make_mem_defined( \
+				    (void *)((uintptr_t)ptr + \
+				    old_usize), usize - old_usize); \
+			} \
+		} else { \
+			if (!JEMALLOC_VALGRIND_REALLOC_OLD_PTR_NULL_## \
+			    old_ptr_null(old_ptr)) { \
+				valgrind_freelike_block(old_ptr, \
+				    old_rzsize); \
+			} \
+			if (!JEMALLOC_VALGRIND_REALLOC_PTR_NULL_## \
+			    ptr_null(ptr)) { \
+				size_t copy_size = (old_usize < usize) \
+				    ?
old_usize : usize; \ + size_t tail_size = usize - copy_size; \ + VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, \ + rzsize, false); \ + if (copy_size > 0) { \ + valgrind_make_mem_defined(ptr, \ + copy_size); \ + } \ + if (zero && tail_size > 0) { \ + valgrind_make_mem_defined( \ + (void *)((uintptr_t)ptr + \ + copy_size), tail_size); \ + } \ + } \ + } \ + } \ +} while (0) +#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do { \ + if (unlikely(in_valgrind)) \ + valgrind_freelike_block(ptr, rzsize); \ +} while (0) +#else +#define RUNNING_ON_VALGRIND ((unsigned)0) +#define JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(ptr, usize) do {} while (0) +#define JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize) do {} while (0) +#define JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ptr, usize) do {} while (0) +#define JEMALLOC_VALGRIND_MALLOC(cond, tsdn, ptr, usize, zero) do {} while (0) +#define JEMALLOC_VALGRIND_REALLOC(maybe_moved, tsdn, ptr, usize, \ + ptr_maybe_null, old_ptr, old_usize, old_rzsize, old_ptr_maybe_null, \ + zero) do {} while (0) +#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {} while (0) +#endif + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +#ifdef JEMALLOC_VALGRIND +void valgrind_make_mem_noaccess(void *ptr, size_t usize); +void valgrind_make_mem_undefined(void *ptr, size_t usize); +void valgrind_make_mem_defined(void *ptr, size_t usize); +void valgrind_freelike_block(void *ptr, size_t usize); +#endif + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ + diff --git a/sources/jemalloc/include-linux/jemalloc/internal/witness.h b/sources/jemalloc/include-linux/jemalloc/internal/witness.h new file mode 100755 index 00000000..30d8c7e9 --- /dev/null +++ b/sources/jemalloc/include-linux/jemalloc/internal/witness.h @@ -0,0 +1,304 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +typedef struct witness_s witness_t; +typedef unsigned witness_rank_t; +typedef ql_head(witness_t) witness_list_t; +typedef int witness_comp_t (const witness_t *, const witness_t *); + +/* + * Lock ranks. Witnesses with rank WITNESS_RANK_OMIT are completely ignored by + * the witness machinery. + */ +#define WITNESS_RANK_OMIT 0U + +#define WITNESS_RANK_MIN 1U + +#define WITNESS_RANK_INIT 1U +#define WITNESS_RANK_CTL 1U +#define WITNESS_RANK_TCACHES 2U +#define WITNESS_RANK_ARENAS 3U + +#define WITNESS_RANK_PROF_DUMP 4U +#define WITNESS_RANK_PROF_BT2GCTX 5U +#define WITNESS_RANK_PROF_TDATAS 6U +#define WITNESS_RANK_PROF_TDATA 7U +#define WITNESS_RANK_PROF_GCTX 8U + +/* + * Used as an argument to witness_assert_depth_to_rank() in order to validate + * depth excluding non-core locks with lower ranks. Since the rank argument to + * witness_assert_depth_to_rank() is inclusive rather than exclusive, this + * definition can have the same value as the minimally ranked core lock. 
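+ *
+ * For example, a thread that should hold exactly one core lock (and any
+ * number of lower-ranked locks) could assert:
+ *
+ *   witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 1);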
+ */
+#define WITNESS_RANK_CORE 9U
+
+#define WITNESS_RANK_ARENA 9U
+#define WITNESS_RANK_ARENA_CHUNKS 10U
+#define WITNESS_RANK_ARENA_NODE_CACHE 11U
+
+#define WITNESS_RANK_BASE 12U
+
+#define WITNESS_RANK_LEAF 0xffffffffU
+#define WITNESS_RANK_ARENA_BIN WITNESS_RANK_LEAF
+#define WITNESS_RANK_ARENA_HUGE WITNESS_RANK_LEAF
+#define WITNESS_RANK_DSS WITNESS_RANK_LEAF
+#define WITNESS_RANK_PROF_ACTIVE WITNESS_RANK_LEAF
+#define WITNESS_RANK_PROF_DUMP_SEQ WITNESS_RANK_LEAF
+#define WITNESS_RANK_PROF_GDUMP WITNESS_RANK_LEAF
+#define WITNESS_RANK_PROF_NEXT_THR_UID WITNESS_RANK_LEAF
+#define WITNESS_RANK_PROF_THREAD_ACTIVE_INIT WITNESS_RANK_LEAF
+
+#define WITNESS_INITIALIZER(rank) {"initializer", rank, NULL, {NULL, NULL}}
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+struct witness_s {
+	/* Name, used for printing lock order reversal messages. */
+	const char *name;
+
+	/*
+	 * Witness rank, where 0 is lowest and UINT_MAX is highest.  Witnesses
+	 * must be acquired in order of increasing rank.
+	 */
+	witness_rank_t rank;
+
+	/*
+	 * If two witnesses are of equal rank and they have the same comp
+	 * function pointer, it is called as a last attempt to differentiate
+	 * between witnesses of equal rank.
+	 */
+	witness_comp_t *comp;
+
+	/* Linkage for thread's currently owned locks. */
+	ql_elm(witness_t) link;
+};
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+void witness_init(witness_t *witness, const char *name, witness_rank_t rank,
+    witness_comp_t *comp);
+#ifdef JEMALLOC_JET
+typedef void (witness_lock_error_t)(const witness_list_t *, const witness_t *);
+extern witness_lock_error_t *witness_lock_error;
+#else
+void witness_lock_error(const witness_list_t *witnesses,
+    const witness_t *witness);
+#endif
+#ifdef JEMALLOC_JET
+typedef void (witness_owner_error_t)(const witness_t *);
+extern witness_owner_error_t *witness_owner_error;
+#else
+void witness_owner_error(const witness_t *witness);
+#endif
+#ifdef JEMALLOC_JET
+typedef void (witness_not_owner_error_t)(const witness_t *);
+extern witness_not_owner_error_t *witness_not_owner_error;
+#else
+void witness_not_owner_error(const witness_t *witness);
+#endif
+#ifdef JEMALLOC_JET
+typedef void (witness_depth_error_t)(const witness_list_t *,
+    witness_rank_t rank_inclusive, unsigned depth);
+extern witness_depth_error_t *witness_depth_error;
+#else
+void witness_depth_error(const witness_list_t *witnesses,
+    witness_rank_t rank_inclusive, unsigned depth);
+#endif
+
+void witnesses_cleanup(tsd_t *tsd);
+void witness_fork_cleanup(tsd_t *tsd);
+void witness_prefork(tsd_t *tsd);
+void witness_postfork_parent(tsd_t *tsd);
+void witness_postfork_child(tsd_t *tsd);
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#ifndef JEMALLOC_ENABLE_INLINE
+bool witness_owner(tsd_t *tsd, const witness_t *witness);
+void witness_assert_owner(tsdn_t *tsdn, const witness_t *witness);
+void witness_assert_not_owner(tsdn_t *tsdn, const witness_t *witness);
+void witness_assert_depth_to_rank(tsdn_t *tsdn, witness_rank_t rank_inclusive,
+    unsigned depth);
+void witness_assert_depth(tsdn_t *tsdn, unsigned depth);
+void witness_assert_lockless(tsdn_t *tsdn);
+void witness_lock(tsdn_t *tsdn, witness_t *witness);
+void witness_unlock(tsdn_t *tsdn, witness_t *witness);
+#endif
+
+#if
(defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_)) +JEMALLOC_INLINE bool +witness_owner(tsd_t *tsd, const witness_t *witness) +{ + witness_list_t *witnesses; + witness_t *w; + + cassert(config_debug); + + witnesses = tsd_witnessesp_get(tsd); + ql_foreach(w, witnesses, link) { + if (w == witness) + return (true); + } + + return (false); +} + +JEMALLOC_INLINE void +witness_assert_owner(tsdn_t *tsdn, const witness_t *witness) +{ + tsd_t *tsd; + + if (!config_debug) + return; + + if (tsdn_null(tsdn)) + return; + tsd = tsdn_tsd(tsdn); + if (witness->rank == WITNESS_RANK_OMIT) + return; + + if (witness_owner(tsd, witness)) + return; + witness_owner_error(witness); +} + +JEMALLOC_INLINE void +witness_assert_not_owner(tsdn_t *tsdn, const witness_t *witness) +{ + tsd_t *tsd; + witness_list_t *witnesses; + witness_t *w; + + if (!config_debug) + return; + + if (tsdn_null(tsdn)) + return; + tsd = tsdn_tsd(tsdn); + if (witness->rank == WITNESS_RANK_OMIT) + return; + + witnesses = tsd_witnessesp_get(tsd); + ql_foreach(w, witnesses, link) { + if (w == witness) + witness_not_owner_error(witness); + } +} + +JEMALLOC_INLINE void +witness_assert_depth_to_rank(tsdn_t *tsdn, witness_rank_t rank_inclusive, + unsigned depth) { + tsd_t *tsd; + unsigned d; + witness_list_t *witnesses; + witness_t *w; + + if (!config_debug) + return; + + if (tsdn_null(tsdn)) + return; + tsd = tsdn_tsd(tsdn); + + d = 0; + witnesses = tsd_witnessesp_get(tsd); + w = ql_last(witnesses, link); + if (w != NULL) { + ql_reverse_foreach(w, witnesses, link) { + if (w->rank < rank_inclusive) { + break; + } + d++; + } + } + if (d != depth) + witness_depth_error(witnesses, rank_inclusive, depth); +} + +JEMALLOC_INLINE void +witness_assert_depth(tsdn_t *tsdn, unsigned depth) { + witness_assert_depth_to_rank(tsdn, WITNESS_RANK_MIN, depth); +} + +JEMALLOC_INLINE void +witness_assert_lockless(tsdn_t *tsdn) { + witness_assert_depth(tsdn, 0); +} + +JEMALLOC_INLINE void +witness_lock(tsdn_t *tsdn, witness_t *witness) +{ + tsd_t *tsd; + witness_list_t *witnesses; + witness_t *w; + + if (!config_debug) + return; + + if (tsdn_null(tsdn)) + return; + tsd = tsdn_tsd(tsdn); + if (witness->rank == WITNESS_RANK_OMIT) + return; + + witness_assert_not_owner(tsdn, witness); + + witnesses = tsd_witnessesp_get(tsd); + w = ql_last(witnesses, link); + if (w == NULL) { + /* No other locks; do nothing. */ + } else if (tsd_witness_fork_get(tsd) && w->rank <= witness->rank) { + /* Forking, and relaxed ranking satisfied. */ + } else if (w->rank > witness->rank) { + /* Not forking, rank order reversal. */ + witness_lock_error(witnesses, witness); + } else if (w->rank == witness->rank && (w->comp == NULL || w->comp != + witness->comp || w->comp(w, witness) > 0)) { + /* + * Missing/incompatible comparison function, or comparison + * function indicates rank order reversal. + */ + witness_lock_error(witnesses, witness); + } + + ql_elm_new(witness, link); + ql_tail_insert(witnesses, witness, link); +} + +JEMALLOC_INLINE void +witness_unlock(tsdn_t *tsdn, witness_t *witness) +{ + tsd_t *tsd; + witness_list_t *witnesses; + + if (!config_debug) + return; + + if (tsdn_null(tsdn)) + return; + tsd = tsdn_tsd(tsdn); + if (witness->rank == WITNESS_RANK_OMIT) + return; + + /* + * Check whether owner before removal, rather than relying on + * witness_assert_owner() to abort, so that unit tests can test this + * function's failure mode without causing undefined behavior. 
+ */
+	if (witness_owner(tsd, witness)) {
+		witnesses = tsd_witnessesp_get(tsd);
+		ql_remove(witnesses, witness, link);
+	} else
+		witness_assert_owner(tsdn, witness);
+}
+#endif
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
diff --git a/sources/jemalloc/include-linux/jemalloc/jemalloc.h b/sources/jemalloc/include-linux/jemalloc/jemalloc.h
new file mode 100755
index 00000000..2d0c0b17
--- /dev/null
+++ b/sources/jemalloc/include-linux/jemalloc/jemalloc.h
@@ -0,0 +1,386 @@
+#ifndef JEMALLOC_H_
+#define JEMALLOC_H_
+#ifdef __cplusplus
extern "C" {
+#endif
+
+/* Defined if __attribute__((...)) syntax is supported. */
+#define JEMALLOC_HAVE_ATTR
+
+/* Defined if alloc_size attribute is supported. */
+#define JEMALLOC_HAVE_ATTR_ALLOC_SIZE
+
+/* Defined if format(gnu_printf, ...) attribute is supported. */
+#define JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
+
+/* Defined if format(printf, ...) attribute is supported. */
+#define JEMALLOC_HAVE_ATTR_FORMAT_PRINTF
+
+/*
+ * Define overrides for non-standard allocator-related functions if they are
+ * present on the system.
+ */
+#define JEMALLOC_OVERRIDE_MEMALIGN
+#define JEMALLOC_OVERRIDE_VALLOC
+
+/*
+ * At least Linux omits the "const" in:
+ *
+ *   size_t malloc_usable_size(const void *ptr);
+ *
+ * Match the operating system's prototype.
+ */
+#define JEMALLOC_USABLE_SIZE_CONST
+
+/*
+ * If defined, specify throw() for the public function prototypes when compiling
+ * with C++.  The only justification for this is to match the prototypes that
+ * glibc defines.
+ */
+#define JEMALLOC_USE_CXX_THROW
+
+#ifdef _MSC_VER
+# ifdef _WIN64
+# define LG_SIZEOF_PTR_WIN 3
+# else
+# define LG_SIZEOF_PTR_WIN 2
+# endif
+#endif
+
+/* sizeof(void *) == 2^LG_SIZEOF_PTR. */
+#if (defined(__LP64__) && __LP64__)
+#define LG_SIZEOF_PTR 3
+#else
+#define LG_SIZEOF_PTR 2
+#endif
+
+/*
+ * Name mangling for public symbols is controlled by --with-mangling and
+ * --with-jemalloc-prefix.  With default settings the je_ prefix is stripped by
+ * these macro definitions.
+ */
+#ifndef JEMALLOC_NO_RENAME
+# define je_malloc_conf je_malloc_conf
+# define je_malloc_message je_malloc_message
+# define je_malloc je_malloc
+# define je_calloc je_calloc
+# define je_posix_memalign je_posix_memalign
+# define je_aligned_alloc je_aligned_alloc
+# define je_realloc je_realloc
+# define je_free je_free
+# define je_mallocx je_mallocx
+# define je_rallocx je_rallocx
+# define je_xallocx je_xallocx
+# define je_sallocx je_sallocx
+# define je_dallocx je_dallocx
+# define je_sdallocx je_sdallocx
+# define je_nallocx je_nallocx
+# define je_mallctl je_mallctl
+# define je_mallctlnametomib je_mallctlnametomib
+# define je_mallctlbymib je_mallctlbymib
+# define je_malloc_stats_print je_malloc_stats_print
+# define je_malloc_usable_size je_malloc_usable_size
+# define je_memalign je_memalign
+# define je_valloc je_valloc
+#endif
+
+#include <stdlib.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <limits.h>
+#include <strings.h>
+
+#define JEMALLOC_VERSION "0.0.0-0-g0000000000000000000000000000000000000000"
+#define JEMALLOC_VERSION_MAJOR 0
+#define JEMALLOC_VERSION_MINOR 0
+#define JEMALLOC_VERSION_BUGFIX 0
+#define JEMALLOC_VERSION_NREV 0
+#define JEMALLOC_VERSION_GID "0000000000000000000000000000000000000000"
+
+# define MALLOCX_LG_ALIGN(la) ((int)(la))
+# if LG_SIZEOF_PTR == 2
+# define MALLOCX_ALIGN(a) ((int)(ffs((int)(a))-1))
+# else
+# define MALLOCX_ALIGN(a) \
+    ((int)(((size_t)(a) < (size_t)INT_MAX) ?
ffs((int)(a))-1 : \ + ffs((int)(((size_t)(a))>>32))+31)) +# endif +# define MALLOCX_ZERO ((int)0x40) +/* + * Bias tcache index bits so that 0 encodes "automatic tcache management", and 1 + * encodes MALLOCX_TCACHE_NONE. + */ +# define MALLOCX_TCACHE(tc) ((int)(((tc)+2) << 8)) +# define MALLOCX_TCACHE_NONE MALLOCX_TCACHE(-1) +/* + * Bias arena index bits so that 0 encodes "use an automatically chosen arena". + */ +# define MALLOCX_ARENA(a) ((((int)(a))+1) << 20) + +#if defined(__cplusplus) && defined(JEMALLOC_USE_CXX_THROW) +# define JEMALLOC_CXX_THROW throw() +#else +# define JEMALLOC_CXX_THROW +#endif + +#if _MSC_VER +# define JEMALLOC_ATTR(s) +# define JEMALLOC_ALIGNED(s) __declspec(align(s)) +# define JEMALLOC_ALLOC_SIZE(s) +# define JEMALLOC_ALLOC_SIZE2(s1, s2) +# ifndef JEMALLOC_EXPORT +# ifdef DLLEXPORT +# define JEMALLOC_EXPORT __declspec(dllexport) +# else +# define JEMALLOC_EXPORT __declspec(dllimport) +# endif +# endif +# define JEMALLOC_FORMAT_PRINTF(s, i) +# define JEMALLOC_NOINLINE __declspec(noinline) +# ifdef __cplusplus +# define JEMALLOC_NOTHROW __declspec(nothrow) +# else +# define JEMALLOC_NOTHROW +# endif +# define JEMALLOC_SECTION(s) __declspec(allocate(s)) +# define JEMALLOC_RESTRICT_RETURN __declspec(restrict) +# if _MSC_VER >= 1900 && !defined(__EDG__) +# define JEMALLOC_ALLOCATOR __declspec(allocator) +# else +# define JEMALLOC_ALLOCATOR +# endif +#elif defined(JEMALLOC_HAVE_ATTR) +# define JEMALLOC_ATTR(s) __attribute__((s)) +# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s)) +# ifdef JEMALLOC_HAVE_ATTR_ALLOC_SIZE +# define JEMALLOC_ALLOC_SIZE(s) JEMALLOC_ATTR(alloc_size(s)) +# define JEMALLOC_ALLOC_SIZE2(s1, s2) JEMALLOC_ATTR(alloc_size(s1, s2)) +# else +# define JEMALLOC_ALLOC_SIZE(s) +# define JEMALLOC_ALLOC_SIZE2(s1, s2) +# endif +# ifndef JEMALLOC_EXPORT +# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default")) +# endif +# ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF +# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i)) +# elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF) +# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(printf, s, i)) +# else +# define JEMALLOC_FORMAT_PRINTF(s, i) +# endif +# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline) +# define JEMALLOC_NOTHROW JEMALLOC_ATTR(nothrow) +# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s)) +# define JEMALLOC_RESTRICT_RETURN +# define JEMALLOC_ALLOCATOR +#else +# define JEMALLOC_ATTR(s) +# define JEMALLOC_ALIGNED(s) +# define JEMALLOC_ALLOC_SIZE(s) +# define JEMALLOC_ALLOC_SIZE2(s1, s2) +# define JEMALLOC_EXPORT +# define JEMALLOC_FORMAT_PRINTF(s, i) +# define JEMALLOC_NOINLINE +# define JEMALLOC_NOTHROW +# define JEMALLOC_SECTION(s) +# define JEMALLOC_RESTRICT_RETURN +# define JEMALLOC_ALLOCATOR +#endif + +/* + * The je_ prefix on the following public symbol declarations is an artifact + * of namespace management, and should be omitted in application code unless + * JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle.h). 
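+ *
+ * Note that the JEMALLOC_NO_RENAME block above maps each je_* name to itself
+ * in this generated header, so a caller linking against this build can use
+ * the prefixed names directly, e.g.:
+ *
+ *   void *p = je_malloc(64);
+ *   if (p != NULL)
+ *           je_free(p);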
+ */ +extern JEMALLOC_EXPORT const char *je_malloc_conf; +extern JEMALLOC_EXPORT void (*je_malloc_message)(void *cbopaque, + const char *s); + +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_malloc(size_t size) + JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1); +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_calloc(size_t num, size_t size) + JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2); +JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_posix_memalign(void **memptr, + size_t alignment, size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(nonnull(1)); +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_aligned_alloc(size_t alignment, + size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) + JEMALLOC_ALLOC_SIZE(2); +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_realloc(void *ptr, size_t size) + JEMALLOC_CXX_THROW JEMALLOC_ALLOC_SIZE(2); +JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_free(void *ptr) + JEMALLOC_CXX_THROW; + +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_mallocx(size_t size, int flags) + JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1); +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_rallocx(void *ptr, size_t size, + int flags) JEMALLOC_ALLOC_SIZE(2); +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_xallocx(void *ptr, size_t size, + size_t extra, int flags); +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_sallocx(const void *ptr, + int flags) JEMALLOC_ATTR(pure); +JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_dallocx(void *ptr, int flags); +JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_sdallocx(void *ptr, size_t size, + int flags); +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_nallocx(size_t size, int flags) + JEMALLOC_ATTR(pure); + +JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctl(const char *name, + void *oldp, size_t *oldlenp, void *newp, size_t newlen); +JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlnametomib(const char *name, + size_t *mibp, size_t *miblenp); +JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlbymib(const size_t *mib, + size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen); +JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_malloc_stats_print( + void (*write_cb)(void *, const char *), void *je_cbopaque, + const char *opts); +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_malloc_usable_size( + JEMALLOC_USABLE_SIZE_CONST void *ptr) JEMALLOC_CXX_THROW; + +#ifdef JEMALLOC_OVERRIDE_MEMALIGN +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_memalign(size_t alignment, size_t size) + JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc); +#endif + +#ifdef JEMALLOC_OVERRIDE_VALLOC +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_valloc(size_t size) JEMALLOC_CXX_THROW + JEMALLOC_ATTR(malloc); +#endif + +/* + * void * + * chunk_alloc(void *new_addr, size_t size, size_t alignment, bool *zero, + * bool *commit, unsigned arena_ind); + */ +typedef void *(chunk_alloc_t)(void *, size_t, size_t, bool *, bool *, unsigned); + +/* + * bool + * chunk_dalloc(void *chunk, size_t size, bool committed, unsigned arena_ind); + */ +typedef bool (chunk_dalloc_t)(void *, size_t, bool, unsigned); + +/* + * bool + * chunk_commit(void *chunk, size_t size, size_t offset, size_t length, + * unsigned arena_ind); + */ +typedef bool (chunk_commit_t)(void *, size_t, size_t, size_t, unsigned); + +/* 
+ * bool + * chunk_decommit(void *chunk, size_t size, size_t offset, size_t length, + * unsigned arena_ind); + */ +typedef bool (chunk_decommit_t)(void *, size_t, size_t, size_t, unsigned); + +/* + * bool + * chunk_purge(void *chunk, size_t size, size_t offset, size_t length, + * unsigned arena_ind); + */ +typedef bool (chunk_purge_t)(void *, size_t, size_t, size_t, unsigned); + +/* + * bool + * chunk_split(void *chunk, size_t size, size_t size_a, size_t size_b, + * bool committed, unsigned arena_ind); + */ +typedef bool (chunk_split_t)(void *, size_t, size_t, size_t, bool, unsigned); + +/* + * bool + * chunk_merge(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b, + * bool committed, unsigned arena_ind); + */ +typedef bool (chunk_merge_t)(void *, size_t, void *, size_t, bool, unsigned); + +typedef struct { + chunk_alloc_t *alloc; + chunk_dalloc_t *dalloc; + chunk_commit_t *commit; + chunk_decommit_t *decommit; + chunk_purge_t *purge; + chunk_split_t *split; + chunk_merge_t *merge; +} chunk_hooks_t; + +/* + * By default application code must explicitly refer to mangled symbol names, + * so that it is possible to use jemalloc in conjunction with another allocator + * in the same application. Define JEMALLOC_MANGLE in order to cause automatic + * name mangling that matches the API prefixing that happened as a result of + * --with-mangling and/or --with-jemalloc-prefix configuration settings. + */ +#ifdef JEMALLOC_MANGLE +# ifndef JEMALLOC_NO_DEMANGLE +# define JEMALLOC_NO_DEMANGLE +# endif +# define malloc_conf je_malloc_conf +# define malloc_message je_malloc_message +# define malloc je_malloc +# define calloc je_calloc +# define posix_memalign je_posix_memalign +# define aligned_alloc je_aligned_alloc +# define realloc je_realloc +# define free je_free +# define mallocx je_mallocx +# define rallocx je_rallocx +# define xallocx je_xallocx +# define sallocx je_sallocx +# define dallocx je_dallocx +# define sdallocx je_sdallocx +# define nallocx je_nallocx +# define mallctl je_mallctl +# define mallctlnametomib je_mallctlnametomib +# define mallctlbymib je_mallctlbymib +# define malloc_stats_print je_malloc_stats_print +# define malloc_usable_size je_malloc_usable_size +# define memalign je_memalign +# define valloc je_valloc +#endif + +/* + * The je_* macros can be used as stable alternative names for the + * public jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily + * meant for use in jemalloc itself, but it can be used by application code to + * provide isolation from the name mangling specified via --with-mangling + * and/or --with-jemalloc-prefix. + */ +#ifndef JEMALLOC_NO_DEMANGLE +# undef je_malloc_conf +# undef je_malloc_message +# undef je_malloc +# undef je_calloc +# undef je_posix_memalign +# undef je_aligned_alloc +# undef je_realloc +# undef je_free +# undef je_mallocx +# undef je_rallocx +# undef je_xallocx +# undef je_sallocx +# undef je_dallocx +# undef je_sdallocx +# undef je_nallocx +# undef je_mallctl +# undef je_mallctlnametomib +# undef je_mallctlbymib +# undef je_malloc_stats_print +# undef je_malloc_usable_size +# undef je_memalign +# undef je_valloc +#endif + +#ifdef __cplusplus +} +#endif +#endif /* JEMALLOC_H_ */ diff --git a/sources/jemalloc/include-linux/jemalloc/jemalloc_defs.h b/sources/jemalloc/include-linux/jemalloc/jemalloc_defs.h new file mode 100755 index 00000000..9a5948d5 --- /dev/null +++ b/sources/jemalloc/include-linux/jemalloc/jemalloc_defs.h @@ -0,0 +1,50 @@ +/* include/jemalloc/jemalloc_defs.h. 
Generated from jemalloc_defs.h.in by configure. */
+/* Defined if __attribute__((...)) syntax is supported. */
+#define JEMALLOC_HAVE_ATTR
+
+/* Defined if alloc_size attribute is supported. */
+#define JEMALLOC_HAVE_ATTR_ALLOC_SIZE
+
+/* Defined if format(gnu_printf, ...) attribute is supported. */
+#define JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
+
+/* Defined if format(printf, ...) attribute is supported. */
+#define JEMALLOC_HAVE_ATTR_FORMAT_PRINTF
+
+/*
+ * Define overrides for non-standard allocator-related functions if they are
+ * present on the system.
+ */
+#define JEMALLOC_OVERRIDE_MEMALIGN
+#define JEMALLOC_OVERRIDE_VALLOC
+
+/*
+ * At least Linux omits the "const" in:
+ *
+ *   size_t malloc_usable_size(const void *ptr);
+ *
+ * Match the operating system's prototype.
+ */
+#define JEMALLOC_USABLE_SIZE_CONST
+
+/*
+ * If defined, specify throw() for the public function prototypes when compiling
+ * with C++.  The only justification for this is to match the prototypes that
+ * glibc defines.
+ */
+#define JEMALLOC_USE_CXX_THROW
+
+#ifdef _MSC_VER
+# ifdef _WIN64
+# define LG_SIZEOF_PTR_WIN 3
+# else
+# define LG_SIZEOF_PTR_WIN 2
+# endif
+#endif
+
+/* sizeof(void *) == 2^LG_SIZEOF_PTR. */
+#if (defined(__LP64__) && __LP64__)
+#define LG_SIZEOF_PTR 3
+#else
+#define LG_SIZEOF_PTR 2
+#endif
diff --git a/sources/jemalloc/include-linux/jemalloc/jemalloc_macros.h b/sources/jemalloc/include-linux/jemalloc/jemalloc_macros.h
new file mode 100755
index 00000000..fdc318ef
--- /dev/null
+++ b/sources/jemalloc/include-linux/jemalloc/jemalloc_macros.h
@@ -0,0 +1,103 @@
+#include <stdlib.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <limits.h>
+#include <strings.h>
+
+#define JEMALLOC_VERSION "0.0.0-0-g0000000000000000000000000000000000000000"
+#define JEMALLOC_VERSION_MAJOR 0
+#define JEMALLOC_VERSION_MINOR 0
+#define JEMALLOC_VERSION_BUGFIX 0
+#define JEMALLOC_VERSION_NREV 0
+#define JEMALLOC_VERSION_GID "0000000000000000000000000000000000000000"
+
+# define MALLOCX_LG_ALIGN(la) ((int)(la))
+# if LG_SIZEOF_PTR == 2
+# define MALLOCX_ALIGN(a) ((int)(ffs((int)(a))-1))
+# else
+# define MALLOCX_ALIGN(a) \
+    ((int)(((size_t)(a) < (size_t)INT_MAX) ? ffs((int)(a))-1 : \
+    ffs((int)(((size_t)(a))>>32))+31))
+# endif
+# define MALLOCX_ZERO ((int)0x40)
+/*
+ * Bias tcache index bits so that 0 encodes "automatic tcache management", and 1
+ * encodes MALLOCX_TCACHE_NONE.
+ */
+# define MALLOCX_TCACHE(tc) ((int)(((tc)+2) << 8))
+# define MALLOCX_TCACHE_NONE MALLOCX_TCACHE(-1)
+/*
+ * Bias arena index bits so that 0 encodes "use an automatically chosen arena".
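+ *
+ * For example, a 64-byte-aligned, zero-filled allocation from an explicit
+ * arena could combine the flags as follows ("arena_ind" is a hypothetical
+ * arena index previously obtained via mallctl()):
+ *
+ *   void *p = je_mallocx(4096, MALLOCX_ALIGN(64) | MALLOCX_ZERO |
+ *       MALLOCX_ARENA(arena_ind));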
+ */ +# define MALLOCX_ARENA(a) ((((int)(a))+1) << 20) + +#if defined(__cplusplus) && defined(JEMALLOC_USE_CXX_THROW) +# define JEMALLOC_CXX_THROW throw() +#else +# define JEMALLOC_CXX_THROW +#endif + +#if _MSC_VER +# define JEMALLOC_ATTR(s) +# define JEMALLOC_ALIGNED(s) __declspec(align(s)) +# define JEMALLOC_ALLOC_SIZE(s) +# define JEMALLOC_ALLOC_SIZE2(s1, s2) +# ifndef JEMALLOC_EXPORT +# ifdef DLLEXPORT +# define JEMALLOC_EXPORT __declspec(dllexport) +# else +# define JEMALLOC_EXPORT __declspec(dllimport) +# endif +# endif +# define JEMALLOC_FORMAT_PRINTF(s, i) +# define JEMALLOC_NOINLINE __declspec(noinline) +# ifdef __cplusplus +# define JEMALLOC_NOTHROW __declspec(nothrow) +# else +# define JEMALLOC_NOTHROW +# endif +# define JEMALLOC_SECTION(s) __declspec(allocate(s)) +# define JEMALLOC_RESTRICT_RETURN __declspec(restrict) +# if _MSC_VER >= 1900 && !defined(__EDG__) +# define JEMALLOC_ALLOCATOR __declspec(allocator) +# else +# define JEMALLOC_ALLOCATOR +# endif +#elif defined(JEMALLOC_HAVE_ATTR) +# define JEMALLOC_ATTR(s) __attribute__((s)) +# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s)) +# ifdef JEMALLOC_HAVE_ATTR_ALLOC_SIZE +# define JEMALLOC_ALLOC_SIZE(s) JEMALLOC_ATTR(alloc_size(s)) +# define JEMALLOC_ALLOC_SIZE2(s1, s2) JEMALLOC_ATTR(alloc_size(s1, s2)) +# else +# define JEMALLOC_ALLOC_SIZE(s) +# define JEMALLOC_ALLOC_SIZE2(s1, s2) +# endif +# ifndef JEMALLOC_EXPORT +# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default")) +# endif +# ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF +# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i)) +# elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF) +# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(printf, s, i)) +# else +# define JEMALLOC_FORMAT_PRINTF(s, i) +# endif +# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline) +# define JEMALLOC_NOTHROW JEMALLOC_ATTR(nothrow) +# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s)) +# define JEMALLOC_RESTRICT_RETURN +# define JEMALLOC_ALLOCATOR +#else +# define JEMALLOC_ATTR(s) +# define JEMALLOC_ALIGNED(s) +# define JEMALLOC_ALLOC_SIZE(s) +# define JEMALLOC_ALLOC_SIZE2(s1, s2) +# define JEMALLOC_EXPORT +# define JEMALLOC_FORMAT_PRINTF(s, i) +# define JEMALLOC_NOINLINE +# define JEMALLOC_NOTHROW +# define JEMALLOC_SECTION(s) +# define JEMALLOC_RESTRICT_RETURN +# define JEMALLOC_ALLOCATOR +#endif diff --git a/sources/jemalloc/include-linux/jemalloc/jemalloc_mangle.h b/sources/jemalloc/include-linux/jemalloc/jemalloc_mangle.h new file mode 100755 index 00000000..34872e83 --- /dev/null +++ b/sources/jemalloc/include-linux/jemalloc/jemalloc_mangle.h @@ -0,0 +1,66 @@ +/* + * By default application code must explicitly refer to mangled symbol names, + * so that it is possible to use jemalloc in conjunction with another allocator + * in the same application. Define JEMALLOC_MANGLE in order to cause automatic + * name mangling that matches the API prefixing that happened as a result of + * --with-mangling and/or --with-jemalloc-prefix configuration settings. 
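+ *
+ * For example, defining the macro before including the public header
+ *
+ *   #define JEMALLOC_MANGLE
+ *   #include "jemalloc/jemalloc.h"
+ *
+ * causes a subsequent malloc(64) in application code to expand to
+ * je_malloc(64).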
+ */ +#ifdef JEMALLOC_MANGLE +# ifndef JEMALLOC_NO_DEMANGLE +# define JEMALLOC_NO_DEMANGLE +# endif +# define malloc_conf je_malloc_conf +# define malloc_message je_malloc_message +# define malloc je_malloc +# define calloc je_calloc +# define posix_memalign je_posix_memalign +# define aligned_alloc je_aligned_alloc +# define realloc je_realloc +# define free je_free +# define mallocx je_mallocx +# define rallocx je_rallocx +# define xallocx je_xallocx +# define sallocx je_sallocx +# define dallocx je_dallocx +# define sdallocx je_sdallocx +# define nallocx je_nallocx +# define mallctl je_mallctl +# define mallctlnametomib je_mallctlnametomib +# define mallctlbymib je_mallctlbymib +# define malloc_stats_print je_malloc_stats_print +# define malloc_usable_size je_malloc_usable_size +# define memalign je_memalign +# define valloc je_valloc +#endif + +/* + * The je_* macros can be used as stable alternative names for the + * public jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily + * meant for use in jemalloc itself, but it can be used by application code to + * provide isolation from the name mangling specified via --with-mangling + * and/or --with-jemalloc-prefix. + */ +#ifndef JEMALLOC_NO_DEMANGLE +# undef je_malloc_conf +# undef je_malloc_message +# undef je_malloc +# undef je_calloc +# undef je_posix_memalign +# undef je_aligned_alloc +# undef je_realloc +# undef je_free +# undef je_mallocx +# undef je_rallocx +# undef je_xallocx +# undef je_sallocx +# undef je_dallocx +# undef je_sdallocx +# undef je_nallocx +# undef je_mallctl +# undef je_mallctlnametomib +# undef je_mallctlbymib +# undef je_malloc_stats_print +# undef je_malloc_usable_size +# undef je_memalign +# undef je_valloc +#endif diff --git a/sources/jemalloc/include-linux/jemalloc/jemalloc_mangle_jet.h b/sources/jemalloc/include-linux/jemalloc/jemalloc_mangle_jet.h new file mode 100755 index 00000000..db5b7b0c --- /dev/null +++ b/sources/jemalloc/include-linux/jemalloc/jemalloc_mangle_jet.h @@ -0,0 +1,66 @@ +/* + * By default application code must explicitly refer to mangled symbol names, + * so that it is possible to use jemalloc in conjunction with another allocator + * in the same application. Define JEMALLOC_MANGLE in order to cause automatic + * name mangling that matches the API prefixing that happened as a result of + * --with-mangling and/or --with-jemalloc-prefix configuration settings. + */ +#ifdef JEMALLOC_MANGLE +# ifndef JEMALLOC_NO_DEMANGLE +# define JEMALLOC_NO_DEMANGLE +# endif +# define malloc_conf jet_malloc_conf +# define malloc_message jet_malloc_message +# define malloc jet_malloc +# define calloc jet_calloc +# define posix_memalign jet_posix_memalign +# define aligned_alloc jet_aligned_alloc +# define realloc jet_realloc +# define free jet_free +# define mallocx jet_mallocx +# define rallocx jet_rallocx +# define xallocx jet_xallocx +# define sallocx jet_sallocx +# define dallocx jet_dallocx +# define sdallocx jet_sdallocx +# define nallocx jet_nallocx +# define mallctl jet_mallctl +# define mallctlnametomib jet_mallctlnametomib +# define mallctlbymib jet_mallctlbymib +# define malloc_stats_print jet_malloc_stats_print +# define malloc_usable_size jet_malloc_usable_size +# define memalign jet_memalign +# define valloc jet_valloc +#endif + +/* + * The jet_* macros can be used as stable alternative names for the + * public jemalloc API if JEMALLOC_NO_DEMANGLE is defined. 
This is primarily + * meant for use in jemalloc itself, but it can be used by application code to + * provide isolation from the name mangling specified via --with-mangling + * and/or --with-jemalloc-prefix. + */ +#ifndef JEMALLOC_NO_DEMANGLE +# undef jet_malloc_conf +# undef jet_malloc_message +# undef jet_malloc +# undef jet_calloc +# undef jet_posix_memalign +# undef jet_aligned_alloc +# undef jet_realloc +# undef jet_free +# undef jet_mallocx +# undef jet_rallocx +# undef jet_xallocx +# undef jet_sallocx +# undef jet_dallocx +# undef jet_sdallocx +# undef jet_nallocx +# undef jet_mallctl +# undef jet_mallctlnametomib +# undef jet_mallctlbymib +# undef jet_malloc_stats_print +# undef jet_malloc_usable_size +# undef jet_memalign +# undef jet_valloc +#endif diff --git a/sources/jemalloc/include-linux/jemalloc/jemalloc_protos.h b/sources/jemalloc/include-linux/jemalloc/jemalloc_protos.h new file mode 100755 index 00000000..ff025e30 --- /dev/null +++ b/sources/jemalloc/include-linux/jemalloc/jemalloc_protos.h @@ -0,0 +1,66 @@ +/* + * The je_ prefix on the following public symbol declarations is an artifact + * of namespace management, and should be omitted in application code unless + * JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle.h). + */ +extern JEMALLOC_EXPORT const char *je_malloc_conf; +extern JEMALLOC_EXPORT void (*je_malloc_message)(void *cbopaque, + const char *s); + +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_malloc(size_t size) + JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1); +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_calloc(size_t num, size_t size) + JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2); +JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_posix_memalign(void **memptr, + size_t alignment, size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(nonnull(1)); +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_aligned_alloc(size_t alignment, + size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) + JEMALLOC_ALLOC_SIZE(2); +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_realloc(void *ptr, size_t size) + JEMALLOC_CXX_THROW JEMALLOC_ALLOC_SIZE(2); +JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_free(void *ptr) + JEMALLOC_CXX_THROW; + +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_mallocx(size_t size, int flags) + JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1); +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_rallocx(void *ptr, size_t size, + int flags) JEMALLOC_ALLOC_SIZE(2); +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_xallocx(void *ptr, size_t size, + size_t extra, int flags); +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_sallocx(const void *ptr, + int flags) JEMALLOC_ATTR(pure); +JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_dallocx(void *ptr, int flags); +JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_sdallocx(void *ptr, size_t size, + int flags); +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_nallocx(size_t size, int flags) + JEMALLOC_ATTR(pure); + +JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctl(const char *name, + void *oldp, size_t *oldlenp, void *newp, size_t newlen); +JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlnametomib(const char *name, + size_t *mibp, size_t *miblenp); +JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlbymib(const size_t *mib, + size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen); 
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_malloc_stats_print(
+ void (*write_cb)(void *, const char *), void *je_cbopaque,
+ const char *opts);
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_malloc_usable_size(
+ JEMALLOC_USABLE_SIZE_CONST void *ptr) JEMALLOC_CXX_THROW;
+
+#ifdef JEMALLOC_OVERRIDE_MEMALIGN
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_NOTHROW *je_memalign(size_t alignment, size_t size)
+ JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc);
+#endif
+
+#ifdef JEMALLOC_OVERRIDE_VALLOC
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_NOTHROW *je_valloc(size_t size) JEMALLOC_CXX_THROW
+ JEMALLOC_ATTR(malloc);
+#endif
diff --git a/sources/jemalloc/include-linux/jemalloc/jemalloc_protos_jet.h b/sources/jemalloc/include-linux/jemalloc/jemalloc_protos_jet.h
new file mode 100755
index 00000000..f71efef0
--- /dev/null
+++ b/sources/jemalloc/include-linux/jemalloc/jemalloc_protos_jet.h
@@ -0,0 +1,66 @@
+/*
+ * The jet_ prefix on the following public symbol declarations is an artifact
+ * of namespace management, and should be omitted in application code unless
+ * JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle.h).
+ */
+extern JEMALLOC_EXPORT const char *jet_malloc_conf;
+extern JEMALLOC_EXPORT void (*jet_malloc_message)(void *cbopaque,
+ const char *s);
+
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_NOTHROW *jet_malloc(size_t size)
+ JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_NOTHROW *jet_calloc(size_t num, size_t size)
+ JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2);
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW jet_posix_memalign(void **memptr,
+ size_t alignment, size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(nonnull(1));
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_NOTHROW *jet_aligned_alloc(size_t alignment,
+ size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc)
+ JEMALLOC_ALLOC_SIZE(2);
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_NOTHROW *jet_realloc(void *ptr, size_t size)
+ JEMALLOC_CXX_THROW JEMALLOC_ALLOC_SIZE(2);
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW jet_free(void *ptr)
+ JEMALLOC_CXX_THROW;
+
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_NOTHROW *jet_mallocx(size_t size, int flags)
+ JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_NOTHROW *jet_rallocx(void *ptr, size_t size,
+ int flags) JEMALLOC_ALLOC_SIZE(2);
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW jet_xallocx(void *ptr, size_t size,
+ size_t extra, int flags);
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW jet_sallocx(const void *ptr,
+ int flags) JEMALLOC_ATTR(pure);
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW jet_dallocx(void *ptr, int flags);
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW jet_sdallocx(void *ptr, size_t size,
+ int flags);
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW jet_nallocx(size_t size, int flags)
+ JEMALLOC_ATTR(pure);
+
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW jet_mallctl(const char *name,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen);
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW jet_mallctlnametomib(const char *name,
+ size_t *mibp, size_t *miblenp);
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW jet_mallctlbymib(const size_t *mib,
+ size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen);
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW jet_malloc_stats_print(
+ void (*write_cb)(void *, const char *), void *jet_cbopaque,
+ const char *opts);
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW jet_malloc_usable_size(
+ JEMALLOC_USABLE_SIZE_CONST void *ptr) JEMALLOC_CXX_THROW;
+
+#ifdef JEMALLOC_OVERRIDE_MEMALIGN
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_NOTHROW *jet_memalign(size_t alignment, size_t size)
+ JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc);
+#endif
+
+#ifdef JEMALLOC_OVERRIDE_VALLOC
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_NOTHROW *jet_valloc(size_t size) JEMALLOC_CXX_THROW
+ JEMALLOC_ATTR(malloc);
+#endif
diff --git a/sources/jemalloc/include-linux/jemalloc/jemalloc_rename.h b/sources/jemalloc/include-linux/jemalloc/jemalloc_rename.h
new file mode 100755
index 00000000..22e53206
--- /dev/null
+++ b/sources/jemalloc/include-linux/jemalloc/jemalloc_rename.h
@@ -0,0 +1,29 @@
+/*
+ * Name mangling for public symbols is controlled by --with-mangling and
+ * --with-jemalloc-prefix.  This copy was generated with the je_ prefix kept,
+ * so each macro below maps a symbol to itself rather than stripping je_.
+ */
+#ifndef JEMALLOC_NO_RENAME
+# define je_malloc_conf je_malloc_conf
+# define je_malloc_message je_malloc_message
+# define je_malloc je_malloc
+# define je_calloc je_calloc
+# define je_posix_memalign je_posix_memalign
+# define je_aligned_alloc je_aligned_alloc
+# define je_realloc je_realloc
+# define je_free je_free
+# define je_mallocx je_mallocx
+# define je_rallocx je_rallocx
+# define je_xallocx je_xallocx
+# define je_sallocx je_sallocx
+# define je_dallocx je_dallocx
+# define je_sdallocx je_sdallocx
+# define je_nallocx je_nallocx
+# define je_mallctl je_mallctl
+# define je_mallctlnametomib je_mallctlnametomib
+# define je_mallctlbymib je_mallctlbymib
+# define je_malloc_stats_print je_malloc_stats_print
+# define je_malloc_usable_size je_malloc_usable_size
+# define je_memalign je_memalign
+# define je_valloc je_valloc
+#endif
diff --git a/sources/jemalloc/include-linux/jemalloc/jemalloc_typedefs.h b/sources/jemalloc/include-linux/jemalloc/jemalloc_typedefs.h
new file mode 100755
index 00000000..fa7b350a
--- /dev/null
+++ b/sources/jemalloc/include-linux/jemalloc/jemalloc_typedefs.h
@@ -0,0 +1,57 @@
+/*
+ * void *
+ * chunk_alloc(void *new_addr, size_t size, size_t alignment, bool *zero,
+ * bool *commit, unsigned arena_ind);
+ */
+typedef void *(chunk_alloc_t)(void *, size_t, size_t, bool *, bool *, unsigned);
+
+/*
+ * bool
+ * chunk_dalloc(void *chunk, size_t size, bool committed, unsigned arena_ind);
+ */
+typedef bool (chunk_dalloc_t)(void *, size_t, bool, unsigned);
+
+/*
+ * bool
+ * chunk_commit(void *chunk, size_t size, size_t offset, size_t length,
+ * unsigned arena_ind);
+ */
+typedef bool (chunk_commit_t)(void *, size_t, size_t, size_t, unsigned);
+
+/*
+ * bool
+ * chunk_decommit(void *chunk, size_t size, size_t offset, size_t length,
+ * unsigned arena_ind);
+ */
+typedef bool (chunk_decommit_t)(void *, size_t, size_t, size_t, unsigned);
+
+/*
+ * bool
+ * chunk_purge(void *chunk, size_t size, size_t offset, size_t length,
+ * unsigned arena_ind);
+ */
+typedef bool (chunk_purge_t)(void *, size_t, size_t, size_t, unsigned);
+
+/*
+ * bool
+ * chunk_split(void *chunk, size_t size, size_t size_a, size_t size_b,
+ * bool committed, unsigned arena_ind);
+ */
+typedef bool (chunk_split_t)(void *, size_t, size_t, size_t, bool, unsigned);
+
+/*
+ * bool
+ * chunk_merge(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
+ * bool committed, unsigned arena_ind);
+ */
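+/*
+ * Taken together with chunk_merge_t below, these hook types are bundled into
+ * chunk_hooks_t.  A minimal sketch of installing custom hooks for arena 0
+ * through the arena.<i>.chunk_hooks mallctl; my_alloc through my_merge are
+ * hypothetical application functions matching the typedefs in this header:
+ *
+ *   chunk_hooks_t hooks = {
+ *       my_alloc, my_dalloc, my_commit, my_decommit,
+ *       my_purge, my_split, my_merge
+ *   };
+ *   mallctl("arena.0.chunk_hooks", NULL, NULL, &hooks, sizeof(hooks));
+ */
+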
+typedef bool (chunk_merge_t)(void *, size_t, void *, size_t, bool, unsigned); + +typedef struct { + chunk_alloc_t *alloc; + chunk_dalloc_t *dalloc; + chunk_commit_t *commit; + chunk_decommit_t *decommit; + chunk_purge_t *purge; + chunk_split_t *split; + chunk_merge_t *merge; +} chunk_hooks_t; diff --git a/sources/jemalloc/include-win/jemalloc/internal/arena.h b/sources/jemalloc/include-win/jemalloc/internal/arena.h new file mode 100755 index 00000000..119e3a59 --- /dev/null +++ b/sources/jemalloc/include-win/jemalloc/internal/arena.h @@ -0,0 +1,1528 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS) + +/* Maximum number of regions in one run. */ +#define LG_RUN_MAXREGS (LG_PAGE - LG_TINY_MIN) +#define RUN_MAXREGS (1U << LG_RUN_MAXREGS) + +/* + * Minimum redzone size. Redzones may be larger than this if necessary to + * preserve region alignment. + */ +#define REDZONE_MINSIZE 16 + +/* + * The minimum ratio of active:dirty pages per arena is computed as: + * + * (nactive >> lg_dirty_mult) >= ndirty + * + * So, supposing that lg_dirty_mult is 3, there can be no less than 8 times as + * many active pages as dirty pages. + */ +#define LG_DIRTY_MULT_DEFAULT 3 + +typedef enum { + purge_mode_ratio = 0, + purge_mode_decay = 1, + + purge_mode_limit = 2 +} purge_mode_t; +#define PURGE_DEFAULT purge_mode_ratio +/* Default decay time in seconds. */ +#define DECAY_TIME_DEFAULT 10 +/* Number of event ticks between time checks. */ +#define DECAY_NTICKS_PER_UPDATE 1000 + +typedef struct arena_runs_dirty_link_s arena_runs_dirty_link_t; +typedef struct arena_avail_links_s arena_avail_links_t; +typedef struct arena_run_s arena_run_t; +typedef struct arena_chunk_map_bits_s arena_chunk_map_bits_t; +typedef struct arena_chunk_map_misc_s arena_chunk_map_misc_t; +typedef struct arena_chunk_s arena_chunk_t; +typedef struct arena_bin_info_s arena_bin_info_t; +typedef struct arena_decay_s arena_decay_t; +typedef struct arena_bin_s arena_bin_t; +typedef struct arena_s arena_t; +typedef struct arena_tdata_s arena_tdata_t; + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +#ifdef JEMALLOC_ARENA_STRUCTS_A +struct arena_run_s { + /* Index of bin this run is associated with. */ + szind_t binind; + + /* Number of free regions in run. */ + unsigned nfree; + + /* Per region allocated/deallocated bitmap. */ + bitmap_t bitmap[BITMAP_GROUPS_MAX]; +}; + +/* Each element of the chunk map corresponds to one page within the chunk. */ +struct arena_chunk_map_bits_s { + /* + * Run address (or size) and various flags are stored together. The bit + * layout looks like (assuming 32-bit system): + * + * ???????? ???????? ???nnnnn nnndumla + * + * ? : Unallocated: Run address for first/last pages, unset for internal + * pages. + * Small: Run page offset. + * Large: Run page count for first page, unset for trailing pages. + * n : binind for small size class, BININD_INVALID for large size class. + * d : dirty? + * u : unzeroed? + * m : decommitted? + * l : large? + * a : allocated? + * + * Following are example bit patterns for the three types of runs. 
+ * + * p : run page offset + * s : run size + * n : binind for size class; large objects set these to BININD_INVALID + * x : don't care + * - : 0 + * + : 1 + * [DUMLA] : bit set + * [dumla] : bit unset + * + * Unallocated (clean): + * ssssssss ssssssss sss+++++ +++dum-a + * xxxxxxxx xxxxxxxx xxxxxxxx xxx-Uxxx + * ssssssss ssssssss sss+++++ +++dUm-a + * + * Unallocated (dirty): + * ssssssss ssssssss sss+++++ +++D-m-a + * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx + * ssssssss ssssssss sss+++++ +++D-m-a + * + * Small: + * pppppppp pppppppp pppnnnnn nnnd---A + * pppppppp pppppppp pppnnnnn nnn----A + * pppppppp pppppppp pppnnnnn nnnd---A + * + * Large: + * ssssssss ssssssss sss+++++ +++D--LA + * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx + * -------- -------- ---+++++ +++D--LA + * + * Large (sampled, size <= LARGE_MINCLASS): + * ssssssss ssssssss sssnnnnn nnnD--LA + * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx + * -------- -------- ---+++++ +++D--LA + * + * Large (not sampled, size == LARGE_MINCLASS): + * ssssssss ssssssss sss+++++ +++D--LA + * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx + * -------- -------- ---+++++ +++D--LA + */ + size_t bits; +#define CHUNK_MAP_ALLOCATED ((size_t)0x01U) +#define CHUNK_MAP_LARGE ((size_t)0x02U) +#define CHUNK_MAP_STATE_MASK ((size_t)0x3U) + +#define CHUNK_MAP_DECOMMITTED ((size_t)0x04U) +#define CHUNK_MAP_UNZEROED ((size_t)0x08U) +#define CHUNK_MAP_DIRTY ((size_t)0x10U) +#define CHUNK_MAP_FLAGS_MASK ((size_t)0x1cU) + +#define CHUNK_MAP_BININD_SHIFT 5 +#define BININD_INVALID ((size_t)0xffU) +#define CHUNK_MAP_BININD_MASK (BININD_INVALID << CHUNK_MAP_BININD_SHIFT) +#define CHUNK_MAP_BININD_INVALID CHUNK_MAP_BININD_MASK + +#define CHUNK_MAP_RUNIND_SHIFT (CHUNK_MAP_BININD_SHIFT + 8) +#define CHUNK_MAP_SIZE_SHIFT (CHUNK_MAP_RUNIND_SHIFT - LG_PAGE) +#define CHUNK_MAP_SIZE_MASK \ + (~(CHUNK_MAP_BININD_MASK | CHUNK_MAP_FLAGS_MASK | CHUNK_MAP_STATE_MASK)) +}; + +struct arena_runs_dirty_link_s { + qr(arena_runs_dirty_link_t) rd_link; +}; + +/* + * Each arena_chunk_map_misc_t corresponds to one page within the chunk, just + * like arena_chunk_map_bits_t. Two separate arrays are stored within each + * chunk header in order to improve cache locality. + */ +struct arena_chunk_map_misc_s { + /* + * Linkage for run heaps. There are two disjoint uses: + * + * 1) arena_t's runs_avail heaps. + * 2) arena_run_t conceptually uses this linkage for in-use non-full + * runs, rather than directly embedding linkage. + */ + phn(arena_chunk_map_misc_t) ph_link; + + union { + /* Linkage for list of dirty runs. */ + arena_runs_dirty_link_t rd; + + /* Profile counters, used for large object runs. */ + union { + void *prof_tctx_pun; + prof_tctx_t *prof_tctx; + }; + + /* Small region run metadata. */ + arena_run_t run; + }; +}; +typedef ph(arena_chunk_map_misc_t) arena_run_heap_t; +#endif /* JEMALLOC_ARENA_STRUCTS_A */ + +#ifdef JEMALLOC_ARENA_STRUCTS_B +/* Arena chunk header. */ +struct arena_chunk_s { + /* + * A pointer to the arena that owns the chunk is stored within the node. + * This field as a whole is used by chunks_rtree to support both + * ivsalloc() and core-based debugging. + */ + extent_node_t node; + + /* + * True if memory could be backed by transparent huge pages. This is + * only directly relevant to Linux, since it is the only supported + * platform on which jemalloc interacts with explicit transparent huge + * page controls. + */ + bool hugepage; + + /* + * Map of pages within chunk that keeps track of free/large/small. 
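+ * For example, under the encoding documented by the CHUNK_MAP_* macros
+ * above, a page backing a small run has CHUNK_MAP_ALLOCATED set,
+ * CHUNK_MAP_LARGE clear, and its size-class index recoverable as
+ *
+ *   (bits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT
+ *
+ * (this is exactly what arena_mapbits_binind_get() below computes).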
The + * first map_bias entries are omitted, since the chunk header does not + * need to be tracked in the map. This omission saves a header page + * for common chunk sizes (e.g. 4 MiB). + */ + arena_chunk_map_bits_t map_bits[1]; /* Dynamically sized. */ +}; + +/* + * Read-only information associated with each element of arena_t's bins array + * is stored separately, partly to reduce memory usage (only one copy, rather + * than one per arena), but mainly to avoid false cacheline sharing. + * + * Each run has the following layout: + * + * /--------------------\ + * | pad? | + * |--------------------| + * | redzone | + * reg0_offset | region 0 | + * | redzone | + * |--------------------| \ + * | redzone | | + * | region 1 | > reg_interval + * | redzone | / + * |--------------------| + * | ... | + * | ... | + * | ... | + * |--------------------| + * | redzone | + * | region nregs-1 | + * | redzone | + * |--------------------| + * | alignment pad? | + * \--------------------/ + * + * reg_interval has at least the same minimum alignment as reg_size; this + * preserves the alignment constraint that sa2u() depends on. Alignment pad is + * either 0 or redzone_size; it is present only if needed to align reg0_offset. + */ +struct arena_bin_info_s { + /* Size of regions in a run for this bin's size class. */ + size_t reg_size; + + /* Redzone size. */ + size_t redzone_size; + + /* Interval between regions (reg_size + (redzone_size << 1)). */ + size_t reg_interval; + + /* Total size of a run for this bin's size class. */ + size_t run_size; + + /* Total number of regions in a run for this bin's size class. */ + uint32_t nregs; + + /* + * Metadata used to manipulate bitmaps for runs associated with this + * bin. + */ + bitmap_info_t bitmap_info; + + /* Offset of first region in a run for this bin's size class. */ + uint32_t reg0_offset; +}; + +struct arena_decay_s { + /* + * Approximate time in seconds from the creation of a set of unused + * dirty pages until an equivalent set of unused dirty pages is purged + * and/or reused. + */ + ssize_t time; + /* time / SMOOTHSTEP_NSTEPS. */ + nstime_t interval; + /* + * Time at which the current decay interval logically started. We do + * not actually advance to a new epoch until sometime after it starts + * because of scheduling and computation delays, and it is even possible + * to completely skip epochs. In all cases, during epoch advancement we + * merge all relevant activity into the most recently recorded epoch. + */ + nstime_t epoch; + /* Deadline randomness generator. */ + uint64_t jitter_state; + /* + * Deadline for current epoch. This is the sum of interval and per + * epoch jitter which is a uniform random variable in [0..interval). + * Epochs always advance by precise multiples of interval, but we + * randomize the deadline to reduce the likelihood of arenas purging in + * lockstep. + */ + nstime_t deadline; + /* + * Number of dirty pages at beginning of current epoch. During epoch + * advancement we use the delta between arena->decay.ndirty and + * arena->ndirty to determine how many dirty pages, if any, were + * generated. + */ + size_t ndirty; + /* + * Trailing log of how many unused dirty pages were generated during + * each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last + * element is the most recent epoch. Corresponding epoch times are + * relative to epoch. + */ + size_t backlog[SMOOTHSTEP_NSTEPS]; +}; + +struct arena_bin_s { + /* + * All operations on runcur, runs, and stats require that lock be + * locked. 
Run allocation/deallocation are protected by the arena lock,
+ * which may be acquired while holding one or more bin locks, but not
+ * vice versa.
+ */
+ malloc_mutex_t lock;
+
+ /*
+ * Current run being used to service allocations of this bin's size
+ * class.
+ */
+ arena_run_t *runcur;
+
+ /*
+ * Heap of non-full runs. This heap is used when looking for an
+ * existing run when runcur is no longer usable. We choose the
+ * non-full run that is lowest in memory; this policy tends to keep
+ * objects packed well, and it can also help reduce the number of
+ * almost-empty chunks.
+ */
+ arena_run_heap_t runs;
+
+ /* Bin statistics. */
+ malloc_bin_stats_t stats;
+};
+
+struct arena_s {
+ /* This arena's index within the arenas array. */
+ unsigned ind;
+
+ /*
+ * Number of threads currently assigned to this arena, synchronized via
+ * atomic operations. Each thread has two distinct assignments, one for
+ * application-serving allocation, and the other for internal metadata
+ * allocation. Internal metadata must not be allocated from arenas
+ * created via the arenas.extend mallctl, because the arena.<i>.reset
+ * mallctl indiscriminately discards all allocations for the affected
+ * arena.
+ *
+ * 0: Application allocation.
+ * 1: Internal metadata allocation.
+ */
+ unsigned nthreads[2];
+
+ /*
+ * There are three classes of arena operations from a locking
+ * perspective:
+ * 1) Thread assignment (modifies nthreads) is synchronized via atomics.
+ * 2) Bin-related operations are protected by bin locks.
+ * 3) Chunk- and run-related operations are protected by this mutex.
+ */
+ malloc_mutex_t lock;
+
+ arena_stats_t stats;
+ /*
+ * List of tcaches for extant threads associated with this arena.
+ * Stats from these are merged incrementally, and at exit if
+ * opt_stats_print is enabled.
+ */
+ ql_head(tcache_t) tcache_ql;
+
+ uint64_t prof_accumbytes;
+
+ /*
+ * PRNG state for cache index randomization of large allocation base
+ * pointers.
+ */
+ size_t offset_state;
+
+ dss_prec_t dss_prec;
+
+ /* Extant arena chunks. */
+ ql_head(extent_node_t) achunks;
+
+ /* Extent serial number generator state. */
+ size_t extent_sn_next;
+
+ /*
+ * In order to avoid rapid chunk allocation/deallocation when an arena
+ * oscillates right on the cusp of needing a new chunk, cache the most
+ * recently freed chunk. The spare is left in the arena's chunk trees
+ * until it is deleted.
+ *
+ * There is one spare chunk per arena, rather than one spare total, in
+ * order to avoid interactions between multiple threads that could make
+ * a single spare inadequate.
+ */
+ arena_chunk_t *spare;
+
+ /* Minimum ratio (log base 2) of nactive:ndirty. */
+ ssize_t lg_dirty_mult;
+
+ /* True if a thread is currently executing arena_purge_to_limit(). */
+ bool purging;
+
+ /* Number of pages in active runs and huge regions. */
+ size_t nactive;
+
+ /*
+ * Current count of pages within unused runs that are potentially
+ * dirty, and for which madvise(... MADV_DONTNEED) has not been called.
+ * By tracking this, we can institute a limit on how much dirty unused
+ * memory is mapped for each arena.
+ */
+ size_t ndirty;
+
+ /*
+ * Unused dirty memory this arena manages. Dirty memory is conceptually
+ * tracked as an arbitrarily interleaved LRU of dirty runs and cached
+ * chunks, but the list linkage is actually semi-duplicated in order to
+ * avoid extra arena_chunk_map_misc_t space overhead.
+ * + * LRU-----------------------------------------------------------MRU + * + * /-- arena ---\ + * | | + * | | + * |------------| /- chunk -\ + * ...->|chunks_cache|<--------------------------->| /----\ |<--... + * |------------| | |node| | + * | | | | | | + * | | /- run -\ /- run -\ | | | | + * | | | | | | | | | | + * | | | | | | | | | | + * |------------| |-------| |-------| | |----| | + * ...->|runs_dirty |<-->|rd |<-->|rd |<---->|rd |<----... + * |------------| |-------| |-------| | |----| | + * | | | | | | | | | | + * | | | | | | | \----/ | + * | | \-------/ \-------/ | | + * | | | | + * | | | | + * \------------/ \---------/ + */ + arena_runs_dirty_link_t runs_dirty; + extent_node_t chunks_cache; + + /* Decay-based purging state. */ + arena_decay_t decay; + + /* Extant huge allocations. */ + ql_head(extent_node_t) huge; + /* Synchronizes all huge allocation/update/deallocation. */ + malloc_mutex_t huge_mtx; + + /* + * Trees of chunks that were previously allocated (trees differ only in + * node ordering). These are used when allocating chunks, in an attempt + * to re-use address space. Depending on function, different tree + * orderings are needed, which is why there are two trees with the same + * contents. + */ + extent_tree_t chunks_szsnad_cached; + extent_tree_t chunks_ad_cached; + extent_tree_t chunks_szsnad_retained; + extent_tree_t chunks_ad_retained; + + malloc_mutex_t chunks_mtx; + /* Cache of nodes that were allocated via base_alloc(). */ + ql_head(extent_node_t) node_cache; + malloc_mutex_t node_cache_mtx; + + /* User-configurable chunk hook functions. */ + chunk_hooks_t chunk_hooks; + + /* bins is used to store trees of free regions. */ + arena_bin_t bins[NBINS]; + + /* + * Size-segregated address-ordered heaps of this arena's available runs, + * used for first-best-fit run allocation. Runs are quantized, i.e. + * they reside in the last heap which corresponds to a size class less + * than or equal to the run size. + */ + arena_run_heap_t runs_avail[NPSIZES]; +}; + +/* Used in conjunction with tsd for fast arena-related context lookup. */ +struct arena_tdata_s { + ticker_t decay_ticker; +}; +#endif /* JEMALLOC_ARENA_STRUCTS_B */ + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +static const size_t large_pad = +#ifdef JEMALLOC_CACHE_OBLIVIOUS + PAGE +#else + 0 +#endif + ; + +extern bool opt_thp; +extern purge_mode_t opt_purge; +extern const char *purge_mode_names[]; +extern ssize_t opt_lg_dirty_mult; +extern ssize_t opt_decay_time; + +extern arena_bin_info_t arena_bin_info[NBINS]; + +extern size_t map_bias; /* Number of arena chunk header pages. */ +extern size_t map_misc_offset; +extern size_t arena_maxrun; /* Max run size for arenas. */ +extern size_t large_maxclass; /* Max large size class. */ +extern unsigned nlclasses; /* Number of large size classes. */ +extern unsigned nhclasses; /* Number of huge size classes. 
*/ + +#ifdef JEMALLOC_JET +typedef size_t (run_quantize_t)(size_t); +extern run_quantize_t *run_quantize_floor; +extern run_quantize_t *run_quantize_ceil; +#endif +void arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node, + bool cache); +void arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node, + bool cache); +extent_node_t *arena_node_alloc(tsdn_t *tsdn, arena_t *arena); +void arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node); +void *arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize, + size_t alignment, size_t *sn, bool *zero); +void arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, + size_t usize, size_t sn); +void arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena, + void *chunk, size_t oldsize, size_t usize); +void arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, + void *chunk, size_t oldsize, size_t usize, size_t sn); +bool arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, + void *chunk, size_t oldsize, size_t usize, bool *zero); +ssize_t arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena); +bool arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena, + ssize_t lg_dirty_mult); +ssize_t arena_decay_time_get(tsdn_t *tsdn, arena_t *arena); +bool arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time); +void arena_purge(tsdn_t *tsdn, arena_t *arena, bool all); +void arena_maybe_purge(tsdn_t *tsdn, arena_t *arena); +void arena_reset(tsd_t *tsd, arena_t *arena); +void arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, + tcache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes); +void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, + bool zero); +#ifdef JEMALLOC_JET +typedef void (arena_redzone_corruption_t)(void *, size_t, bool, size_t, + uint8_t); +extern arena_redzone_corruption_t *arena_redzone_corruption; +typedef void (arena_dalloc_junk_small_t)(void *, arena_bin_info_t *); +extern arena_dalloc_junk_small_t *arena_dalloc_junk_small; +#else +void arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info); +#endif +void arena_quarantine_junk_small(void *ptr, size_t usize); +void *arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t ind, + bool zero); +void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, + szind_t ind, bool zero); +void *arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, + size_t alignment, bool zero, tcache_t *tcache); +void arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size); +void arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, + arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm); +void arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, + void *ptr, size_t pageind, arena_chunk_map_bits_t *bitselm); +void arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, + void *ptr, size_t pageind); +#ifdef JEMALLOC_JET +typedef void (arena_dalloc_junk_large_t)(void *, size_t); +extern arena_dalloc_junk_large_t *arena_dalloc_junk_large; +#else +void arena_dalloc_junk_large(void *ptr, size_t usize); +#endif +void arena_dalloc_large_junked_locked(tsdn_t *tsdn, arena_t *arena, + arena_chunk_t *chunk, void *ptr); +void arena_dalloc_large(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, + void *ptr); +#ifdef JEMALLOC_JET +typedef void (arena_ralloc_junk_large_t)(void *, size_t, size_t); +extern arena_ralloc_junk_large_t *arena_ralloc_junk_large; +#endif +bool arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, + size_t size, 
size_t extra, bool zero); +void *arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, + size_t size, size_t alignment, bool zero, tcache_t *tcache); +dss_prec_t arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena); +bool arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec); +ssize_t arena_lg_dirty_mult_default_get(void); +bool arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult); +ssize_t arena_decay_time_default_get(void); +bool arena_decay_time_default_set(ssize_t decay_time); +void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, + unsigned *nthreads, const char **dss, ssize_t *lg_dirty_mult, + ssize_t *decay_time, size_t *nactive, size_t *ndirty); +void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, + const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time, + size_t *nactive, size_t *ndirty, arena_stats_t *astats, + malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats, + malloc_huge_stats_t *hstats); +unsigned arena_nthreads_get(arena_t *arena, bool internal); +void arena_nthreads_inc(arena_t *arena, bool internal); +void arena_nthreads_dec(arena_t *arena, bool internal); +size_t arena_extent_sn_next(arena_t *arena); +arena_t *arena_new(tsdn_t *tsdn, unsigned ind); +void arena_boot(void); +void arena_prefork0(tsdn_t *tsdn, arena_t *arena); +void arena_prefork1(tsdn_t *tsdn, arena_t *arena); +void arena_prefork2(tsdn_t *tsdn, arena_t *arena); +void arena_prefork3(tsdn_t *tsdn, arena_t *arena); +void arena_postfork_parent(tsdn_t *tsdn, arena_t *arena); +void arena_postfork_child(tsdn_t *tsdn, arena_t *arena); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +arena_chunk_map_bits_t *arena_bitselm_get_mutable(arena_chunk_t *chunk, + size_t pageind); +const arena_chunk_map_bits_t *arena_bitselm_get_const( + const arena_chunk_t *chunk, size_t pageind); +arena_chunk_map_misc_t *arena_miscelm_get_mutable(arena_chunk_t *chunk, + size_t pageind); +const arena_chunk_map_misc_t *arena_miscelm_get_const( + const arena_chunk_t *chunk, size_t pageind); +size_t arena_miscelm_to_pageind(const arena_chunk_map_misc_t *miscelm); +void *arena_miscelm_to_rpages(const arena_chunk_map_misc_t *miscelm); +arena_chunk_map_misc_t *arena_rd_to_miscelm(arena_runs_dirty_link_t *rd); +arena_chunk_map_misc_t *arena_run_to_miscelm(arena_run_t *run); +size_t *arena_mapbitsp_get_mutable(arena_chunk_t *chunk, size_t pageind); +const size_t *arena_mapbitsp_get_const(const arena_chunk_t *chunk, + size_t pageind); +size_t arena_mapbitsp_read(const size_t *mapbitsp); +size_t arena_mapbits_get(const arena_chunk_t *chunk, size_t pageind); +size_t arena_mapbits_size_decode(size_t mapbits); +size_t arena_mapbits_unallocated_size_get(const arena_chunk_t *chunk, + size_t pageind); +size_t arena_mapbits_large_size_get(const arena_chunk_t *chunk, + size_t pageind); +size_t arena_mapbits_small_runind_get(const arena_chunk_t *chunk, + size_t pageind); +szind_t arena_mapbits_binind_get(const arena_chunk_t *chunk, size_t pageind); +size_t arena_mapbits_dirty_get(const arena_chunk_t *chunk, size_t pageind); +size_t arena_mapbits_unzeroed_get(const arena_chunk_t *chunk, size_t pageind); +size_t arena_mapbits_decommitted_get(const arena_chunk_t *chunk, + size_t pageind); +size_t arena_mapbits_large_get(const arena_chunk_t *chunk, size_t pageind); +size_t arena_mapbits_allocated_get(const arena_chunk_t *chunk, size_t pageind); +void 
arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits); +size_t arena_mapbits_size_encode(size_t size); +void arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, + size_t size, size_t flags); +void arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind, + size_t size); +void arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind, + size_t flags); +void arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, + size_t size, size_t flags); +void arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind, + szind_t binind); +void arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, + size_t runind, szind_t binind, size_t flags); +void arena_metadata_allocated_add(arena_t *arena, size_t size); +void arena_metadata_allocated_sub(arena_t *arena, size_t size); +size_t arena_metadata_allocated_get(arena_t *arena); +bool arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes); +bool arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes); +bool arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes); +szind_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits); +szind_t arena_bin_index(arena_t *arena, arena_bin_t *bin); +size_t arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, + const void *ptr); +prof_tctx_t *arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr); +void arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize, + prof_tctx_t *tctx); +void arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize, + const void *old_ptr, prof_tctx_t *old_tctx); +void arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks); +void arena_decay_tick(tsdn_t *tsdn, arena_t *arena); +void *arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, + bool zero, tcache_t *tcache, bool slow_path); +arena_t *arena_aalloc(const void *ptr); +size_t arena_salloc(tsdn_t *tsdn, const void *ptr, bool demote); +void arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path); +void arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache, + bool slow_path); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_)) +# ifdef JEMALLOC_ARENA_INLINE_A +JEMALLOC_ALWAYS_INLINE arena_chunk_map_bits_t * +arena_bitselm_get_mutable(arena_chunk_t *chunk, size_t pageind) +{ + + assert(pageind >= map_bias); + assert(pageind < chunk_npages); + + return (&chunk->map_bits[pageind-map_bias]); +} + +JEMALLOC_ALWAYS_INLINE const arena_chunk_map_bits_t * +arena_bitselm_get_const(const arena_chunk_t *chunk, size_t pageind) +{ + + return (arena_bitselm_get_mutable((arena_chunk_t *)chunk, pageind)); +} + +JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t * +arena_miscelm_get_mutable(arena_chunk_t *chunk, size_t pageind) +{ + + assert(pageind >= map_bias); + assert(pageind < chunk_npages); + + return ((arena_chunk_map_misc_t *)((uintptr_t)chunk + + (uintptr_t)map_misc_offset) + pageind-map_bias); +} + +JEMALLOC_ALWAYS_INLINE const arena_chunk_map_misc_t * +arena_miscelm_get_const(const arena_chunk_t *chunk, size_t pageind) +{ + + return (arena_miscelm_get_mutable((arena_chunk_t *)chunk, pageind)); +} + +JEMALLOC_ALWAYS_INLINE size_t +arena_miscelm_to_pageind(const arena_chunk_map_misc_t *miscelm) +{ + arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm); + size_t pageind = ((uintptr_t)miscelm - ((uintptr_t)chunk + + map_misc_offset)) / sizeof(arena_chunk_map_misc_t) + map_bias; + + assert(pageind >= map_bias); + assert(pageind < chunk_npages); + + return 
(pageind); +} + +JEMALLOC_ALWAYS_INLINE void * +arena_miscelm_to_rpages(const arena_chunk_map_misc_t *miscelm) +{ + arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm); + size_t pageind = arena_miscelm_to_pageind(miscelm); + + return ((void *)((uintptr_t)chunk + (pageind << LG_PAGE))); +} + +JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t * +arena_rd_to_miscelm(arena_runs_dirty_link_t *rd) +{ + arena_chunk_map_misc_t *miscelm = (arena_chunk_map_misc_t + *)((uintptr_t)rd - offsetof(arena_chunk_map_misc_t, rd)); + + assert(arena_miscelm_to_pageind(miscelm) >= map_bias); + assert(arena_miscelm_to_pageind(miscelm) < chunk_npages); + + return (miscelm); +} + +JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t * +arena_run_to_miscelm(arena_run_t *run) +{ + arena_chunk_map_misc_t *miscelm = (arena_chunk_map_misc_t + *)((uintptr_t)run - offsetof(arena_chunk_map_misc_t, run)); + + assert(arena_miscelm_to_pageind(miscelm) >= map_bias); + assert(arena_miscelm_to_pageind(miscelm) < chunk_npages); + + return (miscelm); +} + +JEMALLOC_ALWAYS_INLINE size_t * +arena_mapbitsp_get_mutable(arena_chunk_t *chunk, size_t pageind) +{ + + return (&arena_bitselm_get_mutable(chunk, pageind)->bits); +} + +JEMALLOC_ALWAYS_INLINE const size_t * +arena_mapbitsp_get_const(const arena_chunk_t *chunk, size_t pageind) +{ + + return (arena_mapbitsp_get_mutable((arena_chunk_t *)chunk, pageind)); +} + +JEMALLOC_ALWAYS_INLINE size_t +arena_mapbitsp_read(const size_t *mapbitsp) +{ + + return (*mapbitsp); +} + +JEMALLOC_ALWAYS_INLINE size_t +arena_mapbits_get(const arena_chunk_t *chunk, size_t pageind) +{ + + return (arena_mapbitsp_read(arena_mapbitsp_get_const(chunk, pageind))); +} + +JEMALLOC_ALWAYS_INLINE size_t +arena_mapbits_size_decode(size_t mapbits) +{ + size_t size; + +#if CHUNK_MAP_SIZE_SHIFT > 0 + size = (mapbits & CHUNK_MAP_SIZE_MASK) >> CHUNK_MAP_SIZE_SHIFT; +#elif CHUNK_MAP_SIZE_SHIFT == 0 + size = mapbits & CHUNK_MAP_SIZE_MASK; +#else + size = (mapbits & CHUNK_MAP_SIZE_MASK) << -CHUNK_MAP_SIZE_SHIFT; +#endif + + return (size); +} + +JEMALLOC_ALWAYS_INLINE size_t +arena_mapbits_unallocated_size_get(const arena_chunk_t *chunk, size_t pageind) +{ + size_t mapbits; + + mapbits = arena_mapbits_get(chunk, pageind); + assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0); + return (arena_mapbits_size_decode(mapbits)); +} + +JEMALLOC_ALWAYS_INLINE size_t +arena_mapbits_large_size_get(const arena_chunk_t *chunk, size_t pageind) +{ + size_t mapbits; + + mapbits = arena_mapbits_get(chunk, pageind); + assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == + (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)); + return (arena_mapbits_size_decode(mapbits)); +} + +JEMALLOC_ALWAYS_INLINE size_t +arena_mapbits_small_runind_get(const arena_chunk_t *chunk, size_t pageind) +{ + size_t mapbits; + + mapbits = arena_mapbits_get(chunk, pageind); + assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == + CHUNK_MAP_ALLOCATED); + return (mapbits >> CHUNK_MAP_RUNIND_SHIFT); +} + +JEMALLOC_ALWAYS_INLINE szind_t +arena_mapbits_binind_get(const arena_chunk_t *chunk, size_t pageind) +{ + size_t mapbits; + szind_t binind; + + mapbits = arena_mapbits_get(chunk, pageind); + binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT; + assert(binind < NBINS || binind == BININD_INVALID); + return (binind); +} + +JEMALLOC_ALWAYS_INLINE size_t +arena_mapbits_dirty_get(const arena_chunk_t *chunk, size_t pageind) +{ + size_t mapbits; + + mapbits = arena_mapbits_get(chunk, pageind); + assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || 
(mapbits & + (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0); + return (mapbits & CHUNK_MAP_DIRTY); +} + +JEMALLOC_ALWAYS_INLINE size_t +arena_mapbits_unzeroed_get(const arena_chunk_t *chunk, size_t pageind) +{ + size_t mapbits; + + mapbits = arena_mapbits_get(chunk, pageind); + assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits & + (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0); + return (mapbits & CHUNK_MAP_UNZEROED); +} + +JEMALLOC_ALWAYS_INLINE size_t +arena_mapbits_decommitted_get(const arena_chunk_t *chunk, size_t pageind) +{ + size_t mapbits; + + mapbits = arena_mapbits_get(chunk, pageind); + assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits & + (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0); + return (mapbits & CHUNK_MAP_DECOMMITTED); +} + +JEMALLOC_ALWAYS_INLINE size_t +arena_mapbits_large_get(const arena_chunk_t *chunk, size_t pageind) +{ + size_t mapbits; + + mapbits = arena_mapbits_get(chunk, pageind); + return (mapbits & CHUNK_MAP_LARGE); +} + +JEMALLOC_ALWAYS_INLINE size_t +arena_mapbits_allocated_get(const arena_chunk_t *chunk, size_t pageind) +{ + size_t mapbits; + + mapbits = arena_mapbits_get(chunk, pageind); + return (mapbits & CHUNK_MAP_ALLOCATED); +} + +JEMALLOC_ALWAYS_INLINE void +arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits) +{ + + *mapbitsp = mapbits; +} + +JEMALLOC_ALWAYS_INLINE size_t +arena_mapbits_size_encode(size_t size) +{ + size_t mapbits; + +#if CHUNK_MAP_SIZE_SHIFT > 0 + mapbits = size << CHUNK_MAP_SIZE_SHIFT; +#elif CHUNK_MAP_SIZE_SHIFT == 0 + mapbits = size; +#else + mapbits = size >> -CHUNK_MAP_SIZE_SHIFT; +#endif + + assert((mapbits & ~CHUNK_MAP_SIZE_MASK) == 0); + return (mapbits); +} + +JEMALLOC_ALWAYS_INLINE void +arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size, + size_t flags) +{ + size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind); + + assert((size & PAGE_MASK) == 0); + assert((flags & CHUNK_MAP_FLAGS_MASK) == flags); + assert((flags & CHUNK_MAP_DECOMMITTED) == 0 || (flags & + (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0); + arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) | + CHUNK_MAP_BININD_INVALID | flags); +} + +JEMALLOC_ALWAYS_INLINE void +arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind, + size_t size) +{ + size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind); + size_t mapbits = arena_mapbitsp_read(mapbitsp); + + assert((size & PAGE_MASK) == 0); + assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0); + arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) | + (mapbits & ~CHUNK_MAP_SIZE_MASK)); +} + +JEMALLOC_ALWAYS_INLINE void +arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind, size_t flags) +{ + size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind); + + assert((flags & CHUNK_MAP_UNZEROED) == flags); + arena_mapbitsp_write(mapbitsp, flags); +} + +JEMALLOC_ALWAYS_INLINE void +arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size, + size_t flags) +{ + size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind); + + assert((size & PAGE_MASK) == 0); + assert((flags & CHUNK_MAP_FLAGS_MASK) == flags); + assert((flags & CHUNK_MAP_DECOMMITTED) == 0 || (flags & + (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0); + arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) | + CHUNK_MAP_BININD_INVALID | flags | CHUNK_MAP_LARGE | + CHUNK_MAP_ALLOCATED); +} + +JEMALLOC_ALWAYS_INLINE void +arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind, + szind_t binind) +{ + size_t 
*mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind); + size_t mapbits = arena_mapbitsp_read(mapbitsp); + + assert(binind <= BININD_INVALID); + assert(arena_mapbits_large_size_get(chunk, pageind) == LARGE_MINCLASS + + large_pad); + arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_BININD_MASK) | + (binind << CHUNK_MAP_BININD_SHIFT)); +} + +JEMALLOC_ALWAYS_INLINE void +arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind, + szind_t binind, size_t flags) +{ + size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind); + + assert(binind < BININD_INVALID); + assert(pageind - runind >= map_bias); + assert((flags & CHUNK_MAP_UNZEROED) == flags); + arena_mapbitsp_write(mapbitsp, (runind << CHUNK_MAP_RUNIND_SHIFT) | + (binind << CHUNK_MAP_BININD_SHIFT) | flags | CHUNK_MAP_ALLOCATED); +} + +JEMALLOC_INLINE void +arena_metadata_allocated_add(arena_t *arena, size_t size) +{ + + atomic_add_z(&arena->stats.metadata_allocated, size); +} + +JEMALLOC_INLINE void +arena_metadata_allocated_sub(arena_t *arena, size_t size) +{ + + atomic_sub_z(&arena->stats.metadata_allocated, size); +} + +JEMALLOC_INLINE size_t +arena_metadata_allocated_get(arena_t *arena) +{ + + return (atomic_read_z(&arena->stats.metadata_allocated)); +} + +JEMALLOC_INLINE bool +arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes) +{ + + cassert(config_prof); + assert(prof_interval != 0); + + arena->prof_accumbytes += accumbytes; + if (arena->prof_accumbytes >= prof_interval) { + arena->prof_accumbytes -= prof_interval; + return (true); + } + return (false); +} + +JEMALLOC_INLINE bool +arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes) +{ + + cassert(config_prof); + + if (likely(prof_interval == 0)) + return (false); + return (arena_prof_accum_impl(arena, accumbytes)); +} + +JEMALLOC_INLINE bool +arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes) +{ + + cassert(config_prof); + + if (likely(prof_interval == 0)) + return (false); + + { + bool ret; + + malloc_mutex_lock(tsdn, &arena->lock); + ret = arena_prof_accum_impl(arena, accumbytes); + malloc_mutex_unlock(tsdn, &arena->lock); + return (ret); + } +} + +JEMALLOC_ALWAYS_INLINE szind_t +arena_ptr_small_binind_get(const void *ptr, size_t mapbits) +{ + szind_t binind; + + binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT; + + if (config_debug) { + arena_chunk_t *chunk; + arena_t *arena; + size_t pageind; + size_t actual_mapbits; + size_t rpages_ind; + const arena_run_t *run; + arena_bin_t *bin; + szind_t run_binind, actual_binind; + arena_bin_info_t *bin_info; + const arena_chunk_map_misc_t *miscelm; + const void *rpages; + + assert(binind != BININD_INVALID); + assert(binind < NBINS); + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + arena = extent_node_arena_get(&chunk->node); + pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; + actual_mapbits = arena_mapbits_get(chunk, pageind); + assert(mapbits == actual_mapbits); + assert(arena_mapbits_large_get(chunk, pageind) == 0); + assert(arena_mapbits_allocated_get(chunk, pageind) != 0); + rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, + pageind); + miscelm = arena_miscelm_get_const(chunk, rpages_ind); + run = &miscelm->run; + run_binind = run->binind; + bin = &arena->bins[run_binind]; + actual_binind = (szind_t)(bin - arena->bins); + assert(run_binind == actual_binind); + bin_info = &arena_bin_info[actual_binind]; + rpages = arena_miscelm_to_rpages(miscelm); + assert(((uintptr_t)ptr - ((uintptr_t)rpages + + (uintptr_t)bin_info->reg0_offset)) % 
bin_info->reg_interval + == 0); + } + + return (binind); +} +# endif /* JEMALLOC_ARENA_INLINE_A */ + +# ifdef JEMALLOC_ARENA_INLINE_B +JEMALLOC_INLINE szind_t +arena_bin_index(arena_t *arena, arena_bin_t *bin) +{ + szind_t binind = (szind_t)(bin - arena->bins); + assert(binind < NBINS); + return (binind); +} + +JEMALLOC_INLINE size_t +arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr) +{ + size_t diff, interval, shift, regind; + arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); + void *rpages = arena_miscelm_to_rpages(miscelm); + + /* + * Freeing a pointer lower than region zero can cause assertion + * failure. + */ + assert((uintptr_t)ptr >= (uintptr_t)rpages + + (uintptr_t)bin_info->reg0_offset); + + /* + * Avoid doing division with a variable divisor if possible. Using + * actual division here can reduce allocator throughput by over 20%! + */ + diff = (size_t)((uintptr_t)ptr - (uintptr_t)rpages - + bin_info->reg0_offset); + + /* Rescale (factor powers of 2 out of the numerator and denominator). */ + interval = bin_info->reg_interval; + shift = ffs_zu(interval) - 1; + diff >>= shift; + interval >>= shift; + + if (interval == 1) { + /* The divisor was a power of 2. */ + regind = diff; + } else { + /* + * To divide by a number D that is not a power of two we + * multiply by (2^21 / D) and then right shift by 21 positions. + * + * X / D + * + * becomes + * + * (X * interval_invs[D - 3]) >> SIZE_INV_SHIFT + * + * We can omit the first three elements, because we never + * divide by 0, and 1 and 2 are both powers of two, which are + * handled above. + */ +#define SIZE_INV_SHIFT ((sizeof(size_t) << 3) - LG_RUN_MAXREGS) +#define SIZE_INV(s) (((ZU(1) << SIZE_INV_SHIFT) / (s)) + 1) + static const size_t interval_invs[] = { + SIZE_INV(3), + SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7), + SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11), + SIZE_INV(12), SIZE_INV(13), SIZE_INV(14), SIZE_INV(15), + SIZE_INV(16), SIZE_INV(17), SIZE_INV(18), SIZE_INV(19), + SIZE_INV(20), SIZE_INV(21), SIZE_INV(22), SIZE_INV(23), + SIZE_INV(24), SIZE_INV(25), SIZE_INV(26), SIZE_INV(27), + SIZE_INV(28), SIZE_INV(29), SIZE_INV(30), SIZE_INV(31) + }; + + if (likely(interval <= ((sizeof(interval_invs) / sizeof(size_t)) + + 2))) { + regind = (diff * interval_invs[interval - 3]) >> + SIZE_INV_SHIFT; + } else + regind = diff / interval; +#undef SIZE_INV +#undef SIZE_INV_SHIFT + } + assert(diff == regind * interval); + assert(regind < bin_info->nregs); + + return (regind); +} + +JEMALLOC_INLINE prof_tctx_t * +arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr) +{ + prof_tctx_t *ret; + arena_chunk_t *chunk; + + cassert(config_prof); + assert(ptr != NULL); + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + if (likely(chunk != ptr)) { + size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; + size_t mapbits = arena_mapbits_get(chunk, pageind); + assert((mapbits & CHUNK_MAP_ALLOCATED) != 0); + if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) + ret = (prof_tctx_t *)(uintptr_t)1U; + else { + arena_chunk_map_misc_t *elm = + arena_miscelm_get_mutable(chunk, pageind); + ret = atomic_read_p(&elm->prof_tctx_pun); + } + } else + ret = huge_prof_tctx_get(tsdn, ptr); + + return (ret); +} + +JEMALLOC_INLINE void +arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize, + prof_tctx_t *tctx) +{ + arena_chunk_t *chunk; + + cassert(config_prof); + assert(ptr != NULL); + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + if (likely(chunk != ptr)) { + size_t pageind = ((uintptr_t)ptr 
- (uintptr_t)chunk) >> LG_PAGE; + + assert(arena_mapbits_allocated_get(chunk, pageind) != 0); + + if (unlikely(usize > SMALL_MAXCLASS || (uintptr_t)tctx > + (uintptr_t)1U)) { + arena_chunk_map_misc_t *elm; + + assert(arena_mapbits_large_get(chunk, pageind) != 0); + + elm = arena_miscelm_get_mutable(chunk, pageind); + atomic_write_p(&elm->prof_tctx_pun, tctx); + } else { + /* + * tctx must always be initialized for large runs. + * Assert that the surrounding conditional logic is + * equivalent to checking whether ptr refers to a large + * run. + */ + assert(arena_mapbits_large_get(chunk, pageind) == 0); + } + } else + huge_prof_tctx_set(tsdn, ptr, tctx); +} + +JEMALLOC_INLINE void +arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize, + const void *old_ptr, prof_tctx_t *old_tctx) +{ + + cassert(config_prof); + assert(ptr != NULL); + + if (unlikely(usize > SMALL_MAXCLASS || (ptr == old_ptr && + (uintptr_t)old_tctx > (uintptr_t)1U))) { + arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + if (likely(chunk != ptr)) { + size_t pageind; + arena_chunk_map_misc_t *elm; + + pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> + LG_PAGE; + assert(arena_mapbits_allocated_get(chunk, pageind) != + 0); + assert(arena_mapbits_large_get(chunk, pageind) != 0); + + elm = arena_miscelm_get_mutable(chunk, pageind); + atomic_write_p(&elm->prof_tctx_pun, + (prof_tctx_t *)(uintptr_t)1U); + } else + huge_prof_tctx_reset(tsdn, ptr); + } +} + +JEMALLOC_ALWAYS_INLINE void +arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks) +{ + tsd_t *tsd; + ticker_t *decay_ticker; + + if (unlikely(tsdn_null(tsdn))) + return; + tsd = tsdn_tsd(tsdn); + decay_ticker = decay_ticker_get(tsd, arena->ind); + if (unlikely(decay_ticker == NULL)) + return; + if (unlikely(ticker_ticks(decay_ticker, nticks))) + arena_purge(tsdn, arena, false); +} + +JEMALLOC_ALWAYS_INLINE void +arena_decay_tick(tsdn_t *tsdn, arena_t *arena) +{ + + arena_decay_ticks(tsdn, arena, 1); +} + +JEMALLOC_ALWAYS_INLINE void * +arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero, + tcache_t *tcache, bool slow_path) +{ + + assert(!tsdn_null(tsdn) || tcache == NULL); + assert(size != 0); + + if (likely(tcache != NULL)) { + if (likely(size <= SMALL_MAXCLASS)) { + return (tcache_alloc_small(tsdn_tsd(tsdn), arena, + tcache, size, ind, zero, slow_path)); + } + if (likely(size <= tcache_maxclass)) { + return (tcache_alloc_large(tsdn_tsd(tsdn), arena, + tcache, size, ind, zero, slow_path)); + } + /* (size > tcache_maxclass) case falls through. */ + assert(size > tcache_maxclass); + } + + return (arena_malloc_hard(tsdn, arena, size, ind, zero)); +} + +JEMALLOC_ALWAYS_INLINE arena_t * +arena_aalloc(const void *ptr) +{ + arena_chunk_t *chunk; + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + if (likely(chunk != ptr)) + return (extent_node_arena_get(&chunk->node)); + else + return (huge_aalloc(ptr)); +} + +/* Return the size of the allocation pointed to by ptr. 
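+ * The demote argument matters only in heap-profiling builds, where sampled
+ * small requests are promoted to LARGE_MINCLASS-sized large runs: with
+ * demote true the original small size class is reported, with demote false
+ * the promoted large size is reported. A sketch, assuming ptr is such a
+ * sampled small allocation:
+ *
+ *   arena_salloc(tsdn, ptr, true);   returns the small size class
+ *   arena_salloc(tsdn, ptr, false);  returns LARGE_MINCLASS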
*/ +JEMALLOC_ALWAYS_INLINE size_t +arena_salloc(tsdn_t *tsdn, const void *ptr, bool demote) +{ + size_t ret; + arena_chunk_t *chunk; + size_t pageind; + szind_t binind; + + assert(ptr != NULL); + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + if (likely(chunk != ptr)) { + pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; + assert(arena_mapbits_allocated_get(chunk, pageind) != 0); + binind = arena_mapbits_binind_get(chunk, pageind); + if (unlikely(binind == BININD_INVALID || (config_prof && !demote + && arena_mapbits_large_get(chunk, pageind) != 0))) { + /* + * Large allocation. In the common case (demote), and + * as this is an inline function, most callers will only + * end up looking at binind to determine that ptr is a + * small allocation. + */ + assert(config_cache_oblivious || ((uintptr_t)ptr & + PAGE_MASK) == 0); + ret = arena_mapbits_large_size_get(chunk, pageind) - + large_pad; + assert(ret != 0); + assert(pageind + ((ret+large_pad)>>LG_PAGE) <= + chunk_npages); + assert(arena_mapbits_dirty_get(chunk, pageind) == + arena_mapbits_dirty_get(chunk, + pageind+((ret+large_pad)>>LG_PAGE)-1)); + } else { + /* + * Small allocation (possibly promoted to a large + * object). + */ + assert(arena_mapbits_large_get(chunk, pageind) != 0 || + arena_ptr_small_binind_get(ptr, + arena_mapbits_get(chunk, pageind)) == binind); + ret = index2size(binind); + } + } else + ret = huge_salloc(tsdn, ptr); + + return (ret); +} + +JEMALLOC_ALWAYS_INLINE void +arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path) +{ + arena_chunk_t *chunk; + size_t pageind, mapbits; + + assert(!tsdn_null(tsdn) || tcache == NULL); + assert(ptr != NULL); + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + if (likely(chunk != ptr)) { + pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; + mapbits = arena_mapbits_get(chunk, pageind); + assert(arena_mapbits_allocated_get(chunk, pageind) != 0); + if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) { + /* Small allocation. */ + if (likely(tcache != NULL)) { + szind_t binind = arena_ptr_small_binind_get(ptr, + mapbits); + tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, + binind, slow_path); + } else { + arena_dalloc_small(tsdn, + extent_node_arena_get(&chunk->node), chunk, + ptr, pageind); + } + } else { + size_t size = arena_mapbits_large_size_get(chunk, + pageind); + + assert(config_cache_oblivious || ((uintptr_t)ptr & + PAGE_MASK) == 0); + + if (likely(tcache != NULL) && size - large_pad <= + tcache_maxclass) { + tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, + size - large_pad, slow_path); + } else { + arena_dalloc_large(tsdn, + extent_node_arena_get(&chunk->node), chunk, + ptr); + } + } + } else + huge_dalloc(tsdn, ptr); +} + +JEMALLOC_ALWAYS_INLINE void +arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache, + bool slow_path) +{ + arena_chunk_t *chunk; + + assert(!tsdn_null(tsdn) || tcache == NULL); + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + if (likely(chunk != ptr)) { + if (config_prof && opt_prof) { + size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> + LG_PAGE; + assert(arena_mapbits_allocated_get(chunk, pageind) != + 0); + if (arena_mapbits_large_get(chunk, pageind) != 0) { + /* + * Make sure to use promoted size, not request + * size. + */ + size = arena_mapbits_large_size_get(chunk, + pageind) - large_pad; + } + } + assert(s2u(size) == s2u(arena_salloc(tsdn, ptr, false))); + + if (likely(size <= SMALL_MAXCLASS)) { + /* Small allocation. 
 */
+ if (likely(tcache != NULL)) {
+ szind_t binind = size2index(size);
+ tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
+ binind, slow_path);
+ } else {
+ size_t pageind = ((uintptr_t)ptr -
+ (uintptr_t)chunk) >> LG_PAGE;
+ arena_dalloc_small(tsdn,
+ extent_node_arena_get(&chunk->node), chunk,
+ ptr, pageind);
+ }
+ } else {
+ assert(config_cache_oblivious || ((uintptr_t)ptr &
+ PAGE_MASK) == 0);
+
+ if (likely(tcache != NULL) && size <= tcache_maxclass) {
+ tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
+ size, slow_path);
+ } else {
+ arena_dalloc_large(tsdn,
+ extent_node_arena_get(&chunk->node), chunk,
+ ptr);
+ }
+ }
+ } else
+ huge_dalloc(tsdn, ptr);
+}
+# endif /* JEMALLOC_ARENA_INLINE_B */
+#endif
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
diff --git a/sources/jemalloc/include-win/jemalloc/internal/assert.h b/sources/jemalloc/include-win/jemalloc/internal/assert.h
new file mode 100755
index 00000000..6f8f7eb9
--- /dev/null
+++ b/sources/jemalloc/include-win/jemalloc/internal/assert.h
@@ -0,0 +1,45 @@
+/*
+ * Define a custom assert() in order to reduce the chances of deadlock during
+ * assertion failure.
+ */
+#ifndef assert
+#define assert(e) do { \
+ if (unlikely(config_debug && !(e))) { \
+ malloc_printf( \
+ "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \
+ __FILE__, __LINE__, #e); \
+ abort(); \
+ } \
+} while (0)
+#endif
+
+#ifndef not_reached
+#define not_reached() do { \
+ if (config_debug) { \
+ malloc_printf( \
+ "<jemalloc>: %s:%d: Unreachable code reached\n", \
+ __FILE__, __LINE__); \
+ abort(); \
+ } \
+ unreachable(); \
+} while (0)
+#endif
+
+#ifndef not_implemented
+#define not_implemented() do { \
+ if (config_debug) { \
+ malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \
+ __FILE__, __LINE__); \
+ abort(); \
+ } \
+} while (0)
+#endif
+
+#ifndef assert_not_implemented
+#define assert_not_implemented(e) do { \
+ if (unlikely(config_debug && !(e))) \
+ not_implemented(); \
+} while (0)
+#endif
+
+
diff --git a/sources/jemalloc/include-win/jemalloc/internal/atomic.h b/sources/jemalloc/include-win/jemalloc/internal/atomic.h
new file mode 100755
index 00000000..3f15ea14
--- /dev/null
+++ b/sources/jemalloc/include-win/jemalloc/internal/atomic.h
@@ -0,0 +1,651 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+#define atomic_read_uint64(p) atomic_add_uint64(p, 0)
+#define atomic_read_uint32(p) atomic_add_uint32(p, 0)
+#define atomic_read_p(p) atomic_add_p(p, NULL)
+#define atomic_read_z(p) atomic_add_z(p, 0)
+#define atomic_read_u(p) atomic_add_u(p, 0)
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+/*
+ * All arithmetic functions return the arithmetic result of the atomic
+ * operation. Some atomic operation APIs return the value prior to mutation, in
+ * which case the following functions must redundantly compute the result so
+ * that it can be returned. These functions are normally inlined, so the extra
+ * operations can be optimized away if the return values aren't used by the
+ * callers.
+ *
+ *   <t> atomic_read_<t>(<t> *p) { return (*p); }
+ *   <t> atomic_add_<t>(<t> *p, <t> x) { return (*p += x); }
+ *   <t> atomic_sub_<t>(<t> *p, <t> x) { return (*p -= x); }
+ *   bool atomic_cas_<t>(<t> *p, <t> c, <t> s)
+ *   {
+ *     if (*p != c)
+ *       return (true);
+ *     *p = s;
+ *     return (false);
+ *   }
+ *   void atomic_write_<t>(<t> *p, <t> x) { *p = x; }
+ */
+
+#ifndef JEMALLOC_ENABLE_INLINE
+uint64_t atomic_add_uint64(uint64_t *p, uint64_t x);
+uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x);
+bool atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s);
+void atomic_write_uint64(uint64_t *p, uint64_t x);
+uint32_t atomic_add_uint32(uint32_t *p, uint32_t x);
+uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x);
+bool atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s);
+void atomic_write_uint32(uint32_t *p, uint32_t x);
+void *atomic_add_p(void **p, void *x);
+void *atomic_sub_p(void **p, void *x);
+bool atomic_cas_p(void **p, void *c, void *s);
+void atomic_write_p(void **p, const void *x);
+size_t atomic_add_z(size_t *p, size_t x);
+size_t atomic_sub_z(size_t *p, size_t x);
+bool atomic_cas_z(size_t *p, size_t c, size_t s);
+void atomic_write_z(size_t *p, size_t x);
+unsigned atomic_add_u(unsigned *p, unsigned x);
+unsigned atomic_sub_u(unsigned *p, unsigned x);
+bool atomic_cas_u(unsigned *p, unsigned c, unsigned s);
+void atomic_write_u(unsigned *p, unsigned x);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ATOMIC_C_))
+/******************************************************************************/
+/* 64-bit operations. */
+#if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3)
+# if (defined(__amd64__) || defined(__x86_64__))
+JEMALLOC_INLINE uint64_t
+atomic_add_uint64(uint64_t *p, uint64_t x)
+{
+	uint64_t t = x;
+
+	asm volatile (
+	    "lock; xaddq %0, %1;"
+	    : "+r" (t), "=m" (*p) /* Outputs. */
+	    : "m" (*p) /* Inputs. */
+	    );
+
+	return (t + x);
+}
+
+JEMALLOC_INLINE uint64_t
+atomic_sub_uint64(uint64_t *p, uint64_t x)
+{
+	uint64_t t;
+
+	x = (uint64_t)(-(int64_t)x);
+	t = x;
+	asm volatile (
+	    "lock; xaddq %0, %1;"
+	    : "+r" (t), "=m" (*p) /* Outputs. */
+	    : "m" (*p) /* Inputs. */
+	    );
+
+	return (t + x);
+}
+
+JEMALLOC_INLINE bool
+atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
+{
+	uint8_t success;
+
+	asm volatile (
+	    "lock; cmpxchgq %4, %0;"
+	    "sete %1;"
+	    : "=m" (*p), "=a" (success) /* Outputs. */
+	    : "m" (*p), "a" (c), "r" (s) /* Inputs. */
+	    : "memory" /* Clobbers. */
+	    );
+
+	return (!(bool)success);
+}
+
+JEMALLOC_INLINE void
+atomic_write_uint64(uint64_t *p, uint64_t x)
+{
+
+	asm volatile (
+	    "xchgq %1, %0;" /* Lock is implied by xchgq. */
+	    : "=m" (*p), "+r" (x) /* Outputs. */
+	    : "m" (*p) /* Inputs. */
+	    : "memory" /* Clobbers.
*/ + ); +} +# elif (defined(JEMALLOC_C11ATOMICS)) +JEMALLOC_INLINE uint64_t +atomic_add_uint64(uint64_t *p, uint64_t x) +{ + volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p; + return (atomic_fetch_add(a, x) + x); +} + +JEMALLOC_INLINE uint64_t +atomic_sub_uint64(uint64_t *p, uint64_t x) +{ + volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p; + return (atomic_fetch_sub(a, x) - x); +} + +JEMALLOC_INLINE bool +atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s) +{ + volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p; + return (!atomic_compare_exchange_strong(a, &c, s)); +} + +JEMALLOC_INLINE void +atomic_write_uint64(uint64_t *p, uint64_t x) +{ + volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p; + atomic_store(a, x); +} +# elif (defined(JEMALLOC_ATOMIC9)) +JEMALLOC_INLINE uint64_t +atomic_add_uint64(uint64_t *p, uint64_t x) +{ + + /* + * atomic_fetchadd_64() doesn't exist, but we only ever use this + * function on LP64 systems, so atomic_fetchadd_long() will do. + */ + assert(sizeof(uint64_t) == sizeof(unsigned long)); + + return (atomic_fetchadd_long(p, (unsigned long)x) + x); +} + +JEMALLOC_INLINE uint64_t +atomic_sub_uint64(uint64_t *p, uint64_t x) +{ + + assert(sizeof(uint64_t) == sizeof(unsigned long)); + + return (atomic_fetchadd_long(p, (unsigned long)(-(long)x)) - x); +} + +JEMALLOC_INLINE bool +atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s) +{ + + assert(sizeof(uint64_t) == sizeof(unsigned long)); + + return (!atomic_cmpset_long(p, (unsigned long)c, (unsigned long)s)); +} + +JEMALLOC_INLINE void +atomic_write_uint64(uint64_t *p, uint64_t x) +{ + + assert(sizeof(uint64_t) == sizeof(unsigned long)); + + atomic_store_rel_long(p, x); +} +# elif (defined(JEMALLOC_OSATOMIC)) +JEMALLOC_INLINE uint64_t +atomic_add_uint64(uint64_t *p, uint64_t x) +{ + + return (OSAtomicAdd64((int64_t)x, (int64_t *)p)); +} + +JEMALLOC_INLINE uint64_t +atomic_sub_uint64(uint64_t *p, uint64_t x) +{ + + return (OSAtomicAdd64(-((int64_t)x), (int64_t *)p)); +} + +JEMALLOC_INLINE bool +atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s) +{ + + return (!OSAtomicCompareAndSwap64(c, s, (int64_t *)p)); +} + +JEMALLOC_INLINE void +atomic_write_uint64(uint64_t *p, uint64_t x) +{ + uint64_t o; + + /*The documented OSAtomic*() API does not expose an atomic exchange. 
*/ + do { + o = atomic_read_uint64(p); + } while (atomic_cas_uint64(p, o, x)); +} +# elif (defined(_MSC_VER)) +JEMALLOC_INLINE uint64_t +atomic_add_uint64(uint64_t *p, uint64_t x) +{ + + return (InterlockedExchangeAdd64(p, x) + x); +} + +JEMALLOC_INLINE uint64_t +atomic_sub_uint64(uint64_t *p, uint64_t x) +{ + + return (InterlockedExchangeAdd64(p, -((int64_t)x)) - x); +} + +JEMALLOC_INLINE bool +atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s) +{ + uint64_t o; + + o = InterlockedCompareExchange64(p, s, c); + return (o != c); +} + +JEMALLOC_INLINE void +atomic_write_uint64(uint64_t *p, uint64_t x) +{ + + InterlockedExchange64(p, x); +} +# elif (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) || \ + defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8)) +JEMALLOC_INLINE uint64_t +atomic_add_uint64(uint64_t *p, uint64_t x) +{ + + return (__sync_add_and_fetch(p, x)); +} + +JEMALLOC_INLINE uint64_t +atomic_sub_uint64(uint64_t *p, uint64_t x) +{ + + return (__sync_sub_and_fetch(p, x)); +} + +JEMALLOC_INLINE bool +atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s) +{ + + return (!__sync_bool_compare_and_swap(p, c, s)); +} + +JEMALLOC_INLINE void +atomic_write_uint64(uint64_t *p, uint64_t x) +{ + + __sync_lock_test_and_set(p, x); +} +# else +# error "Missing implementation for 64-bit atomic operations" +# endif +#endif + +/******************************************************************************/ +/* 32-bit operations. */ +#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__)) +JEMALLOC_INLINE uint32_t +atomic_add_uint32(uint32_t *p, uint32_t x) +{ + uint32_t t = x; + + asm volatile ( + "lock; xaddl %0, %1;" + : "+r" (t), "=m" (*p) /* Outputs. */ + : "m" (*p) /* Inputs. */ + ); + + return (t + x); +} + +JEMALLOC_INLINE uint32_t +atomic_sub_uint32(uint32_t *p, uint32_t x) +{ + uint32_t t; + + x = (uint32_t)(-(int32_t)x); + t = x; + asm volatile ( + "lock; xaddl %0, %1;" + : "+r" (t), "=m" (*p) /* Outputs. */ + : "m" (*p) /* Inputs. */ + ); + + return (t + x); +} + +JEMALLOC_INLINE bool +atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s) +{ + uint8_t success; + + asm volatile ( + "lock; cmpxchgl %4, %0;" + "sete %1;" + : "=m" (*p), "=a" (success) /* Outputs. */ + : "m" (*p), "a" (c), "r" (s) /* Inputs. */ + : "memory" + ); + + return (!(bool)success); +} + +JEMALLOC_INLINE void +atomic_write_uint32(uint32_t *p, uint32_t x) +{ + + asm volatile ( + "xchgl %1, %0;" /* Lock is implied by xchgl. */ + : "=m" (*p), "+r" (x) /* Outputs. */ + : "m" (*p) /* Inputs. */ + : "memory" /* Clobbers. 
*/ + ); +} +# elif (defined(JEMALLOC_C11ATOMICS)) +JEMALLOC_INLINE uint32_t +atomic_add_uint32(uint32_t *p, uint32_t x) +{ + volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p; + return (atomic_fetch_add(a, x) + x); +} + +JEMALLOC_INLINE uint32_t +atomic_sub_uint32(uint32_t *p, uint32_t x) +{ + volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p; + return (atomic_fetch_sub(a, x) - x); +} + +JEMALLOC_INLINE bool +atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s) +{ + volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p; + return (!atomic_compare_exchange_strong(a, &c, s)); +} + +JEMALLOC_INLINE void +atomic_write_uint32(uint32_t *p, uint32_t x) +{ + volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p; + atomic_store(a, x); +} +#elif (defined(JEMALLOC_ATOMIC9)) +JEMALLOC_INLINE uint32_t +atomic_add_uint32(uint32_t *p, uint32_t x) +{ + + return (atomic_fetchadd_32(p, x) + x); +} + +JEMALLOC_INLINE uint32_t +atomic_sub_uint32(uint32_t *p, uint32_t x) +{ + + return (atomic_fetchadd_32(p, (uint32_t)(-(int32_t)x)) - x); +} + +JEMALLOC_INLINE bool +atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s) +{ + + return (!atomic_cmpset_32(p, c, s)); +} + +JEMALLOC_INLINE void +atomic_write_uint32(uint32_t *p, uint32_t x) +{ + + atomic_store_rel_32(p, x); +} +#elif (defined(JEMALLOC_OSATOMIC)) +JEMALLOC_INLINE uint32_t +atomic_add_uint32(uint32_t *p, uint32_t x) +{ + + return (OSAtomicAdd32((int32_t)x, (int32_t *)p)); +} + +JEMALLOC_INLINE uint32_t +atomic_sub_uint32(uint32_t *p, uint32_t x) +{ + + return (OSAtomicAdd32(-((int32_t)x), (int32_t *)p)); +} + +JEMALLOC_INLINE bool +atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s) +{ + + return (!OSAtomicCompareAndSwap32(c, s, (int32_t *)p)); +} + +JEMALLOC_INLINE void +atomic_write_uint32(uint32_t *p, uint32_t x) +{ + uint32_t o; + + /*The documented OSAtomic*() API does not expose an atomic exchange. */ + do { + o = atomic_read_uint32(p); + } while (atomic_cas_uint32(p, o, x)); +} +#elif (defined(_MSC_VER)) +JEMALLOC_INLINE uint32_t +atomic_add_uint32(uint32_t *p, uint32_t x) +{ + + return (InterlockedExchangeAdd(p, x) + x); +} + +JEMALLOC_INLINE uint32_t +atomic_sub_uint32(uint32_t *p, uint32_t x) +{ + + return (InterlockedExchangeAdd(p, -((int32_t)x)) - x); +} + +JEMALLOC_INLINE bool +atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s) +{ + uint32_t o; + + o = InterlockedCompareExchange(p, s, c); + return (o != c); +} + +JEMALLOC_INLINE void +atomic_write_uint32(uint32_t *p, uint32_t x) +{ + + InterlockedExchange(p, x); +} +#elif (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) || \ + defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4)) +JEMALLOC_INLINE uint32_t +atomic_add_uint32(uint32_t *p, uint32_t x) +{ + + return (__sync_add_and_fetch(p, x)); +} + +JEMALLOC_INLINE uint32_t +atomic_sub_uint32(uint32_t *p, uint32_t x) +{ + + return (__sync_sub_and_fetch(p, x)); +} + +JEMALLOC_INLINE bool +atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s) +{ + + return (!__sync_bool_compare_and_swap(p, c, s)); +} + +JEMALLOC_INLINE void +atomic_write_uint32(uint32_t *p, uint32_t x) +{ + + __sync_lock_test_and_set(p, x); +} +#else +# error "Missing implementation for 32-bit atomic operations" +#endif + +/******************************************************************************/ +/* Pointer operations. 
*/ +JEMALLOC_INLINE void * +atomic_add_p(void **p, void *x) +{ + +#if (LG_SIZEOF_PTR == 3) + return ((void *)atomic_add_uint64((uint64_t *)p, (uint64_t)x)); +#elif (LG_SIZEOF_PTR == 2) + return ((void *)atomic_add_uint32((uint32_t *)p, (uint32_t)x)); +#endif +} + +JEMALLOC_INLINE void * +atomic_sub_p(void **p, void *x) +{ + +#if (LG_SIZEOF_PTR == 3) + return ((void *)atomic_add_uint64((uint64_t *)p, + (uint64_t)-((int64_t)x))); +#elif (LG_SIZEOF_PTR == 2) + return ((void *)atomic_add_uint32((uint32_t *)p, + (uint32_t)-((int32_t)x))); +#endif +} + +JEMALLOC_INLINE bool +atomic_cas_p(void **p, void *c, void *s) +{ + +#if (LG_SIZEOF_PTR == 3) + return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s)); +#elif (LG_SIZEOF_PTR == 2) + return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s)); +#endif +} + +JEMALLOC_INLINE void +atomic_write_p(void **p, const void *x) +{ + +#if (LG_SIZEOF_PTR == 3) + atomic_write_uint64((uint64_t *)p, (uint64_t)x); +#elif (LG_SIZEOF_PTR == 2) + atomic_write_uint32((uint32_t *)p, (uint32_t)x); +#endif +} + +/******************************************************************************/ +/* size_t operations. */ +JEMALLOC_INLINE size_t +atomic_add_z(size_t *p, size_t x) +{ + +#if (LG_SIZEOF_PTR == 3) + return ((size_t)atomic_add_uint64((uint64_t *)p, (uint64_t)x)); +#elif (LG_SIZEOF_PTR == 2) + return ((size_t)atomic_add_uint32((uint32_t *)p, (uint32_t)x)); +#endif +} + +JEMALLOC_INLINE size_t +atomic_sub_z(size_t *p, size_t x) +{ + +#if (LG_SIZEOF_PTR == 3) + return ((size_t)atomic_add_uint64((uint64_t *)p, + (uint64_t)-((int64_t)x))); +#elif (LG_SIZEOF_PTR == 2) + return ((size_t)atomic_add_uint32((uint32_t *)p, + (uint32_t)-((int32_t)x))); +#endif +} + +JEMALLOC_INLINE bool +atomic_cas_z(size_t *p, size_t c, size_t s) +{ + +#if (LG_SIZEOF_PTR == 3) + return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s)); +#elif (LG_SIZEOF_PTR == 2) + return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s)); +#endif +} + +JEMALLOC_INLINE void +atomic_write_z(size_t *p, size_t x) +{ + +#if (LG_SIZEOF_PTR == 3) + atomic_write_uint64((uint64_t *)p, (uint64_t)x); +#elif (LG_SIZEOF_PTR == 2) + atomic_write_uint32((uint32_t *)p, (uint32_t)x); +#endif +} + +/******************************************************************************/ +/* unsigned operations. 
*/ +JEMALLOC_INLINE unsigned +atomic_add_u(unsigned *p, unsigned x) +{ + +#if (LG_SIZEOF_INT == 3) + return ((unsigned)atomic_add_uint64((uint64_t *)p, (uint64_t)x)); +#elif (LG_SIZEOF_INT == 2) + return ((unsigned)atomic_add_uint32((uint32_t *)p, (uint32_t)x)); +#endif +} + +JEMALLOC_INLINE unsigned +atomic_sub_u(unsigned *p, unsigned x) +{ + +#if (LG_SIZEOF_INT == 3) + return ((unsigned)atomic_add_uint64((uint64_t *)p, + (uint64_t)-((int64_t)x))); +#elif (LG_SIZEOF_INT == 2) + return ((unsigned)atomic_add_uint32((uint32_t *)p, + (uint32_t)-((int32_t)x))); +#endif +} + +JEMALLOC_INLINE bool +atomic_cas_u(unsigned *p, unsigned c, unsigned s) +{ + +#if (LG_SIZEOF_INT == 3) + return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s)); +#elif (LG_SIZEOF_INT == 2) + return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s)); +#endif +} + +JEMALLOC_INLINE void +atomic_write_u(unsigned *p, unsigned x) +{ + +#if (LG_SIZEOF_INT == 3) + atomic_write_uint64((uint64_t *)p, (uint64_t)x); +#elif (LG_SIZEOF_INT == 2) + atomic_write_uint32((uint32_t *)p, (uint32_t)x); +#endif +} + +/******************************************************************************/ +#endif + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ diff --git a/sources/jemalloc/include-win/jemalloc/internal/base.h b/sources/jemalloc/include-win/jemalloc/internal/base.h new file mode 100755 index 00000000..d6b81e16 --- /dev/null +++ b/sources/jemalloc/include-win/jemalloc/internal/base.h @@ -0,0 +1,25 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +void *base_alloc(tsdn_t *tsdn, size_t size); +void base_stats_get(tsdn_t *tsdn, size_t *allocated, size_t *resident, + size_t *mapped); +bool base_boot(void); +void base_prefork(tsdn_t *tsdn); +void base_postfork_parent(tsdn_t *tsdn); +void base_postfork_child(tsdn_t *tsdn); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ diff --git a/sources/jemalloc/include-win/jemalloc/internal/bitmap.h b/sources/jemalloc/include-win/jemalloc/internal/bitmap.h new file mode 100755 index 00000000..36f38b59 --- /dev/null +++ b/sources/jemalloc/include-win/jemalloc/internal/bitmap.h @@ -0,0 +1,274 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +/* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */ +#define LG_BITMAP_MAXBITS LG_RUN_MAXREGS +#define BITMAP_MAXBITS (ZU(1) << LG_BITMAP_MAXBITS) + +typedef struct bitmap_level_s bitmap_level_t; +typedef struct bitmap_info_s bitmap_info_t; +typedef unsigned long bitmap_t; +#define LG_SIZEOF_BITMAP LG_SIZEOF_LONG + +/* Number of bits per group. */ +#define LG_BITMAP_GROUP_NBITS (LG_SIZEOF_BITMAP + 3) +#define BITMAP_GROUP_NBITS (ZU(1) << LG_BITMAP_GROUP_NBITS) +#define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1) + +/* + * Do some analysis on how big the bitmap is before we use a tree. 
For a brute + * force linear search, if we would have to call ffs_lu() more than 2^3 times, + * use a tree instead. + */ +#if LG_BITMAP_MAXBITS - LG_BITMAP_GROUP_NBITS > 3 +# define USE_TREE +#endif + +/* Number of groups required to store a given number of bits. */ +#define BITMAP_BITS2GROUPS(nbits) \ + ((nbits + BITMAP_GROUP_NBITS_MASK) >> LG_BITMAP_GROUP_NBITS) + +/* + * Number of groups required at a particular level for a given number of bits. + */ +#define BITMAP_GROUPS_L0(nbits) \ + BITMAP_BITS2GROUPS(nbits) +#define BITMAP_GROUPS_L1(nbits) \ + BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(nbits)) +#define BITMAP_GROUPS_L2(nbits) \ + BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits)))) +#define BITMAP_GROUPS_L3(nbits) \ + BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \ + BITMAP_BITS2GROUPS((nbits))))) + +/* + * Assuming the number of levels, number of groups required for a given number + * of bits. + */ +#define BITMAP_GROUPS_1_LEVEL(nbits) \ + BITMAP_GROUPS_L0(nbits) +#define BITMAP_GROUPS_2_LEVEL(nbits) \ + (BITMAP_GROUPS_1_LEVEL(nbits) + BITMAP_GROUPS_L1(nbits)) +#define BITMAP_GROUPS_3_LEVEL(nbits) \ + (BITMAP_GROUPS_2_LEVEL(nbits) + BITMAP_GROUPS_L2(nbits)) +#define BITMAP_GROUPS_4_LEVEL(nbits) \ + (BITMAP_GROUPS_3_LEVEL(nbits) + BITMAP_GROUPS_L3(nbits)) + +/* + * Maximum number of groups required to support LG_BITMAP_MAXBITS. + */ +#ifdef USE_TREE + +#if LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS +# define BITMAP_GROUPS_MAX BITMAP_GROUPS_1_LEVEL(BITMAP_MAXBITS) +#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 2 +# define BITMAP_GROUPS_MAX BITMAP_GROUPS_2_LEVEL(BITMAP_MAXBITS) +#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 3 +# define BITMAP_GROUPS_MAX BITMAP_GROUPS_3_LEVEL(BITMAP_MAXBITS) +#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 4 +# define BITMAP_GROUPS_MAX BITMAP_GROUPS_4_LEVEL(BITMAP_MAXBITS) +#else +# error "Unsupported bitmap size" +#endif + +/* Maximum number of levels possible. */ +#define BITMAP_MAX_LEVELS \ + (LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \ + + !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP) + +#else /* USE_TREE */ + +#define BITMAP_GROUPS_MAX BITMAP_BITS2GROUPS(BITMAP_MAXBITS) + +#endif /* USE_TREE */ + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +struct bitmap_level_s { + /* Offset of this level's groups within the array of groups. */ + size_t group_offset; +}; + +struct bitmap_info_s { + /* Logical number of bits in bitmap (stored at bottom level). */ + size_t nbits; + +#ifdef USE_TREE + /* Number of levels necessary for nbits. */ + unsigned nlevels; + + /* + * Only the first (nlevels+1) elements are used, and levels are ordered + * bottom to top (e.g. the bottom level is stored in levels[0]). + */ + bitmap_level_t levels[BITMAP_MAX_LEVELS+1]; +#else /* USE_TREE */ + /* Number of groups necessary for nbits. 
*/ + size_t ngroups; +#endif /* USE_TREE */ +}; + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +void bitmap_info_init(bitmap_info_t *binfo, size_t nbits); +void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo); +size_t bitmap_size(const bitmap_info_t *binfo); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +bool bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo); +bool bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit); +void bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit); +size_t bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo); +void bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_BITMAP_C_)) +JEMALLOC_INLINE bool +bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo) +{ +#ifdef USE_TREE + size_t rgoff = binfo->levels[binfo->nlevels].group_offset - 1; + bitmap_t rg = bitmap[rgoff]; + /* The bitmap is full iff the root group is 0. */ + return (rg == 0); +#else + size_t i; + + for (i = 0; i < binfo->ngroups; i++) { + if (bitmap[i] != 0) + return (false); + } + return (true); +#endif +} + +JEMALLOC_INLINE bool +bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) +{ + size_t goff; + bitmap_t g; + + assert(bit < binfo->nbits); + goff = bit >> LG_BITMAP_GROUP_NBITS; + g = bitmap[goff]; + return (!(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)))); +} + +JEMALLOC_INLINE void +bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) +{ + size_t goff; + bitmap_t *gp; + bitmap_t g; + + assert(bit < binfo->nbits); + assert(!bitmap_get(bitmap, binfo, bit)); + goff = bit >> LG_BITMAP_GROUP_NBITS; + gp = &bitmap[goff]; + g = *gp; + assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))); + g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK); + *gp = g; + assert(bitmap_get(bitmap, binfo, bit)); +#ifdef USE_TREE + /* Propagate group state transitions up the tree. */ + if (g == 0) { + unsigned i; + for (i = 1; i < binfo->nlevels; i++) { + bit = goff; + goff = bit >> LG_BITMAP_GROUP_NBITS; + gp = &bitmap[binfo->levels[i].group_offset + goff]; + g = *gp; + assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))); + g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK); + *gp = g; + if (g != 0) + break; + } + } +#endif +} + +/* sfu: set first unset. 
*/ +JEMALLOC_INLINE size_t +bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) +{ + size_t bit; + bitmap_t g; + unsigned i; + + assert(!bitmap_full(bitmap, binfo)); + +#ifdef USE_TREE + i = binfo->nlevels - 1; + g = bitmap[binfo->levels[i].group_offset]; + bit = ffs_lu(g) - 1; + while (i > 0) { + i--; + g = bitmap[binfo->levels[i].group_offset + bit]; + bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffs_lu(g) - 1); + } +#else + i = 0; + g = bitmap[0]; + while ((bit = ffs_lu(g)) == 0) { + i++; + g = bitmap[i]; + } + bit = (i << LG_BITMAP_GROUP_NBITS) + (bit - 1); +#endif + bitmap_set(bitmap, binfo, bit); + return (bit); +} + +JEMALLOC_INLINE void +bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) +{ + size_t goff; + bitmap_t *gp; + bitmap_t g; + UNUSED bool propagate; + + assert(bit < binfo->nbits); + assert(bitmap_get(bitmap, binfo, bit)); + goff = bit >> LG_BITMAP_GROUP_NBITS; + gp = &bitmap[goff]; + g = *gp; + propagate = (g == 0); + assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))) == 0); + g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK); + *gp = g; + assert(!bitmap_get(bitmap, binfo, bit)); +#ifdef USE_TREE + /* Propagate group state transitions up the tree. */ + if (propagate) { + unsigned i; + for (i = 1; i < binfo->nlevels; i++) { + bit = goff; + goff = bit >> LG_BITMAP_GROUP_NBITS; + gp = &bitmap[binfo->levels[i].group_offset + goff]; + g = *gp; + propagate = (g == 0); + assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))) + == 0); + g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK); + *gp = g; + if (!propagate) + break; + } + } +#endif /* USE_TREE */ +} + +#endif + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ diff --git a/sources/jemalloc/include-win/jemalloc/internal/chunk.h b/sources/jemalloc/include-win/jemalloc/internal/chunk.h new file mode 100755 index 00000000..55df9acc --- /dev/null +++ b/sources/jemalloc/include-win/jemalloc/internal/chunk.h @@ -0,0 +1,97 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +/* + * Size and alignment of memory chunks that are allocated by the OS's virtual + * memory system. + */ +#define LG_CHUNK_DEFAULT 21 + +/* Return the chunk address for allocation address a. */ +#define CHUNK_ADDR2BASE(a) \ + ((void *)((uintptr_t)(a) & ~chunksize_mask)) + +/* Return the chunk offset of address a. */ +#define CHUNK_ADDR2OFFSET(a) \ + ((size_t)((uintptr_t)(a) & chunksize_mask)) + +/* Return the smallest chunk multiple that is >= s. */ +#define CHUNK_CEILING(s) \ + (((s) + chunksize_mask) & ~chunksize_mask) + +#define CHUNK_HOOKS_INITIALIZER { \ + NULL, \ + NULL, \ + NULL, \ + NULL, \ + NULL, \ + NULL, \ + NULL \ +} + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +extern size_t opt_lg_chunk; +extern const char *opt_dss; + +extern rtree_t chunks_rtree; + +extern size_t chunksize; +extern size_t chunksize_mask; /* (chunksize - 1). 
*/ +extern size_t chunk_npages; + +extern const chunk_hooks_t chunk_hooks_default; + +chunk_hooks_t chunk_hooks_get(tsdn_t *tsdn, arena_t *arena); +chunk_hooks_t chunk_hooks_set(tsdn_t *tsdn, arena_t *arena, + const chunk_hooks_t *chunk_hooks); + +bool chunk_register(const void *chunk, const extent_node_t *node, + bool *gdump); +void chunk_deregister(const void *chunk, const extent_node_t *node); +void *chunk_alloc_base(size_t size); +void *chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, + chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment, + size_t *sn, bool *zero, bool *commit, bool dalloc_node); +void *chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, + chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment, + size_t *sn, bool *zero, bool *commit); +void chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, + chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t sn, + bool committed); +void chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, + chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t sn, + bool zeroed, bool committed); +bool chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena, + chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t offset, + size_t length); +bool chunk_boot(void); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +extent_node_t *chunk_lookup(const void *chunk, bool dependent); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_CHUNK_C_)) +JEMALLOC_INLINE extent_node_t * +chunk_lookup(const void *ptr, bool dependent) +{ + + return (rtree_get(&chunks_rtree, (uintptr_t)ptr, dependent)); +} +#endif + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ + +#include "jemalloc/internal/chunk_dss.h" +#include "jemalloc/internal/chunk_mmap.h" diff --git a/sources/jemalloc/include-win/jemalloc/internal/chunk_dss.h b/sources/jemalloc/include-win/jemalloc/internal/chunk_dss.h new file mode 100755 index 00000000..da8511ba --- /dev/null +++ b/sources/jemalloc/include-win/jemalloc/internal/chunk_dss.h @@ -0,0 +1,37 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +typedef enum { + dss_prec_disabled = 0, + dss_prec_primary = 1, + dss_prec_secondary = 2, + + dss_prec_limit = 3 +} dss_prec_t; +#define DSS_PREC_DEFAULT dss_prec_secondary +#define DSS_DEFAULT "secondary" + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +extern const char *dss_prec_names[]; + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +dss_prec_t chunk_dss_prec_get(void); +bool chunk_dss_prec_set(dss_prec_t dss_prec); +void *chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, + size_t size, size_t alignment, bool *zero, bool *commit); +bool chunk_in_dss(void *chunk); +bool chunk_dss_mergeable(void *chunk_a, void *chunk_b); +void chunk_dss_boot(void); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ diff --git a/sources/jemalloc/include-win/jemalloc/internal/chunk_mmap.h 
b/sources/jemalloc/include-win/jemalloc/internal/chunk_mmap.h new file mode 100755 index 00000000..6f2d0ac2 --- /dev/null +++ b/sources/jemalloc/include-win/jemalloc/internal/chunk_mmap.h @@ -0,0 +1,21 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +void *chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment, + bool *zero, bool *commit); +bool chunk_dalloc_mmap(void *chunk, size_t size); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ diff --git a/sources/jemalloc/include-win/jemalloc/internal/ckh.h b/sources/jemalloc/include-win/jemalloc/internal/ckh.h new file mode 100755 index 00000000..f75ad90b --- /dev/null +++ b/sources/jemalloc/include-win/jemalloc/internal/ckh.h @@ -0,0 +1,86 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +typedef struct ckh_s ckh_t; +typedef struct ckhc_s ckhc_t; + +/* Typedefs to allow easy function pointer passing. */ +typedef void ckh_hash_t (const void *, size_t[2]); +typedef bool ckh_keycomp_t (const void *, const void *); + +/* Maintain counters used to get an idea of performance. */ +/* #define CKH_COUNT */ +/* Print counter values in ckh_delete() (requires CKH_COUNT). */ +/* #define CKH_VERBOSE */ + +/* + * There are 2^LG_CKH_BUCKET_CELLS cells in each hash table bucket. Try to fit + * one bucket per L1 cache line. + */ +#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1) + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +/* Hash table cell. */ +struct ckhc_s { + const void *key; + const void *data; +}; + +struct ckh_s { +#ifdef CKH_COUNT + /* Counters used to get an idea of performance. */ + uint64_t ngrows; + uint64_t nshrinks; + uint64_t nshrinkfails; + uint64_t ninserts; + uint64_t nrelocs; +#endif + + /* Used for pseudo-random number generation. */ + uint64_t prng_state; + + /* Total number of items. */ + size_t count; + + /* + * Minimum and current number of hash table buckets. There are + * 2^LG_CKH_BUCKET_CELLS cells per bucket. + */ + unsigned lg_minbuckets; + unsigned lg_curbuckets; + + /* Hash and comparison functions. */ + ckh_hash_t *hash; + ckh_keycomp_t *keycomp; + + /* Hash table with 2^lg_curbuckets buckets. 
*/ + ckhc_t *tab; +}; + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +bool ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash, + ckh_keycomp_t *keycomp); +void ckh_delete(tsd_t *tsd, ckh_t *ckh); +size_t ckh_count(ckh_t *ckh); +bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data); +bool ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data); +bool ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key, + void **data); +bool ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data); +void ckh_string_hash(const void *key, size_t r_hash[2]); +bool ckh_string_keycomp(const void *k1, const void *k2); +void ckh_pointer_hash(const void *key, size_t r_hash[2]); +bool ckh_pointer_keycomp(const void *k1, const void *k2); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ diff --git a/sources/jemalloc/include-win/jemalloc/internal/ctl.h b/sources/jemalloc/include-win/jemalloc/internal/ctl.h new file mode 100755 index 00000000..af0f6d7c --- /dev/null +++ b/sources/jemalloc/include-win/jemalloc/internal/ctl.h @@ -0,0 +1,118 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +typedef struct ctl_node_s ctl_node_t; +typedef struct ctl_named_node_s ctl_named_node_t; +typedef struct ctl_indexed_node_s ctl_indexed_node_t; +typedef struct ctl_arena_stats_s ctl_arena_stats_t; +typedef struct ctl_stats_s ctl_stats_t; + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +struct ctl_node_s { + bool named; +}; + +struct ctl_named_node_s { + struct ctl_node_s node; + const char *name; + /* If (nchildren == 0), this is a terminal node. */ + unsigned nchildren; + const ctl_node_t *children; + int (*ctl)(tsd_t *, const size_t *, size_t, void *, + size_t *, void *, size_t); +}; + +struct ctl_indexed_node_s { + struct ctl_node_s node; + const ctl_named_node_t *(*index)(tsdn_t *, const size_t *, size_t, + size_t); +}; + +struct ctl_arena_stats_s { + bool initialized; + unsigned nthreads; + const char *dss; + ssize_t lg_dirty_mult; + ssize_t decay_time; + size_t pactive; + size_t pdirty; + + /* The remainder are only populated if config_stats is true. */ + + arena_stats_t astats; + + /* Aggregate stats for small size classes, based on bin stats. */ + size_t allocated_small; + uint64_t nmalloc_small; + uint64_t ndalloc_small; + uint64_t nrequests_small; + + malloc_bin_stats_t bstats[NBINS]; + malloc_large_stats_t *lstats; /* nlclasses elements. */ + malloc_huge_stats_t *hstats; /* nhclasses elements. */ +}; + +struct ctl_stats_s { + size_t allocated; + size_t active; + size_t metadata; + size_t resident; + size_t mapped; + size_t retained; + unsigned narenas; + ctl_arena_stats_t *arenas; /* (narenas + 1) elements. 
*/
+};
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
+    void *newp, size_t newlen);
+int ctl_nametomib(tsdn_t *tsdn, const char *name, size_t *mibp,
+    size_t *miblenp);
+
+int ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+    size_t *oldlenp, void *newp, size_t newlen);
+bool ctl_boot(void);
+void ctl_prefork(tsdn_t *tsdn);
+void ctl_postfork_parent(tsdn_t *tsdn);
+void ctl_postfork_child(tsdn_t *tsdn);
+
+#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \
+	if (je_mallctl(name, oldp, oldlenp, newp, newlen) \
+	    != 0) { \
+		malloc_printf( \
+		    "<jemalloc>: Failure in xmallctl(\"%s\", ...)\n", \
+		    name); \
+		abort(); \
+	} \
+} while (0)
+
+#define xmallctlnametomib(name, mibp, miblenp) do { \
+	if (je_mallctlnametomib(name, mibp, miblenp) != 0) { \
+		malloc_printf("<jemalloc>: Failure in " \
+		    "xmallctlnametomib(\"%s\", ...)\n", name); \
+		abort(); \
+	} \
+} while (0)
+
+#define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \
+	if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp, \
+	    newlen) != 0) { \
+		malloc_write( \
+		    "<jemalloc>: Failure in xmallctlbymib()\n"); \
+		abort(); \
+	} \
+} while (0)
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
+
diff --git a/sources/jemalloc/include-win/jemalloc/internal/extent.h b/sources/jemalloc/include-win/jemalloc/internal/extent.h
new file mode 100755
index 00000000..fc77f9f5
--- /dev/null
+++ b/sources/jemalloc/include-win/jemalloc/internal/extent.h
@@ -0,0 +1,275 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+typedef struct extent_node_s extent_node_t;
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+/* Tree of extents. Use accessor functions for en_* fields. */
+struct extent_node_s {
+	/* Arena from which this extent came, if any. */
+	arena_t *en_arena;
+
+	/* Pointer to the extent that this tree node is responsible for. */
+	void *en_addr;
+
+	/* Total region size. */
+	size_t en_size;
+
+	/*
+	 * Serial number (potentially non-unique).
+	 *
+	 * In principle serial numbers can wrap around on 32-bit systems if
+	 * JEMALLOC_MUNMAP is defined, but as long as comparison functions fall
+	 * back on address comparison for equal serial numbers, stable (if
+	 * imperfect) ordering is maintained.
+	 *
+	 * Serial numbers may not be unique even in the absence of wrap-around,
+	 * e.g. when splitting an extent and assigning the same serial number to
+	 * both resulting adjacent extents.
+	 */
+	size_t en_sn;
+
+	/*
+	 * The zeroed flag is used by chunk recycling code to track whether
+	 * memory is zero-filled.
+	 */
+	bool en_zeroed;
+
+	/*
+	 * True if physical memory is committed to the extent, whether
+	 * explicitly or implicitly as on a system that overcommits and
+	 * satisfies physical memory needs on demand via soft page faults.
+	 */
+	bool en_committed;
+
+	/*
+	 * The achunk flag is used to validate that huge allocation lookups
+	 * don't return arena chunks.
+	 */
+	bool en_achunk;
+
+	/* Profile counters, used for huge objects.
*/ + prof_tctx_t *en_prof_tctx; + + /* Linkage for arena's runs_dirty and chunks_cache rings. */ + arena_runs_dirty_link_t rd; + qr(extent_node_t) cc_link; + + union { + /* Linkage for the size/sn/address-ordered tree. */ + rb_node(extent_node_t) szsnad_link; + + /* Linkage for arena's achunks, huge, and node_cache lists. */ + ql_elm(extent_node_t) ql_link; + }; + + /* Linkage for the address-ordered tree. */ + rb_node(extent_node_t) ad_link; +}; +typedef rb_tree(extent_node_t) extent_tree_t; + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +#ifdef JEMALLOC_JET +size_t extent_size_quantize_floor(size_t size); +#endif +size_t extent_size_quantize_ceil(size_t size); + +rb_proto(, extent_tree_szsnad_, extent_tree_t, extent_node_t) + +rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t) + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +arena_t *extent_node_arena_get(const extent_node_t *node); +void *extent_node_addr_get(const extent_node_t *node); +size_t extent_node_size_get(const extent_node_t *node); +size_t extent_node_sn_get(const extent_node_t *node); +bool extent_node_zeroed_get(const extent_node_t *node); +bool extent_node_committed_get(const extent_node_t *node); +bool extent_node_achunk_get(const extent_node_t *node); +prof_tctx_t *extent_node_prof_tctx_get(const extent_node_t *node); +void extent_node_arena_set(extent_node_t *node, arena_t *arena); +void extent_node_addr_set(extent_node_t *node, void *addr); +void extent_node_size_set(extent_node_t *node, size_t size); +void extent_node_sn_set(extent_node_t *node, size_t sn); +void extent_node_zeroed_set(extent_node_t *node, bool zeroed); +void extent_node_committed_set(extent_node_t *node, bool committed); +void extent_node_achunk_set(extent_node_t *node, bool achunk); +void extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx); +void extent_node_init(extent_node_t *node, arena_t *arena, void *addr, + size_t size, size_t sn, bool zeroed, bool committed); +void extent_node_dirty_linkage_init(extent_node_t *node); +void extent_node_dirty_insert(extent_node_t *node, + arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty); +void extent_node_dirty_remove(extent_node_t *node); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_EXTENT_C_)) +JEMALLOC_INLINE arena_t * +extent_node_arena_get(const extent_node_t *node) +{ + + return (node->en_arena); +} + +JEMALLOC_INLINE void * +extent_node_addr_get(const extent_node_t *node) +{ + + return (node->en_addr); +} + +JEMALLOC_INLINE size_t +extent_node_size_get(const extent_node_t *node) +{ + + return (node->en_size); +} + +JEMALLOC_INLINE size_t +extent_node_sn_get(const extent_node_t *node) +{ + + return (node->en_sn); +} + +JEMALLOC_INLINE bool +extent_node_zeroed_get(const extent_node_t *node) +{ + + return (node->en_zeroed); +} + +JEMALLOC_INLINE bool +extent_node_committed_get(const extent_node_t *node) +{ + + assert(!node->en_achunk); + return (node->en_committed); +} + +JEMALLOC_INLINE bool +extent_node_achunk_get(const extent_node_t *node) +{ + + return (node->en_achunk); +} + +JEMALLOC_INLINE prof_tctx_t * +extent_node_prof_tctx_get(const extent_node_t *node) +{ + + return (node->en_prof_tctx); +} + +JEMALLOC_INLINE void +extent_node_arena_set(extent_node_t *node, arena_t *arena) +{ + + node->en_arena = arena; +} + 
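/*
 * Editorial sketch, not part of the vendored patch: the en_sn comment above
 * describes an ordering in which extents sort by size, then serial number,
 * then address, falling back on address comparison when serial numbers
 * collide. In jemalloc that ordering is implemented by the tree comparators
 * in extent.c (src/je_extent.c in this tree); the standalone comparator below
 * uses hypothetical names and only illustrates the policy.
 */
#include <stddef.h>
#include <stdint.h>

/* Stand-in for the extent_node_t fields that participate in the ordering. */
typedef struct {
	size_t en_size; /* Total region size. */
	size_t en_sn;   /* Serial number; smaller means older. */
	void *en_addr;  /* Extent base address. */
} xnode_t;

/* Size first, then serial number; equal-sn extents tie-break on address. */
static int
xnode_szsnad_comp(const xnode_t *a, const xnode_t *b)
{
	if (a->en_size != b->en_size)
		return ((a->en_size < b->en_size) ? -1 : 1);
	if (a->en_sn != b->en_sn)
		return ((a->en_sn < b->en_sn) ? -1 : 1);
	if ((uintptr_t)a->en_addr != (uintptr_t)b->en_addr)
		return (((uintptr_t)a->en_addr < (uintptr_t)b->en_addr) ? -1 : 1);
	return (0);
}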
+JEMALLOC_INLINE void +extent_node_addr_set(extent_node_t *node, void *addr) +{ + + node->en_addr = addr; +} + +JEMALLOC_INLINE void +extent_node_size_set(extent_node_t *node, size_t size) +{ + + node->en_size = size; +} + +JEMALLOC_INLINE void +extent_node_sn_set(extent_node_t *node, size_t sn) +{ + + node->en_sn = sn; +} + +JEMALLOC_INLINE void +extent_node_zeroed_set(extent_node_t *node, bool zeroed) +{ + + node->en_zeroed = zeroed; +} + +JEMALLOC_INLINE void +extent_node_committed_set(extent_node_t *node, bool committed) +{ + + node->en_committed = committed; +} + +JEMALLOC_INLINE void +extent_node_achunk_set(extent_node_t *node, bool achunk) +{ + + node->en_achunk = achunk; +} + +JEMALLOC_INLINE void +extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx) +{ + + node->en_prof_tctx = tctx; +} + +JEMALLOC_INLINE void +extent_node_init(extent_node_t *node, arena_t *arena, void *addr, size_t size, + size_t sn, bool zeroed, bool committed) +{ + + extent_node_arena_set(node, arena); + extent_node_addr_set(node, addr); + extent_node_size_set(node, size); + extent_node_sn_set(node, sn); + extent_node_zeroed_set(node, zeroed); + extent_node_committed_set(node, committed); + extent_node_achunk_set(node, false); + if (config_prof) + extent_node_prof_tctx_set(node, NULL); +} + +JEMALLOC_INLINE void +extent_node_dirty_linkage_init(extent_node_t *node) +{ + + qr_new(&node->rd, rd_link); + qr_new(node, cc_link); +} + +JEMALLOC_INLINE void +extent_node_dirty_insert(extent_node_t *node, + arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty) +{ + + qr_meld(runs_dirty, &node->rd, rd_link); + qr_meld(chunks_dirty, node, cc_link); +} + +JEMALLOC_INLINE void +extent_node_dirty_remove(extent_node_t *node) +{ + + qr_remove(&node->rd, rd_link); + qr_remove(node, cc_link); +} + +#endif + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ + diff --git a/sources/jemalloc/include-win/jemalloc/internal/hash.h b/sources/jemalloc/include-win/jemalloc/internal/hash.h new file mode 100755 index 00000000..1ff2d9a0 --- /dev/null +++ b/sources/jemalloc/include-win/jemalloc/internal/hash.h @@ -0,0 +1,357 @@ +/* + * The following hash function is based on MurmurHash3, placed into the public + * domain by Austin Appleby. See https://github.com/aappleby/smhasher for + * details. + */ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +uint32_t hash_x86_32(const void *key, int len, uint32_t seed); +void hash_x86_128(const void *key, const int len, uint32_t seed, + uint64_t r_out[2]); +void hash_x64_128(const void *key, const int len, const uint32_t seed, + uint64_t r_out[2]); +void hash(const void *key, size_t len, const uint32_t seed, + size_t r_hash[2]); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_HASH_C_)) +/******************************************************************************/ +/* Internal implementation. 
*/ +JEMALLOC_INLINE uint32_t +hash_rotl_32(uint32_t x, int8_t r) +{ + + return ((x << r) | (x >> (32 - r))); +} + +JEMALLOC_INLINE uint64_t +hash_rotl_64(uint64_t x, int8_t r) +{ + + return ((x << r) | (x >> (64 - r))); +} + +JEMALLOC_INLINE uint32_t +hash_get_block_32(const uint32_t *p, int i) +{ + + /* Handle unaligned read. */ + if (unlikely((uintptr_t)p & (sizeof(uint32_t)-1)) != 0) { + uint32_t ret; + + memcpy(&ret, (uint8_t *)(p + i), sizeof(uint32_t)); + return (ret); + } + + return (p[i]); +} + +JEMALLOC_INLINE uint64_t +hash_get_block_64(const uint64_t *p, int i) +{ + + /* Handle unaligned read. */ + if (unlikely((uintptr_t)p & (sizeof(uint64_t)-1)) != 0) { + uint64_t ret; + + memcpy(&ret, (uint8_t *)(p + i), sizeof(uint64_t)); + return (ret); + } + + return (p[i]); +} + +JEMALLOC_INLINE uint32_t +hash_fmix_32(uint32_t h) +{ + + h ^= h >> 16; + h *= 0x85ebca6b; + h ^= h >> 13; + h *= 0xc2b2ae35; + h ^= h >> 16; + + return (h); +} + +JEMALLOC_INLINE uint64_t +hash_fmix_64(uint64_t k) +{ + + k ^= k >> 33; + k *= KQU(0xff51afd7ed558ccd); + k ^= k >> 33; + k *= KQU(0xc4ceb9fe1a85ec53); + k ^= k >> 33; + + return (k); +} + +JEMALLOC_INLINE uint32_t +hash_x86_32(const void *key, int len, uint32_t seed) +{ + const uint8_t *data = (const uint8_t *) key; + const int nblocks = len / 4; + + uint32_t h1 = seed; + + const uint32_t c1 = 0xcc9e2d51; + const uint32_t c2 = 0x1b873593; + + /* body */ + { + const uint32_t *blocks = (const uint32_t *) (data + nblocks*4); + int i; + + for (i = -nblocks; i; i++) { + uint32_t k1 = hash_get_block_32(blocks, i); + + k1 *= c1; + k1 = hash_rotl_32(k1, 15); + k1 *= c2; + + h1 ^= k1; + h1 = hash_rotl_32(h1, 13); + h1 = h1*5 + 0xe6546b64; + } + } + + /* tail */ + { + const uint8_t *tail = (const uint8_t *) (data + nblocks*4); + + uint32_t k1 = 0; + + switch (len & 3) { + case 3: k1 ^= tail[2] << 16; + case 2: k1 ^= tail[1] << 8; + case 1: k1 ^= tail[0]; k1 *= c1; k1 = hash_rotl_32(k1, 15); + k1 *= c2; h1 ^= k1; + } + } + + /* finalization */ + h1 ^= len; + + h1 = hash_fmix_32(h1); + + return (h1); +} + +UNUSED JEMALLOC_INLINE void +hash_x86_128(const void *key, const int len, uint32_t seed, + uint64_t r_out[2]) +{ + const uint8_t * data = (const uint8_t *) key; + const int nblocks = len / 16; + + uint32_t h1 = seed; + uint32_t h2 = seed; + uint32_t h3 = seed; + uint32_t h4 = seed; + + const uint32_t c1 = 0x239b961b; + const uint32_t c2 = 0xab0e9789; + const uint32_t c3 = 0x38b34ae5; + const uint32_t c4 = 0xa1e38b93; + + /* body */ + { + const uint32_t *blocks = (const uint32_t *) (data + nblocks*16); + int i; + + for (i = -nblocks; i; i++) { + uint32_t k1 = hash_get_block_32(blocks, i*4 + 0); + uint32_t k2 = hash_get_block_32(blocks, i*4 + 1); + uint32_t k3 = hash_get_block_32(blocks, i*4 + 2); + uint32_t k4 = hash_get_block_32(blocks, i*4 + 3); + + k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; + + h1 = hash_rotl_32(h1, 19); h1 += h2; + h1 = h1*5 + 0x561ccd1b; + + k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2; + + h2 = hash_rotl_32(h2, 17); h2 += h3; + h2 = h2*5 + 0x0bcaa747; + + k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3; + + h3 = hash_rotl_32(h3, 15); h3 += h4; + h3 = h3*5 + 0x96cd1c35; + + k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4; + + h4 = hash_rotl_32(h4, 13); h4 += h1; + h4 = h4*5 + 0x32ac3b17; + } + } + + /* tail */ + { + const uint8_t *tail = (const uint8_t *) (data + nblocks*16); + uint32_t k1 = 0; + uint32_t k2 = 0; + uint32_t k3 = 0; + uint32_t k4 = 0; + + switch (len & 15) { + case 15: k4 ^= tail[14] 
<< 16; + case 14: k4 ^= tail[13] << 8; + case 13: k4 ^= tail[12] << 0; + k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4; + + case 12: k3 ^= tail[11] << 24; + case 11: k3 ^= tail[10] << 16; + case 10: k3 ^= tail[ 9] << 8; + case 9: k3 ^= tail[ 8] << 0; + k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3; + + case 8: k2 ^= tail[ 7] << 24; + case 7: k2 ^= tail[ 6] << 16; + case 6: k2 ^= tail[ 5] << 8; + case 5: k2 ^= tail[ 4] << 0; + k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2; + + case 4: k1 ^= tail[ 3] << 24; + case 3: k1 ^= tail[ 2] << 16; + case 2: k1 ^= tail[ 1] << 8; + case 1: k1 ^= tail[ 0] << 0; + k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; + } + } + + /* finalization */ + h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len; + + h1 += h2; h1 += h3; h1 += h4; + h2 += h1; h3 += h1; h4 += h1; + + h1 = hash_fmix_32(h1); + h2 = hash_fmix_32(h2); + h3 = hash_fmix_32(h3); + h4 = hash_fmix_32(h4); + + h1 += h2; h1 += h3; h1 += h4; + h2 += h1; h3 += h1; h4 += h1; + + r_out[0] = (((uint64_t) h2) << 32) | h1; + r_out[1] = (((uint64_t) h4) << 32) | h3; +} + +UNUSED JEMALLOC_INLINE void +hash_x64_128(const void *key, const int len, const uint32_t seed, + uint64_t r_out[2]) +{ + const uint8_t *data = (const uint8_t *) key; + const int nblocks = len / 16; + + uint64_t h1 = seed; + uint64_t h2 = seed; + + const uint64_t c1 = KQU(0x87c37b91114253d5); + const uint64_t c2 = KQU(0x4cf5ad432745937f); + + /* body */ + { + const uint64_t *blocks = (const uint64_t *) (data); + int i; + + for (i = 0; i < nblocks; i++) { + uint64_t k1 = hash_get_block_64(blocks, i*2 + 0); + uint64_t k2 = hash_get_block_64(blocks, i*2 + 1); + + k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1; + + h1 = hash_rotl_64(h1, 27); h1 += h2; + h1 = h1*5 + 0x52dce729; + + k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2; + + h2 = hash_rotl_64(h2, 31); h2 += h1; + h2 = h2*5 + 0x38495ab5; + } + } + + /* tail */ + { + const uint8_t *tail = (const uint8_t*)(data + nblocks*16); + uint64_t k1 = 0; + uint64_t k2 = 0; + + switch (len & 15) { + case 15: k2 ^= ((uint64_t)(tail[14])) << 48; + case 14: k2 ^= ((uint64_t)(tail[13])) << 40; + case 13: k2 ^= ((uint64_t)(tail[12])) << 32; + case 12: k2 ^= ((uint64_t)(tail[11])) << 24; + case 11: k2 ^= ((uint64_t)(tail[10])) << 16; + case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8; + case 9: k2 ^= ((uint64_t)(tail[ 8])) << 0; + k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2; + + case 8: k1 ^= ((uint64_t)(tail[ 7])) << 56; + case 7: k1 ^= ((uint64_t)(tail[ 6])) << 48; + case 6: k1 ^= ((uint64_t)(tail[ 5])) << 40; + case 5: k1 ^= ((uint64_t)(tail[ 4])) << 32; + case 4: k1 ^= ((uint64_t)(tail[ 3])) << 24; + case 3: k1 ^= ((uint64_t)(tail[ 2])) << 16; + case 2: k1 ^= ((uint64_t)(tail[ 1])) << 8; + case 1: k1 ^= ((uint64_t)(tail[ 0])) << 0; + k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1; + } + } + + /* finalization */ + h1 ^= len; h2 ^= len; + + h1 += h2; + h2 += h1; + + h1 = hash_fmix_64(h1); + h2 = hash_fmix_64(h2); + + h1 += h2; + h2 += h1; + + r_out[0] = h1; + r_out[1] = h2; +} + +/******************************************************************************/ +/* API. */ +JEMALLOC_INLINE void +hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2]) +{ + + assert(len <= INT_MAX); /* Unfortunate implementation limitation. 
*/
+
+#if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN))
+	hash_x64_128(key, (int)len, seed, (uint64_t *)r_hash);
+#else
+	{
+		uint64_t hashes[2];
+		hash_x86_128(key, (int)len, seed, hashes);
+		r_hash[0] = (size_t)hashes[0];
+		r_hash[1] = (size_t)hashes[1];
+	}
+#endif
+}
+#endif
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
diff --git a/sources/jemalloc/include-win/jemalloc/internal/huge.h b/sources/jemalloc/include-win/jemalloc/internal/huge.h
new file mode 100755
index 00000000..22184d9b
--- /dev/null
+++ b/sources/jemalloc/include-win/jemalloc/internal/huge.h
@@ -0,0 +1,35 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+void *huge_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero);
+void *huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
+    size_t alignment, bool zero);
+bool huge_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize,
+    size_t usize_min, size_t usize_max, bool zero);
+void *huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
+    size_t usize, size_t alignment, bool zero, tcache_t *tcache);
+#ifdef JEMALLOC_JET
+typedef void (huge_dalloc_junk_t)(void *, size_t);
+extern huge_dalloc_junk_t *huge_dalloc_junk;
+#endif
+void huge_dalloc(tsdn_t *tsdn, void *ptr);
+arena_t *huge_aalloc(const void *ptr);
+size_t huge_salloc(tsdn_t *tsdn, const void *ptr);
+prof_tctx_t *huge_prof_tctx_get(tsdn_t *tsdn, const void *ptr);
+void huge_prof_tctx_set(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx);
+void huge_prof_tctx_reset(tsdn_t *tsdn, const void *ptr);
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
diff --git a/sources/jemalloc/include-win/jemalloc/internal/jemalloc_internal.h b/sources/jemalloc/include-win/jemalloc/internal/jemalloc_internal.h
new file mode 100755
index 00000000..f8df7996
--- /dev/null
+++ b/sources/jemalloc/include-win/jemalloc/internal/jemalloc_internal.h
@@ -0,0 +1,1294 @@
+#ifndef JEMALLOC_INTERNAL_H
+#define JEMALLOC_INTERNAL_H
+
+#include "jemalloc_internal_defs.h"
+#include "jemalloc/internal/jemalloc_internal_decls.h"
+
+#ifdef JEMALLOC_UTRACE
+#include <sys/ktrace.h>
+#endif
+
+#define JEMALLOC_NO_DEMANGLE
+#ifdef JEMALLOC_JET
+# define JEMALLOC_N(n) jet_##n
+# include "jemalloc/internal/public_namespace.h"
+# define JEMALLOC_NO_RENAME
+# include "../jemalloc.h"
+# undef JEMALLOC_NO_RENAME
+#else
+# define JEMALLOC_N(n) je_##n
+# include "../jemalloc.h"
+#endif
+#include "jemalloc/internal/private_namespace.h"
+
+static const bool config_debug =
+#ifdef JEMALLOC_DEBUG
+    true
+#else
+    false
+#endif
+    ;
+static const bool have_dss =
+#ifdef JEMALLOC_DSS
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_fill =
+#ifdef JEMALLOC_FILL
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_lazy_lock =
+#ifdef JEMALLOC_LAZY_LOCK
+    true
+#else
+    false
+#endif
+    ;
+static const char * const config_malloc_conf = JEMALLOC_CONFIG_MALLOC_CONF;
+static const bool config_prof =
+#ifdef JEMALLOC_PROF
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_prof_libgcc =
+#ifdef JEMALLOC_PROF_LIBGCC
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_prof_libunwind =
+#ifdef JEMALLOC_PROF_LIBUNWIND
+    true
+#else
+    false
+#endif
+    ;
+static const bool maps_coalesce =
+#ifdef JEMALLOC_MAPS_COALESCE
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_munmap =
+#ifdef JEMALLOC_MUNMAP
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_stats =
+#ifdef JEMALLOC_STATS
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_tcache =
+#ifdef JEMALLOC_TCACHE
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_thp =
+#ifdef JEMALLOC_THP
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_tls =
+#ifdef JEMALLOC_TLS
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_utrace =
+#ifdef JEMALLOC_UTRACE
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_valgrind =
+#ifdef JEMALLOC_VALGRIND
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_xmalloc =
+#ifdef JEMALLOC_XMALLOC
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_ivsalloc =
+#ifdef JEMALLOC_IVSALLOC
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_cache_oblivious =
+#ifdef JEMALLOC_CACHE_OBLIVIOUS
+    true
+#else
+    false
+#endif
+    ;
+
+#ifdef JEMALLOC_C11ATOMICS
+#include <stdatomic.h>
+#endif
+
+#ifdef JEMALLOC_ATOMIC9
+#include <machine/atomic.h>
+#endif
+
+#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
+#include <libkern/OSAtomic.h>
+#endif
+
+#ifdef JEMALLOC_ZONE
+#include <mach/mach_error.h>
+#include <mach/mach_init.h>
+#include <mach/vm_map.h>
+#endif
+
+#include "jemalloc/internal/ph.h"
+#ifndef __PGI
+#define RB_COMPACT
+#endif
+#include "jemalloc/internal/rb.h"
+#include "jemalloc/internal/qr.h"
+#include "jemalloc/internal/ql.h"
+
+/*
+ * jemalloc can conceptually be broken into components (arena, tcache, etc.),
+ * but there are circular dependencies that cannot be broken without
+ * substantial performance degradation. In order to reduce the effect on
+ * visual code flow, read the header files in multiple passes, with one of the
+ * following cpp variables defined during each pass:
+ *
+ * JEMALLOC_H_TYPES : Preprocessor-defined constants and pseudo-opaque data
+ * types.
+ * JEMALLOC_H_STRUCTS : Data structures.
+ * JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
+ * JEMALLOC_H_INLINES : Inline functions.
+ */
+/******************************************************************************/
+#define JEMALLOC_H_TYPES
+
+#include "jemalloc/internal/jemalloc_internal_macros.h"
+
+/* Page size index type. */
+typedef unsigned pszind_t;
+
+/* Size class index type. */
+typedef unsigned szind_t;
+
+/*
+ * Flags bits:
+ *
+ * a: arena
+ * t: tcache
+ * 0: unused
+ * z: zero
+ * n: alignment
+ *
+ * aaaaaaaa aaaatttt tttttttt 0znnnnnn
+ */
+#define MALLOCX_ARENA_MASK ((int)~0xfffff)
+#define MALLOCX_ARENA_MAX 0xffe
+#define MALLOCX_TCACHE_MASK ((int)~0xfff000ffU)
+#define MALLOCX_TCACHE_MAX 0xffd
+#define MALLOCX_LG_ALIGN_MASK ((int)0x3f)
+/* Use MALLOCX_ALIGN_GET() if alignment may not be specified in flags. */
+#define MALLOCX_ALIGN_GET_SPECIFIED(flags) \
+	(ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK))
+#define MALLOCX_ALIGN_GET(flags) \
+	(MALLOCX_ALIGN_GET_SPECIFIED(flags) & (SIZE_T_MAX-1))
+#define MALLOCX_ZERO_GET(flags) \
+	((bool)(flags & MALLOCX_ZERO))
+
+#define MALLOCX_TCACHE_GET(flags) \
+	(((unsigned)((flags & MALLOCX_TCACHE_MASK) >> 8)) - 2)
+#define MALLOCX_ARENA_GET(flags) \
+	(((unsigned)(((unsigned)flags) >> 20)) - 1)
+
+/* Smallest size class to support.
*/ +#define TINY_MIN (1U << LG_TINY_MIN) + +/* + * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size + * classes). + */ +#ifndef LG_QUANTUM +# if (defined(__i386__) || defined(_M_IX86)) +# define LG_QUANTUM 4 +# endif +# ifdef __ia64__ +# define LG_QUANTUM 4 +# endif +# ifdef __alpha__ +# define LG_QUANTUM 4 +# endif +# if (defined(__sparc64__) || defined(__sparcv9) || defined(__sparc_v9__)) +# define LG_QUANTUM 4 +# endif +# if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64)) +# define LG_QUANTUM 4 +# endif +# ifdef __arm__ +# define LG_QUANTUM 3 +# endif +# ifdef __aarch64__ +# define LG_QUANTUM 4 +# endif +# ifdef __hppa__ +# define LG_QUANTUM 4 +# endif +# ifdef __mips__ +# define LG_QUANTUM 3 +# endif +# ifdef __or1k__ +# define LG_QUANTUM 3 +# endif +# ifdef __powerpc__ +# define LG_QUANTUM 4 +# endif +# ifdef __riscv__ +# define LG_QUANTUM 4 +# endif +# ifdef __s390__ +# define LG_QUANTUM 4 +# endif +# ifdef __SH4__ +# define LG_QUANTUM 4 +# endif +# ifdef __tile__ +# define LG_QUANTUM 4 +# endif +# ifdef __le32__ +# define LG_QUANTUM 4 +# endif +# ifndef LG_QUANTUM +# error "Unknown minimum alignment for architecture; specify via " + "--with-lg-quantum" +# endif +#endif + +#define QUANTUM ((size_t)(1U << LG_QUANTUM)) +#define QUANTUM_MASK (QUANTUM - 1) + +/* Return the smallest quantum multiple that is >= a. */ +#define QUANTUM_CEILING(a) \ + (((a) + QUANTUM_MASK) & ~QUANTUM_MASK) + +#define LONG ((size_t)(1U << LG_SIZEOF_LONG)) +#define LONG_MASK (LONG - 1) + +/* Return the smallest long multiple that is >= a. */ +#define LONG_CEILING(a) \ + (((a) + LONG_MASK) & ~LONG_MASK) + +#define SIZEOF_PTR (1U << LG_SIZEOF_PTR) +#define PTR_MASK (SIZEOF_PTR - 1) + +/* Return the smallest (void *) multiple that is >= a. */ +#define PTR_CEILING(a) \ + (((a) + PTR_MASK) & ~PTR_MASK) + +/* + * Maximum size of L1 cache line. This is used to avoid cache line aliasing. + * In addition, this controls the spacing of cacheline-spaced size classes. + * + * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can + * only handle raw constants. + */ +#define LG_CACHELINE 6 +#define CACHELINE 64 +#define CACHELINE_MASK (CACHELINE - 1) + +/* Return the smallest cacheline multiple that is >= s. */ +#define CACHELINE_CEILING(s) \ + (((s) + CACHELINE_MASK) & ~CACHELINE_MASK) + +/* Page size. LG_PAGE is determined by the configure script. */ +#ifdef PAGE_MASK +# undef PAGE_MASK +#endif +#define PAGE ((size_t)(1U << LG_PAGE)) +#define PAGE_MASK ((size_t)(PAGE - 1)) + +/* Return the page base address for the page containing address a. */ +#define PAGE_ADDR2BASE(a) \ + ((void *)((uintptr_t)(a) & ~PAGE_MASK)) + +/* Return the smallest pagesize multiple that is >= s. */ +#define PAGE_CEILING(s) \ + (((s) + PAGE_MASK) & ~PAGE_MASK) + +/* Return the nearest aligned address at or below a. */ +#define ALIGNMENT_ADDR2BASE(a, alignment) \ + ((void *)((uintptr_t)(a) & ((~(alignment)) + 1))) + +/* Return the offset between a and the nearest aligned address at or below a. */ +#define ALIGNMENT_ADDR2OFFSET(a, alignment) \ + ((size_t)((uintptr_t)(a) & (alignment - 1))) + +/* Return the smallest alignment multiple that is >= s. */ +#define ALIGNMENT_CEILING(s, alignment) \ + (((s) + (alignment - 1)) & ((~(alignment)) + 1)) + +/* Declare a variable-length array. 
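(Definition below; first, an
+ * editorial aside on the alignment macros above.)
+ *
+ * Worked example with hypothetical values: every *_CEILING macro uses the
+ * same power-of-two rounding trick, (x + mask) & ~mask.  With QUANTUM == 16
+ * (LG_QUANTUM == 4):
+ *
+ *   QUANTUM_CEILING(1)  == (1 + 15)  & ~15 == 16
+ *   QUANTUM_CEILING(17) == (17 + 15) & ~15 == 32
+ *
+ * ALIGNMENT_ADDR2BASE(a, 64) likewise clears the low six bits of a,
+ * yielding the nearest 64-byte boundary at or below a.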
*/
+#if __STDC_VERSION__ < 199901L
+# ifdef _MSC_VER
+# include <malloc.h>
+# define alloca _alloca
+# else
+# ifdef JEMALLOC_HAS_ALLOCA_H
+# include <alloca.h>
+# else
+# include <stdlib.h>
+# endif
+# endif
+# define VARIABLE_ARRAY(type, name, count) \
+ type *name = alloca(sizeof(type) * (count))
+#else
+# define VARIABLE_ARRAY(type, name, count) type name[(count)]
+#endif
+
+#include "jemalloc/internal/nstime.h"
+#include "jemalloc/internal/valgrind.h"
+#include "jemalloc/internal/util.h"
+#include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/spin.h"
+#include "jemalloc/internal/prng.h"
+#include "jemalloc/internal/ticker.h"
+#include "jemalloc/internal/ckh.h"
+#include "jemalloc/internal/size_classes.h"
+#include "jemalloc/internal/smoothstep.h"
+#include "jemalloc/internal/stats.h"
+#include "jemalloc/internal/ctl.h"
+#include "jemalloc/internal/witness.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/tsd.h"
+#include "jemalloc/internal/mb.h"
+#include "jemalloc/internal/extent.h"
+#include "jemalloc/internal/arena.h"
+#include "jemalloc/internal/bitmap.h"
+#include "jemalloc/internal/base.h"
+#include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/pages.h"
+#include "jemalloc/internal/chunk.h"
+#include "jemalloc/internal/huge.h"
+#include "jemalloc/internal/tcache.h"
+#include "jemalloc/internal/hash.h"
+#include "jemalloc/internal/quarantine.h"
+#include "jemalloc/internal/prof.h"
+
+#undef JEMALLOC_H_TYPES
+/******************************************************************************/
+#define JEMALLOC_H_STRUCTS
+
+#include "jemalloc/internal/nstime.h"
+#include "jemalloc/internal/valgrind.h"
+#include "jemalloc/internal/util.h"
+#include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/spin.h"
+#include "jemalloc/internal/prng.h"
+#include "jemalloc/internal/ticker.h"
+#include "jemalloc/internal/ckh.h"
+#include "jemalloc/internal/size_classes.h"
+#include "jemalloc/internal/smoothstep.h"
+#include "jemalloc/internal/stats.h"
+#include "jemalloc/internal/ctl.h"
+#include "jemalloc/internal/witness.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/mb.h"
+#include "jemalloc/internal/bitmap.h"
+#define JEMALLOC_ARENA_STRUCTS_A
+#include "jemalloc/internal/arena.h"
+#undef JEMALLOC_ARENA_STRUCTS_A
+#include "jemalloc/internal/extent.h"
+#define JEMALLOC_ARENA_STRUCTS_B
+#include "jemalloc/internal/arena.h"
+#undef JEMALLOC_ARENA_STRUCTS_B
+#include "jemalloc/internal/base.h"
+#include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/pages.h"
+#include "jemalloc/internal/chunk.h"
+#include "jemalloc/internal/huge.h"
+#include "jemalloc/internal/tcache.h"
+#include "jemalloc/internal/hash.h"
+#include "jemalloc/internal/quarantine.h"
+#include "jemalloc/internal/prof.h"
+
+#include "jemalloc/internal/tsd.h"
+
+#undef JEMALLOC_H_STRUCTS
+/******************************************************************************/
+#define JEMALLOC_H_EXTERNS
+
+extern bool opt_abort;
+extern const char *opt_junk;
+extern bool opt_junk_alloc;
+extern bool opt_junk_free;
+extern size_t opt_quarantine;
+extern bool opt_redzone;
+extern bool opt_utrace;
+extern bool opt_xmalloc;
+extern bool opt_zero;
+extern unsigned opt_narenas;
+
+extern bool in_valgrind;
+
+/* Number of CPUs. */
+extern unsigned ncpus;
+
+/* Number of arenas used for automatic multiplexing of threads and arenas. */
+extern unsigned narenas_auto;
+
+/*
+ * Arenas that are used to service external requests. 
Not all elements of the + * arenas array are necessarily used; arenas are created lazily as needed. + */ +extern arena_t **arenas; + +/* + * pind2sz_tab encodes the same information as could be computed by + * pind2sz_compute(). + */ +extern size_t const pind2sz_tab[NPSIZES]; +/* + * index2size_tab encodes the same information as could be computed (at + * unacceptable cost in some code paths) by index2size_compute(). + */ +extern size_t const index2size_tab[NSIZES]; +/* + * size2index_tab is a compact lookup table that rounds request sizes up to + * size classes. In order to reduce cache footprint, the table is compressed, + * and all accesses are via size2index(). + */ +extern uint8_t const size2index_tab[]; + +arena_t *a0get(void); +void *a0malloc(size_t size); +void a0dalloc(void *ptr); +void *bootstrap_malloc(size_t size); +void *bootstrap_calloc(size_t num, size_t size); +void bootstrap_free(void *ptr); +unsigned narenas_total_get(void); +arena_t *arena_init(tsdn_t *tsdn, unsigned ind); +arena_tdata_t *arena_tdata_get_hard(tsd_t *tsd, unsigned ind); +arena_t *arena_choose_hard(tsd_t *tsd, bool internal); +void arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind); +void thread_allocated_cleanup(tsd_t *tsd); +void thread_deallocated_cleanup(tsd_t *tsd); +void iarena_cleanup(tsd_t *tsd); +void arena_cleanup(tsd_t *tsd); +void arenas_tdata_cleanup(tsd_t *tsd); +void narenas_tdata_cleanup(tsd_t *tsd); +void arenas_tdata_bypass_cleanup(tsd_t *tsd); +void jemalloc_prefork(void); +void jemalloc_postfork_parent(void); +void jemalloc_postfork_child(void); + +#include "jemalloc/internal/nstime.h" +#include "jemalloc/internal/valgrind.h" +#include "jemalloc/internal/util.h" +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/spin.h" +#include "jemalloc/internal/prng.h" +#include "jemalloc/internal/ticker.h" +#include "jemalloc/internal/ckh.h" +#include "jemalloc/internal/size_classes.h" +#include "jemalloc/internal/smoothstep.h" +#include "jemalloc/internal/stats.h" +#include "jemalloc/internal/ctl.h" +#include "jemalloc/internal/witness.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/mb.h" +#include "jemalloc/internal/bitmap.h" +#include "jemalloc/internal/extent.h" +#include "jemalloc/internal/arena.h" +#include "jemalloc/internal/base.h" +#include "jemalloc/internal/rtree.h" +#include "jemalloc/internal/pages.h" +#include "jemalloc/internal/chunk.h" +#include "jemalloc/internal/huge.h" +#include "jemalloc/internal/tcache.h" +#include "jemalloc/internal/hash.h" +#include "jemalloc/internal/quarantine.h" +#include "jemalloc/internal/prof.h" +#include "jemalloc/internal/tsd.h" + +#undef JEMALLOC_H_EXTERNS +/******************************************************************************/ +#define JEMALLOC_H_INLINES + +#include "jemalloc/internal/nstime.h" +#include "jemalloc/internal/valgrind.h" +#include "jemalloc/internal/util.h" +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/spin.h" +#include "jemalloc/internal/prng.h" +#include "jemalloc/internal/ticker.h" +#include "jemalloc/internal/ckh.h" +#include "jemalloc/internal/size_classes.h" +#include "jemalloc/internal/smoothstep.h" +#include "jemalloc/internal/stats.h" +#include "jemalloc/internal/ctl.h" +#include "jemalloc/internal/tsd.h" +#include "jemalloc/internal/witness.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/mb.h" +#include "jemalloc/internal/extent.h" +#include "jemalloc/internal/base.h" +#include "jemalloc/internal/rtree.h" +#include 
"jemalloc/internal/pages.h" +#include "jemalloc/internal/chunk.h" +#include "jemalloc/internal/huge.h" + +#ifndef JEMALLOC_ENABLE_INLINE +pszind_t psz2ind(size_t psz); +size_t pind2sz_compute(pszind_t pind); +size_t pind2sz_lookup(pszind_t pind); +size_t pind2sz(pszind_t pind); +size_t psz2u(size_t psz); +szind_t size2index_compute(size_t size); +szind_t size2index_lookup(size_t size); +szind_t size2index(size_t size); +size_t index2size_compute(szind_t index); +size_t index2size_lookup(szind_t index); +size_t index2size(szind_t index); +size_t s2u_compute(size_t size); +size_t s2u_lookup(size_t size); +size_t s2u(size_t size); +size_t sa2u(size_t size, size_t alignment); +arena_t *arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal); +arena_t *arena_choose(tsd_t *tsd, arena_t *arena); +arena_t *arena_ichoose(tsd_t *tsd, arena_t *arena); +arena_tdata_t *arena_tdata_get(tsd_t *tsd, unsigned ind, + bool refresh_if_missing); +arena_t *arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing); +ticker_t *decay_ticker_get(tsd_t *tsd, unsigned ind); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_)) +JEMALLOC_INLINE pszind_t +psz2ind(size_t psz) +{ + + if (unlikely(psz > HUGE_MAXCLASS)) + return (NPSIZES); + { + pszind_t x = lg_floor((psz<<1)-1); + pszind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_PAGE) ? 0 : x - + (LG_SIZE_CLASS_GROUP + LG_PAGE); + pszind_t grp = shift << LG_SIZE_CLASS_GROUP; + + pszind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ? + LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1; + + size_t delta_inverse_mask = ZI(-1) << lg_delta; + pszind_t mod = ((((psz-1) & delta_inverse_mask) >> lg_delta)) & + ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1); + + pszind_t ind = grp + mod; + return (ind); + } +} + +JEMALLOC_INLINE size_t +pind2sz_compute(pszind_t pind) +{ + + { + size_t grp = pind >> LG_SIZE_CLASS_GROUP; + size_t mod = pind & ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1); + + size_t grp_size_mask = ~((!!grp)-1); + size_t grp_size = ((ZU(1) << (LG_PAGE + + (LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask; + + size_t shift = (grp == 0) ? 1 : grp; + size_t lg_delta = shift + (LG_PAGE-1); + size_t mod_size = (mod+1) << lg_delta; + + size_t sz = grp_size + mod_size; + return (sz); + } +} + +JEMALLOC_INLINE size_t +pind2sz_lookup(pszind_t pind) +{ + size_t ret = (size_t)pind2sz_tab[pind]; + assert(ret == pind2sz_compute(pind)); + return (ret); +} + +JEMALLOC_INLINE size_t +pind2sz(pszind_t pind) +{ + + assert(pind < NPSIZES); + return (pind2sz_lookup(pind)); +} + +JEMALLOC_INLINE size_t +psz2u(size_t psz) +{ + + if (unlikely(psz > HUGE_MAXCLASS)) + return (0); + { + size_t x = lg_floor((psz<<1)-1); + size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ? + LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1; + size_t delta = ZU(1) << lg_delta; + size_t delta_mask = delta - 1; + size_t usize = (psz + delta_mask) & ~delta_mask; + return (usize); + } +} + +JEMALLOC_INLINE szind_t +size2index_compute(size_t size) +{ + + if (unlikely(size > HUGE_MAXCLASS)) + return (NSIZES); +#if (NTBINS != 0) + if (size <= (ZU(1) << LG_TINY_MAXCLASS)) { + szind_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1; + szind_t lg_ceil = lg_floor(pow2_ceil_zu(size)); + return (lg_ceil < lg_tmin ? 0 : lg_ceil - lg_tmin); + } +#endif + { + szind_t x = lg_floor((size<<1)-1); + szind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM) ? 0 : + x - (LG_SIZE_CLASS_GROUP + LG_QUANTUM); + szind_t grp = shift << LG_SIZE_CLASS_GROUP; + + szind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1) + ? 
LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1; + + size_t delta_inverse_mask = ZI(-1) << lg_delta; + szind_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) & + ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1); + + szind_t index = NTBINS + grp + mod; + return (index); + } +} + +JEMALLOC_ALWAYS_INLINE szind_t +size2index_lookup(size_t size) +{ + + assert(size <= LOOKUP_MAXCLASS); + { + szind_t ret = (size2index_tab[(size-1) >> LG_TINY_MIN]); + assert(ret == size2index_compute(size)); + return (ret); + } +} + +JEMALLOC_ALWAYS_INLINE szind_t +size2index(size_t size) +{ + + assert(size > 0); + if (likely(size <= LOOKUP_MAXCLASS)) + return (size2index_lookup(size)); + return (size2index_compute(size)); +} + +JEMALLOC_INLINE size_t +index2size_compute(szind_t index) +{ + +#if (NTBINS > 0) + if (index < NTBINS) + return (ZU(1) << (LG_TINY_MAXCLASS - NTBINS + 1 + index)); +#endif + { + size_t reduced_index = index - NTBINS; + size_t grp = reduced_index >> LG_SIZE_CLASS_GROUP; + size_t mod = reduced_index & ((ZU(1) << LG_SIZE_CLASS_GROUP) - + 1); + + size_t grp_size_mask = ~((!!grp)-1); + size_t grp_size = ((ZU(1) << (LG_QUANTUM + + (LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask; + + size_t shift = (grp == 0) ? 1 : grp; + size_t lg_delta = shift + (LG_QUANTUM-1); + size_t mod_size = (mod+1) << lg_delta; + + size_t usize = grp_size + mod_size; + return (usize); + } +} + +JEMALLOC_ALWAYS_INLINE size_t +index2size_lookup(szind_t index) +{ + size_t ret = (size_t)index2size_tab[index]; + assert(ret == index2size_compute(index)); + return (ret); +} + +JEMALLOC_ALWAYS_INLINE size_t +index2size(szind_t index) +{ + + assert(index < NSIZES); + return (index2size_lookup(index)); +} + +JEMALLOC_ALWAYS_INLINE size_t +s2u_compute(size_t size) +{ + + if (unlikely(size > HUGE_MAXCLASS)) + return (0); +#if (NTBINS > 0) + if (size <= (ZU(1) << LG_TINY_MAXCLASS)) { + size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1; + size_t lg_ceil = lg_floor(pow2_ceil_zu(size)); + return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) : + (ZU(1) << lg_ceil)); + } +#endif + { + size_t x = lg_floor((size<<1)-1); + size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1) + ? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1; + size_t delta = ZU(1) << lg_delta; + size_t delta_mask = delta - 1; + size_t usize = (size + delta_mask) & ~delta_mask; + return (usize); + } +} + +JEMALLOC_ALWAYS_INLINE size_t +s2u_lookup(size_t size) +{ + size_t ret = index2size_lookup(size2index_lookup(size)); + + assert(ret == s2u_compute(size)); + return (ret); +} + +/* + * Compute usable size that would result from allocating an object with the + * specified size. + */ +JEMALLOC_ALWAYS_INLINE size_t +s2u(size_t size) +{ + + assert(size > 0); + if (likely(size <= LOOKUP_MAXCLASS)) + return (s2u_lookup(size)); + return (s2u_compute(size)); +} + +/* + * Compute usable size that would result from allocating an object with the + * specified size and alignment. + */ +JEMALLOC_ALWAYS_INLINE size_t +sa2u(size_t size, size_t alignment) +{ + size_t usize; + + assert(alignment != 0 && ((alignment - 1) & alignment) == 0); + + /* Try for a small size class. */ + if (size <= SMALL_MAXCLASS && alignment < PAGE) { + /* + * Round size up to the nearest multiple of alignment. + * + * This done, we can take advantage of the fact that for each + * small size class, every object is aligned at the smallest + * power of two that is non-zero in the base two representation + * of the size. 
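One editorial check with a
+ * hypothetical request, size = 100 and alignment = 32:
+ * ALIGNMENT_CEILING(100, 32) == 128, s2u(128) == 128, and the lowest set
+ * bit of 128 satisfies the 32-byte alignment.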
For example: + * + * Size | Base 2 | Minimum alignment + * -----+----------+------------------ + * 96 | 1100000 | 32 + * 144 | 10100000 | 32 + * 192 | 11000000 | 64 + */ + usize = s2u(ALIGNMENT_CEILING(size, alignment)); + if (usize < LARGE_MINCLASS) + return (usize); + } + + /* Try for a large size class. */ + if (likely(size <= large_maxclass) && likely(alignment < chunksize)) { + /* + * We can't achieve subpage alignment, so round up alignment + * to the minimum that can actually be supported. + */ + alignment = PAGE_CEILING(alignment); + + /* Make sure result is a large size class. */ + usize = (size <= LARGE_MINCLASS) ? LARGE_MINCLASS : s2u(size); + + /* + * Calculate the size of the over-size run that arena_palloc() + * would need to allocate in order to guarantee the alignment. + */ + if (usize + large_pad + alignment - PAGE <= arena_maxrun) + return (usize); + } + + /* Huge size class. Beware of overflow. */ + + if (unlikely(alignment > HUGE_MAXCLASS)) + return (0); + + /* + * We can't achieve subchunk alignment, so round up alignment to the + * minimum that can actually be supported. + */ + alignment = CHUNK_CEILING(alignment); + + /* Make sure result is a huge size class. */ + if (size <= chunksize) + usize = chunksize; + else { + usize = s2u(size); + if (usize < size) { + /* size_t overflow. */ + return (0); + } + } + + /* + * Calculate the multi-chunk mapping that huge_palloc() would need in + * order to guarantee the alignment. + */ + if (usize + alignment - PAGE < usize) { + /* size_t overflow. */ + return (0); + } + return (usize); +} + +/* Choose an arena based on a per-thread value. */ +JEMALLOC_INLINE arena_t * +arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) +{ + arena_t *ret; + + if (arena != NULL) + return (arena); + + ret = internal ? tsd_iarena_get(tsd) : tsd_arena_get(tsd); + if (unlikely(ret == NULL)) + ret = arena_choose_hard(tsd, internal); + + return (ret); +} + +JEMALLOC_INLINE arena_t * +arena_choose(tsd_t *tsd, arena_t *arena) +{ + + return (arena_choose_impl(tsd, arena, false)); +} + +JEMALLOC_INLINE arena_t * +arena_ichoose(tsd_t *tsd, arena_t *arena) +{ + + return (arena_choose_impl(tsd, arena, true)); +} + +JEMALLOC_INLINE arena_tdata_t * +arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing) +{ + arena_tdata_t *tdata; + arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd); + + if (unlikely(arenas_tdata == NULL)) { + /* arenas_tdata hasn't been initialized yet. */ + return (arena_tdata_get_hard(tsd, ind)); + } + if (unlikely(ind >= tsd_narenas_tdata_get(tsd))) { + /* + * ind is invalid, cache is old (too small), or tdata to be + * initialized. + */ + return (refresh_if_missing ? 
arena_tdata_get_hard(tsd, ind) : + NULL); + } + + tdata = &arenas_tdata[ind]; + if (likely(tdata != NULL) || !refresh_if_missing) + return (tdata); + return (arena_tdata_get_hard(tsd, ind)); +} + +JEMALLOC_INLINE arena_t * +arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing) +{ + arena_t *ret; + + assert(ind <= MALLOCX_ARENA_MAX); + + ret = arenas[ind]; + if (unlikely(ret == NULL)) { + ret = atomic_read_p((void *)&arenas[ind]); + if (init_if_missing && unlikely(ret == NULL)) + ret = arena_init(tsdn, ind); + } + return (ret); +} + +JEMALLOC_INLINE ticker_t * +decay_ticker_get(tsd_t *tsd, unsigned ind) +{ + arena_tdata_t *tdata; + + tdata = arena_tdata_get(tsd, ind, true); + if (unlikely(tdata == NULL)) + return (NULL); + return (&tdata->decay_ticker); +} +#endif + +#include "jemalloc/internal/bitmap.h" +/* + * Include portions of arena.h interleaved with tcache.h in order to resolve + * circular dependencies. + */ +#define JEMALLOC_ARENA_INLINE_A +#include "jemalloc/internal/arena.h" +#undef JEMALLOC_ARENA_INLINE_A +#include "jemalloc/internal/tcache.h" +#define JEMALLOC_ARENA_INLINE_B +#include "jemalloc/internal/arena.h" +#undef JEMALLOC_ARENA_INLINE_B +#include "jemalloc/internal/hash.h" +#include "jemalloc/internal/quarantine.h" + +#ifndef JEMALLOC_ENABLE_INLINE +arena_t *iaalloc(const void *ptr); +size_t isalloc(tsdn_t *tsdn, const void *ptr, bool demote); +void *iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, + tcache_t *tcache, bool is_metadata, arena_t *arena, bool slow_path); +void *ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, + bool slow_path); +void *ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, + tcache_t *tcache, bool is_metadata, arena_t *arena); +void *ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, + tcache_t *tcache, arena_t *arena); +void *ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero); +size_t ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote); +size_t u2rz(size_t usize); +size_t p2rz(tsdn_t *tsdn, const void *ptr); +void idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool is_metadata, + bool slow_path); +void idalloc(tsd_t *tsd, void *ptr); +void iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path); +void isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache, + bool slow_path); +void isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache, + bool slow_path); +void *iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, + size_t extra, size_t alignment, bool zero, tcache_t *tcache, + arena_t *arena); +void *iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, + size_t alignment, bool zero, tcache_t *tcache, arena_t *arena); +void *iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, + size_t alignment, bool zero); +bool ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, + size_t extra, size_t alignment, bool zero); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_)) +JEMALLOC_ALWAYS_INLINE arena_t * +iaalloc(const void *ptr) +{ + + assert(ptr != NULL); + + return (arena_aalloc(ptr)); +} + +/* + * Typical usage: + * tsdn_t *tsdn = [...] + * void *ptr = [...] + * size_t sz = isalloc(tsdn, ptr, config_prof); + */ +JEMALLOC_ALWAYS_INLINE size_t +isalloc(tsdn_t *tsdn, const void *ptr, bool demote) +{ + + assert(ptr != NULL); + /* Demotion only makes sense if config_prof is true. 
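An editorial
+ * gloss, hedged: under prof sampling a small allocation may be promoted to
+ * a larger backing class, and demote == true reports the nominal size
+ * class it would occupy without sampling.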
*/ + assert(config_prof || !demote); + + return (arena_salloc(tsdn, ptr, demote)); +} + +JEMALLOC_ALWAYS_INLINE void * +iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache, + bool is_metadata, arena_t *arena, bool slow_path) +{ + void *ret; + + assert(size != 0); + assert(!is_metadata || tcache == NULL); + assert(!is_metadata || arena == NULL || arena->ind < narenas_auto); + + ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path); + if (config_stats && is_metadata && likely(ret != NULL)) { + arena_metadata_allocated_add(iaalloc(ret), + isalloc(tsdn, ret, config_prof)); + } + return (ret); +} + +JEMALLOC_ALWAYS_INLINE void * +ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, bool slow_path) +{ + + return (iallocztm(tsd_tsdn(tsd), size, ind, zero, tcache_get(tsd, true), + false, NULL, slow_path)); +} + +JEMALLOC_ALWAYS_INLINE void * +ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, + tcache_t *tcache, bool is_metadata, arena_t *arena) +{ + void *ret; + + assert(usize != 0); + assert(usize == sa2u(usize, alignment)); + assert(!is_metadata || tcache == NULL); + assert(!is_metadata || arena == NULL || arena->ind < narenas_auto); + + ret = arena_palloc(tsdn, arena, usize, alignment, zero, tcache); + assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret); + if (config_stats && is_metadata && likely(ret != NULL)) { + arena_metadata_allocated_add(iaalloc(ret), isalloc(tsdn, ret, + config_prof)); + } + return (ret); +} + +JEMALLOC_ALWAYS_INLINE void * +ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, + tcache_t *tcache, arena_t *arena) +{ + + return (ipallocztm(tsdn, usize, alignment, zero, tcache, false, arena)); +} + +JEMALLOC_ALWAYS_INLINE void * +ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero) +{ + + return (ipallocztm(tsd_tsdn(tsd), usize, alignment, zero, + tcache_get(tsd, true), false, NULL)); +} + +JEMALLOC_ALWAYS_INLINE size_t +ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote) +{ + extent_node_t *node; + + /* Return 0 if ptr is not within a chunk managed by jemalloc. */ + node = chunk_lookup(ptr, false); + if (node == NULL) + return (0); + /* Only arena chunks should be looked up via interior pointers. 
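Huge allocations
+ * are registered only under their base address, hence the assertion below.
+ *
+ * Editorial usage sketch, with p a hypothetical pointer:
+ *
+ *   size_t sz = ivsalloc(tsdn, p, false);
+ *   sz == 0 means p is not managed by jemalloc.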
*/ + assert(extent_node_addr_get(node) == ptr || + extent_node_achunk_get(node)); + + return (isalloc(tsdn, ptr, demote)); +} + +JEMALLOC_INLINE size_t +u2rz(size_t usize) +{ + size_t ret; + + if (usize <= SMALL_MAXCLASS) { + szind_t binind = size2index(usize); + ret = arena_bin_info[binind].redzone_size; + } else + ret = 0; + + return (ret); +} + +JEMALLOC_INLINE size_t +p2rz(tsdn_t *tsdn, const void *ptr) +{ + size_t usize = isalloc(tsdn, ptr, false); + + return (u2rz(usize)); +} + +JEMALLOC_ALWAYS_INLINE void +idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool is_metadata, + bool slow_path) +{ + + assert(ptr != NULL); + assert(!is_metadata || tcache == NULL); + assert(!is_metadata || iaalloc(ptr)->ind < narenas_auto); + if (config_stats && is_metadata) { + arena_metadata_allocated_sub(iaalloc(ptr), isalloc(tsdn, ptr, + config_prof)); + } + + arena_dalloc(tsdn, ptr, tcache, slow_path); +} + +JEMALLOC_ALWAYS_INLINE void +idalloc(tsd_t *tsd, void *ptr) +{ + + idalloctm(tsd_tsdn(tsd), ptr, tcache_get(tsd, false), false, true); +} + +JEMALLOC_ALWAYS_INLINE void +iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) +{ + + if (slow_path && config_fill && unlikely(opt_quarantine)) + quarantine(tsd, ptr); + else + idalloctm(tsd_tsdn(tsd), ptr, tcache, false, slow_path); +} + +JEMALLOC_ALWAYS_INLINE void +isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache, + bool slow_path) +{ + + arena_sdalloc(tsdn, ptr, size, tcache, slow_path); +} + +JEMALLOC_ALWAYS_INLINE void +isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache, bool slow_path) +{ + + if (slow_path && config_fill && unlikely(opt_quarantine)) + quarantine(tsd, ptr); + else + isdalloct(tsd_tsdn(tsd), ptr, size, tcache, slow_path); +} + +JEMALLOC_ALWAYS_INLINE void * +iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, + size_t extra, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena) +{ + void *p; + size_t usize, copysize; + + usize = sa2u(size + extra, alignment); + if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) + return (NULL); + p = ipalloct(tsd_tsdn(tsd), usize, alignment, zero, tcache, arena); + if (p == NULL) { + if (extra == 0) + return (NULL); + /* Try again, without extra this time. */ + usize = sa2u(size, alignment); + if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) + return (NULL); + p = ipalloct(tsd_tsdn(tsd), usize, alignment, zero, tcache, + arena); + if (p == NULL) + return (NULL); + } + /* + * Copy at most size bytes (not size+extra), since the caller has no + * expectation that the extra bytes will be reliably preserved. + */ + copysize = (size < oldsize) ? size : oldsize; + memcpy(p, ptr, copysize); + isqalloc(tsd, ptr, oldsize, tcache, true); + return (p); +} + +JEMALLOC_ALWAYS_INLINE void * +iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment, + bool zero, tcache_t *tcache, arena_t *arena) +{ + + assert(ptr != NULL); + assert(size != 0); + + if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1)) + != 0) { + /* + * Existing object alignment is inadequate; allocate new space + * and copy. 
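Editorial note: iralloct_realign()
+ * copies only min(size, oldsize) bytes, so growth preserves the old
+ * contents and shrinkage truncates, matching realloc() semantics.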
+ */
+ return (iralloct_realign(tsd, ptr, oldsize, size, 0, alignment,
+ zero, tcache, arena));
+ }
+
+ return (arena_ralloc(tsd, arena, ptr, oldsize, size, alignment, zero,
+ tcache));
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
+ bool zero)
+{
+
+ return (iralloct(tsd, ptr, oldsize, size, alignment, zero,
+ tcache_get(tsd, true), NULL));
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra,
+ size_t alignment, bool zero)
+{
+
+ assert(ptr != NULL);
+ assert(size != 0);
+
+ if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
+ != 0) {
+ /* Existing object alignment is inadequate. */
+ return (true);
+ }
+
+ return (arena_ralloc_no_move(tsdn, ptr, oldsize, size, extra, zero));
+}
+#endif
+
+#include "jemalloc/internal/prof.h"
+
+#undef JEMALLOC_H_INLINES
+/******************************************************************************/
+#endif /* JEMALLOC_INTERNAL_H */
diff --git a/sources/jemalloc/include-win/jemalloc/internal/jemalloc_internal.h.in b/sources/jemalloc/include-win/jemalloc/internal/jemalloc_internal.h.in
new file mode 100755
index 00000000..e3b499a8
--- /dev/null
+++ b/sources/jemalloc/include-win/jemalloc/internal/jemalloc_internal.h.in
@@ -0,0 +1,1294 @@
+#ifndef JEMALLOC_INTERNAL_H
+#define JEMALLOC_INTERNAL_H
+
+#include "jemalloc_internal_defs.h"
+#include "jemalloc/internal/jemalloc_internal_decls.h"
+
+#ifdef JEMALLOC_UTRACE
+#include <sys/ktrace.h>
+#endif
+
+#define JEMALLOC_NO_DEMANGLE
+#ifdef JEMALLOC_JET
+# define JEMALLOC_N(n) jet_##n
+# include "jemalloc/internal/public_namespace.h"
+# define JEMALLOC_NO_RENAME
+# include "../jemalloc@install_suffix@.h"
+# undef JEMALLOC_NO_RENAME
+#else
+# define JEMALLOC_N(n) @private_namespace@##n
+# include "../jemalloc@install_suffix@.h"
+#endif
+#include "jemalloc/internal/private_namespace.h"
+
+static const bool config_debug =
+#ifdef JEMALLOC_DEBUG
+ true
+#else
+ false
+#endif
+ ;
+static const bool have_dss =
+#ifdef JEMALLOC_DSS
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_fill =
+#ifdef JEMALLOC_FILL
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_lazy_lock =
+#ifdef JEMALLOC_LAZY_LOCK
+ true
+#else
+ false
+#endif
+ ;
+static const char * const config_malloc_conf = JEMALLOC_CONFIG_MALLOC_CONF;
+static const bool config_prof =
+#ifdef JEMALLOC_PROF
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_prof_libgcc =
+#ifdef JEMALLOC_PROF_LIBGCC
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_prof_libunwind =
+#ifdef JEMALLOC_PROF_LIBUNWIND
+ true
+#else
+ false
+#endif
+ ;
+static const bool maps_coalesce =
+#ifdef JEMALLOC_MAPS_COALESCE
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_munmap =
+#ifdef JEMALLOC_MUNMAP
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_stats =
+#ifdef JEMALLOC_STATS
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_tcache =
+#ifdef JEMALLOC_TCACHE
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_thp =
+#ifdef JEMALLOC_THP
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_tls =
+#ifdef JEMALLOC_TLS
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_utrace =
+#ifdef JEMALLOC_UTRACE
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_valgrind =
+#ifdef JEMALLOC_VALGRIND
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_xmalloc =
+#ifdef JEMALLOC_XMALLOC
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_ivsalloc =
+#ifdef JEMALLOC_IVSALLOC
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_cache_oblivious =
+#ifdef JEMALLOC_CACHE_OBLIVIOUS
+ true
+#else
+ false
+#endif
+ ;
+
+#ifdef JEMALLOC_C11ATOMICS
+#include <stdatomic.h>
+#endif
+
+#ifdef JEMALLOC_ATOMIC9
+#include <machine/atomic.h>
+#endif
+
+#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
+#include <libkern/OSAtomic.h>
+#endif
+
+#ifdef JEMALLOC_ZONE
+#include <mach/mach_error.h>
+#include <mach/mach_init.h>
+#include <mach/vm_map.h>
+#endif
+
+#include "jemalloc/internal/ph.h"
+#ifndef __PGI
+#define RB_COMPACT
+#endif
+#include "jemalloc/internal/rb.h"
+#include "jemalloc/internal/qr.h"
+#include "jemalloc/internal/ql.h"
+
+/*
+ * jemalloc can conceptually be broken into components (arena, tcache, etc.),
+ * but there are circular dependencies that cannot be broken without
+ * substantial performance degradation. In order to reduce the effect on
+ * visual code flow, read the header files in multiple passes, with one of the
+ * following cpp variables defined during each pass:
+ *
+ * JEMALLOC_H_TYPES : Preprocessor-defined constants and pseudo-opaque data
+ * types.
+ * JEMALLOC_H_STRUCTS : Data structures.
+ * JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
+ * JEMALLOC_H_INLINES : Inline functions.
+ */
+/******************************************************************************/
+#define JEMALLOC_H_TYPES
+
+#include "jemalloc/internal/jemalloc_internal_macros.h"
+
+/* Page size index type. */
+typedef unsigned pszind_t;
+
+/* Size class index type. */
+typedef unsigned szind_t;
+
+/*
+ * Flags bits:
+ *
+ * a: arena
+ * t: tcache
+ * 0: unused
+ * z: zero
+ * n: alignment
+ *
+ * aaaaaaaa aaaatttt tttttttt 0znnnnnn
+ */
+#define MALLOCX_ARENA_MASK ((int)~0xfffff)
+#define MALLOCX_ARENA_MAX 0xffe
+#define MALLOCX_TCACHE_MASK ((int)~0xfff000ffU)
+#define MALLOCX_TCACHE_MAX 0xffd
+#define MALLOCX_LG_ALIGN_MASK ((int)0x3f)
+/* Use MALLOCX_ALIGN_GET() if alignment may not be specified in flags. */
+#define MALLOCX_ALIGN_GET_SPECIFIED(flags) \
+ (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK))
+#define MALLOCX_ALIGN_GET(flags) \
+ (MALLOCX_ALIGN_GET_SPECIFIED(flags) & (SIZE_T_MAX-1))
+#define MALLOCX_ZERO_GET(flags) \
+ ((bool)(flags & MALLOCX_ZERO))
+
+#define MALLOCX_TCACHE_GET(flags) \
+ (((unsigned)((flags & MALLOCX_TCACHE_MASK) >> 8)) - 2)
+#define MALLOCX_ARENA_GET(flags) \
+ (((unsigned)(((unsigned)flags) >> 20)) - 1)
+
+/* Smallest size class to support. */
+#define TINY_MIN (1U << LG_TINY_MIN)
+
+/*
+ * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
+ * classes).
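+ * Editorial example: with LG_QUANTUM == 4, as for x86_64 above, QUANTUM is
+ * 16, so the non-tiny size classes start at 16 bytes and advance by the
+ * quantum; an 8-byte request is served from a tiny class when NTBINS is
+ * nonzero.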
+ */ +#ifndef LG_QUANTUM +# if (defined(__i386__) || defined(_M_IX86)) +# define LG_QUANTUM 4 +# endif +# ifdef __ia64__ +# define LG_QUANTUM 4 +# endif +# ifdef __alpha__ +# define LG_QUANTUM 4 +# endif +# if (defined(__sparc64__) || defined(__sparcv9) || defined(__sparc_v9__)) +# define LG_QUANTUM 4 +# endif +# if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64)) +# define LG_QUANTUM 4 +# endif +# ifdef __arm__ +# define LG_QUANTUM 3 +# endif +# ifdef __aarch64__ +# define LG_QUANTUM 4 +# endif +# ifdef __hppa__ +# define LG_QUANTUM 4 +# endif +# ifdef __mips__ +# define LG_QUANTUM 3 +# endif +# ifdef __or1k__ +# define LG_QUANTUM 3 +# endif +# ifdef __powerpc__ +# define LG_QUANTUM 4 +# endif +# ifdef __riscv__ +# define LG_QUANTUM 4 +# endif +# ifdef __s390__ +# define LG_QUANTUM 4 +# endif +# ifdef __SH4__ +# define LG_QUANTUM 4 +# endif +# ifdef __tile__ +# define LG_QUANTUM 4 +# endif +# ifdef __le32__ +# define LG_QUANTUM 4 +# endif +# ifndef LG_QUANTUM +# error "Unknown minimum alignment for architecture; specify via " + "--with-lg-quantum" +# endif +#endif + +#define QUANTUM ((size_t)(1U << LG_QUANTUM)) +#define QUANTUM_MASK (QUANTUM - 1) + +/* Return the smallest quantum multiple that is >= a. */ +#define QUANTUM_CEILING(a) \ + (((a) + QUANTUM_MASK) & ~QUANTUM_MASK) + +#define LONG ((size_t)(1U << LG_SIZEOF_LONG)) +#define LONG_MASK (LONG - 1) + +/* Return the smallest long multiple that is >= a. */ +#define LONG_CEILING(a) \ + (((a) + LONG_MASK) & ~LONG_MASK) + +#define SIZEOF_PTR (1U << LG_SIZEOF_PTR) +#define PTR_MASK (SIZEOF_PTR - 1) + +/* Return the smallest (void *) multiple that is >= a. */ +#define PTR_CEILING(a) \ + (((a) + PTR_MASK) & ~PTR_MASK) + +/* + * Maximum size of L1 cache line. This is used to avoid cache line aliasing. + * In addition, this controls the spacing of cacheline-spaced size classes. + * + * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can + * only handle raw constants. + */ +#define LG_CACHELINE 6 +#define CACHELINE 64 +#define CACHELINE_MASK (CACHELINE - 1) + +/* Return the smallest cacheline multiple that is >= s. */ +#define CACHELINE_CEILING(s) \ + (((s) + CACHELINE_MASK) & ~CACHELINE_MASK) + +/* Page size. LG_PAGE is determined by the configure script. */ +#ifdef PAGE_MASK +# undef PAGE_MASK +#endif +#define PAGE ((size_t)(1U << LG_PAGE)) +#define PAGE_MASK ((size_t)(PAGE - 1)) + +/* Return the page base address for the page containing address a. */ +#define PAGE_ADDR2BASE(a) \ + ((void *)((uintptr_t)(a) & ~PAGE_MASK)) + +/* Return the smallest pagesize multiple that is >= s. */ +#define PAGE_CEILING(s) \ + (((s) + PAGE_MASK) & ~PAGE_MASK) + +/* Return the nearest aligned address at or below a. */ +#define ALIGNMENT_ADDR2BASE(a, alignment) \ + ((void *)((uintptr_t)(a) & ((~(alignment)) + 1))) + +/* Return the offset between a and the nearest aligned address at or below a. */ +#define ALIGNMENT_ADDR2OFFSET(a, alignment) \ + ((size_t)((uintptr_t)(a) & (alignment - 1))) + +/* Return the smallest alignment multiple that is >= s. */ +#define ALIGNMENT_CEILING(s, alignment) \ + (((s) + (alignment - 1)) & ((~(alignment)) + 1)) + +/* Declare a variable-length array. 
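An editorial usage sketch
+ * with a hypothetical caller:
+ *
+ *   void f(unsigned n) {
+ *       VARIABLE_ARRAY(uint64_t, vals, n);
+ *       ...
+ *   }
+ *
+ * Under C99 this is a true VLA; the pre-C99 alloca() fallback instead
+ * lives until the function returns, not just the enclosing block.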
*/
+#if __STDC_VERSION__ < 199901L
+# ifdef _MSC_VER
+# include <malloc.h>
+# define alloca _alloca
+# else
+# ifdef JEMALLOC_HAS_ALLOCA_H
+# include <alloca.h>
+# else
+# include <stdlib.h>
+# endif
+# endif
+# define VARIABLE_ARRAY(type, name, count) \
+ type *name = alloca(sizeof(type) * (count))
+#else
+# define VARIABLE_ARRAY(type, name, count) type name[(count)]
+#endif
+
+#include "jemalloc/internal/nstime.h"
+#include "jemalloc/internal/valgrind.h"
+#include "jemalloc/internal/util.h"
+#include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/spin.h"
+#include "jemalloc/internal/prng.h"
+#include "jemalloc/internal/ticker.h"
+#include "jemalloc/internal/ckh.h"
+#include "jemalloc/internal/size_classes.h"
+#include "jemalloc/internal/smoothstep.h"
+#include "jemalloc/internal/stats.h"
+#include "jemalloc/internal/ctl.h"
+#include "jemalloc/internal/witness.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/tsd.h"
+#include "jemalloc/internal/mb.h"
+#include "jemalloc/internal/extent.h"
+#include "jemalloc/internal/arena.h"
+#include "jemalloc/internal/bitmap.h"
+#include "jemalloc/internal/base.h"
+#include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/pages.h"
+#include "jemalloc/internal/chunk.h"
+#include "jemalloc/internal/huge.h"
+#include "jemalloc/internal/tcache.h"
+#include "jemalloc/internal/hash.h"
+#include "jemalloc/internal/quarantine.h"
+#include "jemalloc/internal/prof.h"
+
+#undef JEMALLOC_H_TYPES
+/******************************************************************************/
+#define JEMALLOC_H_STRUCTS
+
+#include "jemalloc/internal/nstime.h"
+#include "jemalloc/internal/valgrind.h"
+#include "jemalloc/internal/util.h"
+#include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/spin.h"
+#include "jemalloc/internal/prng.h"
+#include "jemalloc/internal/ticker.h"
+#include "jemalloc/internal/ckh.h"
+#include "jemalloc/internal/size_classes.h"
+#include "jemalloc/internal/smoothstep.h"
+#include "jemalloc/internal/stats.h"
+#include "jemalloc/internal/ctl.h"
+#include "jemalloc/internal/witness.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/mb.h"
+#include "jemalloc/internal/bitmap.h"
+#define JEMALLOC_ARENA_STRUCTS_A
+#include "jemalloc/internal/arena.h"
+#undef JEMALLOC_ARENA_STRUCTS_A
+#include "jemalloc/internal/extent.h"
+#define JEMALLOC_ARENA_STRUCTS_B
+#include "jemalloc/internal/arena.h"
+#undef JEMALLOC_ARENA_STRUCTS_B
+#include "jemalloc/internal/base.h"
+#include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/pages.h"
+#include "jemalloc/internal/chunk.h"
+#include "jemalloc/internal/huge.h"
+#include "jemalloc/internal/tcache.h"
+#include "jemalloc/internal/hash.h"
+#include "jemalloc/internal/quarantine.h"
+#include "jemalloc/internal/prof.h"
+
+#include "jemalloc/internal/tsd.h"
+
+#undef JEMALLOC_H_STRUCTS
+/******************************************************************************/
+#define JEMALLOC_H_EXTERNS
+
+extern bool opt_abort;
+extern const char *opt_junk;
+extern bool opt_junk_alloc;
+extern bool opt_junk_free;
+extern size_t opt_quarantine;
+extern bool opt_redzone;
+extern bool opt_utrace;
+extern bool opt_xmalloc;
+extern bool opt_zero;
+extern unsigned opt_narenas;
+
+extern bool in_valgrind;
+
+/* Number of CPUs. */
+extern unsigned ncpus;
+
+/* Number of arenas used for automatic multiplexing of threads and arenas. */
+extern unsigned narenas_auto;
+
+/*
+ * Arenas that are used to service external requests. 
Not all elements of the + * arenas array are necessarily used; arenas are created lazily as needed. + */ +extern arena_t **arenas; + +/* + * pind2sz_tab encodes the same information as could be computed by + * pind2sz_compute(). + */ +extern size_t const pind2sz_tab[NPSIZES]; +/* + * index2size_tab encodes the same information as could be computed (at + * unacceptable cost in some code paths) by index2size_compute(). + */ +extern size_t const index2size_tab[NSIZES]; +/* + * size2index_tab is a compact lookup table that rounds request sizes up to + * size classes. In order to reduce cache footprint, the table is compressed, + * and all accesses are via size2index(). + */ +extern uint8_t const size2index_tab[]; + +arena_t *a0get(void); +void *a0malloc(size_t size); +void a0dalloc(void *ptr); +void *bootstrap_malloc(size_t size); +void *bootstrap_calloc(size_t num, size_t size); +void bootstrap_free(void *ptr); +unsigned narenas_total_get(void); +arena_t *arena_init(tsdn_t *tsdn, unsigned ind); +arena_tdata_t *arena_tdata_get_hard(tsd_t *tsd, unsigned ind); +arena_t *arena_choose_hard(tsd_t *tsd, bool internal); +void arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind); +void thread_allocated_cleanup(tsd_t *tsd); +void thread_deallocated_cleanup(tsd_t *tsd); +void iarena_cleanup(tsd_t *tsd); +void arena_cleanup(tsd_t *tsd); +void arenas_tdata_cleanup(tsd_t *tsd); +void narenas_tdata_cleanup(tsd_t *tsd); +void arenas_tdata_bypass_cleanup(tsd_t *tsd); +void jemalloc_prefork(void); +void jemalloc_postfork_parent(void); +void jemalloc_postfork_child(void); + +#include "jemalloc/internal/nstime.h" +#include "jemalloc/internal/valgrind.h" +#include "jemalloc/internal/util.h" +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/spin.h" +#include "jemalloc/internal/prng.h" +#include "jemalloc/internal/ticker.h" +#include "jemalloc/internal/ckh.h" +#include "jemalloc/internal/size_classes.h" +#include "jemalloc/internal/smoothstep.h" +#include "jemalloc/internal/stats.h" +#include "jemalloc/internal/ctl.h" +#include "jemalloc/internal/witness.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/mb.h" +#include "jemalloc/internal/bitmap.h" +#include "jemalloc/internal/extent.h" +#include "jemalloc/internal/arena.h" +#include "jemalloc/internal/base.h" +#include "jemalloc/internal/rtree.h" +#include "jemalloc/internal/pages.h" +#include "jemalloc/internal/chunk.h" +#include "jemalloc/internal/huge.h" +#include "jemalloc/internal/tcache.h" +#include "jemalloc/internal/hash.h" +#include "jemalloc/internal/quarantine.h" +#include "jemalloc/internal/prof.h" +#include "jemalloc/internal/tsd.h" + +#undef JEMALLOC_H_EXTERNS +/******************************************************************************/ +#define JEMALLOC_H_INLINES + +#include "jemalloc/internal/nstime.h" +#include "jemalloc/internal/valgrind.h" +#include "jemalloc/internal/util.h" +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/spin.h" +#include "jemalloc/internal/prng.h" +#include "jemalloc/internal/ticker.h" +#include "jemalloc/internal/ckh.h" +#include "jemalloc/internal/size_classes.h" +#include "jemalloc/internal/smoothstep.h" +#include "jemalloc/internal/stats.h" +#include "jemalloc/internal/ctl.h" +#include "jemalloc/internal/tsd.h" +#include "jemalloc/internal/witness.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/mb.h" +#include "jemalloc/internal/extent.h" +#include "jemalloc/internal/base.h" +#include "jemalloc/internal/rtree.h" +#include 
"jemalloc/internal/pages.h" +#include "jemalloc/internal/chunk.h" +#include "jemalloc/internal/huge.h" + +#ifndef JEMALLOC_ENABLE_INLINE +pszind_t psz2ind(size_t psz); +size_t pind2sz_compute(pszind_t pind); +size_t pind2sz_lookup(pszind_t pind); +size_t pind2sz(pszind_t pind); +size_t psz2u(size_t psz); +szind_t size2index_compute(size_t size); +szind_t size2index_lookup(size_t size); +szind_t size2index(size_t size); +size_t index2size_compute(szind_t index); +size_t index2size_lookup(szind_t index); +size_t index2size(szind_t index); +size_t s2u_compute(size_t size); +size_t s2u_lookup(size_t size); +size_t s2u(size_t size); +size_t sa2u(size_t size, size_t alignment); +arena_t *arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal); +arena_t *arena_choose(tsd_t *tsd, arena_t *arena); +arena_t *arena_ichoose(tsd_t *tsd, arena_t *arena); +arena_tdata_t *arena_tdata_get(tsd_t *tsd, unsigned ind, + bool refresh_if_missing); +arena_t *arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing); +ticker_t *decay_ticker_get(tsd_t *tsd, unsigned ind); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_)) +JEMALLOC_INLINE pszind_t +psz2ind(size_t psz) +{ + + if (unlikely(psz > HUGE_MAXCLASS)) + return (NPSIZES); + { + pszind_t x = lg_floor((psz<<1)-1); + pszind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_PAGE) ? 0 : x - + (LG_SIZE_CLASS_GROUP + LG_PAGE); + pszind_t grp = shift << LG_SIZE_CLASS_GROUP; + + pszind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ? + LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1; + + size_t delta_inverse_mask = ZI(-1) << lg_delta; + pszind_t mod = ((((psz-1) & delta_inverse_mask) >> lg_delta)) & + ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1); + + pszind_t ind = grp + mod; + return (ind); + } +} + +JEMALLOC_INLINE size_t +pind2sz_compute(pszind_t pind) +{ + + { + size_t grp = pind >> LG_SIZE_CLASS_GROUP; + size_t mod = pind & ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1); + + size_t grp_size_mask = ~((!!grp)-1); + size_t grp_size = ((ZU(1) << (LG_PAGE + + (LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask; + + size_t shift = (grp == 0) ? 1 : grp; + size_t lg_delta = shift + (LG_PAGE-1); + size_t mod_size = (mod+1) << lg_delta; + + size_t sz = grp_size + mod_size; + return (sz); + } +} + +JEMALLOC_INLINE size_t +pind2sz_lookup(pszind_t pind) +{ + size_t ret = (size_t)pind2sz_tab[pind]; + assert(ret == pind2sz_compute(pind)); + return (ret); +} + +JEMALLOC_INLINE size_t +pind2sz(pszind_t pind) +{ + + assert(pind < NPSIZES); + return (pind2sz_lookup(pind)); +} + +JEMALLOC_INLINE size_t +psz2u(size_t psz) +{ + + if (unlikely(psz > HUGE_MAXCLASS)) + return (0); + { + size_t x = lg_floor((psz<<1)-1); + size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ? + LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1; + size_t delta = ZU(1) << lg_delta; + size_t delta_mask = delta - 1; + size_t usize = (psz + delta_mask) & ~delta_mask; + return (usize); + } +} + +JEMALLOC_INLINE szind_t +size2index_compute(size_t size) +{ + + if (unlikely(size > HUGE_MAXCLASS)) + return (NSIZES); +#if (NTBINS != 0) + if (size <= (ZU(1) << LG_TINY_MAXCLASS)) { + szind_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1; + szind_t lg_ceil = lg_floor(pow2_ceil_zu(size)); + return (lg_ceil < lg_tmin ? 0 : lg_ceil - lg_tmin); + } +#endif + { + szind_t x = lg_floor((size<<1)-1); + szind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM) ? 0 : + x - (LG_SIZE_CLASS_GROUP + LG_QUANTUM); + szind_t grp = shift << LG_SIZE_CLASS_GROUP; + + szind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1) + ? 
LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1; + + size_t delta_inverse_mask = ZI(-1) << lg_delta; + szind_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) & + ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1); + + szind_t index = NTBINS + grp + mod; + return (index); + } +} + +JEMALLOC_ALWAYS_INLINE szind_t +size2index_lookup(size_t size) +{ + + assert(size <= LOOKUP_MAXCLASS); + { + szind_t ret = (size2index_tab[(size-1) >> LG_TINY_MIN]); + assert(ret == size2index_compute(size)); + return (ret); + } +} + +JEMALLOC_ALWAYS_INLINE szind_t +size2index(size_t size) +{ + + assert(size > 0); + if (likely(size <= LOOKUP_MAXCLASS)) + return (size2index_lookup(size)); + return (size2index_compute(size)); +} + +JEMALLOC_INLINE size_t +index2size_compute(szind_t index) +{ + +#if (NTBINS > 0) + if (index < NTBINS) + return (ZU(1) << (LG_TINY_MAXCLASS - NTBINS + 1 + index)); +#endif + { + size_t reduced_index = index - NTBINS; + size_t grp = reduced_index >> LG_SIZE_CLASS_GROUP; + size_t mod = reduced_index & ((ZU(1) << LG_SIZE_CLASS_GROUP) - + 1); + + size_t grp_size_mask = ~((!!grp)-1); + size_t grp_size = ((ZU(1) << (LG_QUANTUM + + (LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask; + + size_t shift = (grp == 0) ? 1 : grp; + size_t lg_delta = shift + (LG_QUANTUM-1); + size_t mod_size = (mod+1) << lg_delta; + + size_t usize = grp_size + mod_size; + return (usize); + } +} + +JEMALLOC_ALWAYS_INLINE size_t +index2size_lookup(szind_t index) +{ + size_t ret = (size_t)index2size_tab[index]; + assert(ret == index2size_compute(index)); + return (ret); +} + +JEMALLOC_ALWAYS_INLINE size_t +index2size(szind_t index) +{ + + assert(index < NSIZES); + return (index2size_lookup(index)); +} + +JEMALLOC_ALWAYS_INLINE size_t +s2u_compute(size_t size) +{ + + if (unlikely(size > HUGE_MAXCLASS)) + return (0); +#if (NTBINS > 0) + if (size <= (ZU(1) << LG_TINY_MAXCLASS)) { + size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1; + size_t lg_ceil = lg_floor(pow2_ceil_zu(size)); + return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) : + (ZU(1) << lg_ceil)); + } +#endif + { + size_t x = lg_floor((size<<1)-1); + size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1) + ? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1; + size_t delta = ZU(1) << lg_delta; + size_t delta_mask = delta - 1; + size_t usize = (size + delta_mask) & ~delta_mask; + return (usize); + } +} + +JEMALLOC_ALWAYS_INLINE size_t +s2u_lookup(size_t size) +{ + size_t ret = index2size_lookup(size2index_lookup(size)); + + assert(ret == s2u_compute(size)); + return (ret); +} + +/* + * Compute usable size that would result from allocating an object with the + * specified size. + */ +JEMALLOC_ALWAYS_INLINE size_t +s2u(size_t size) +{ + + assert(size > 0); + if (likely(size <= LOOKUP_MAXCLASS)) + return (s2u_lookup(size)); + return (s2u_compute(size)); +} + +/* + * Compute usable size that would result from allocating an object with the + * specified size and alignment. + */ +JEMALLOC_ALWAYS_INLINE size_t +sa2u(size_t size, size_t alignment) +{ + size_t usize; + + assert(alignment != 0 && ((alignment - 1) & alignment) == 0); + + /* Try for a small size class. */ + if (size <= SMALL_MAXCLASS && alignment < PAGE) { + /* + * Round size up to the nearest multiple of alignment. + * + * This done, we can take advantage of the fact that for each + * small size class, every object is aligned at the smallest + * power of two that is non-zero in the base two representation + * of the size. 
For example: + * + * Size | Base 2 | Minimum alignment + * -----+----------+------------------ + * 96 | 1100000 | 32 + * 144 | 10100000 | 32 + * 192 | 11000000 | 64 + */ + usize = s2u(ALIGNMENT_CEILING(size, alignment)); + if (usize < LARGE_MINCLASS) + return (usize); + } + + /* Try for a large size class. */ + if (likely(size <= large_maxclass) && likely(alignment < chunksize)) { + /* + * We can't achieve subpage alignment, so round up alignment + * to the minimum that can actually be supported. + */ + alignment = PAGE_CEILING(alignment); + + /* Make sure result is a large size class. */ + usize = (size <= LARGE_MINCLASS) ? LARGE_MINCLASS : s2u(size); + + /* + * Calculate the size of the over-size run that arena_palloc() + * would need to allocate in order to guarantee the alignment. + */ + if (usize + large_pad + alignment - PAGE <= arena_maxrun) + return (usize); + } + + /* Huge size class. Beware of overflow. */ + + if (unlikely(alignment > HUGE_MAXCLASS)) + return (0); + + /* + * We can't achieve subchunk alignment, so round up alignment to the + * minimum that can actually be supported. + */ + alignment = CHUNK_CEILING(alignment); + + /* Make sure result is a huge size class. */ + if (size <= chunksize) + usize = chunksize; + else { + usize = s2u(size); + if (usize < size) { + /* size_t overflow. */ + return (0); + } + } + + /* + * Calculate the multi-chunk mapping that huge_palloc() would need in + * order to guarantee the alignment. + */ + if (usize + alignment - PAGE < usize) { + /* size_t overflow. */ + return (0); + } + return (usize); +} + +/* Choose an arena based on a per-thread value. */ +JEMALLOC_INLINE arena_t * +arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) +{ + arena_t *ret; + + if (arena != NULL) + return (arena); + + ret = internal ? tsd_iarena_get(tsd) : tsd_arena_get(tsd); + if (unlikely(ret == NULL)) + ret = arena_choose_hard(tsd, internal); + + return (ret); +} + +JEMALLOC_INLINE arena_t * +arena_choose(tsd_t *tsd, arena_t *arena) +{ + + return (arena_choose_impl(tsd, arena, false)); +} + +JEMALLOC_INLINE arena_t * +arena_ichoose(tsd_t *tsd, arena_t *arena) +{ + + return (arena_choose_impl(tsd, arena, true)); +} + +JEMALLOC_INLINE arena_tdata_t * +arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing) +{ + arena_tdata_t *tdata; + arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd); + + if (unlikely(arenas_tdata == NULL)) { + /* arenas_tdata hasn't been initialized yet. */ + return (arena_tdata_get_hard(tsd, ind)); + } + if (unlikely(ind >= tsd_narenas_tdata_get(tsd))) { + /* + * ind is invalid, cache is old (too small), or tdata to be + * initialized. + */ + return (refresh_if_missing ? 
arena_tdata_get_hard(tsd, ind) : + NULL); + } + + tdata = &arenas_tdata[ind]; + if (likely(tdata != NULL) || !refresh_if_missing) + return (tdata); + return (arena_tdata_get_hard(tsd, ind)); +} + +JEMALLOC_INLINE arena_t * +arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing) +{ + arena_t *ret; + + assert(ind <= MALLOCX_ARENA_MAX); + + ret = arenas[ind]; + if (unlikely(ret == NULL)) { + ret = atomic_read_p((void *)&arenas[ind]); + if (init_if_missing && unlikely(ret == NULL)) + ret = arena_init(tsdn, ind); + } + return (ret); +} + +JEMALLOC_INLINE ticker_t * +decay_ticker_get(tsd_t *tsd, unsigned ind) +{ + arena_tdata_t *tdata; + + tdata = arena_tdata_get(tsd, ind, true); + if (unlikely(tdata == NULL)) + return (NULL); + return (&tdata->decay_ticker); +} +#endif + +#include "jemalloc/internal/bitmap.h" +/* + * Include portions of arena.h interleaved with tcache.h in order to resolve + * circular dependencies. + */ +#define JEMALLOC_ARENA_INLINE_A +#include "jemalloc/internal/arena.h" +#undef JEMALLOC_ARENA_INLINE_A +#include "jemalloc/internal/tcache.h" +#define JEMALLOC_ARENA_INLINE_B +#include "jemalloc/internal/arena.h" +#undef JEMALLOC_ARENA_INLINE_B +#include "jemalloc/internal/hash.h" +#include "jemalloc/internal/quarantine.h" + +#ifndef JEMALLOC_ENABLE_INLINE +arena_t *iaalloc(const void *ptr); +size_t isalloc(tsdn_t *tsdn, const void *ptr, bool demote); +void *iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, + tcache_t *tcache, bool is_metadata, arena_t *arena, bool slow_path); +void *ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, + bool slow_path); +void *ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, + tcache_t *tcache, bool is_metadata, arena_t *arena); +void *ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, + tcache_t *tcache, arena_t *arena); +void *ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero); +size_t ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote); +size_t u2rz(size_t usize); +size_t p2rz(tsdn_t *tsdn, const void *ptr); +void idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool is_metadata, + bool slow_path); +void idalloc(tsd_t *tsd, void *ptr); +void iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path); +void isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache, + bool slow_path); +void isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache, + bool slow_path); +void *iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, + size_t extra, size_t alignment, bool zero, tcache_t *tcache, + arena_t *arena); +void *iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, + size_t alignment, bool zero, tcache_t *tcache, arena_t *arena); +void *iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, + size_t alignment, bool zero); +bool ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, + size_t extra, size_t alignment, bool zero); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_)) +JEMALLOC_ALWAYS_INLINE arena_t * +iaalloc(const void *ptr) +{ + + assert(ptr != NULL); + + return (arena_aalloc(ptr)); +} + +/* + * Typical usage: + * tsdn_t *tsdn = [...] + * void *ptr = [...] + * size_t sz = isalloc(tsdn, ptr, config_prof); + */ +JEMALLOC_ALWAYS_INLINE size_t +isalloc(tsdn_t *tsdn, const void *ptr, bool demote) +{ + + assert(ptr != NULL); + /* Demotion only makes sense if config_prof is true. 
*/ + assert(config_prof || !demote); + + return (arena_salloc(tsdn, ptr, demote)); +} + +JEMALLOC_ALWAYS_INLINE void * +iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache, + bool is_metadata, arena_t *arena, bool slow_path) +{ + void *ret; + + assert(size != 0); + assert(!is_metadata || tcache == NULL); + assert(!is_metadata || arena == NULL || arena->ind < narenas_auto); + + ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path); + if (config_stats && is_metadata && likely(ret != NULL)) { + arena_metadata_allocated_add(iaalloc(ret), + isalloc(tsdn, ret, config_prof)); + } + return (ret); +} + +JEMALLOC_ALWAYS_INLINE void * +ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, bool slow_path) +{ + + return (iallocztm(tsd_tsdn(tsd), size, ind, zero, tcache_get(tsd, true), + false, NULL, slow_path)); +} + +JEMALLOC_ALWAYS_INLINE void * +ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, + tcache_t *tcache, bool is_metadata, arena_t *arena) +{ + void *ret; + + assert(usize != 0); + assert(usize == sa2u(usize, alignment)); + assert(!is_metadata || tcache == NULL); + assert(!is_metadata || arena == NULL || arena->ind < narenas_auto); + + ret = arena_palloc(tsdn, arena, usize, alignment, zero, tcache); + assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret); + if (config_stats && is_metadata && likely(ret != NULL)) { + arena_metadata_allocated_add(iaalloc(ret), isalloc(tsdn, ret, + config_prof)); + } + return (ret); +} + +JEMALLOC_ALWAYS_INLINE void * +ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, + tcache_t *tcache, arena_t *arena) +{ + + return (ipallocztm(tsdn, usize, alignment, zero, tcache, false, arena)); +} + +JEMALLOC_ALWAYS_INLINE void * +ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero) +{ + + return (ipallocztm(tsd_tsdn(tsd), usize, alignment, zero, + tcache_get(tsd, true), false, NULL)); +} + +JEMALLOC_ALWAYS_INLINE size_t +ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote) +{ + extent_node_t *node; + + /* Return 0 if ptr is not within a chunk managed by jemalloc. */ + node = chunk_lookup(ptr, false); + if (node == NULL) + return (0); + /* Only arena chunks should be looked up via interior pointers. 
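+ * Huge regions are registered by base address only, so the assertion
+ * below accepts an interior pointer only when the node is an arena
+ * chunk (extent_node_achunk_get(node) is true).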
*/ + assert(extent_node_addr_get(node) == ptr || + extent_node_achunk_get(node)); + + return (isalloc(tsdn, ptr, demote)); +} + +JEMALLOC_INLINE size_t +u2rz(size_t usize) +{ + size_t ret; + + if (usize <= SMALL_MAXCLASS) { + szind_t binind = size2index(usize); + ret = arena_bin_info[binind].redzone_size; + } else + ret = 0; + + return (ret); +} + +JEMALLOC_INLINE size_t +p2rz(tsdn_t *tsdn, const void *ptr) +{ + size_t usize = isalloc(tsdn, ptr, false); + + return (u2rz(usize)); +} + +JEMALLOC_ALWAYS_INLINE void +idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool is_metadata, + bool slow_path) +{ + + assert(ptr != NULL); + assert(!is_metadata || tcache == NULL); + assert(!is_metadata || iaalloc(ptr)->ind < narenas_auto); + if (config_stats && is_metadata) { + arena_metadata_allocated_sub(iaalloc(ptr), isalloc(tsdn, ptr, + config_prof)); + } + + arena_dalloc(tsdn, ptr, tcache, slow_path); +} + +JEMALLOC_ALWAYS_INLINE void +idalloc(tsd_t *tsd, void *ptr) +{ + + idalloctm(tsd_tsdn(tsd), ptr, tcache_get(tsd, false), false, true); +} + +JEMALLOC_ALWAYS_INLINE void +iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) +{ + + if (slow_path && config_fill && unlikely(opt_quarantine)) + quarantine(tsd, ptr); + else + idalloctm(tsd_tsdn(tsd), ptr, tcache, false, slow_path); +} + +JEMALLOC_ALWAYS_INLINE void +isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache, + bool slow_path) +{ + + arena_sdalloc(tsdn, ptr, size, tcache, slow_path); +} + +JEMALLOC_ALWAYS_INLINE void +isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache, bool slow_path) +{ + + if (slow_path && config_fill && unlikely(opt_quarantine)) + quarantine(tsd, ptr); + else + isdalloct(tsd_tsdn(tsd), ptr, size, tcache, slow_path); +} + +JEMALLOC_ALWAYS_INLINE void * +iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, + size_t extra, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena) +{ + void *p; + size_t usize, copysize; + + usize = sa2u(size + extra, alignment); + if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) + return (NULL); + p = ipalloct(tsd_tsdn(tsd), usize, alignment, zero, tcache, arena); + if (p == NULL) { + if (extra == 0) + return (NULL); + /* Try again, without extra this time. */ + usize = sa2u(size, alignment); + if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) + return (NULL); + p = ipalloct(tsd_tsdn(tsd), usize, alignment, zero, tcache, + arena); + if (p == NULL) + return (NULL); + } + /* + * Copy at most size bytes (not size+extra), since the caller has no + * expectation that the extra bytes will be reliably preserved. + */ + copysize = (size < oldsize) ? size : oldsize; + memcpy(p, ptr, copysize); + isqalloc(tsd, ptr, oldsize, tcache, true); + return (p); +} + +JEMALLOC_ALWAYS_INLINE void * +iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment, + bool zero, tcache_t *tcache, arena_t *arena) +{ + + assert(ptr != NULL); + assert(size != 0); + + if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1)) + != 0) { + /* + * Existing object alignment is inadequate; allocate new space + * and copy. 
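+ * (Worked example with hypothetical numbers: for ptr == 0x7f0030 and
+ * alignment == 64, (uintptr_t)ptr & 63 == 0x30 != 0, so the object is
+ * moved; had ptr been 64-byte aligned, the arena_ralloc() path below,
+ * which can often resize in place, would be taken instead.)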
+ */
+ return (iralloct_realign(tsd, ptr, oldsize, size, 0, alignment,
+ zero, tcache, arena));
+ }
+
+ return (arena_ralloc(tsd, arena, ptr, oldsize, size, alignment, zero,
+ tcache));
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
+ bool zero)
+{
+
+ return (iralloct(tsd, ptr, oldsize, size, alignment, zero,
+ tcache_get(tsd, true), NULL));
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra,
+ size_t alignment, bool zero)
+{
+
+ assert(ptr != NULL);
+ assert(size != 0);
+
+ if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
+ != 0) {
+ /* Existing object alignment is inadequate. */
+ return (true);
+ }
+
+ return (arena_ralloc_no_move(tsdn, ptr, oldsize, size, extra, zero));
+}
+#endif
+
+#include "jemalloc/internal/prof.h"
+
+#undef JEMALLOC_H_INLINES
+/******************************************************************************/
+#endif /* JEMALLOC_INTERNAL_H */
diff --git a/sources/jemalloc/include-win/jemalloc/internal/jemalloc_internal_decls.h b/sources/jemalloc/include-win/jemalloc/internal/jemalloc_internal_decls.h
new file mode 100755
index 00000000..c907d910
--- /dev/null
+++ b/sources/jemalloc/include-win/jemalloc/internal/jemalloc_internal_decls.h
@@ -0,0 +1,75 @@
+#ifndef JEMALLOC_INTERNAL_DECLS_H
+#define JEMALLOC_INTERNAL_DECLS_H
+
+#include <math.h>
+#ifdef _WIN32
+# include <windows.h>
+# include "msvc_compat/windows_extra.h"
+
+#else
+# include <sys/param.h>
+# include <sys/mman.h>
+# if !defined(__pnacl__) && !defined(__native_client__)
+# include <sys/syscall.h>
+# if !defined(SYS_write) && defined(__NR_write)
+# define SYS_write __NR_write
+# endif
+# include <sys/uio.h>
+# endif
+# include <pthread.h>
+# ifdef JEMALLOC_OS_UNFAIR_LOCK
+# include <os/lock.h>
+# endif
+# ifdef JEMALLOC_GLIBC_MALLOC_HOOK
+# include <sched.h>
+# endif
+# include <errno.h>
+# include <sys/time.h>
+# include <time.h>
+# ifdef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME
+# include <mach/mach_time.h>
+# endif
+#endif
+#include <sys/types.h>
+
+#include <limits.h>
+#ifndef SIZE_T_MAX
+# define SIZE_T_MAX SIZE_MAX
+#endif
+#include <stdarg.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <stddef.h>
+#ifndef offsetof
+# define offsetof(type, member) ((size_t)&(((type *)NULL)->member))
+#endif
+#include <string.h>
+#include <strings.h>
+#include <ctype.h>
+#ifdef _MSC_VER
+# include <io.h>
+typedef intptr_t ssize_t;
+# define PATH_MAX 1024
+# define STDERR_FILENO 2
+# define __func__ __FUNCTION__
+# ifdef JEMALLOC_HAS_RESTRICT
+# define restrict __restrict
+# endif
+/* Disable warnings about deprecated system functions. */
+# pragma warning(disable: 4996)
+#if _MSC_VER < 1800
+static int
+isblank(int c)
+{
+
+ return (c == '\t' || c == ' ');
+}
+#endif
+#else
+# include <unistd.h>
+#endif
+#include <fcntl.h>
+
+#endif /* JEMALLOC_INTERNAL_DECLS_H */
diff --git a/sources/jemalloc/include-win/jemalloc/internal/jemalloc_internal_defs.h b/sources/jemalloc/include-win/jemalloc/internal/jemalloc_internal_defs.h
new file mode 100755
index 00000000..f3b1019c
--- /dev/null
+++ b/sources/jemalloc/include-win/jemalloc/internal/jemalloc_internal_defs.h
@@ -0,0 +1,316 @@
+/* include/jemalloc/internal/jemalloc_internal_defs.h. Generated from jemalloc_internal_defs.h.in by configure. */
+#ifndef JEMALLOC_INTERNAL_DEFS_H_
+#define JEMALLOC_INTERNAL_DEFS_H_
+/*
+ * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all
+ * public APIs to be prefixed. This makes it possible, with some care, to use
+ * multiple allocators simultaneously.
+ */
+#define JEMALLOC_PREFIX "je_"
+#define JEMALLOC_CPREFIX "JE_"
+
+/*
+ * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs.
+ * For shared libraries, symbol visibility mechanisms prevent these symbols + * from being exported, but for static libraries, naming collisions are a real + * possibility. + */ +#define JEMALLOC_PRIVATE_NAMESPACE je_ + +/* + * Hyper-threaded CPUs may need a special instruction inside spin loops in + * order to yield to another virtual CPU. + */ +#define CPU_SPINWAIT _mm_pause() + +/* Defined if C11 atomics are available. */ +/* #undef JEMALLOC_C11ATOMICS */ + +/* Defined if the equivalent of FreeBSD's atomic(9) functions are available. */ +/* #undef JEMALLOC_ATOMIC9 */ + +/* + * Defined if OSAtomic*() functions are available, as provided by Darwin, and + * documented in the atomic(3) manual page. + */ +/* #undef JEMALLOC_OSATOMIC */ + +/* + * Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and + * __sync_sub_and_fetch(uint32_t *, uint32_t) are available, despite + * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the + * functions are defined in libgcc instead of being inlines). + */ +/* #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_4 */ + +/* + * Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and + * __sync_sub_and_fetch(uint64_t *, uint64_t) are available, despite + * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the + * functions are defined in libgcc instead of being inlines). + */ +/* #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_8 */ + +/* + * Defined if __builtin_clz() and __builtin_clzl() are available. + */ +/* #undef JEMALLOC_HAVE_BUILTIN_CLZ */ + +/* + * Defined if os_unfair_lock_*() functions are available, as provided by Darwin. + */ +/* #undef JEMALLOC_OS_UNFAIR_LOCK */ + +/* + * Defined if OSSpin*() functions are available, as provided by Darwin, and + * documented in the spinlock(3) manual page. + */ +/* #undef JEMALLOC_OSSPIN */ + +/* Defined if syscall(2) is usable. */ +/* #undef JEMALLOC_USE_SYSCALL */ + +/* + * Defined if secure_getenv(3) is available. + */ +/* #undef JEMALLOC_HAVE_SECURE_GETENV */ + +/* + * Defined if issetugid(2) is available. + */ +/* #undef JEMALLOC_HAVE_ISSETUGID */ + +/* Defined if pthread_atfork(3) is available. */ +/* #undef JEMALLOC_HAVE_PTHREAD_ATFORK */ + +/* + * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available. + */ +/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */ + +/* + * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available. + */ +/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC */ + +/* + * Defined if mach_absolute_time() is available. + */ +/* #undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME */ + +/* + * Defined if _malloc_thread_cleanup() exists. At least in the case of + * FreeBSD, pthread_key_create() allocates, which if used during malloc + * bootstrapping will cause recursion into the pthreads library. Therefore, if + * _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in + * malloc_tsd. + */ +/* #undef JEMALLOC_MALLOC_THREAD_CLEANUP */ + +/* + * Defined if threaded initialization is known to be safe on this platform. + * Among other things, it must be possible to initialize a mutex without + * triggering allocation in order for threaded allocation to be safe. + */ +/* #undef JEMALLOC_THREADED_INIT */ + +/* + * Defined if the pthreads implementation defines + * _pthread_mutex_init_calloc_cb(), in which case the function is used in order + * to avoid recursive allocation during mutex initialization. + */ +/* #undef JEMALLOC_MUTEX_INIT_CB */ + +/* Non-empty if the tls_model attribute is supported. 
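+ * Configure leaves it empty for this MSVC build; on ELF toolchains it is
+ * typically __attribute__((tls_model("initial-exec"))).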
*/ +#define JEMALLOC_TLS_MODEL + +/* JEMALLOC_CC_SILENCE enables code that silences unuseful compiler warnings. */ +#define JEMALLOC_CC_SILENCE + +/* JEMALLOC_CODE_COVERAGE enables test code coverage analysis. */ +/* #undef JEMALLOC_CODE_COVERAGE */ + +/* + * JEMALLOC_DEBUG enables assertions and other sanity checks, and disables + * inline functions. + */ +#define JEMALLOC_DEBUG + +/* JEMALLOC_STATS enables statistics calculation. */ +#define JEMALLOC_STATS + +/* JEMALLOC_PROF enables allocation profiling. */ +/* #undef JEMALLOC_PROF */ + +/* Use libunwind for profile backtracing if defined. */ +/* #undef JEMALLOC_PROF_LIBUNWIND */ + +/* Use libgcc for profile backtracing if defined. */ +/* #undef JEMALLOC_PROF_LIBGCC */ + +/* Use gcc intrinsics for profile backtracing if defined. */ +/* #undef JEMALLOC_PROF_GCC */ + +/* + * JEMALLOC_TCACHE enables a thread-specific caching layer for small objects. + * This makes it possible to allocate/deallocate objects without any locking + * when the cache is in the steady state. + */ +/* #undef JEMALLOC_TCACHE */ + +/* + * JEMALLOC_DSS enables use of sbrk(2) to allocate chunks from the data storage + * segment (DSS). + */ +/* #undef JEMALLOC_DSS */ + +/* Support memory filling (junk/zero/quarantine/redzone). */ +#define JEMALLOC_FILL + +/* Support utrace(2)-based tracing. */ +/* #undef JEMALLOC_UTRACE */ + +/* Support Valgrind. */ +/* #undef JEMALLOC_VALGRIND */ + +/* Support optional abort() on OOM. */ +/* #undef JEMALLOC_XMALLOC */ + +/* Support lazy locking (avoid locking unless a second thread is launched). */ +/* #undef JEMALLOC_LAZY_LOCK */ + +/* Minimum size class to support is 2^LG_TINY_MIN bytes. */ +#define LG_TINY_MIN 3 + +/* + * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size + * classes). + */ +/* #undef LG_QUANTUM */ + +/* One page is 2^LG_PAGE bytes. */ +#define LG_PAGE 12 + +/* + * If defined, adjacent virtual memory mappings with identical attributes + * automatically coalesce, and they fragment when changes are made to subranges. + * This is the normal order of things for mmap()/munmap(), but on Windows + * VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e. + * mappings do *not* coalesce/fragment. + */ +/* #undef JEMALLOC_MAPS_COALESCE */ + +/* + * If defined, use munmap() to unmap freed chunks, rather than storing them for + * later reuse. This is disabled by default on Linux because common sequences + * of mmap()/munmap() calls will cause virtual memory map holes. + */ +#define JEMALLOC_MUNMAP + +/* TLS is used to map arenas and magazine caches to threads. */ +/* #undef JEMALLOC_TLS */ + +/* + * Used to mark unreachable code to quiet "end of non-void" compiler warnings. + * Don't use this directly; instead use unreachable() from util.h + */ +#define JEMALLOC_INTERNAL_UNREACHABLE abort + +/* + * ffs*() functions to use for bitmapping. Don't use these directly; instead, + * use ffs_*() from util.h. + */ +#define JEMALLOC_INTERNAL_FFSLL ffsll +#define JEMALLOC_INTERNAL_FFSL ffsl +#define JEMALLOC_INTERNAL_FFS ffs + +/* + * JEMALLOC_IVSALLOC enables ivsalloc(), which verifies that pointers reside + * within jemalloc-owned chunks before dereferencing them. + */ +#define JEMALLOC_IVSALLOC + +/* + * If defined, explicitly attempt to more uniformly distribute large allocation + * pointer alignments across all cache indices. + */ +#define JEMALLOC_CACHE_OBLIVIOUS + +/* + * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings. 
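+ * When enabled, jemalloc registers its own malloc zone in order to stand
+ * in for the system allocator; it is irrelevant to this Windows build.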
+ */ +/* #undef JEMALLOC_ZONE */ + +/* + * Methods for determining whether the OS overcommits. + * JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's + * /proc/sys/vm.overcommit_memory file. + * JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl. + */ +/* #undef JEMALLOC_SYSCTL_VM_OVERCOMMIT */ +/* #undef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY */ + +/* Defined if madvise(2) is available. */ +/* #undef JEMALLOC_HAVE_MADVISE */ + +/* + * Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE + * arguments to madvise(2). + */ +/* #undef JEMALLOC_HAVE_MADVISE_HUGE */ + +/* + * Methods for purging unused pages differ between operating systems. + * + * madvise(..., MADV_FREE) : This marks pages as being unused, such that they + * will be discarded rather than swapped out. + * madvise(..., MADV_DONTNEED) : This immediately discards pages, such that + * new pages will be demand-zeroed if the + * address region is later touched. + */ +/* #undef JEMALLOC_PURGE_MADVISE_FREE */ +/* #undef JEMALLOC_PURGE_MADVISE_DONTNEED */ + +/* Defined if transparent huge page support is enabled. */ +/* #undef JEMALLOC_THP */ + +/* Define if operating system has alloca.h header. */ +/* #undef JEMALLOC_HAS_ALLOCA_H */ + +/* C99 restrict keyword supported. */ +/* #undef JEMALLOC_HAS_RESTRICT */ + +/* For use by hash code. */ +/* #undef JEMALLOC_BIG_ENDIAN */ + +/* sizeof(int) == 2^LG_SIZEOF_INT. */ +#define LG_SIZEOF_INT 2 + +/* sizeof(long) == 2^LG_SIZEOF_LONG. */ +#define LG_SIZEOF_LONG 2 + +/* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */ +#define LG_SIZEOF_LONG_LONG 3 + +/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */ +#define LG_SIZEOF_INTMAX_T 3 + +/* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */ +/* #undef JEMALLOC_GLIBC_MALLOC_HOOK */ + +/* glibc memalign hook. */ +/* #undef JEMALLOC_GLIBC_MEMALIGN_HOOK */ + +/* Adaptive mutex support in pthreads. */ +/* #undef JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP */ + +/* + * If defined, jemalloc symbols are not exported (doesn't work when + * JEMALLOC_PREFIX is not defined). + */ +/* #undef JEMALLOC_EXPORT */ + +/* config.malloc_conf options string. */ +#define JEMALLOC_CONFIG_MALLOC_CONF "" + +#endif /* JEMALLOC_INTERNAL_DEFS_H_ */ diff --git a/sources/jemalloc/include-win/jemalloc/internal/jemalloc_internal_defs.h.in b/sources/jemalloc/include-win/jemalloc/internal/jemalloc_internal_defs.h.in new file mode 100755 index 00000000..7c88b0d7 --- /dev/null +++ b/sources/jemalloc/include-win/jemalloc/internal/jemalloc_internal_defs.h.in @@ -0,0 +1,315 @@ +#ifndef JEMALLOC_INTERNAL_DEFS_H_ +#define JEMALLOC_INTERNAL_DEFS_H_ +/* + * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all + * public APIs to be prefixed. This makes it possible, with some care, to use + * multiple allocators simultaneously. + */ +#undef JEMALLOC_PREFIX +#undef JEMALLOC_CPREFIX + +/* + * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs. + * For shared libraries, symbol visibility mechanisms prevent these symbols + * from being exported, but for static libraries, naming collisions are a real + * possibility. + */ +#undef JEMALLOC_PRIVATE_NAMESPACE + +/* + * Hyper-threaded CPUs may need a special instruction inside spin loops in + * order to yield to another virtual CPU. + */ +#undef CPU_SPINWAIT + +/* Defined if C11 atomics are available. */ +#undef JEMALLOC_C11ATOMICS + +/* Defined if the equivalent of FreeBSD's atomic(9) functions are available. 
*/ +#undef JEMALLOC_ATOMIC9 + +/* + * Defined if OSAtomic*() functions are available, as provided by Darwin, and + * documented in the atomic(3) manual page. + */ +#undef JEMALLOC_OSATOMIC + +/* + * Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and + * __sync_sub_and_fetch(uint32_t *, uint32_t) are available, despite + * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the + * functions are defined in libgcc instead of being inlines). + */ +#undef JE_FORCE_SYNC_COMPARE_AND_SWAP_4 + +/* + * Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and + * __sync_sub_and_fetch(uint64_t *, uint64_t) are available, despite + * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the + * functions are defined in libgcc instead of being inlines). + */ +#undef JE_FORCE_SYNC_COMPARE_AND_SWAP_8 + +/* + * Defined if __builtin_clz() and __builtin_clzl() are available. + */ +#undef JEMALLOC_HAVE_BUILTIN_CLZ + +/* + * Defined if os_unfair_lock_*() functions are available, as provided by Darwin. + */ +#undef JEMALLOC_OS_UNFAIR_LOCK + +/* + * Defined if OSSpin*() functions are available, as provided by Darwin, and + * documented in the spinlock(3) manual page. + */ +#undef JEMALLOC_OSSPIN + +/* Defined if syscall(2) is usable. */ +#undef JEMALLOC_USE_SYSCALL + +/* + * Defined if secure_getenv(3) is available. + */ +#undef JEMALLOC_HAVE_SECURE_GETENV + +/* + * Defined if issetugid(2) is available. + */ +#undef JEMALLOC_HAVE_ISSETUGID + +/* Defined if pthread_atfork(3) is available. */ +#undef JEMALLOC_HAVE_PTHREAD_ATFORK + +/* + * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available. + */ +#undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE + +/* + * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available. + */ +#undef JEMALLOC_HAVE_CLOCK_MONOTONIC + +/* + * Defined if mach_absolute_time() is available. + */ +#undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME + +/* + * Defined if _malloc_thread_cleanup() exists. At least in the case of + * FreeBSD, pthread_key_create() allocates, which if used during malloc + * bootstrapping will cause recursion into the pthreads library. Therefore, if + * _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in + * malloc_tsd. + */ +#undef JEMALLOC_MALLOC_THREAD_CLEANUP + +/* + * Defined if threaded initialization is known to be safe on this platform. + * Among other things, it must be possible to initialize a mutex without + * triggering allocation in order for threaded allocation to be safe. + */ +#undef JEMALLOC_THREADED_INIT + +/* + * Defined if the pthreads implementation defines + * _pthread_mutex_init_calloc_cb(), in which case the function is used in order + * to avoid recursive allocation during mutex initialization. + */ +#undef JEMALLOC_MUTEX_INIT_CB + +/* Non-empty if the tls_model attribute is supported. */ +#undef JEMALLOC_TLS_MODEL + +/* JEMALLOC_CC_SILENCE enables code that silences unuseful compiler warnings. */ +#undef JEMALLOC_CC_SILENCE + +/* JEMALLOC_CODE_COVERAGE enables test code coverage analysis. */ +#undef JEMALLOC_CODE_COVERAGE + +/* + * JEMALLOC_DEBUG enables assertions and other sanity checks, and disables + * inline functions. + */ +#undef JEMALLOC_DEBUG + +/* JEMALLOC_STATS enables statistics calculation. */ +#undef JEMALLOC_STATS + +/* JEMALLOC_PROF enables allocation profiling. */ +#undef JEMALLOC_PROF + +/* Use libunwind for profile backtracing if defined. */ +#undef JEMALLOC_PROF_LIBUNWIND + +/* Use libgcc for profile backtracing if defined. 
*/ +#undef JEMALLOC_PROF_LIBGCC + +/* Use gcc intrinsics for profile backtracing if defined. */ +#undef JEMALLOC_PROF_GCC + +/* + * JEMALLOC_TCACHE enables a thread-specific caching layer for small objects. + * This makes it possible to allocate/deallocate objects without any locking + * when the cache is in the steady state. + */ +#undef JEMALLOC_TCACHE + +/* + * JEMALLOC_DSS enables use of sbrk(2) to allocate chunks from the data storage + * segment (DSS). + */ +#undef JEMALLOC_DSS + +/* Support memory filling (junk/zero/quarantine/redzone). */ +#undef JEMALLOC_FILL + +/* Support utrace(2)-based tracing. */ +#undef JEMALLOC_UTRACE + +/* Support Valgrind. */ +#undef JEMALLOC_VALGRIND + +/* Support optional abort() on OOM. */ +#undef JEMALLOC_XMALLOC + +/* Support lazy locking (avoid locking unless a second thread is launched). */ +#undef JEMALLOC_LAZY_LOCK + +/* Minimum size class to support is 2^LG_TINY_MIN bytes. */ +#undef LG_TINY_MIN + +/* + * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size + * classes). + */ +#undef LG_QUANTUM + +/* One page is 2^LG_PAGE bytes. */ +#undef LG_PAGE + +/* + * If defined, adjacent virtual memory mappings with identical attributes + * automatically coalesce, and they fragment when changes are made to subranges. + * This is the normal order of things for mmap()/munmap(), but on Windows + * VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e. + * mappings do *not* coalesce/fragment. + */ +#undef JEMALLOC_MAPS_COALESCE + +/* + * If defined, use munmap() to unmap freed chunks, rather than storing them for + * later reuse. This is disabled by default on Linux because common sequences + * of mmap()/munmap() calls will cause virtual memory map holes. + */ +#undef JEMALLOC_MUNMAP + +/* TLS is used to map arenas and magazine caches to threads. */ +#undef JEMALLOC_TLS + +/* + * Used to mark unreachable code to quiet "end of non-void" compiler warnings. + * Don't use this directly; instead use unreachable() from util.h + */ +#undef JEMALLOC_INTERNAL_UNREACHABLE + +/* + * ffs*() functions to use for bitmapping. Don't use these directly; instead, + * use ffs_*() from util.h. + */ +#undef JEMALLOC_INTERNAL_FFSLL +#undef JEMALLOC_INTERNAL_FFSL +#undef JEMALLOC_INTERNAL_FFS + +/* + * JEMALLOC_IVSALLOC enables ivsalloc(), which verifies that pointers reside + * within jemalloc-owned chunks before dereferencing them. + */ +#undef JEMALLOC_IVSALLOC + +/* + * If defined, explicitly attempt to more uniformly distribute large allocation + * pointer alignments across all cache indices. + */ +#undef JEMALLOC_CACHE_OBLIVIOUS + +/* + * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings. + */ +#undef JEMALLOC_ZONE + +/* + * Methods for determining whether the OS overcommits. + * JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's + * /proc/sys/vm.overcommit_memory file. + * JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl. + */ +#undef JEMALLOC_SYSCTL_VM_OVERCOMMIT +#undef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY + +/* Defined if madvise(2) is available. */ +#undef JEMALLOC_HAVE_MADVISE + +/* + * Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE + * arguments to madvise(2). + */ +#undef JEMALLOC_HAVE_MADVISE_HUGE + +/* + * Methods for purging unused pages differ between operating systems. + * + * madvise(..., MADV_FREE) : This marks pages as being unused, such that they + * will be discarded rather than swapped out. 
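+ * (The mapping itself stays intact, and reads keep
+ * returning the old contents until the kernel actually
+ * reclaims the pages.)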
+ * madvise(..., MADV_DONTNEED) : This immediately discards pages, such that + * new pages will be demand-zeroed if the + * address region is later touched. + */ +#undef JEMALLOC_PURGE_MADVISE_FREE +#undef JEMALLOC_PURGE_MADVISE_DONTNEED + +/* Defined if transparent huge page support is enabled. */ +#undef JEMALLOC_THP + +/* Define if operating system has alloca.h header. */ +#undef JEMALLOC_HAS_ALLOCA_H + +/* C99 restrict keyword supported. */ +#undef JEMALLOC_HAS_RESTRICT + +/* For use by hash code. */ +#undef JEMALLOC_BIG_ENDIAN + +/* sizeof(int) == 2^LG_SIZEOF_INT. */ +#undef LG_SIZEOF_INT + +/* sizeof(long) == 2^LG_SIZEOF_LONG. */ +#undef LG_SIZEOF_LONG + +/* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */ +#undef LG_SIZEOF_LONG_LONG + +/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */ +#undef LG_SIZEOF_INTMAX_T + +/* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */ +#undef JEMALLOC_GLIBC_MALLOC_HOOK + +/* glibc memalign hook. */ +#undef JEMALLOC_GLIBC_MEMALIGN_HOOK + +/* Adaptive mutex support in pthreads. */ +#undef JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP + +/* + * If defined, jemalloc symbols are not exported (doesn't work when + * JEMALLOC_PREFIX is not defined). + */ +#undef JEMALLOC_EXPORT + +/* config.malloc_conf options string. */ +#undef JEMALLOC_CONFIG_MALLOC_CONF + +#endif /* JEMALLOC_INTERNAL_DEFS_H_ */ diff --git a/sources/jemalloc/include-win/jemalloc/internal/jemalloc_internal_macros.h b/sources/jemalloc/include-win/jemalloc/internal/jemalloc_internal_macros.h new file mode 100755 index 00000000..a08ba772 --- /dev/null +++ b/sources/jemalloc/include-win/jemalloc/internal/jemalloc_internal_macros.h @@ -0,0 +1,57 @@ +/* + * JEMALLOC_ALWAYS_INLINE and JEMALLOC_INLINE are used within header files for + * functions that are static inline functions if inlining is enabled, and + * single-definition library-private functions if inlining is disabled. + * + * JEMALLOC_ALWAYS_INLINE_C and JEMALLOC_INLINE_C are for use in .c files, in + * which case the denoted functions are always static, regardless of whether + * inlining is enabled. + */ +#if defined(JEMALLOC_DEBUG) || defined(JEMALLOC_CODE_COVERAGE) + /* Disable inlining to make debugging/profiling easier. 
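+ * In this branch the macros expand to nothing (or to plain "static"),
+ * JEMALLOC_ENABLE_INLINE stays undefined, and each header's
+ * "#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_*_C_))"
+ * block is consequently compiled exactly once, in the matching .c file.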
*/ +# define JEMALLOC_ALWAYS_INLINE +# define JEMALLOC_ALWAYS_INLINE_C static +# define JEMALLOC_INLINE +# define JEMALLOC_INLINE_C static +# define inline +#else +# define JEMALLOC_ENABLE_INLINE +# ifdef JEMALLOC_HAVE_ATTR +# define JEMALLOC_ALWAYS_INLINE \ + static inline JEMALLOC_ATTR(unused) JEMALLOC_ATTR(always_inline) +# define JEMALLOC_ALWAYS_INLINE_C \ + static inline JEMALLOC_ATTR(always_inline) +# else +# define JEMALLOC_ALWAYS_INLINE static inline +# define JEMALLOC_ALWAYS_INLINE_C static inline +# endif +# define JEMALLOC_INLINE static inline +# define JEMALLOC_INLINE_C static inline +# ifdef _MSC_VER +# define inline _inline +# endif +#endif + +#ifdef JEMALLOC_CC_SILENCE +# define UNUSED JEMALLOC_ATTR(unused) +#else +# define UNUSED +#endif + +#define ZU(z) ((size_t)z) +#define ZI(z) ((ssize_t)z) +#define QU(q) ((uint64_t)q) +#define QI(q) ((int64_t)q) + +#define KZU(z) ZU(z##ULL) +#define KZI(z) ZI(z##LL) +#define KQU(q) QU(q##ULL) +#define KQI(q) QI(q##LL) + +#ifndef __DECONST +# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var)) +#endif + +#ifndef JEMALLOC_HAS_RESTRICT +# define restrict +#endif diff --git a/sources/jemalloc/include-win/jemalloc/internal/mb.h b/sources/jemalloc/include-win/jemalloc/internal/mb.h new file mode 100755 index 00000000..e58da5c3 --- /dev/null +++ b/sources/jemalloc/include-win/jemalloc/internal/mb.h @@ -0,0 +1,115 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +void mb_write(void); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MB_C_)) +#ifdef __i386__ +/* + * According to the Intel Architecture Software Developer's Manual, current + * processors execute instructions in order from the perspective of other + * processors in a multiprocessor system, but 1) Intel reserves the right to + * change that, and 2) the compiler's optimizer could re-order instructions if + * there weren't some form of barrier. Therefore, even if running on an + * architecture that does not need memory barriers (everything through at least + * i686), an "optimizer barrier" is necessary. + */ +JEMALLOC_INLINE void +mb_write(void) +{ + +# if 0 + /* This is a true memory barrier. */ + asm volatile ("pusha;" + "xor %%eax,%%eax;" + "cpuid;" + "popa;" + : /* Outputs. */ + : /* Inputs. */ + : "memory" /* Clobbers. */ + ); +# else + /* + * This is hopefully enough to keep the compiler from reordering + * instructions around this one. + */ + asm volatile ("nop;" + : /* Outputs. */ + : /* Inputs. */ + : "memory" /* Clobbers. */ + ); +# endif +} +#elif (defined(__amd64__) || defined(__x86_64__)) +JEMALLOC_INLINE void +mb_write(void) +{ + + asm volatile ("sfence" + : /* Outputs. */ + : /* Inputs. */ + : "memory" /* Clobbers. */ + ); +} +#elif defined(__powerpc__) +JEMALLOC_INLINE void +mb_write(void) +{ + + asm volatile ("eieio" + : /* Outputs. */ + : /* Inputs. */ + : "memory" /* Clobbers. 
*/ + ); +} +#elif defined(__sparc__) && defined(__arch64__) +JEMALLOC_INLINE void +mb_write(void) +{ + + asm volatile ("membar #StoreStore" + : /* Outputs. */ + : /* Inputs. */ + : "memory" /* Clobbers. */ + ); +} +#elif defined(__tile__) +JEMALLOC_INLINE void +mb_write(void) +{ + + __sync_synchronize(); +} +#else +/* + * This is much slower than a simple memory barrier, but the semantics of mutex + * unlock make this work. + */ +JEMALLOC_INLINE void +mb_write(void) +{ + malloc_mutex_t mtx; + + malloc_mutex_init(&mtx, "mb", WITNESS_RANK_OMIT); + malloc_mutex_lock(TSDN_NULL, &mtx); + malloc_mutex_unlock(TSDN_NULL, &mtx); +} +#endif +#endif + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ diff --git a/sources/jemalloc/include-win/jemalloc/internal/mutex.h b/sources/jemalloc/include-win/jemalloc/internal/mutex.h new file mode 100755 index 00000000..2b4b1c31 --- /dev/null +++ b/sources/jemalloc/include-win/jemalloc/internal/mutex.h @@ -0,0 +1,145 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +typedef struct malloc_mutex_s malloc_mutex_t; + +#ifdef _WIN32 +# define MALLOC_MUTEX_INITIALIZER +#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) +# define MALLOC_MUTEX_INITIALIZER \ + {OS_UNFAIR_LOCK_INIT, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)} +#elif (defined(JEMALLOC_OSSPIN)) +# define MALLOC_MUTEX_INITIALIZER {0, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)} +#elif (defined(JEMALLOC_MUTEX_INIT_CB)) +# define MALLOC_MUTEX_INITIALIZER \ + {PTHREAD_MUTEX_INITIALIZER, NULL, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)} +#else +# if (defined(JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP) && \ + defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP)) +# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_ADAPTIVE_NP +# define MALLOC_MUTEX_INITIALIZER \ + {PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP, \ + WITNESS_INITIALIZER(WITNESS_RANK_OMIT)} +# else +# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT +# define MALLOC_MUTEX_INITIALIZER \ + {PTHREAD_MUTEX_INITIALIZER, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)} +# endif +#endif + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +struct malloc_mutex_s { +#ifdef _WIN32 +# if _WIN32_WINNT >= 0x0600 + SRWLOCK lock; +# else + CRITICAL_SECTION lock; +# endif +#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) + os_unfair_lock lock; +#elif (defined(JEMALLOC_OSSPIN)) + OSSpinLock lock; +#elif (defined(JEMALLOC_MUTEX_INIT_CB)) + pthread_mutex_t lock; + malloc_mutex_t *postponed_next; +#else + pthread_mutex_t lock; +#endif + witness_t witness; +}; + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +#ifdef JEMALLOC_LAZY_LOCK +extern bool isthreaded; +#else +# undef isthreaded /* Undo private_namespace.h definition. 
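+ * Without lazy locking, isthreaded is constant true, so the
+ * "if (isthreaded)" guards below cost nothing; with JEMALLOC_LAZY_LOCK
+ * it is a runtime flag that only becomes true once a second thread is
+ * launched.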
*/ +# define isthreaded true +#endif + +bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name, + witness_rank_t rank); +void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex); +void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex); +void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex); +bool malloc_mutex_boot(void); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +void malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex); +void malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex); +void malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex); +void malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_)) +JEMALLOC_INLINE void +malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex) +{ + + witness_assert_not_owner(tsdn, &mutex->witness); + if (isthreaded) { +#ifdef _WIN32 +# if _WIN32_WINNT >= 0x0600 + AcquireSRWLockExclusive(&mutex->lock); +# else + EnterCriticalSection(&mutex->lock); +# endif +#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) + os_unfair_lock_lock(&mutex->lock); +#elif (defined(JEMALLOC_OSSPIN)) + OSSpinLockLock(&mutex->lock); +#else + pthread_mutex_lock(&mutex->lock); +#endif + } + witness_lock(tsdn, &mutex->witness); +} + +JEMALLOC_INLINE void +malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex) +{ + + witness_unlock(tsdn, &mutex->witness); + if (isthreaded) { +#ifdef _WIN32 +# if _WIN32_WINNT >= 0x0600 + ReleaseSRWLockExclusive(&mutex->lock); +# else + LeaveCriticalSection(&mutex->lock); +# endif +#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) + os_unfair_lock_unlock(&mutex->lock); +#elif (defined(JEMALLOC_OSSPIN)) + OSSpinLockUnlock(&mutex->lock); +#else + pthread_mutex_unlock(&mutex->lock); +#endif + } +} + +JEMALLOC_INLINE void +malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) +{ + + witness_assert_owner(tsdn, &mutex->witness); +} + +JEMALLOC_INLINE void +malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) +{ + + witness_assert_not_owner(tsdn, &mutex->witness); +} +#endif + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ diff --git a/sources/jemalloc/include-win/jemalloc/internal/nstime.h b/sources/jemalloc/include-win/jemalloc/internal/nstime.h new file mode 100755 index 00000000..93b27dc8 --- /dev/null +++ b/sources/jemalloc/include-win/jemalloc/internal/nstime.h @@ -0,0 +1,48 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +typedef struct nstime_s nstime_t; + +/* Maximum supported number of seconds (~584 years). 
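+ * Derivation: nstime_t stores the whole timestamp as a uint64_t count of
+ * nanoseconds, so the ceiling is 2^64 ns, i.e. about 18446744073.7
+ * seconds; the constant below rounds down, and 18446744072 / 31536000
+ * seconds per year is roughly 584.9 years.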
*/ +#define NSTIME_SEC_MAX KQU(18446744072) + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +struct nstime_s { + uint64_t ns; +}; + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +void nstime_init(nstime_t *time, uint64_t ns); +void nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec); +uint64_t nstime_ns(const nstime_t *time); +uint64_t nstime_sec(const nstime_t *time); +uint64_t nstime_nsec(const nstime_t *time); +void nstime_copy(nstime_t *time, const nstime_t *source); +int nstime_compare(const nstime_t *a, const nstime_t *b); +void nstime_add(nstime_t *time, const nstime_t *addend); +void nstime_subtract(nstime_t *time, const nstime_t *subtrahend); +void nstime_imultiply(nstime_t *time, uint64_t multiplier); +void nstime_idivide(nstime_t *time, uint64_t divisor); +uint64_t nstime_divide(const nstime_t *time, const nstime_t *divisor); +#ifdef JEMALLOC_JET +typedef bool (nstime_monotonic_t)(void); +extern nstime_monotonic_t *nstime_monotonic; +typedef bool (nstime_update_t)(nstime_t *); +extern nstime_update_t *nstime_update; +#else +bool nstime_monotonic(void); +bool nstime_update(nstime_t *time); +#endif + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ diff --git a/sources/jemalloc/include-win/jemalloc/internal/pages.h b/sources/jemalloc/include-win/jemalloc/internal/pages.h new file mode 100755 index 00000000..4ae9f156 --- /dev/null +++ b/sources/jemalloc/include-win/jemalloc/internal/pages.h @@ -0,0 +1,29 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +void *pages_map(void *addr, size_t size, bool *commit); +void pages_unmap(void *addr, size_t size); +void *pages_trim(void *addr, size_t alloc_size, size_t leadsize, + size_t size, bool *commit); +bool pages_commit(void *addr, size_t size); +bool pages_decommit(void *addr, size_t size); +bool pages_purge(void *addr, size_t size); +bool pages_huge(void *addr, size_t size); +bool pages_nohuge(void *addr, size_t size); +void pages_boot(void); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ + diff --git a/sources/jemalloc/include-win/jemalloc/internal/ph.h b/sources/jemalloc/include-win/jemalloc/internal/ph.h new file mode 100755 index 00000000..4f91c333 --- /dev/null +++ b/sources/jemalloc/include-win/jemalloc/internal/ph.h @@ -0,0 +1,345 @@ +/* + * A Pairing Heap implementation. + * + * "The Pairing Heap: A New Form of Self-Adjusting Heap" + * https://www.cs.cmu.edu/~sleator/papers/pairing-heaps.pdf + * + * With auxiliary twopass list, described in a follow on paper. 
+ * + * "Pairing Heaps: Experiments and Analysis" + * http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.106.2988&rep=rep1&type=pdf + * + ******************************************************************************* + */ + +#ifndef PH_H_ +#define PH_H_ + +/* Node structure. */ +#define phn(a_type) \ +struct { \ + a_type *phn_prev; \ + a_type *phn_next; \ + a_type *phn_lchild; \ +} + +/* Root structure. */ +#define ph(a_type) \ +struct { \ + a_type *ph_root; \ +} + +/* Internal utility macros. */ +#define phn_lchild_get(a_type, a_field, a_phn) \ + (a_phn->a_field.phn_lchild) +#define phn_lchild_set(a_type, a_field, a_phn, a_lchild) do { \ + a_phn->a_field.phn_lchild = a_lchild; \ +} while (0) + +#define phn_next_get(a_type, a_field, a_phn) \ + (a_phn->a_field.phn_next) +#define phn_prev_set(a_type, a_field, a_phn, a_prev) do { \ + a_phn->a_field.phn_prev = a_prev; \ +} while (0) + +#define phn_prev_get(a_type, a_field, a_phn) \ + (a_phn->a_field.phn_prev) +#define phn_next_set(a_type, a_field, a_phn, a_next) do { \ + a_phn->a_field.phn_next = a_next; \ +} while (0) + +#define phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, a_cmp) do { \ + a_type *phn0child; \ + \ + assert(a_phn0 != NULL); \ + assert(a_phn1 != NULL); \ + assert(a_cmp(a_phn0, a_phn1) <= 0); \ + \ + phn_prev_set(a_type, a_field, a_phn1, a_phn0); \ + phn0child = phn_lchild_get(a_type, a_field, a_phn0); \ + phn_next_set(a_type, a_field, a_phn1, phn0child); \ + if (phn0child != NULL) \ + phn_prev_set(a_type, a_field, phn0child, a_phn1); \ + phn_lchild_set(a_type, a_field, a_phn0, a_phn1); \ +} while (0) + +#define phn_merge(a_type, a_field, a_phn0, a_phn1, a_cmp, r_phn) do { \ + if (a_phn0 == NULL) \ + r_phn = a_phn1; \ + else if (a_phn1 == NULL) \ + r_phn = a_phn0; \ + else if (a_cmp(a_phn0, a_phn1) < 0) { \ + phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, \ + a_cmp); \ + r_phn = a_phn0; \ + } else { \ + phn_merge_ordered(a_type, a_field, a_phn1, a_phn0, \ + a_cmp); \ + r_phn = a_phn1; \ + } \ +} while (0) + +#define ph_merge_siblings(a_type, a_field, a_phn, a_cmp, r_phn) do { \ + a_type *head = NULL; \ + a_type *tail = NULL; \ + a_type *phn0 = a_phn; \ + a_type *phn1 = phn_next_get(a_type, a_field, phn0); \ + \ + /* \ + * Multipass merge, wherein the first two elements of a FIFO \ + * are repeatedly merged, and each result is appended to the \ + * singly linked FIFO, until the FIFO contains only a single \ + * element. We start with a sibling list but no reference to \ + * its tail, so we do a single pass over the sibling list to \ + * populate the FIFO. 
\ + */ \ + if (phn1 != NULL) { \ + a_type *phnrest = phn_next_get(a_type, a_field, phn1); \ + if (phnrest != NULL) \ + phn_prev_set(a_type, a_field, phnrest, NULL); \ + phn_prev_set(a_type, a_field, phn0, NULL); \ + phn_next_set(a_type, a_field, phn0, NULL); \ + phn_prev_set(a_type, a_field, phn1, NULL); \ + phn_next_set(a_type, a_field, phn1, NULL); \ + phn_merge(a_type, a_field, phn0, phn1, a_cmp, phn0); \ + head = tail = phn0; \ + phn0 = phnrest; \ + while (phn0 != NULL) { \ + phn1 = phn_next_get(a_type, a_field, phn0); \ + if (phn1 != NULL) { \ + phnrest = phn_next_get(a_type, a_field, \ + phn1); \ + if (phnrest != NULL) { \ + phn_prev_set(a_type, a_field, \ + phnrest, NULL); \ + } \ + phn_prev_set(a_type, a_field, phn0, \ + NULL); \ + phn_next_set(a_type, a_field, phn0, \ + NULL); \ + phn_prev_set(a_type, a_field, phn1, \ + NULL); \ + phn_next_set(a_type, a_field, phn1, \ + NULL); \ + phn_merge(a_type, a_field, phn0, phn1, \ + a_cmp, phn0); \ + phn_next_set(a_type, a_field, tail, \ + phn0); \ + tail = phn0; \ + phn0 = phnrest; \ + } else { \ + phn_next_set(a_type, a_field, tail, \ + phn0); \ + tail = phn0; \ + phn0 = NULL; \ + } \ + } \ + phn0 = head; \ + phn1 = phn_next_get(a_type, a_field, phn0); \ + if (phn1 != NULL) { \ + while (true) { \ + head = phn_next_get(a_type, a_field, \ + phn1); \ + assert(phn_prev_get(a_type, a_field, \ + phn0) == NULL); \ + phn_next_set(a_type, a_field, phn0, \ + NULL); \ + assert(phn_prev_get(a_type, a_field, \ + phn1) == NULL); \ + phn_next_set(a_type, a_field, phn1, \ + NULL); \ + phn_merge(a_type, a_field, phn0, phn1, \ + a_cmp, phn0); \ + if (head == NULL) \ + break; \ + phn_next_set(a_type, a_field, tail, \ + phn0); \ + tail = phn0; \ + phn0 = head; \ + phn1 = phn_next_get(a_type, a_field, \ + phn0); \ + } \ + } \ + } \ + r_phn = phn0; \ +} while (0) + +#define ph_merge_aux(a_type, a_field, a_ph, a_cmp) do { \ + a_type *phn = phn_next_get(a_type, a_field, a_ph->ph_root); \ + if (phn != NULL) { \ + phn_prev_set(a_type, a_field, a_ph->ph_root, NULL); \ + phn_next_set(a_type, a_field, a_ph->ph_root, NULL); \ + phn_prev_set(a_type, a_field, phn, NULL); \ + ph_merge_siblings(a_type, a_field, phn, a_cmp, phn); \ + assert(phn_next_get(a_type, a_field, phn) == NULL); \ + phn_merge(a_type, a_field, a_ph->ph_root, phn, a_cmp, \ + a_ph->ph_root); \ + } \ +} while (0) + +#define ph_merge_children(a_type, a_field, a_phn, a_cmp, r_phn) do { \ + a_type *lchild = phn_lchild_get(a_type, a_field, a_phn); \ + if (lchild == NULL) \ + r_phn = NULL; \ + else { \ + ph_merge_siblings(a_type, a_field, lchild, a_cmp, \ + r_phn); \ + } \ +} while (0) + +/* + * The ph_proto() macro generates function prototypes that correspond to the + * functions generated by an equivalently parameterized call to ph_gen(). + */ +#define ph_proto(a_attr, a_prefix, a_ph_type, a_type) \ +a_attr void a_prefix##new(a_ph_type *ph); \ +a_attr bool a_prefix##empty(a_ph_type *ph); \ +a_attr a_type *a_prefix##first(a_ph_type *ph); \ +a_attr void a_prefix##insert(a_ph_type *ph, a_type *phn); \ +a_attr a_type *a_prefix##remove_first(a_ph_type *ph); \ +a_attr void a_prefix##remove(a_ph_type *ph, a_type *phn); + +/* + * The ph_gen() macro generates a type-specific pairing heap implementation, + * based on the above cpp macros. 
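+ *
+ * A minimal usage sketch (node_t, heap_, and node_cmp are illustrative
+ * names, not part of jemalloc):
+ *
+ *   typedef struct node_s node_t;
+ *   struct node_s {
+ *       uint64_t key;
+ *       phn(node_t) link;   /* Intrusive heap linkage. */
+ *   };
+ *   typedef ph(node_t) heap_t;
+ *
+ *   static int
+ *   node_cmp(const node_t *a, const node_t *b)
+ *   {
+ *       /* Negative/zero/positive, as phn_merge() expects. */
+ *       return ((a->key > b->key) - (a->key < b->key));
+ *   }
+ *
+ *   ph_proto(static, heap_, heap_t, node_t)
+ *   ph_gen(static, heap_, heap_t, node_t, link, node_cmp)
+ *
+ * after which heap_new(), heap_empty(), heap_first(), heap_insert(),
+ * heap_remove_first() and heap_remove() operate on heap_t rooted heaps
+ * of node_t.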
+ */ +#define ph_gen(a_attr, a_prefix, a_ph_type, a_type, a_field, a_cmp) \ +a_attr void \ +a_prefix##new(a_ph_type *ph) \ +{ \ + \ + memset(ph, 0, sizeof(ph(a_type))); \ +} \ +a_attr bool \ +a_prefix##empty(a_ph_type *ph) \ +{ \ + \ + return (ph->ph_root == NULL); \ +} \ +a_attr a_type * \ +a_prefix##first(a_ph_type *ph) \ +{ \ + \ + if (ph->ph_root == NULL) \ + return (NULL); \ + ph_merge_aux(a_type, a_field, ph, a_cmp); \ + return (ph->ph_root); \ +} \ +a_attr void \ +a_prefix##insert(a_ph_type *ph, a_type *phn) \ +{ \ + \ + memset(&phn->a_field, 0, sizeof(phn(a_type))); \ + \ + /* \ + * Treat the root as an aux list during insertion, and lazily \ + * merge during a_prefix##remove_first(). For elements that \ + * are inserted, then removed via a_prefix##remove() before the \ + * aux list is ever processed, this makes insert/remove \ + * constant-time, whereas eager merging would make insert \ + * O(log n). \ + */ \ + if (ph->ph_root == NULL) \ + ph->ph_root = phn; \ + else { \ + phn_next_set(a_type, a_field, phn, phn_next_get(a_type, \ + a_field, ph->ph_root)); \ + if (phn_next_get(a_type, a_field, ph->ph_root) != \ + NULL) { \ + phn_prev_set(a_type, a_field, \ + phn_next_get(a_type, a_field, ph->ph_root), \ + phn); \ + } \ + phn_prev_set(a_type, a_field, phn, ph->ph_root); \ + phn_next_set(a_type, a_field, ph->ph_root, phn); \ + } \ +} \ +a_attr a_type * \ +a_prefix##remove_first(a_ph_type *ph) \ +{ \ + a_type *ret; \ + \ + if (ph->ph_root == NULL) \ + return (NULL); \ + ph_merge_aux(a_type, a_field, ph, a_cmp); \ + \ + ret = ph->ph_root; \ + \ + ph_merge_children(a_type, a_field, ph->ph_root, a_cmp, \ + ph->ph_root); \ + \ + return (ret); \ +} \ +a_attr void \ +a_prefix##remove(a_ph_type *ph, a_type *phn) \ +{ \ + a_type *replace, *parent; \ + \ + /* \ + * We can delete from aux list without merging it, but we need \ + * to merge if we are dealing with the root node. \ + */ \ + if (ph->ph_root == phn) { \ + ph_merge_aux(a_type, a_field, ph, a_cmp); \ + if (ph->ph_root == phn) { \ + ph_merge_children(a_type, a_field, ph->ph_root, \ + a_cmp, ph->ph_root); \ + return; \ + } \ + } \ + \ + /* Get parent (if phn is leftmost child) before mutating. */ \ + if ((parent = phn_prev_get(a_type, a_field, phn)) != NULL) { \ + if (phn_lchild_get(a_type, a_field, parent) != phn) \ + parent = NULL; \ + } \ + /* Find a possible replacement node, and link to parent. */ \ + ph_merge_children(a_type, a_field, phn, a_cmp, replace); \ + /* Set next/prev for sibling linked list. 
*/ \ + if (replace != NULL) { \ + if (parent != NULL) { \ + phn_prev_set(a_type, a_field, replace, parent); \ + phn_lchild_set(a_type, a_field, parent, \ + replace); \ + } else { \ + phn_prev_set(a_type, a_field, replace, \ + phn_prev_get(a_type, a_field, phn)); \ + if (phn_prev_get(a_type, a_field, phn) != \ + NULL) { \ + phn_next_set(a_type, a_field, \ + phn_prev_get(a_type, a_field, phn), \ + replace); \ + } \ + } \ + phn_next_set(a_type, a_field, replace, \ + phn_next_get(a_type, a_field, phn)); \ + if (phn_next_get(a_type, a_field, phn) != NULL) { \ + phn_prev_set(a_type, a_field, \ + phn_next_get(a_type, a_field, phn), \ + replace); \ + } \ + } else { \ + if (parent != NULL) { \ + a_type *next = phn_next_get(a_type, a_field, \ + phn); \ + phn_lchild_set(a_type, a_field, parent, next); \ + if (next != NULL) { \ + phn_prev_set(a_type, a_field, next, \ + parent); \ + } \ + } else { \ + assert(phn_prev_get(a_type, a_field, phn) != \ + NULL); \ + phn_next_set(a_type, a_field, \ + phn_prev_get(a_type, a_field, phn), \ + phn_next_get(a_type, a_field, phn)); \ + } \ + if (phn_next_get(a_type, a_field, phn) != NULL) { \ + phn_prev_set(a_type, a_field, \ + phn_next_get(a_type, a_field, phn), \ + phn_prev_get(a_type, a_field, phn)); \ + } \ + } \ +} + +#endif /* PH_H_ */ diff --git a/sources/jemalloc/include-win/jemalloc/internal/private_namespace.h b/sources/jemalloc/include-win/jemalloc/internal/private_namespace.h new file mode 100755 index 00000000..7ebeeba8 --- /dev/null +++ b/sources/jemalloc/include-win/jemalloc/internal/private_namespace.h @@ -0,0 +1,639 @@ +#define a0dalloc JEMALLOC_N(a0dalloc) +#define a0get JEMALLOC_N(a0get) +#define a0malloc JEMALLOC_N(a0malloc) +#define arena_aalloc JEMALLOC_N(arena_aalloc) +#define arena_alloc_junk_small JEMALLOC_N(arena_alloc_junk_small) +#define arena_basic_stats_merge JEMALLOC_N(arena_basic_stats_merge) +#define arena_bin_index JEMALLOC_N(arena_bin_index) +#define arena_bin_info JEMALLOC_N(arena_bin_info) +#define arena_bitselm_get_const JEMALLOC_N(arena_bitselm_get_const) +#define arena_bitselm_get_mutable JEMALLOC_N(arena_bitselm_get_mutable) +#define arena_boot JEMALLOC_N(arena_boot) +#define arena_choose JEMALLOC_N(arena_choose) +#define arena_choose_hard JEMALLOC_N(arena_choose_hard) +#define arena_choose_impl JEMALLOC_N(arena_choose_impl) +#define arena_chunk_alloc_huge JEMALLOC_N(arena_chunk_alloc_huge) +#define arena_chunk_cache_maybe_insert JEMALLOC_N(arena_chunk_cache_maybe_insert) +#define arena_chunk_cache_maybe_remove JEMALLOC_N(arena_chunk_cache_maybe_remove) +#define arena_chunk_dalloc_huge JEMALLOC_N(arena_chunk_dalloc_huge) +#define arena_chunk_ralloc_huge_expand JEMALLOC_N(arena_chunk_ralloc_huge_expand) +#define arena_chunk_ralloc_huge_shrink JEMALLOC_N(arena_chunk_ralloc_huge_shrink) +#define arena_chunk_ralloc_huge_similar JEMALLOC_N(arena_chunk_ralloc_huge_similar) +#define arena_cleanup JEMALLOC_N(arena_cleanup) +#define arena_dalloc JEMALLOC_N(arena_dalloc) +#define arena_dalloc_bin JEMALLOC_N(arena_dalloc_bin) +#define arena_dalloc_bin_junked_locked JEMALLOC_N(arena_dalloc_bin_junked_locked) +#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large) +#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small) +#define arena_dalloc_large JEMALLOC_N(arena_dalloc_large) +#define arena_dalloc_large_junked_locked JEMALLOC_N(arena_dalloc_large_junked_locked) +#define arena_dalloc_small JEMALLOC_N(arena_dalloc_small) +#define arena_decay_tick JEMALLOC_N(arena_decay_tick) +#define arena_decay_ticks 
JEMALLOC_N(arena_decay_ticks) +#define arena_decay_time_default_get JEMALLOC_N(arena_decay_time_default_get) +#define arena_decay_time_default_set JEMALLOC_N(arena_decay_time_default_set) +#define arena_decay_time_get JEMALLOC_N(arena_decay_time_get) +#define arena_decay_time_set JEMALLOC_N(arena_decay_time_set) +#define arena_dss_prec_get JEMALLOC_N(arena_dss_prec_get) +#define arena_dss_prec_set JEMALLOC_N(arena_dss_prec_set) +#define arena_extent_sn_next JEMALLOC_N(arena_extent_sn_next) +#define arena_get JEMALLOC_N(arena_get) +#define arena_ichoose JEMALLOC_N(arena_ichoose) +#define arena_init JEMALLOC_N(arena_init) +#define arena_lg_dirty_mult_default_get JEMALLOC_N(arena_lg_dirty_mult_default_get) +#define arena_lg_dirty_mult_default_set JEMALLOC_N(arena_lg_dirty_mult_default_set) +#define arena_lg_dirty_mult_get JEMALLOC_N(arena_lg_dirty_mult_get) +#define arena_lg_dirty_mult_set JEMALLOC_N(arena_lg_dirty_mult_set) +#define arena_malloc JEMALLOC_N(arena_malloc) +#define arena_malloc_hard JEMALLOC_N(arena_malloc_hard) +#define arena_malloc_large JEMALLOC_N(arena_malloc_large) +#define arena_mapbits_allocated_get JEMALLOC_N(arena_mapbits_allocated_get) +#define arena_mapbits_binind_get JEMALLOC_N(arena_mapbits_binind_get) +#define arena_mapbits_decommitted_get JEMALLOC_N(arena_mapbits_decommitted_get) +#define arena_mapbits_dirty_get JEMALLOC_N(arena_mapbits_dirty_get) +#define arena_mapbits_get JEMALLOC_N(arena_mapbits_get) +#define arena_mapbits_internal_set JEMALLOC_N(arena_mapbits_internal_set) +#define arena_mapbits_large_binind_set JEMALLOC_N(arena_mapbits_large_binind_set) +#define arena_mapbits_large_get JEMALLOC_N(arena_mapbits_large_get) +#define arena_mapbits_large_set JEMALLOC_N(arena_mapbits_large_set) +#define arena_mapbits_large_size_get JEMALLOC_N(arena_mapbits_large_size_get) +#define arena_mapbits_size_decode JEMALLOC_N(arena_mapbits_size_decode) +#define arena_mapbits_size_encode JEMALLOC_N(arena_mapbits_size_encode) +#define arena_mapbits_small_runind_get JEMALLOC_N(arena_mapbits_small_runind_get) +#define arena_mapbits_small_set JEMALLOC_N(arena_mapbits_small_set) +#define arena_mapbits_unallocated_set JEMALLOC_N(arena_mapbits_unallocated_set) +#define arena_mapbits_unallocated_size_get JEMALLOC_N(arena_mapbits_unallocated_size_get) +#define arena_mapbits_unallocated_size_set JEMALLOC_N(arena_mapbits_unallocated_size_set) +#define arena_mapbits_unzeroed_get JEMALLOC_N(arena_mapbits_unzeroed_get) +#define arena_mapbitsp_get_const JEMALLOC_N(arena_mapbitsp_get_const) +#define arena_mapbitsp_get_mutable JEMALLOC_N(arena_mapbitsp_get_mutable) +#define arena_mapbitsp_read JEMALLOC_N(arena_mapbitsp_read) +#define arena_mapbitsp_write JEMALLOC_N(arena_mapbitsp_write) +#define arena_maxrun JEMALLOC_N(arena_maxrun) +#define arena_maybe_purge JEMALLOC_N(arena_maybe_purge) +#define arena_metadata_allocated_add JEMALLOC_N(arena_metadata_allocated_add) +#define arena_metadata_allocated_get JEMALLOC_N(arena_metadata_allocated_get) +#define arena_metadata_allocated_sub JEMALLOC_N(arena_metadata_allocated_sub) +#define arena_migrate JEMALLOC_N(arena_migrate) +#define arena_miscelm_get_const JEMALLOC_N(arena_miscelm_get_const) +#define arena_miscelm_get_mutable JEMALLOC_N(arena_miscelm_get_mutable) +#define arena_miscelm_to_pageind JEMALLOC_N(arena_miscelm_to_pageind) +#define arena_miscelm_to_rpages JEMALLOC_N(arena_miscelm_to_rpages) +#define arena_new JEMALLOC_N(arena_new) +#define arena_node_alloc JEMALLOC_N(arena_node_alloc) +#define arena_node_dalloc 
JEMALLOC_N(arena_node_dalloc) +#define arena_nthreads_dec JEMALLOC_N(arena_nthreads_dec) +#define arena_nthreads_get JEMALLOC_N(arena_nthreads_get) +#define arena_nthreads_inc JEMALLOC_N(arena_nthreads_inc) +#define arena_palloc JEMALLOC_N(arena_palloc) +#define arena_postfork_child JEMALLOC_N(arena_postfork_child) +#define arena_postfork_parent JEMALLOC_N(arena_postfork_parent) +#define arena_prefork0 JEMALLOC_N(arena_prefork0) +#define arena_prefork1 JEMALLOC_N(arena_prefork1) +#define arena_prefork2 JEMALLOC_N(arena_prefork2) +#define arena_prefork3 JEMALLOC_N(arena_prefork3) +#define arena_prof_accum JEMALLOC_N(arena_prof_accum) +#define arena_prof_accum_impl JEMALLOC_N(arena_prof_accum_impl) +#define arena_prof_accum_locked JEMALLOC_N(arena_prof_accum_locked) +#define arena_prof_promoted JEMALLOC_N(arena_prof_promoted) +#define arena_prof_tctx_get JEMALLOC_N(arena_prof_tctx_get) +#define arena_prof_tctx_reset JEMALLOC_N(arena_prof_tctx_reset) +#define arena_prof_tctx_set JEMALLOC_N(arena_prof_tctx_set) +#define arena_ptr_small_binind_get JEMALLOC_N(arena_ptr_small_binind_get) +#define arena_purge JEMALLOC_N(arena_purge) +#define arena_quarantine_junk_small JEMALLOC_N(arena_quarantine_junk_small) +#define arena_ralloc JEMALLOC_N(arena_ralloc) +#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large) +#define arena_ralloc_no_move JEMALLOC_N(arena_ralloc_no_move) +#define arena_rd_to_miscelm JEMALLOC_N(arena_rd_to_miscelm) +#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption) +#define arena_reset JEMALLOC_N(arena_reset) +#define arena_run_regind JEMALLOC_N(arena_run_regind) +#define arena_run_to_miscelm JEMALLOC_N(arena_run_to_miscelm) +#define arena_salloc JEMALLOC_N(arena_salloc) +#define arena_sdalloc JEMALLOC_N(arena_sdalloc) +#define arena_stats_merge JEMALLOC_N(arena_stats_merge) +#define arena_tcache_fill_small JEMALLOC_N(arena_tcache_fill_small) +#define arena_tdata_get JEMALLOC_N(arena_tdata_get) +#define arena_tdata_get_hard JEMALLOC_N(arena_tdata_get_hard) +#define arenas JEMALLOC_N(arenas) +#define arenas_tdata_bypass_cleanup JEMALLOC_N(arenas_tdata_bypass_cleanup) +#define arenas_tdata_cleanup JEMALLOC_N(arenas_tdata_cleanup) +#define atomic_add_p JEMALLOC_N(atomic_add_p) +#define atomic_add_u JEMALLOC_N(atomic_add_u) +#define atomic_add_uint32 JEMALLOC_N(atomic_add_uint32) +#define atomic_add_uint64 JEMALLOC_N(atomic_add_uint64) +#define atomic_add_z JEMALLOC_N(atomic_add_z) +#define atomic_cas_p JEMALLOC_N(atomic_cas_p) +#define atomic_cas_u JEMALLOC_N(atomic_cas_u) +#define atomic_cas_uint32 JEMALLOC_N(atomic_cas_uint32) +#define atomic_cas_uint64 JEMALLOC_N(atomic_cas_uint64) +#define atomic_cas_z JEMALLOC_N(atomic_cas_z) +#define atomic_sub_p JEMALLOC_N(atomic_sub_p) +#define atomic_sub_u JEMALLOC_N(atomic_sub_u) +#define atomic_sub_uint32 JEMALLOC_N(atomic_sub_uint32) +#define atomic_sub_uint64 JEMALLOC_N(atomic_sub_uint64) +#define atomic_sub_z JEMALLOC_N(atomic_sub_z) +#define atomic_write_p JEMALLOC_N(atomic_write_p) +#define atomic_write_u JEMALLOC_N(atomic_write_u) +#define atomic_write_uint32 JEMALLOC_N(atomic_write_uint32) +#define atomic_write_uint64 JEMALLOC_N(atomic_write_uint64) +#define atomic_write_z JEMALLOC_N(atomic_write_z) +#define base_alloc JEMALLOC_N(base_alloc) +#define base_boot JEMALLOC_N(base_boot) +#define base_postfork_child JEMALLOC_N(base_postfork_child) +#define base_postfork_parent JEMALLOC_N(base_postfork_parent) +#define base_prefork JEMALLOC_N(base_prefork) +#define base_stats_get JEMALLOC_N(base_stats_get) 
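+/*
+ * Every entry in this file funnels an internal name through JEMALLOC_N();
+ * with JEMALLOC_PRIVATE_NAMESPACE defined as je_ in the generated
+ * jemalloc_internal_defs.h, bitmap_full below becomes je_bitmap_full, for
+ * example, keeping a statically linked jemalloc from colliding with another
+ * allocator's internals.
+ */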
+#define bitmap_full JEMALLOC_N(bitmap_full) +#define bitmap_get JEMALLOC_N(bitmap_get) +#define bitmap_info_init JEMALLOC_N(bitmap_info_init) +#define bitmap_init JEMALLOC_N(bitmap_init) +#define bitmap_set JEMALLOC_N(bitmap_set) +#define bitmap_sfu JEMALLOC_N(bitmap_sfu) +#define bitmap_size JEMALLOC_N(bitmap_size) +#define bitmap_unset JEMALLOC_N(bitmap_unset) +#define bootstrap_calloc JEMALLOC_N(bootstrap_calloc) +#define bootstrap_free JEMALLOC_N(bootstrap_free) +#define bootstrap_malloc JEMALLOC_N(bootstrap_malloc) +#define bt_init JEMALLOC_N(bt_init) +#define buferror JEMALLOC_N(buferror) +#define chunk_alloc_base JEMALLOC_N(chunk_alloc_base) +#define chunk_alloc_cache JEMALLOC_N(chunk_alloc_cache) +#define chunk_alloc_dss JEMALLOC_N(chunk_alloc_dss) +#define chunk_alloc_mmap JEMALLOC_N(chunk_alloc_mmap) +#define chunk_alloc_wrapper JEMALLOC_N(chunk_alloc_wrapper) +#define chunk_boot JEMALLOC_N(chunk_boot) +#define chunk_dalloc_cache JEMALLOC_N(chunk_dalloc_cache) +#define chunk_dalloc_mmap JEMALLOC_N(chunk_dalloc_mmap) +#define chunk_dalloc_wrapper JEMALLOC_N(chunk_dalloc_wrapper) +#define chunk_deregister JEMALLOC_N(chunk_deregister) +#define chunk_dss_boot JEMALLOC_N(chunk_dss_boot) +#define chunk_dss_mergeable JEMALLOC_N(chunk_dss_mergeable) +#define chunk_dss_prec_get JEMALLOC_N(chunk_dss_prec_get) +#define chunk_dss_prec_set JEMALLOC_N(chunk_dss_prec_set) +#define chunk_hooks_default JEMALLOC_N(chunk_hooks_default) +#define chunk_hooks_get JEMALLOC_N(chunk_hooks_get) +#define chunk_hooks_set JEMALLOC_N(chunk_hooks_set) +#define chunk_in_dss JEMALLOC_N(chunk_in_dss) +#define chunk_lookup JEMALLOC_N(chunk_lookup) +#define chunk_npages JEMALLOC_N(chunk_npages) +#define chunk_purge_wrapper JEMALLOC_N(chunk_purge_wrapper) +#define chunk_register JEMALLOC_N(chunk_register) +#define chunks_rtree JEMALLOC_N(chunks_rtree) +#define chunksize JEMALLOC_N(chunksize) +#define chunksize_mask JEMALLOC_N(chunksize_mask) +#define ckh_count JEMALLOC_N(ckh_count) +#define ckh_delete JEMALLOC_N(ckh_delete) +#define ckh_insert JEMALLOC_N(ckh_insert) +#define ckh_iter JEMALLOC_N(ckh_iter) +#define ckh_new JEMALLOC_N(ckh_new) +#define ckh_pointer_hash JEMALLOC_N(ckh_pointer_hash) +#define ckh_pointer_keycomp JEMALLOC_N(ckh_pointer_keycomp) +#define ckh_remove JEMALLOC_N(ckh_remove) +#define ckh_search JEMALLOC_N(ckh_search) +#define ckh_string_hash JEMALLOC_N(ckh_string_hash) +#define ckh_string_keycomp JEMALLOC_N(ckh_string_keycomp) +#define ctl_boot JEMALLOC_N(ctl_boot) +#define ctl_bymib JEMALLOC_N(ctl_bymib) +#define ctl_byname JEMALLOC_N(ctl_byname) +#define ctl_nametomib JEMALLOC_N(ctl_nametomib) +#define ctl_postfork_child JEMALLOC_N(ctl_postfork_child) +#define ctl_postfork_parent JEMALLOC_N(ctl_postfork_parent) +#define ctl_prefork JEMALLOC_N(ctl_prefork) +#define decay_ticker_get JEMALLOC_N(decay_ticker_get) +#define dss_prec_names JEMALLOC_N(dss_prec_names) +#define extent_node_achunk_get JEMALLOC_N(extent_node_achunk_get) +#define extent_node_achunk_set JEMALLOC_N(extent_node_achunk_set) +#define extent_node_addr_get JEMALLOC_N(extent_node_addr_get) +#define extent_node_addr_set JEMALLOC_N(extent_node_addr_set) +#define extent_node_arena_get JEMALLOC_N(extent_node_arena_get) +#define extent_node_arena_set JEMALLOC_N(extent_node_arena_set) +#define extent_node_committed_get JEMALLOC_N(extent_node_committed_get) +#define extent_node_committed_set JEMALLOC_N(extent_node_committed_set) +#define extent_node_dirty_insert JEMALLOC_N(extent_node_dirty_insert) +#define 
extent_node_dirty_linkage_init JEMALLOC_N(extent_node_dirty_linkage_init) +#define extent_node_dirty_remove JEMALLOC_N(extent_node_dirty_remove) +#define extent_node_init JEMALLOC_N(extent_node_init) +#define extent_node_prof_tctx_get JEMALLOC_N(extent_node_prof_tctx_get) +#define extent_node_prof_tctx_set JEMALLOC_N(extent_node_prof_tctx_set) +#define extent_node_size_get JEMALLOC_N(extent_node_size_get) +#define extent_node_size_set JEMALLOC_N(extent_node_size_set) +#define extent_node_sn_get JEMALLOC_N(extent_node_sn_get) +#define extent_node_sn_set JEMALLOC_N(extent_node_sn_set) +#define extent_node_zeroed_get JEMALLOC_N(extent_node_zeroed_get) +#define extent_node_zeroed_set JEMALLOC_N(extent_node_zeroed_set) +#define extent_size_quantize_ceil JEMALLOC_N(extent_size_quantize_ceil) +#define extent_size_quantize_floor JEMALLOC_N(extent_size_quantize_floor) +#define extent_tree_ad_destroy JEMALLOC_N(extent_tree_ad_destroy) +#define extent_tree_ad_destroy_recurse JEMALLOC_N(extent_tree_ad_destroy_recurse) +#define extent_tree_ad_empty JEMALLOC_N(extent_tree_ad_empty) +#define extent_tree_ad_first JEMALLOC_N(extent_tree_ad_first) +#define extent_tree_ad_insert JEMALLOC_N(extent_tree_ad_insert) +#define extent_tree_ad_iter JEMALLOC_N(extent_tree_ad_iter) +#define extent_tree_ad_iter_recurse JEMALLOC_N(extent_tree_ad_iter_recurse) +#define extent_tree_ad_iter_start JEMALLOC_N(extent_tree_ad_iter_start) +#define extent_tree_ad_last JEMALLOC_N(extent_tree_ad_last) +#define extent_tree_ad_new JEMALLOC_N(extent_tree_ad_new) +#define extent_tree_ad_next JEMALLOC_N(extent_tree_ad_next) +#define extent_tree_ad_nsearch JEMALLOC_N(extent_tree_ad_nsearch) +#define extent_tree_ad_prev JEMALLOC_N(extent_tree_ad_prev) +#define extent_tree_ad_psearch JEMALLOC_N(extent_tree_ad_psearch) +#define extent_tree_ad_remove JEMALLOC_N(extent_tree_ad_remove) +#define extent_tree_ad_reverse_iter JEMALLOC_N(extent_tree_ad_reverse_iter) +#define extent_tree_ad_reverse_iter_recurse JEMALLOC_N(extent_tree_ad_reverse_iter_recurse) +#define extent_tree_ad_reverse_iter_start JEMALLOC_N(extent_tree_ad_reverse_iter_start) +#define extent_tree_ad_search JEMALLOC_N(extent_tree_ad_search) +#define extent_tree_szsnad_destroy JEMALLOC_N(extent_tree_szsnad_destroy) +#define extent_tree_szsnad_destroy_recurse JEMALLOC_N(extent_tree_szsnad_destroy_recurse) +#define extent_tree_szsnad_empty JEMALLOC_N(extent_tree_szsnad_empty) +#define extent_tree_szsnad_first JEMALLOC_N(extent_tree_szsnad_first) +#define extent_tree_szsnad_insert JEMALLOC_N(extent_tree_szsnad_insert) +#define extent_tree_szsnad_iter JEMALLOC_N(extent_tree_szsnad_iter) +#define extent_tree_szsnad_iter_recurse JEMALLOC_N(extent_tree_szsnad_iter_recurse) +#define extent_tree_szsnad_iter_start JEMALLOC_N(extent_tree_szsnad_iter_start) +#define extent_tree_szsnad_last JEMALLOC_N(extent_tree_szsnad_last) +#define extent_tree_szsnad_new JEMALLOC_N(extent_tree_szsnad_new) +#define extent_tree_szsnad_next JEMALLOC_N(extent_tree_szsnad_next) +#define extent_tree_szsnad_nsearch JEMALLOC_N(extent_tree_szsnad_nsearch) +#define extent_tree_szsnad_prev JEMALLOC_N(extent_tree_szsnad_prev) +#define extent_tree_szsnad_psearch JEMALLOC_N(extent_tree_szsnad_psearch) +#define extent_tree_szsnad_remove JEMALLOC_N(extent_tree_szsnad_remove) +#define extent_tree_szsnad_reverse_iter JEMALLOC_N(extent_tree_szsnad_reverse_iter) +#define extent_tree_szsnad_reverse_iter_recurse JEMALLOC_N(extent_tree_szsnad_reverse_iter_recurse) +#define extent_tree_szsnad_reverse_iter_start 
JEMALLOC_N(extent_tree_szsnad_reverse_iter_start) +#define extent_tree_szsnad_search JEMALLOC_N(extent_tree_szsnad_search) +#define ffs_llu JEMALLOC_N(ffs_llu) +#define ffs_lu JEMALLOC_N(ffs_lu) +#define ffs_u JEMALLOC_N(ffs_u) +#define ffs_u32 JEMALLOC_N(ffs_u32) +#define ffs_u64 JEMALLOC_N(ffs_u64) +#define ffs_zu JEMALLOC_N(ffs_zu) +#define get_errno JEMALLOC_N(get_errno) +#define hash JEMALLOC_N(hash) +#define hash_fmix_32 JEMALLOC_N(hash_fmix_32) +#define hash_fmix_64 JEMALLOC_N(hash_fmix_64) +#define hash_get_block_32 JEMALLOC_N(hash_get_block_32) +#define hash_get_block_64 JEMALLOC_N(hash_get_block_64) +#define hash_rotl_32 JEMALLOC_N(hash_rotl_32) +#define hash_rotl_64 JEMALLOC_N(hash_rotl_64) +#define hash_x64_128 JEMALLOC_N(hash_x64_128) +#define hash_x86_128 JEMALLOC_N(hash_x86_128) +#define hash_x86_32 JEMALLOC_N(hash_x86_32) +#define huge_aalloc JEMALLOC_N(huge_aalloc) +#define huge_dalloc JEMALLOC_N(huge_dalloc) +#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk) +#define huge_malloc JEMALLOC_N(huge_malloc) +#define huge_palloc JEMALLOC_N(huge_palloc) +#define huge_prof_tctx_get JEMALLOC_N(huge_prof_tctx_get) +#define huge_prof_tctx_reset JEMALLOC_N(huge_prof_tctx_reset) +#define huge_prof_tctx_set JEMALLOC_N(huge_prof_tctx_set) +#define huge_ralloc JEMALLOC_N(huge_ralloc) +#define huge_ralloc_no_move JEMALLOC_N(huge_ralloc_no_move) +#define huge_salloc JEMALLOC_N(huge_salloc) +#define iaalloc JEMALLOC_N(iaalloc) +#define ialloc JEMALLOC_N(ialloc) +#define iallocztm JEMALLOC_N(iallocztm) +#define iarena_cleanup JEMALLOC_N(iarena_cleanup) +#define idalloc JEMALLOC_N(idalloc) +#define idalloctm JEMALLOC_N(idalloctm) +#define in_valgrind JEMALLOC_N(in_valgrind) +#define index2size JEMALLOC_N(index2size) +#define index2size_compute JEMALLOC_N(index2size_compute) +#define index2size_lookup JEMALLOC_N(index2size_lookup) +#define index2size_tab JEMALLOC_N(index2size_tab) +#define ipalloc JEMALLOC_N(ipalloc) +#define ipalloct JEMALLOC_N(ipalloct) +#define ipallocztm JEMALLOC_N(ipallocztm) +#define iqalloc JEMALLOC_N(iqalloc) +#define iralloc JEMALLOC_N(iralloc) +#define iralloct JEMALLOC_N(iralloct) +#define iralloct_realign JEMALLOC_N(iralloct_realign) +#define isalloc JEMALLOC_N(isalloc) +#define isdalloct JEMALLOC_N(isdalloct) +#define isqalloc JEMALLOC_N(isqalloc) +#define isthreaded JEMALLOC_N(isthreaded) +#define ivsalloc JEMALLOC_N(ivsalloc) +#define ixalloc JEMALLOC_N(ixalloc) +#define jemalloc_postfork_child JEMALLOC_N(jemalloc_postfork_child) +#define jemalloc_postfork_parent JEMALLOC_N(jemalloc_postfork_parent) +#define jemalloc_prefork JEMALLOC_N(jemalloc_prefork) +#define large_maxclass JEMALLOC_N(large_maxclass) +#define lg_floor JEMALLOC_N(lg_floor) +#define lg_prof_sample JEMALLOC_N(lg_prof_sample) +#define malloc_cprintf JEMALLOC_N(malloc_cprintf) +#define malloc_mutex_assert_not_owner JEMALLOC_N(malloc_mutex_assert_not_owner) +#define malloc_mutex_assert_owner JEMALLOC_N(malloc_mutex_assert_owner) +#define malloc_mutex_boot JEMALLOC_N(malloc_mutex_boot) +#define malloc_mutex_init JEMALLOC_N(malloc_mutex_init) +#define malloc_mutex_lock JEMALLOC_N(malloc_mutex_lock) +#define malloc_mutex_postfork_child JEMALLOC_N(malloc_mutex_postfork_child) +#define malloc_mutex_postfork_parent JEMALLOC_N(malloc_mutex_postfork_parent) +#define malloc_mutex_prefork JEMALLOC_N(malloc_mutex_prefork) +#define malloc_mutex_unlock JEMALLOC_N(malloc_mutex_unlock) +#define malloc_printf JEMALLOC_N(malloc_printf) +#define malloc_snprintf JEMALLOC_N(malloc_snprintf) +#define 
malloc_strtoumax JEMALLOC_N(malloc_strtoumax) +#define malloc_tsd_boot0 JEMALLOC_N(malloc_tsd_boot0) +#define malloc_tsd_boot1 JEMALLOC_N(malloc_tsd_boot1) +#define malloc_tsd_cleanup_register JEMALLOC_N(malloc_tsd_cleanup_register) +#define malloc_tsd_dalloc JEMALLOC_N(malloc_tsd_dalloc) +#define malloc_tsd_malloc JEMALLOC_N(malloc_tsd_malloc) +#define malloc_tsd_no_cleanup JEMALLOC_N(malloc_tsd_no_cleanup) +#define malloc_vcprintf JEMALLOC_N(malloc_vcprintf) +#define malloc_vsnprintf JEMALLOC_N(malloc_vsnprintf) +#define malloc_write JEMALLOC_N(malloc_write) +#define map_bias JEMALLOC_N(map_bias) +#define map_misc_offset JEMALLOC_N(map_misc_offset) +#define mb_write JEMALLOC_N(mb_write) +#define narenas_auto JEMALLOC_N(narenas_auto) +#define narenas_tdata_cleanup JEMALLOC_N(narenas_tdata_cleanup) +#define narenas_total_get JEMALLOC_N(narenas_total_get) +#define ncpus JEMALLOC_N(ncpus) +#define nhbins JEMALLOC_N(nhbins) +#define nhclasses JEMALLOC_N(nhclasses) +#define nlclasses JEMALLOC_N(nlclasses) +#define nstime_add JEMALLOC_N(nstime_add) +#define nstime_compare JEMALLOC_N(nstime_compare) +#define nstime_copy JEMALLOC_N(nstime_copy) +#define nstime_divide JEMALLOC_N(nstime_divide) +#define nstime_idivide JEMALLOC_N(nstime_idivide) +#define nstime_imultiply JEMALLOC_N(nstime_imultiply) +#define nstime_init JEMALLOC_N(nstime_init) +#define nstime_init2 JEMALLOC_N(nstime_init2) +#define nstime_monotonic JEMALLOC_N(nstime_monotonic) +#define nstime_ns JEMALLOC_N(nstime_ns) +#define nstime_nsec JEMALLOC_N(nstime_nsec) +#define nstime_sec JEMALLOC_N(nstime_sec) +#define nstime_subtract JEMALLOC_N(nstime_subtract) +#define nstime_update JEMALLOC_N(nstime_update) +#define opt_abort JEMALLOC_N(opt_abort) +#define opt_decay_time JEMALLOC_N(opt_decay_time) +#define opt_dss JEMALLOC_N(opt_dss) +#define opt_junk JEMALLOC_N(opt_junk) +#define opt_junk_alloc JEMALLOC_N(opt_junk_alloc) +#define opt_junk_free JEMALLOC_N(opt_junk_free) +#define opt_lg_chunk JEMALLOC_N(opt_lg_chunk) +#define opt_lg_dirty_mult JEMALLOC_N(opt_lg_dirty_mult) +#define opt_lg_prof_interval JEMALLOC_N(opt_lg_prof_interval) +#define opt_lg_prof_sample JEMALLOC_N(opt_lg_prof_sample) +#define opt_lg_tcache_max JEMALLOC_N(opt_lg_tcache_max) +#define opt_narenas JEMALLOC_N(opt_narenas) +#define opt_prof JEMALLOC_N(opt_prof) +#define opt_prof_accum JEMALLOC_N(opt_prof_accum) +#define opt_prof_active JEMALLOC_N(opt_prof_active) +#define opt_prof_final JEMALLOC_N(opt_prof_final) +#define opt_prof_gdump JEMALLOC_N(opt_prof_gdump) +#define opt_prof_leak JEMALLOC_N(opt_prof_leak) +#define opt_prof_prefix JEMALLOC_N(opt_prof_prefix) +#define opt_prof_thread_active_init JEMALLOC_N(opt_prof_thread_active_init) +#define opt_purge JEMALLOC_N(opt_purge) +#define opt_quarantine JEMALLOC_N(opt_quarantine) +#define opt_redzone JEMALLOC_N(opt_redzone) +#define opt_stats_print JEMALLOC_N(opt_stats_print) +#define opt_tcache JEMALLOC_N(opt_tcache) +#define opt_thp JEMALLOC_N(opt_thp) +#define opt_utrace JEMALLOC_N(opt_utrace) +#define opt_xmalloc JEMALLOC_N(opt_xmalloc) +#define opt_zero JEMALLOC_N(opt_zero) +#define p2rz JEMALLOC_N(p2rz) +#define pages_boot JEMALLOC_N(pages_boot) +#define pages_commit JEMALLOC_N(pages_commit) +#define pages_decommit JEMALLOC_N(pages_decommit) +#define pages_huge JEMALLOC_N(pages_huge) +#define pages_map JEMALLOC_N(pages_map) +#define pages_nohuge JEMALLOC_N(pages_nohuge) +#define pages_purge JEMALLOC_N(pages_purge) +#define pages_trim JEMALLOC_N(pages_trim) +#define pages_unmap JEMALLOC_N(pages_unmap) +#define 
pind2sz JEMALLOC_N(pind2sz) +#define pind2sz_compute JEMALLOC_N(pind2sz_compute) +#define pind2sz_lookup JEMALLOC_N(pind2sz_lookup) +#define pind2sz_tab JEMALLOC_N(pind2sz_tab) +#define pow2_ceil_u32 JEMALLOC_N(pow2_ceil_u32) +#define pow2_ceil_u64 JEMALLOC_N(pow2_ceil_u64) +#define pow2_ceil_zu JEMALLOC_N(pow2_ceil_zu) +#define prng_lg_range_u32 JEMALLOC_N(prng_lg_range_u32) +#define prng_lg_range_u64 JEMALLOC_N(prng_lg_range_u64) +#define prng_lg_range_zu JEMALLOC_N(prng_lg_range_zu) +#define prng_range_u32 JEMALLOC_N(prng_range_u32) +#define prng_range_u64 JEMALLOC_N(prng_range_u64) +#define prng_range_zu JEMALLOC_N(prng_range_zu) +#define prng_state_next_u32 JEMALLOC_N(prng_state_next_u32) +#define prng_state_next_u64 JEMALLOC_N(prng_state_next_u64) +#define prng_state_next_zu JEMALLOC_N(prng_state_next_zu) +#define prof_active JEMALLOC_N(prof_active) +#define prof_active_get JEMALLOC_N(prof_active_get) +#define prof_active_get_unlocked JEMALLOC_N(prof_active_get_unlocked) +#define prof_active_set JEMALLOC_N(prof_active_set) +#define prof_alloc_prep JEMALLOC_N(prof_alloc_prep) +#define prof_alloc_rollback JEMALLOC_N(prof_alloc_rollback) +#define prof_backtrace JEMALLOC_N(prof_backtrace) +#define prof_boot0 JEMALLOC_N(prof_boot0) +#define prof_boot1 JEMALLOC_N(prof_boot1) +#define prof_boot2 JEMALLOC_N(prof_boot2) +#define prof_bt_count JEMALLOC_N(prof_bt_count) +#define prof_dump_header JEMALLOC_N(prof_dump_header) +#define prof_dump_open JEMALLOC_N(prof_dump_open) +#define prof_free JEMALLOC_N(prof_free) +#define prof_free_sampled_object JEMALLOC_N(prof_free_sampled_object) +#define prof_gdump JEMALLOC_N(prof_gdump) +#define prof_gdump_get JEMALLOC_N(prof_gdump_get) +#define prof_gdump_get_unlocked JEMALLOC_N(prof_gdump_get_unlocked) +#define prof_gdump_set JEMALLOC_N(prof_gdump_set) +#define prof_gdump_val JEMALLOC_N(prof_gdump_val) +#define prof_idump JEMALLOC_N(prof_idump) +#define prof_interval JEMALLOC_N(prof_interval) +#define prof_lookup JEMALLOC_N(prof_lookup) +#define prof_malloc JEMALLOC_N(prof_malloc) +#define prof_malloc_sample_object JEMALLOC_N(prof_malloc_sample_object) +#define prof_mdump JEMALLOC_N(prof_mdump) +#define prof_postfork_child JEMALLOC_N(prof_postfork_child) +#define prof_postfork_parent JEMALLOC_N(prof_postfork_parent) +#define prof_prefork0 JEMALLOC_N(prof_prefork0) +#define prof_prefork1 JEMALLOC_N(prof_prefork1) +#define prof_realloc JEMALLOC_N(prof_realloc) +#define prof_reset JEMALLOC_N(prof_reset) +#define prof_sample_accum_update JEMALLOC_N(prof_sample_accum_update) +#define prof_sample_threshold_update JEMALLOC_N(prof_sample_threshold_update) +#define prof_tctx_get JEMALLOC_N(prof_tctx_get) +#define prof_tctx_reset JEMALLOC_N(prof_tctx_reset) +#define prof_tctx_set JEMALLOC_N(prof_tctx_set) +#define prof_tdata_cleanup JEMALLOC_N(prof_tdata_cleanup) +#define prof_tdata_count JEMALLOC_N(prof_tdata_count) +#define prof_tdata_get JEMALLOC_N(prof_tdata_get) +#define prof_tdata_init JEMALLOC_N(prof_tdata_init) +#define prof_tdata_reinit JEMALLOC_N(prof_tdata_reinit) +#define prof_thread_active_get JEMALLOC_N(prof_thread_active_get) +#define prof_thread_active_init_get JEMALLOC_N(prof_thread_active_init_get) +#define prof_thread_active_init_set JEMALLOC_N(prof_thread_active_init_set) +#define prof_thread_active_set JEMALLOC_N(prof_thread_active_set) +#define prof_thread_name_get JEMALLOC_N(prof_thread_name_get) +#define prof_thread_name_set JEMALLOC_N(prof_thread_name_set) +#define psz2ind JEMALLOC_N(psz2ind) +#define psz2u JEMALLOC_N(psz2u) +#define 
purge_mode_names JEMALLOC_N(purge_mode_names) +#define quarantine JEMALLOC_N(quarantine) +#define quarantine_alloc_hook JEMALLOC_N(quarantine_alloc_hook) +#define quarantine_alloc_hook_work JEMALLOC_N(quarantine_alloc_hook_work) +#define quarantine_cleanup JEMALLOC_N(quarantine_cleanup) +#define rtree_child_read JEMALLOC_N(rtree_child_read) +#define rtree_child_read_hard JEMALLOC_N(rtree_child_read_hard) +#define rtree_child_tryread JEMALLOC_N(rtree_child_tryread) +#define rtree_delete JEMALLOC_N(rtree_delete) +#define rtree_get JEMALLOC_N(rtree_get) +#define rtree_new JEMALLOC_N(rtree_new) +#define rtree_node_valid JEMALLOC_N(rtree_node_valid) +#define rtree_set JEMALLOC_N(rtree_set) +#define rtree_start_level JEMALLOC_N(rtree_start_level) +#define rtree_subkey JEMALLOC_N(rtree_subkey) +#define rtree_subtree_read JEMALLOC_N(rtree_subtree_read) +#define rtree_subtree_read_hard JEMALLOC_N(rtree_subtree_read_hard) +#define rtree_subtree_tryread JEMALLOC_N(rtree_subtree_tryread) +#define rtree_val_read JEMALLOC_N(rtree_val_read) +#define rtree_val_write JEMALLOC_N(rtree_val_write) +#define run_quantize_ceil JEMALLOC_N(run_quantize_ceil) +#define run_quantize_floor JEMALLOC_N(run_quantize_floor) +#define s2u JEMALLOC_N(s2u) +#define s2u_compute JEMALLOC_N(s2u_compute) +#define s2u_lookup JEMALLOC_N(s2u_lookup) +#define sa2u JEMALLOC_N(sa2u) +#define set_errno JEMALLOC_N(set_errno) +#define size2index JEMALLOC_N(size2index) +#define size2index_compute JEMALLOC_N(size2index_compute) +#define size2index_lookup JEMALLOC_N(size2index_lookup) +#define size2index_tab JEMALLOC_N(size2index_tab) +#define spin_adaptive JEMALLOC_N(spin_adaptive) +#define spin_init JEMALLOC_N(spin_init) +#define stats_cactive JEMALLOC_N(stats_cactive) +#define stats_cactive_add JEMALLOC_N(stats_cactive_add) +#define stats_cactive_get JEMALLOC_N(stats_cactive_get) +#define stats_cactive_sub JEMALLOC_N(stats_cactive_sub) +#define stats_print JEMALLOC_N(stats_print) +#define tcache_alloc_easy JEMALLOC_N(tcache_alloc_easy) +#define tcache_alloc_large JEMALLOC_N(tcache_alloc_large) +#define tcache_alloc_small JEMALLOC_N(tcache_alloc_small) +#define tcache_alloc_small_hard JEMALLOC_N(tcache_alloc_small_hard) +#define tcache_arena_reassociate JEMALLOC_N(tcache_arena_reassociate) +#define tcache_bin_flush_large JEMALLOC_N(tcache_bin_flush_large) +#define tcache_bin_flush_small JEMALLOC_N(tcache_bin_flush_small) +#define tcache_bin_info JEMALLOC_N(tcache_bin_info) +#define tcache_boot JEMALLOC_N(tcache_boot) +#define tcache_cleanup JEMALLOC_N(tcache_cleanup) +#define tcache_create JEMALLOC_N(tcache_create) +#define tcache_dalloc_large JEMALLOC_N(tcache_dalloc_large) +#define tcache_dalloc_small JEMALLOC_N(tcache_dalloc_small) +#define tcache_enabled_cleanup JEMALLOC_N(tcache_enabled_cleanup) +#define tcache_enabled_get JEMALLOC_N(tcache_enabled_get) +#define tcache_enabled_set JEMALLOC_N(tcache_enabled_set) +#define tcache_event JEMALLOC_N(tcache_event) +#define tcache_event_hard JEMALLOC_N(tcache_event_hard) +#define tcache_flush JEMALLOC_N(tcache_flush) +#define tcache_get JEMALLOC_N(tcache_get) +#define tcache_get_hard JEMALLOC_N(tcache_get_hard) +#define tcache_maxclass JEMALLOC_N(tcache_maxclass) +#define tcache_postfork_child JEMALLOC_N(tcache_postfork_child) +#define tcache_postfork_parent JEMALLOC_N(tcache_postfork_parent) +#define tcache_prefork JEMALLOC_N(tcache_prefork) +#define tcache_salloc JEMALLOC_N(tcache_salloc) +#define tcache_stats_merge JEMALLOC_N(tcache_stats_merge) +#define tcaches JEMALLOC_N(tcaches) 
+#define tcaches_create JEMALLOC_N(tcaches_create) +#define tcaches_destroy JEMALLOC_N(tcaches_destroy) +#define tcaches_flush JEMALLOC_N(tcaches_flush) +#define tcaches_get JEMALLOC_N(tcaches_get) +#define thread_allocated_cleanup JEMALLOC_N(thread_allocated_cleanup) +#define thread_deallocated_cleanup JEMALLOC_N(thread_deallocated_cleanup) +#define ticker_copy JEMALLOC_N(ticker_copy) +#define ticker_init JEMALLOC_N(ticker_init) +#define ticker_read JEMALLOC_N(ticker_read) +#define ticker_tick JEMALLOC_N(ticker_tick) +#define ticker_ticks JEMALLOC_N(ticker_ticks) +#define tsd_arena_get JEMALLOC_N(tsd_arena_get) +#define tsd_arena_set JEMALLOC_N(tsd_arena_set) +#define tsd_arenap_get JEMALLOC_N(tsd_arenap_get) +#define tsd_arenas_tdata_bypass_get JEMALLOC_N(tsd_arenas_tdata_bypass_get) +#define tsd_arenas_tdata_bypass_set JEMALLOC_N(tsd_arenas_tdata_bypass_set) +#define tsd_arenas_tdata_bypassp_get JEMALLOC_N(tsd_arenas_tdata_bypassp_get) +#define tsd_arenas_tdata_get JEMALLOC_N(tsd_arenas_tdata_get) +#define tsd_arenas_tdata_set JEMALLOC_N(tsd_arenas_tdata_set) +#define tsd_arenas_tdatap_get JEMALLOC_N(tsd_arenas_tdatap_get) +#define tsd_boot JEMALLOC_N(tsd_boot) +#define tsd_boot0 JEMALLOC_N(tsd_boot0) +#define tsd_boot1 JEMALLOC_N(tsd_boot1) +#define tsd_booted JEMALLOC_N(tsd_booted) +#define tsd_booted_get JEMALLOC_N(tsd_booted_get) +#define tsd_cleanup JEMALLOC_N(tsd_cleanup) +#define tsd_cleanup_wrapper JEMALLOC_N(tsd_cleanup_wrapper) +#define tsd_fetch JEMALLOC_N(tsd_fetch) +#define tsd_fetch_impl JEMALLOC_N(tsd_fetch_impl) +#define tsd_get JEMALLOC_N(tsd_get) +#define tsd_get_allocates JEMALLOC_N(tsd_get_allocates) +#define tsd_iarena_get JEMALLOC_N(tsd_iarena_get) +#define tsd_iarena_set JEMALLOC_N(tsd_iarena_set) +#define tsd_iarenap_get JEMALLOC_N(tsd_iarenap_get) +#define tsd_initialized JEMALLOC_N(tsd_initialized) +#define tsd_init_check_recursion JEMALLOC_N(tsd_init_check_recursion) +#define tsd_init_finish JEMALLOC_N(tsd_init_finish) +#define tsd_init_head JEMALLOC_N(tsd_init_head) +#define tsd_narenas_tdata_get JEMALLOC_N(tsd_narenas_tdata_get) +#define tsd_narenas_tdata_set JEMALLOC_N(tsd_narenas_tdata_set) +#define tsd_narenas_tdatap_get JEMALLOC_N(tsd_narenas_tdatap_get) +#define tsd_wrapper_get JEMALLOC_N(tsd_wrapper_get) +#define tsd_wrapper_set JEMALLOC_N(tsd_wrapper_set) +#define tsd_nominal JEMALLOC_N(tsd_nominal) +#define tsd_prof_tdata_get JEMALLOC_N(tsd_prof_tdata_get) +#define tsd_prof_tdata_set JEMALLOC_N(tsd_prof_tdata_set) +#define tsd_prof_tdatap_get JEMALLOC_N(tsd_prof_tdatap_get) +#define tsd_quarantine_get JEMALLOC_N(tsd_quarantine_get) +#define tsd_quarantine_set JEMALLOC_N(tsd_quarantine_set) +#define tsd_quarantinep_get JEMALLOC_N(tsd_quarantinep_get) +#define tsd_set JEMALLOC_N(tsd_set) +#define tsd_tcache_enabled_get JEMALLOC_N(tsd_tcache_enabled_get) +#define tsd_tcache_enabled_set JEMALLOC_N(tsd_tcache_enabled_set) +#define tsd_tcache_enabledp_get JEMALLOC_N(tsd_tcache_enabledp_get) +#define tsd_tcache_get JEMALLOC_N(tsd_tcache_get) +#define tsd_tcache_set JEMALLOC_N(tsd_tcache_set) +#define tsd_tcachep_get JEMALLOC_N(tsd_tcachep_get) +#define tsd_thread_allocated_get JEMALLOC_N(tsd_thread_allocated_get) +#define tsd_thread_allocated_set JEMALLOC_N(tsd_thread_allocated_set) +#define tsd_thread_allocatedp_get JEMALLOC_N(tsd_thread_allocatedp_get) +#define tsd_thread_deallocated_get JEMALLOC_N(tsd_thread_deallocated_get) +#define tsd_thread_deallocated_set JEMALLOC_N(tsd_thread_deallocated_set) +#define tsd_thread_deallocatedp_get 
JEMALLOC_N(tsd_thread_deallocatedp_get) +#define tsd_tls JEMALLOC_N(tsd_tls) +#define tsd_tsd JEMALLOC_N(tsd_tsd) +#define tsd_tsdn JEMALLOC_N(tsd_tsdn) +#define tsd_witness_fork_get JEMALLOC_N(tsd_witness_fork_get) +#define tsd_witness_fork_set JEMALLOC_N(tsd_witness_fork_set) +#define tsd_witness_forkp_get JEMALLOC_N(tsd_witness_forkp_get) +#define tsd_witnesses_get JEMALLOC_N(tsd_witnesses_get) +#define tsd_witnesses_set JEMALLOC_N(tsd_witnesses_set) +#define tsd_witnessesp_get JEMALLOC_N(tsd_witnessesp_get) +#define tsdn_fetch JEMALLOC_N(tsdn_fetch) +#define tsdn_null JEMALLOC_N(tsdn_null) +#define tsdn_tsd JEMALLOC_N(tsdn_tsd) +#define u2rz JEMALLOC_N(u2rz) +#define valgrind_freelike_block JEMALLOC_N(valgrind_freelike_block) +#define valgrind_make_mem_defined JEMALLOC_N(valgrind_make_mem_defined) +#define valgrind_make_mem_noaccess JEMALLOC_N(valgrind_make_mem_noaccess) +#define valgrind_make_mem_undefined JEMALLOC_N(valgrind_make_mem_undefined) +#define witness_assert_depth JEMALLOC_N(witness_assert_depth) +#define witness_assert_depth_to_rank JEMALLOC_N(witness_assert_depth_to_rank) +#define witness_assert_lockless JEMALLOC_N(witness_assert_lockless) +#define witness_assert_not_owner JEMALLOC_N(witness_assert_not_owner) +#define witness_assert_owner JEMALLOC_N(witness_assert_owner) +#define witness_depth_error JEMALLOC_N(witness_depth_error) +#define witness_fork_cleanup JEMALLOC_N(witness_fork_cleanup) +#define witness_init JEMALLOC_N(witness_init) +#define witness_lock JEMALLOC_N(witness_lock) +#define witness_lock_error JEMALLOC_N(witness_lock_error) +#define witness_not_owner_error JEMALLOC_N(witness_not_owner_error) +#define witness_owner JEMALLOC_N(witness_owner) +#define witness_owner_error JEMALLOC_N(witness_owner_error) +#define witness_postfork_child JEMALLOC_N(witness_postfork_child) +#define witness_postfork_parent JEMALLOC_N(witness_postfork_parent) +#define witness_prefork JEMALLOC_N(witness_prefork) +#define witness_unlock JEMALLOC_N(witness_unlock) +#define witnesses_cleanup JEMALLOC_N(witnesses_cleanup) +#define zone_register JEMALLOC_N(zone_register) diff --git a/sources/jemalloc/include-win/jemalloc/internal/private_namespace.sh b/sources/jemalloc/include-win/jemalloc/internal/private_namespace.sh new file mode 100755 index 00000000..cd25eb30 --- /dev/null +++ b/sources/jemalloc/include-win/jemalloc/internal/private_namespace.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +for symbol in `cat $1` ; do + echo "#define ${symbol} JEMALLOC_N(${symbol})" +done diff --git a/sources/jemalloc/include-win/jemalloc/internal/private_symbols.txt b/sources/jemalloc/include-win/jemalloc/internal/private_symbols.txt new file mode 100755 index 00000000..60b57e5a --- /dev/null +++ b/sources/jemalloc/include-win/jemalloc/internal/private_symbols.txt @@ -0,0 +1,639 @@ +a0dalloc +a0get +a0malloc +arena_aalloc +arena_alloc_junk_small +arena_basic_stats_merge +arena_bin_index +arena_bin_info +arena_bitselm_get_const +arena_bitselm_get_mutable +arena_boot +arena_choose +arena_choose_hard +arena_choose_impl +arena_chunk_alloc_huge +arena_chunk_cache_maybe_insert +arena_chunk_cache_maybe_remove +arena_chunk_dalloc_huge +arena_chunk_ralloc_huge_expand +arena_chunk_ralloc_huge_shrink +arena_chunk_ralloc_huge_similar +arena_cleanup +arena_dalloc +arena_dalloc_bin +arena_dalloc_bin_junked_locked +arena_dalloc_junk_large +arena_dalloc_junk_small +arena_dalloc_large +arena_dalloc_large_junked_locked +arena_dalloc_small +arena_decay_tick +arena_decay_ticks +arena_decay_time_default_get 
+arena_decay_time_default_set +arena_decay_time_get +arena_decay_time_set +arena_dss_prec_get +arena_dss_prec_set +arena_extent_sn_next +arena_get +arena_ichoose +arena_init +arena_lg_dirty_mult_default_get +arena_lg_dirty_mult_default_set +arena_lg_dirty_mult_get +arena_lg_dirty_mult_set +arena_malloc +arena_malloc_hard +arena_malloc_large +arena_mapbits_allocated_get +arena_mapbits_binind_get +arena_mapbits_decommitted_get +arena_mapbits_dirty_get +arena_mapbits_get +arena_mapbits_internal_set +arena_mapbits_large_binind_set +arena_mapbits_large_get +arena_mapbits_large_set +arena_mapbits_large_size_get +arena_mapbits_size_decode +arena_mapbits_size_encode +arena_mapbits_small_runind_get +arena_mapbits_small_set +arena_mapbits_unallocated_set +arena_mapbits_unallocated_size_get +arena_mapbits_unallocated_size_set +arena_mapbits_unzeroed_get +arena_mapbitsp_get_const +arena_mapbitsp_get_mutable +arena_mapbitsp_read +arena_mapbitsp_write +arena_maxrun +arena_maybe_purge +arena_metadata_allocated_add +arena_metadata_allocated_get +arena_metadata_allocated_sub +arena_migrate +arena_miscelm_get_const +arena_miscelm_get_mutable +arena_miscelm_to_pageind +arena_miscelm_to_rpages +arena_new +arena_node_alloc +arena_node_dalloc +arena_nthreads_dec +arena_nthreads_get +arena_nthreads_inc +arena_palloc +arena_postfork_child +arena_postfork_parent +arena_prefork0 +arena_prefork1 +arena_prefork2 +arena_prefork3 +arena_prof_accum +arena_prof_accum_impl +arena_prof_accum_locked +arena_prof_promoted +arena_prof_tctx_get +arena_prof_tctx_reset +arena_prof_tctx_set +arena_ptr_small_binind_get +arena_purge +arena_quarantine_junk_small +arena_ralloc +arena_ralloc_junk_large +arena_ralloc_no_move +arena_rd_to_miscelm +arena_redzone_corruption +arena_reset +arena_run_regind +arena_run_to_miscelm +arena_salloc +arena_sdalloc +arena_stats_merge +arena_tcache_fill_small +arena_tdata_get +arena_tdata_get_hard +arenas +arenas_tdata_bypass_cleanup +arenas_tdata_cleanup +atomic_add_p +atomic_add_u +atomic_add_uint32 +atomic_add_uint64 +atomic_add_z +atomic_cas_p +atomic_cas_u +atomic_cas_uint32 +atomic_cas_uint64 +atomic_cas_z +atomic_sub_p +atomic_sub_u +atomic_sub_uint32 +atomic_sub_uint64 +atomic_sub_z +atomic_write_p +atomic_write_u +atomic_write_uint32 +atomic_write_uint64 +atomic_write_z +base_alloc +base_boot +base_postfork_child +base_postfork_parent +base_prefork +base_stats_get +bitmap_full +bitmap_get +bitmap_info_init +bitmap_init +bitmap_set +bitmap_sfu +bitmap_size +bitmap_unset +bootstrap_calloc +bootstrap_free +bootstrap_malloc +bt_init +buferror +chunk_alloc_base +chunk_alloc_cache +chunk_alloc_dss +chunk_alloc_mmap +chunk_alloc_wrapper +chunk_boot +chunk_dalloc_cache +chunk_dalloc_mmap +chunk_dalloc_wrapper +chunk_deregister +chunk_dss_boot +chunk_dss_mergeable +chunk_dss_prec_get +chunk_dss_prec_set +chunk_hooks_default +chunk_hooks_get +chunk_hooks_set +chunk_in_dss +chunk_lookup +chunk_npages +chunk_purge_wrapper +chunk_register +chunks_rtree +chunksize +chunksize_mask +ckh_count +ckh_delete +ckh_insert +ckh_iter +ckh_new +ckh_pointer_hash +ckh_pointer_keycomp +ckh_remove +ckh_search +ckh_string_hash +ckh_string_keycomp +ctl_boot +ctl_bymib +ctl_byname +ctl_nametomib +ctl_postfork_child +ctl_postfork_parent +ctl_prefork +decay_ticker_get +dss_prec_names +extent_node_achunk_get +extent_node_achunk_set +extent_node_addr_get +extent_node_addr_set +extent_node_arena_get +extent_node_arena_set +extent_node_committed_get +extent_node_committed_set +extent_node_dirty_insert 
+extent_node_dirty_linkage_init +extent_node_dirty_remove +extent_node_init +extent_node_prof_tctx_get +extent_node_prof_tctx_set +extent_node_size_get +extent_node_size_set +extent_node_sn_get +extent_node_sn_set +extent_node_zeroed_get +extent_node_zeroed_set +extent_size_quantize_ceil +extent_size_quantize_floor +extent_tree_ad_destroy +extent_tree_ad_destroy_recurse +extent_tree_ad_empty +extent_tree_ad_first +extent_tree_ad_insert +extent_tree_ad_iter +extent_tree_ad_iter_recurse +extent_tree_ad_iter_start +extent_tree_ad_last +extent_tree_ad_new +extent_tree_ad_next +extent_tree_ad_nsearch +extent_tree_ad_prev +extent_tree_ad_psearch +extent_tree_ad_remove +extent_tree_ad_reverse_iter +extent_tree_ad_reverse_iter_recurse +extent_tree_ad_reverse_iter_start +extent_tree_ad_search +extent_tree_szsnad_destroy +extent_tree_szsnad_destroy_recurse +extent_tree_szsnad_empty +extent_tree_szsnad_first +extent_tree_szsnad_insert +extent_tree_szsnad_iter +extent_tree_szsnad_iter_recurse +extent_tree_szsnad_iter_start +extent_tree_szsnad_last +extent_tree_szsnad_new +extent_tree_szsnad_next +extent_tree_szsnad_nsearch +extent_tree_szsnad_prev +extent_tree_szsnad_psearch +extent_tree_szsnad_remove +extent_tree_szsnad_reverse_iter +extent_tree_szsnad_reverse_iter_recurse +extent_tree_szsnad_reverse_iter_start +extent_tree_szsnad_search +ffs_llu +ffs_lu +ffs_u +ffs_u32 +ffs_u64 +ffs_zu +get_errno +hash +hash_fmix_32 +hash_fmix_64 +hash_get_block_32 +hash_get_block_64 +hash_rotl_32 +hash_rotl_64 +hash_x64_128 +hash_x86_128 +hash_x86_32 +huge_aalloc +huge_dalloc +huge_dalloc_junk +huge_malloc +huge_palloc +huge_prof_tctx_get +huge_prof_tctx_reset +huge_prof_tctx_set +huge_ralloc +huge_ralloc_no_move +huge_salloc +iaalloc +ialloc +iallocztm +iarena_cleanup +idalloc +idalloctm +in_valgrind +index2size +index2size_compute +index2size_lookup +index2size_tab +ipalloc +ipalloct +ipallocztm +iqalloc +iralloc +iralloct +iralloct_realign +isalloc +isdalloct +isqalloc +isthreaded +ivsalloc +ixalloc +jemalloc_postfork_child +jemalloc_postfork_parent +jemalloc_prefork +large_maxclass +lg_floor +lg_prof_sample +malloc_cprintf +malloc_mutex_assert_not_owner +malloc_mutex_assert_owner +malloc_mutex_boot +malloc_mutex_init +malloc_mutex_lock +malloc_mutex_postfork_child +malloc_mutex_postfork_parent +malloc_mutex_prefork +malloc_mutex_unlock +malloc_printf +malloc_snprintf +malloc_strtoumax +malloc_tsd_boot0 +malloc_tsd_boot1 +malloc_tsd_cleanup_register +malloc_tsd_dalloc +malloc_tsd_malloc +malloc_tsd_no_cleanup +malloc_vcprintf +malloc_vsnprintf +malloc_write +map_bias +map_misc_offset +mb_write +narenas_auto +narenas_tdata_cleanup +narenas_total_get +ncpus +nhbins +nhclasses +nlclasses +nstime_add +nstime_compare +nstime_copy +nstime_divide +nstime_idivide +nstime_imultiply +nstime_init +nstime_init2 +nstime_monotonic +nstime_ns +nstime_nsec +nstime_sec +nstime_subtract +nstime_update +opt_abort +opt_decay_time +opt_dss +opt_junk +opt_junk_alloc +opt_junk_free +opt_lg_chunk +opt_lg_dirty_mult +opt_lg_prof_interval +opt_lg_prof_sample +opt_lg_tcache_max +opt_narenas +opt_prof +opt_prof_accum +opt_prof_active +opt_prof_final +opt_prof_gdump +opt_prof_leak +opt_prof_prefix +opt_prof_thread_active_init +opt_purge +opt_quarantine +opt_redzone +opt_stats_print +opt_tcache +opt_thp +opt_utrace +opt_xmalloc +opt_zero +p2rz +pages_boot +pages_commit +pages_decommit +pages_huge +pages_map +pages_nohuge +pages_purge +pages_trim +pages_unmap +pind2sz +pind2sz_compute +pind2sz_lookup +pind2sz_tab +pow2_ceil_u32 +pow2_ceil_u64 
+pow2_ceil_zu +prng_lg_range_u32 +prng_lg_range_u64 +prng_lg_range_zu +prng_range_u32 +prng_range_u64 +prng_range_zu +prng_state_next_u32 +prng_state_next_u64 +prng_state_next_zu +prof_active +prof_active_get +prof_active_get_unlocked +prof_active_set +prof_alloc_prep +prof_alloc_rollback +prof_backtrace +prof_boot0 +prof_boot1 +prof_boot2 +prof_bt_count +prof_dump_header +prof_dump_open +prof_free +prof_free_sampled_object +prof_gdump +prof_gdump_get +prof_gdump_get_unlocked +prof_gdump_set +prof_gdump_val +prof_idump +prof_interval +prof_lookup +prof_malloc +prof_malloc_sample_object +prof_mdump +prof_postfork_child +prof_postfork_parent +prof_prefork0 +prof_prefork1 +prof_realloc +prof_reset +prof_sample_accum_update +prof_sample_threshold_update +prof_tctx_get +prof_tctx_reset +prof_tctx_set +prof_tdata_cleanup +prof_tdata_count +prof_tdata_get +prof_tdata_init +prof_tdata_reinit +prof_thread_active_get +prof_thread_active_init_get +prof_thread_active_init_set +prof_thread_active_set +prof_thread_name_get +prof_thread_name_set +psz2ind +psz2u +purge_mode_names +quarantine +quarantine_alloc_hook +quarantine_alloc_hook_work +quarantine_cleanup +rtree_child_read +rtree_child_read_hard +rtree_child_tryread +rtree_delete +rtree_get +rtree_new +rtree_node_valid +rtree_set +rtree_start_level +rtree_subkey +rtree_subtree_read +rtree_subtree_read_hard +rtree_subtree_tryread +rtree_val_read +rtree_val_write +run_quantize_ceil +run_quantize_floor +s2u +s2u_compute +s2u_lookup +sa2u +set_errno +size2index +size2index_compute +size2index_lookup +size2index_tab +spin_adaptive +spin_init +stats_cactive +stats_cactive_add +stats_cactive_get +stats_cactive_sub +stats_print +tcache_alloc_easy +tcache_alloc_large +tcache_alloc_small +tcache_alloc_small_hard +tcache_arena_reassociate +tcache_bin_flush_large +tcache_bin_flush_small +tcache_bin_info +tcache_boot +tcache_cleanup +tcache_create +tcache_dalloc_large +tcache_dalloc_small +tcache_enabled_cleanup +tcache_enabled_get +tcache_enabled_set +tcache_event +tcache_event_hard +tcache_flush +tcache_get +tcache_get_hard +tcache_maxclass +tcache_postfork_child +tcache_postfork_parent +tcache_prefork +tcache_salloc +tcache_stats_merge +tcaches +tcaches_create +tcaches_destroy +tcaches_flush +tcaches_get +thread_allocated_cleanup +thread_deallocated_cleanup +ticker_copy +ticker_init +ticker_read +ticker_tick +ticker_ticks +tsd_arena_get +tsd_arena_set +tsd_arenap_get +tsd_arenas_tdata_bypass_get +tsd_arenas_tdata_bypass_set +tsd_arenas_tdata_bypassp_get +tsd_arenas_tdata_get +tsd_arenas_tdata_set +tsd_arenas_tdatap_get +tsd_boot +tsd_boot0 +tsd_boot1 +tsd_booted +tsd_booted_get +tsd_cleanup +tsd_cleanup_wrapper +tsd_fetch +tsd_fetch_impl +tsd_get +tsd_get_allocates +tsd_iarena_get +tsd_iarena_set +tsd_iarenap_get +tsd_initialized +tsd_init_check_recursion +tsd_init_finish +tsd_init_head +tsd_narenas_tdata_get +tsd_narenas_tdata_set +tsd_narenas_tdatap_get +tsd_wrapper_get +tsd_wrapper_set +tsd_nominal +tsd_prof_tdata_get +tsd_prof_tdata_set +tsd_prof_tdatap_get +tsd_quarantine_get +tsd_quarantine_set +tsd_quarantinep_get +tsd_set +tsd_tcache_enabled_get +tsd_tcache_enabled_set +tsd_tcache_enabledp_get +tsd_tcache_get +tsd_tcache_set +tsd_tcachep_get +tsd_thread_allocated_get +tsd_thread_allocated_set +tsd_thread_allocatedp_get +tsd_thread_deallocated_get +tsd_thread_deallocated_set +tsd_thread_deallocatedp_get +tsd_tls +tsd_tsd +tsd_tsdn +tsd_witness_fork_get +tsd_witness_fork_set +tsd_witness_forkp_get +tsd_witnesses_get +tsd_witnesses_set 
+tsd_witnessesp_get +tsdn_fetch +tsdn_null +tsdn_tsd +u2rz +valgrind_freelike_block +valgrind_make_mem_defined +valgrind_make_mem_noaccess +valgrind_make_mem_undefined +witness_assert_depth +witness_assert_depth_to_rank +witness_assert_lockless +witness_assert_not_owner +witness_assert_owner +witness_depth_error +witness_fork_cleanup +witness_init +witness_lock +witness_lock_error +witness_not_owner_error +witness_owner +witness_owner_error +witness_postfork_child +witness_postfork_parent +witness_prefork +witness_unlock +witnesses_cleanup +zone_register diff --git a/sources/jemalloc/include-win/jemalloc/internal/private_unnamespace.h b/sources/jemalloc/include-win/jemalloc/internal/private_unnamespace.h new file mode 100755 index 00000000..27038c63 --- /dev/null +++ b/sources/jemalloc/include-win/jemalloc/internal/private_unnamespace.h @@ -0,0 +1,639 @@ +#undef a0dalloc +#undef a0get +#undef a0malloc +#undef arena_aalloc +#undef arena_alloc_junk_small +#undef arena_basic_stats_merge +#undef arena_bin_index +#undef arena_bin_info +#undef arena_bitselm_get_const +#undef arena_bitselm_get_mutable +#undef arena_boot +#undef arena_choose +#undef arena_choose_hard +#undef arena_choose_impl +#undef arena_chunk_alloc_huge +#undef arena_chunk_cache_maybe_insert +#undef arena_chunk_cache_maybe_remove +#undef arena_chunk_dalloc_huge +#undef arena_chunk_ralloc_huge_expand +#undef arena_chunk_ralloc_huge_shrink +#undef arena_chunk_ralloc_huge_similar +#undef arena_cleanup +#undef arena_dalloc +#undef arena_dalloc_bin +#undef arena_dalloc_bin_junked_locked +#undef arena_dalloc_junk_large +#undef arena_dalloc_junk_small +#undef arena_dalloc_large +#undef arena_dalloc_large_junked_locked +#undef arena_dalloc_small +#undef arena_decay_tick +#undef arena_decay_ticks +#undef arena_decay_time_default_get +#undef arena_decay_time_default_set +#undef arena_decay_time_get +#undef arena_decay_time_set +#undef arena_dss_prec_get +#undef arena_dss_prec_set +#undef arena_extent_sn_next +#undef arena_get +#undef arena_ichoose +#undef arena_init +#undef arena_lg_dirty_mult_default_get +#undef arena_lg_dirty_mult_default_set +#undef arena_lg_dirty_mult_get +#undef arena_lg_dirty_mult_set +#undef arena_malloc +#undef arena_malloc_hard +#undef arena_malloc_large +#undef arena_mapbits_allocated_get +#undef arena_mapbits_binind_get +#undef arena_mapbits_decommitted_get +#undef arena_mapbits_dirty_get +#undef arena_mapbits_get +#undef arena_mapbits_internal_set +#undef arena_mapbits_large_binind_set +#undef arena_mapbits_large_get +#undef arena_mapbits_large_set +#undef arena_mapbits_large_size_get +#undef arena_mapbits_size_decode +#undef arena_mapbits_size_encode +#undef arena_mapbits_small_runind_get +#undef arena_mapbits_small_set +#undef arena_mapbits_unallocated_set +#undef arena_mapbits_unallocated_size_get +#undef arena_mapbits_unallocated_size_set +#undef arena_mapbits_unzeroed_get +#undef arena_mapbitsp_get_const +#undef arena_mapbitsp_get_mutable +#undef arena_mapbitsp_read +#undef arena_mapbitsp_write +#undef arena_maxrun +#undef arena_maybe_purge +#undef arena_metadata_allocated_add +#undef arena_metadata_allocated_get +#undef arena_metadata_allocated_sub +#undef arena_migrate +#undef arena_miscelm_get_const +#undef arena_miscelm_get_mutable +#undef arena_miscelm_to_pageind +#undef arena_miscelm_to_rpages +#undef arena_new +#undef arena_node_alloc +#undef arena_node_dalloc +#undef arena_nthreads_dec +#undef arena_nthreads_get +#undef arena_nthreads_inc +#undef arena_palloc +#undef arena_postfork_child +#undef 
arena_postfork_parent +#undef arena_prefork0 +#undef arena_prefork1 +#undef arena_prefork2 +#undef arena_prefork3 +#undef arena_prof_accum +#undef arena_prof_accum_impl +#undef arena_prof_accum_locked +#undef arena_prof_promoted +#undef arena_prof_tctx_get +#undef arena_prof_tctx_reset +#undef arena_prof_tctx_set +#undef arena_ptr_small_binind_get +#undef arena_purge +#undef arena_quarantine_junk_small +#undef arena_ralloc +#undef arena_ralloc_junk_large +#undef arena_ralloc_no_move +#undef arena_rd_to_miscelm +#undef arena_redzone_corruption +#undef arena_reset +#undef arena_run_regind +#undef arena_run_to_miscelm +#undef arena_salloc +#undef arena_sdalloc +#undef arena_stats_merge +#undef arena_tcache_fill_small +#undef arena_tdata_get +#undef arena_tdata_get_hard +#undef arenas +#undef arenas_tdata_bypass_cleanup +#undef arenas_tdata_cleanup +#undef atomic_add_p +#undef atomic_add_u +#undef atomic_add_uint32 +#undef atomic_add_uint64 +#undef atomic_add_z +#undef atomic_cas_p +#undef atomic_cas_u +#undef atomic_cas_uint32 +#undef atomic_cas_uint64 +#undef atomic_cas_z +#undef atomic_sub_p +#undef atomic_sub_u +#undef atomic_sub_uint32 +#undef atomic_sub_uint64 +#undef atomic_sub_z +#undef atomic_write_p +#undef atomic_write_u +#undef atomic_write_uint32 +#undef atomic_write_uint64 +#undef atomic_write_z +#undef base_alloc +#undef base_boot +#undef base_postfork_child +#undef base_postfork_parent +#undef base_prefork +#undef base_stats_get +#undef bitmap_full +#undef bitmap_get +#undef bitmap_info_init +#undef bitmap_init +#undef bitmap_set +#undef bitmap_sfu +#undef bitmap_size +#undef bitmap_unset +#undef bootstrap_calloc +#undef bootstrap_free +#undef bootstrap_malloc +#undef bt_init +#undef buferror +#undef chunk_alloc_base +#undef chunk_alloc_cache +#undef chunk_alloc_dss +#undef chunk_alloc_mmap +#undef chunk_alloc_wrapper +#undef chunk_boot +#undef chunk_dalloc_cache +#undef chunk_dalloc_mmap +#undef chunk_dalloc_wrapper +#undef chunk_deregister +#undef chunk_dss_boot +#undef chunk_dss_mergeable +#undef chunk_dss_prec_get +#undef chunk_dss_prec_set +#undef chunk_hooks_default +#undef chunk_hooks_get +#undef chunk_hooks_set +#undef chunk_in_dss +#undef chunk_lookup +#undef chunk_npages +#undef chunk_purge_wrapper +#undef chunk_register +#undef chunks_rtree +#undef chunksize +#undef chunksize_mask +#undef ckh_count +#undef ckh_delete +#undef ckh_insert +#undef ckh_iter +#undef ckh_new +#undef ckh_pointer_hash +#undef ckh_pointer_keycomp +#undef ckh_remove +#undef ckh_search +#undef ckh_string_hash +#undef ckh_string_keycomp +#undef ctl_boot +#undef ctl_bymib +#undef ctl_byname +#undef ctl_nametomib +#undef ctl_postfork_child +#undef ctl_postfork_parent +#undef ctl_prefork +#undef decay_ticker_get +#undef dss_prec_names +#undef extent_node_achunk_get +#undef extent_node_achunk_set +#undef extent_node_addr_get +#undef extent_node_addr_set +#undef extent_node_arena_get +#undef extent_node_arena_set +#undef extent_node_committed_get +#undef extent_node_committed_set +#undef extent_node_dirty_insert +#undef extent_node_dirty_linkage_init +#undef extent_node_dirty_remove +#undef extent_node_init +#undef extent_node_prof_tctx_get +#undef extent_node_prof_tctx_set +#undef extent_node_size_get +#undef extent_node_size_set +#undef extent_node_sn_get +#undef extent_node_sn_set +#undef extent_node_zeroed_get +#undef extent_node_zeroed_set +#undef extent_size_quantize_ceil +#undef extent_size_quantize_floor +#undef extent_tree_ad_destroy +#undef extent_tree_ad_destroy_recurse +#undef 
extent_tree_ad_empty +#undef extent_tree_ad_first +#undef extent_tree_ad_insert +#undef extent_tree_ad_iter +#undef extent_tree_ad_iter_recurse +#undef extent_tree_ad_iter_start +#undef extent_tree_ad_last +#undef extent_tree_ad_new +#undef extent_tree_ad_next +#undef extent_tree_ad_nsearch +#undef extent_tree_ad_prev +#undef extent_tree_ad_psearch +#undef extent_tree_ad_remove +#undef extent_tree_ad_reverse_iter +#undef extent_tree_ad_reverse_iter_recurse +#undef extent_tree_ad_reverse_iter_start +#undef extent_tree_ad_search +#undef extent_tree_szsnad_destroy +#undef extent_tree_szsnad_destroy_recurse +#undef extent_tree_szsnad_empty +#undef extent_tree_szsnad_first +#undef extent_tree_szsnad_insert +#undef extent_tree_szsnad_iter +#undef extent_tree_szsnad_iter_recurse +#undef extent_tree_szsnad_iter_start +#undef extent_tree_szsnad_last +#undef extent_tree_szsnad_new +#undef extent_tree_szsnad_next +#undef extent_tree_szsnad_nsearch +#undef extent_tree_szsnad_prev +#undef extent_tree_szsnad_psearch +#undef extent_tree_szsnad_remove +#undef extent_tree_szsnad_reverse_iter +#undef extent_tree_szsnad_reverse_iter_recurse +#undef extent_tree_szsnad_reverse_iter_start +#undef extent_tree_szsnad_search +#undef ffs_llu +#undef ffs_lu +#undef ffs_u +#undef ffs_u32 +#undef ffs_u64 +#undef ffs_zu +#undef get_errno +#undef hash +#undef hash_fmix_32 +#undef hash_fmix_64 +#undef hash_get_block_32 +#undef hash_get_block_64 +#undef hash_rotl_32 +#undef hash_rotl_64 +#undef hash_x64_128 +#undef hash_x86_128 +#undef hash_x86_32 +#undef huge_aalloc +#undef huge_dalloc +#undef huge_dalloc_junk +#undef huge_malloc +#undef huge_palloc +#undef huge_prof_tctx_get +#undef huge_prof_tctx_reset +#undef huge_prof_tctx_set +#undef huge_ralloc +#undef huge_ralloc_no_move +#undef huge_salloc +#undef iaalloc +#undef ialloc +#undef iallocztm +#undef iarena_cleanup +#undef idalloc +#undef idalloctm +#undef in_valgrind +#undef index2size +#undef index2size_compute +#undef index2size_lookup +#undef index2size_tab +#undef ipalloc +#undef ipalloct +#undef ipallocztm +#undef iqalloc +#undef iralloc +#undef iralloct +#undef iralloct_realign +#undef isalloc +#undef isdalloct +#undef isqalloc +#undef isthreaded +#undef ivsalloc +#undef ixalloc +#undef jemalloc_postfork_child +#undef jemalloc_postfork_parent +#undef jemalloc_prefork +#undef large_maxclass +#undef lg_floor +#undef lg_prof_sample +#undef malloc_cprintf +#undef malloc_mutex_assert_not_owner +#undef malloc_mutex_assert_owner +#undef malloc_mutex_boot +#undef malloc_mutex_init +#undef malloc_mutex_lock +#undef malloc_mutex_postfork_child +#undef malloc_mutex_postfork_parent +#undef malloc_mutex_prefork +#undef malloc_mutex_unlock +#undef malloc_printf +#undef malloc_snprintf +#undef malloc_strtoumax +#undef malloc_tsd_boot0 +#undef malloc_tsd_boot1 +#undef malloc_tsd_cleanup_register +#undef malloc_tsd_dalloc +#undef malloc_tsd_malloc +#undef malloc_tsd_no_cleanup +#undef malloc_vcprintf +#undef malloc_vsnprintf +#undef malloc_write +#undef map_bias +#undef map_misc_offset +#undef mb_write +#undef narenas_auto +#undef narenas_tdata_cleanup +#undef narenas_total_get +#undef ncpus +#undef nhbins +#undef nhclasses +#undef nlclasses +#undef nstime_add +#undef nstime_compare +#undef nstime_copy +#undef nstime_divide +#undef nstime_idivide +#undef nstime_imultiply +#undef nstime_init +#undef nstime_init2 +#undef nstime_monotonic +#undef nstime_ns +#undef nstime_nsec +#undef nstime_sec +#undef nstime_subtract +#undef nstime_update +#undef opt_abort +#undef opt_decay_time 
+#undef opt_dss +#undef opt_junk +#undef opt_junk_alloc +#undef opt_junk_free +#undef opt_lg_chunk +#undef opt_lg_dirty_mult +#undef opt_lg_prof_interval +#undef opt_lg_prof_sample +#undef opt_lg_tcache_max +#undef opt_narenas +#undef opt_prof +#undef opt_prof_accum +#undef opt_prof_active +#undef opt_prof_final +#undef opt_prof_gdump +#undef opt_prof_leak +#undef opt_prof_prefix +#undef opt_prof_thread_active_init +#undef opt_purge +#undef opt_quarantine +#undef opt_redzone +#undef opt_stats_print +#undef opt_tcache +#undef opt_thp +#undef opt_utrace +#undef opt_xmalloc +#undef opt_zero +#undef p2rz +#undef pages_boot +#undef pages_commit +#undef pages_decommit +#undef pages_huge +#undef pages_map +#undef pages_nohuge +#undef pages_purge +#undef pages_trim +#undef pages_unmap +#undef pind2sz +#undef pind2sz_compute +#undef pind2sz_lookup +#undef pind2sz_tab +#undef pow2_ceil_u32 +#undef pow2_ceil_u64 +#undef pow2_ceil_zu +#undef prng_lg_range_u32 +#undef prng_lg_range_u64 +#undef prng_lg_range_zu +#undef prng_range_u32 +#undef prng_range_u64 +#undef prng_range_zu +#undef prng_state_next_u32 +#undef prng_state_next_u64 +#undef prng_state_next_zu +#undef prof_active +#undef prof_active_get +#undef prof_active_get_unlocked +#undef prof_active_set +#undef prof_alloc_prep +#undef prof_alloc_rollback +#undef prof_backtrace +#undef prof_boot0 +#undef prof_boot1 +#undef prof_boot2 +#undef prof_bt_count +#undef prof_dump_header +#undef prof_dump_open +#undef prof_free +#undef prof_free_sampled_object +#undef prof_gdump +#undef prof_gdump_get +#undef prof_gdump_get_unlocked +#undef prof_gdump_set +#undef prof_gdump_val +#undef prof_idump +#undef prof_interval +#undef prof_lookup +#undef prof_malloc +#undef prof_malloc_sample_object +#undef prof_mdump +#undef prof_postfork_child +#undef prof_postfork_parent +#undef prof_prefork0 +#undef prof_prefork1 +#undef prof_realloc +#undef prof_reset +#undef prof_sample_accum_update +#undef prof_sample_threshold_update +#undef prof_tctx_get +#undef prof_tctx_reset +#undef prof_tctx_set +#undef prof_tdata_cleanup +#undef prof_tdata_count +#undef prof_tdata_get +#undef prof_tdata_init +#undef prof_tdata_reinit +#undef prof_thread_active_get +#undef prof_thread_active_init_get +#undef prof_thread_active_init_set +#undef prof_thread_active_set +#undef prof_thread_name_get +#undef prof_thread_name_set +#undef psz2ind +#undef psz2u +#undef purge_mode_names +#undef quarantine +#undef quarantine_alloc_hook +#undef quarantine_alloc_hook_work +#undef quarantine_cleanup +#undef rtree_child_read +#undef rtree_child_read_hard +#undef rtree_child_tryread +#undef rtree_delete +#undef rtree_get +#undef rtree_new +#undef rtree_node_valid +#undef rtree_set +#undef rtree_start_level +#undef rtree_subkey +#undef rtree_subtree_read +#undef rtree_subtree_read_hard +#undef rtree_subtree_tryread +#undef rtree_val_read +#undef rtree_val_write +#undef run_quantize_ceil +#undef run_quantize_floor +#undef s2u +#undef s2u_compute +#undef s2u_lookup +#undef sa2u +#undef set_errno +#undef size2index +#undef size2index_compute +#undef size2index_lookup +#undef size2index_tab +#undef spin_adaptive +#undef spin_init +#undef stats_cactive +#undef stats_cactive_add +#undef stats_cactive_get +#undef stats_cactive_sub +#undef stats_print +#undef tcache_alloc_easy +#undef tcache_alloc_large +#undef tcache_alloc_small +#undef tcache_alloc_small_hard +#undef tcache_arena_reassociate +#undef tcache_bin_flush_large +#undef tcache_bin_flush_small +#undef tcache_bin_info +#undef tcache_boot +#undef 
tcache_cleanup +#undef tcache_create +#undef tcache_dalloc_large +#undef tcache_dalloc_small +#undef tcache_enabled_cleanup +#undef tcache_enabled_get +#undef tcache_enabled_set +#undef tcache_event +#undef tcache_event_hard +#undef tcache_flush +#undef tcache_get +#undef tcache_get_hard +#undef tcache_maxclass +#undef tcache_postfork_child +#undef tcache_postfork_parent +#undef tcache_prefork +#undef tcache_salloc +#undef tcache_stats_merge +#undef tcaches +#undef tcaches_create +#undef tcaches_destroy +#undef tcaches_flush +#undef tcaches_get +#undef thread_allocated_cleanup +#undef thread_deallocated_cleanup +#undef ticker_copy +#undef ticker_init +#undef ticker_read +#undef ticker_tick +#undef ticker_ticks +#undef tsd_arena_get +#undef tsd_arena_set +#undef tsd_arenap_get +#undef tsd_arenas_tdata_bypass_get +#undef tsd_arenas_tdata_bypass_set +#undef tsd_arenas_tdata_bypassp_get +#undef tsd_arenas_tdata_get +#undef tsd_arenas_tdata_set +#undef tsd_arenas_tdatap_get +#undef tsd_boot +#undef tsd_boot0 +#undef tsd_boot1 +#undef tsd_booted +#undef tsd_booted_get +#undef tsd_cleanup +#undef tsd_cleanup_wrapper +#undef tsd_fetch +#undef tsd_fetch_impl +#undef tsd_get +#undef tsd_get_allocates +#undef tsd_iarena_get +#undef tsd_iarena_set +#undef tsd_iarenap_get +#undef tsd_initialized +#undef tsd_init_check_recursion +#undef tsd_init_finish +#undef tsd_init_head +#undef tsd_narenas_tdata_get +#undef tsd_narenas_tdata_set +#undef tsd_narenas_tdatap_get +#undef tsd_wrapper_get +#undef tsd_wrapper_set +#undef tsd_nominal +#undef tsd_prof_tdata_get +#undef tsd_prof_tdata_set +#undef tsd_prof_tdatap_get +#undef tsd_quarantine_get +#undef tsd_quarantine_set +#undef tsd_quarantinep_get +#undef tsd_set +#undef tsd_tcache_enabled_get +#undef tsd_tcache_enabled_set +#undef tsd_tcache_enabledp_get +#undef tsd_tcache_get +#undef tsd_tcache_set +#undef tsd_tcachep_get +#undef tsd_thread_allocated_get +#undef tsd_thread_allocated_set +#undef tsd_thread_allocatedp_get +#undef tsd_thread_deallocated_get +#undef tsd_thread_deallocated_set +#undef tsd_thread_deallocatedp_get +#undef tsd_tls +#undef tsd_tsd +#undef tsd_tsdn +#undef tsd_witness_fork_get +#undef tsd_witness_fork_set +#undef tsd_witness_forkp_get +#undef tsd_witnesses_get +#undef tsd_witnesses_set +#undef tsd_witnessesp_get +#undef tsdn_fetch +#undef tsdn_null +#undef tsdn_tsd +#undef u2rz +#undef valgrind_freelike_block +#undef valgrind_make_mem_defined +#undef valgrind_make_mem_noaccess +#undef valgrind_make_mem_undefined +#undef witness_assert_depth +#undef witness_assert_depth_to_rank +#undef witness_assert_lockless +#undef witness_assert_not_owner +#undef witness_assert_owner +#undef witness_depth_error +#undef witness_fork_cleanup +#undef witness_init +#undef witness_lock +#undef witness_lock_error +#undef witness_not_owner_error +#undef witness_owner +#undef witness_owner_error +#undef witness_postfork_child +#undef witness_postfork_parent +#undef witness_prefork +#undef witness_unlock +#undef witnesses_cleanup +#undef zone_register diff --git a/sources/jemalloc/include-win/jemalloc/internal/private_unnamespace.sh b/sources/jemalloc/include-win/jemalloc/internal/private_unnamespace.sh new file mode 100755 index 00000000..23fed8e8 --- /dev/null +++ b/sources/jemalloc/include-win/jemalloc/internal/private_unnamespace.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +for symbol in `cat $1` ; do + echo "#undef ${symbol}" +done diff --git a/sources/jemalloc/include-win/jemalloc/internal/prng.h b/sources/jemalloc/include-win/jemalloc/internal/prng.h new file 
mode 100755 index 00000000..c2bda19c --- /dev/null +++ b/sources/jemalloc/include-win/jemalloc/internal/prng.h @@ -0,0 +1,207 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +/* + * Simple linear congruential pseudo-random number generator: + * + * prng(x) = (a*x + c) % m + * + * where the following constants ensure maximal period: + * + * a == Odd number (relatively prime to 2^n), and (a-1) is a multiple of 4. + * c == Odd number (relatively prime to 2^n). + * m == 2^32 + * + * See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints. + * + * This choice of m has the disadvantage that the quality of the bits is + * proportional to bit position. For example, the lowest bit has a cycle of 2, + * the next has a cycle of 4, etc. For this reason, we prefer to use the upper + * bits. + */ + +#define PRNG_A_32 UINT32_C(1103515241) +#define PRNG_C_32 UINT32_C(12347) + +#define PRNG_A_64 UINT64_C(6364136223846793005) +#define PRNG_C_64 UINT64_C(1442695040888963407) + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +uint32_t prng_state_next_u32(uint32_t state); +uint64_t prng_state_next_u64(uint64_t state); +size_t prng_state_next_zu(size_t state); + +uint32_t prng_lg_range_u32(uint32_t *state, unsigned lg_range, + bool atomic); +uint64_t prng_lg_range_u64(uint64_t *state, unsigned lg_range); +size_t prng_lg_range_zu(size_t *state, unsigned lg_range, bool atomic); + +uint32_t prng_range_u32(uint32_t *state, uint32_t range, bool atomic); +uint64_t prng_range_u64(uint64_t *state, uint64_t range); +size_t prng_range_zu(size_t *state, size_t range, bool atomic); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PRNG_C_)) +JEMALLOC_ALWAYS_INLINE uint32_t +prng_state_next_u32(uint32_t state) +{ + + return ((state * PRNG_A_32) + PRNG_C_32); +} + +JEMALLOC_ALWAYS_INLINE uint64_t +prng_state_next_u64(uint64_t state) +{ + + return ((state * PRNG_A_64) + PRNG_C_64); +} + +JEMALLOC_ALWAYS_INLINE size_t +prng_state_next_zu(size_t state) +{ + +#if LG_SIZEOF_PTR == 2 + return ((state * PRNG_A_32) + PRNG_C_32); +#elif LG_SIZEOF_PTR == 3 + return ((state * PRNG_A_64) + PRNG_C_64); +#else +#error Unsupported pointer size +#endif +} + +JEMALLOC_ALWAYS_INLINE uint32_t +prng_lg_range_u32(uint32_t *state, unsigned lg_range, bool atomic) +{ + uint32_t ret, state1; + + assert(lg_range > 0); + assert(lg_range <= 32); + + if (atomic) { + uint32_t state0; + + do { + state0 = atomic_read_uint32(state); + state1 = prng_state_next_u32(state0); + } while (atomic_cas_uint32(state, state0, state1)); + } else { + state1 = prng_state_next_u32(*state); + *state = state1; + } + ret = state1 >> (32 - lg_range); + + return (ret); +} + +/* 64-bit atomic operations cannot be supported on all relevant platforms.
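+ * Hence, unlike the 32-bit and size_t variants, prng_lg_range_u64() takes no + * atomic parameter and always reads and updates *state non-atomically.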
*/ +JEMALLOC_ALWAYS_INLINE uint64_t +prng_lg_range_u64(uint64_t *state, unsigned lg_range) +{ + uint64_t ret, state1; + + assert(lg_range > 0); + assert(lg_range <= 64); + + state1 = prng_state_next_u64(*state); + *state = state1; + ret = state1 >> (64 - lg_range); + + return (ret); +} + +JEMALLOC_ALWAYS_INLINE size_t +prng_lg_range_zu(size_t *state, unsigned lg_range, bool atomic) +{ + size_t ret, state1; + + assert(lg_range > 0); + assert(lg_range <= ZU(1) << (3 + LG_SIZEOF_PTR)); + + if (atomic) { + size_t state0; + + do { + state0 = atomic_read_z(state); + state1 = prng_state_next_zu(state0); + } while (atomic_cas_z(state, state0, state1)); + } else { + state1 = prng_state_next_zu(*state); + *state = state1; + } + ret = state1 >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - lg_range); + + return (ret); +} + +JEMALLOC_ALWAYS_INLINE uint32_t +prng_range_u32(uint32_t *state, uint32_t range, bool atomic) +{ + uint32_t ret; + unsigned lg_range; + + assert(range > 1); + + /* Compute the ceiling of lg(range). */ + lg_range = ffs_u32(pow2_ceil_u32(range)) - 1; + + /* Generate a result in [0..range) via repeated trial. */ + do { + ret = prng_lg_range_u32(state, lg_range, atomic); + } while (ret >= range); + + return (ret); +} + +JEMALLOC_ALWAYS_INLINE uint64_t +prng_range_u64(uint64_t *state, uint64_t range) +{ + uint64_t ret; + unsigned lg_range; + + assert(range > 1); + + /* Compute the ceiling of lg(range). */ + lg_range = ffs_u64(pow2_ceil_u64(range)) - 1; + + /* Generate a result in [0..range) via repeated trial. */ + do { + ret = prng_lg_range_u64(state, lg_range); + } while (ret >= range); + + return (ret); +} + +JEMALLOC_ALWAYS_INLINE size_t +prng_range_zu(size_t *state, size_t range, bool atomic) +{ + size_t ret; + unsigned lg_range; + + assert(range > 1); + + /* Compute the ceiling of lg(range). */ + lg_range = ffs_u64(pow2_ceil_u64(range)) - 1; + + /* Generate a result in [0..range) via repeated trial. */ + do { + ret = prng_lg_range_zu(state, lg_range, atomic); + } while (ret >= range); + + return (ret); +} +#endif + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ diff --git a/sources/jemalloc/include-win/jemalloc/internal/prof.h b/sources/jemalloc/include-win/jemalloc/internal/prof.h new file mode 100755 index 00000000..8293b71e --- /dev/null +++ b/sources/jemalloc/include-win/jemalloc/internal/prof.h @@ -0,0 +1,547 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +typedef struct prof_bt_s prof_bt_t; +typedef struct prof_cnt_s prof_cnt_t; +typedef struct prof_tctx_s prof_tctx_t; +typedef struct prof_gctx_s prof_gctx_t; +typedef struct prof_tdata_s prof_tdata_t; + +/* Option defaults. */ +#ifdef JEMALLOC_PROF +# define PROF_PREFIX_DEFAULT "jeprof" +#else +# define PROF_PREFIX_DEFAULT "" +#endif +#define LG_PROF_SAMPLE_DEFAULT 19 +#define LG_PROF_INTERVAL_DEFAULT -1 + +/* + * Hard limit on stack backtrace depth. The version of prof_backtrace() that + * is based on __builtin_return_address() necessarily has a hard-coded number + * of backtrace frame handlers, and should be kept in sync with this setting. + */ +#define PROF_BT_MAX 128 + +/* Initial hash table size. */ +#define PROF_CKH_MINITEMS 64 + +/* Size of memory buffer to use when writing dump files. */ +#define PROF_DUMP_BUFSIZE 65536 + +/* Size of stack-allocated buffer used by prof_printf(). */ +#define PROF_PRINTF_BUFSIZE 128 + +/* + * Number of mutexes shared among all gctx's. 
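+ * Each gctx points at one mutex from this shared pool rather than embedding
+ * a mutex of its own.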
No space is allocated for these + * unless profiling is enabled, so it's okay to over-provision. + */ +#define PROF_NCTX_LOCKS 1024 + +/* + * Number of mutexes shared among all tdata's. No space is allocated for these + * unless profiling is enabled, so it's okay to over-provision. + */ +#define PROF_NTDATA_LOCKS 256 + +/* + * prof_tdata pointers close to NULL are used to encode state information that + * is used for cleaning up during thread shutdown. + */ +#define PROF_TDATA_STATE_REINCARNATED ((prof_tdata_t *)(uintptr_t)1) +#define PROF_TDATA_STATE_PURGATORY ((prof_tdata_t *)(uintptr_t)2) +#define PROF_TDATA_STATE_MAX PROF_TDATA_STATE_PURGATORY + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +struct prof_bt_s { + /* Backtrace, stored as len program counters. */ + void **vec; + unsigned len; +}; + +#ifdef JEMALLOC_PROF_LIBGCC +/* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */ +typedef struct { + prof_bt_t *bt; + unsigned max; +} prof_unwind_data_t; +#endif + +struct prof_cnt_s { + /* Profiling counters. */ + uint64_t curobjs; + uint64_t curbytes; + uint64_t accumobjs; + uint64_t accumbytes; +}; + +typedef enum { + prof_tctx_state_initializing, + prof_tctx_state_nominal, + prof_tctx_state_dumping, + prof_tctx_state_purgatory /* Dumper must finish destroying. */ +} prof_tctx_state_t; + +struct prof_tctx_s { + /* Thread data for thread that performed the allocation. */ + prof_tdata_t *tdata; + + /* + * Copy of tdata->thr_{uid,discrim}, necessary because tdata may be + * defunct during teardown. + */ + uint64_t thr_uid; + uint64_t thr_discrim; + + /* Profiling counters, protected by tdata->lock. */ + prof_cnt_t cnts; + + /* Associated global context. */ + prof_gctx_t *gctx; + + /* + * UID that distinguishes multiple tctx's created by the same thread, + * but coexisting in gctx->tctxs. There are two ways that such + * coexistence can occur: + * - A dumper thread can cause a tctx to be retained in the purgatory + * state. + * - Although a single "producer" thread must create all tctx's which + * share the same thr_uid, multiple "consumers" can each concurrently + * execute portions of prof_tctx_destroy(). prof_tctx_destroy() only + * gets called once each time cnts.cur{objs,bytes} drop to 0, but this + * threshold can be hit again before the first consumer finishes + * executing prof_tctx_destroy(). + */ + uint64_t tctx_uid; + + /* Linkage into gctx's tctxs. */ + rb_node(prof_tctx_t) tctx_link; + + /* + * True during prof_alloc_prep()..prof_malloc_sample_object(), prevents + * sample vs destroy race. + */ + bool prepared; + + /* Current dump-related state, protected by gctx->lock. */ + prof_tctx_state_t state; + + /* + * Copy of cnts snapshotted during early dump phase, protected by + * dump_mtx. + */ + prof_cnt_t dump_cnts; +}; +typedef rb_tree(prof_tctx_t) prof_tctx_tree_t; + +struct prof_gctx_s { + /* Protects nlimbo, cnt_summed, and tctxs. */ + malloc_mutex_t *lock; + + /* + * Number of threads that currently cause this gctx to be in a state of + * limbo due to one of: + * - Initializing this gctx. + * - Initializing per thread counters associated with this gctx. + * - Preparing to destroy this gctx. + * - Dumping a heap profile that includes this gctx. + * nlimbo must be 1 (single destroyer) in order to safely destroy the + * gctx. + */ + unsigned nlimbo; + + /* + * Tree of profile counters, one for each thread that has allocated in + * this context. 
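+ * Like nlimbo and cnt_summed, this tree is protected by the gctx's lock
+ * (see above).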
+ */ + prof_tctx_tree_t tctxs; + + /* Linkage for tree of contexts to be dumped. */ + rb_node(prof_gctx_t) dump_link; + + /* Temporary storage for summation during dump. */ + prof_cnt_t cnt_summed; + + /* Associated backtrace. */ + prof_bt_t bt; + + /* Backtrace vector, variable size, referred to by bt. */ + void *vec[1]; +}; +typedef rb_tree(prof_gctx_t) prof_gctx_tree_t; + +struct prof_tdata_s { + malloc_mutex_t *lock; + + /* Monotonically increasing unique thread identifier. */ + uint64_t thr_uid; + + /* + * Monotonically increasing discriminator among tdata structures + * associated with the same thr_uid. + */ + uint64_t thr_discrim; + + /* Included in heap profile dumps if non-NULL. */ + char *thread_name; + + bool attached; + bool expired; + + rb_node(prof_tdata_t) tdata_link; + + /* + * Counter used to initialize prof_tctx_t's tctx_uid. No locking is + * necessary when incrementing this field, because only one thread ever + * does so. + */ + uint64_t tctx_uid_next; + + /* + * Hash of (prof_bt_t *)-->(prof_tctx_t *). Each thread tracks + * backtraces for which it has non-zero allocation/deallocation counters + * associated with thread-specific prof_tctx_t objects. Other threads + * may write to prof_tctx_t contents when freeing associated objects. + */ + ckh_t bt2tctx; + + /* Sampling state. */ + uint64_t prng_state; + uint64_t bytes_until_sample; + + /* State used to avoid dumping while operating on prof internals. */ + bool enq; + bool enq_idump; + bool enq_gdump; + + /* + * Set to true during an early dump phase for tdata's which are + * currently being dumped. New threads' tdata's have this initialized + * to false so that they aren't accidentally included in later dump + * phases. + */ + bool dumping; + + /* + * True if profiling is active for this tdata's thread + * (thread.prof.active mallctl). + */ + bool active; + + /* Temporary storage for summation during dump. */ + prof_cnt_t cnt_summed; + + /* Backtrace vector, used for calls to prof_backtrace(). */ + void *vec[PROF_BT_MAX]; +}; +typedef rb_tree(prof_tdata_t) prof_tdata_tree_t; + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +extern bool opt_prof; +extern bool opt_prof_active; +extern bool opt_prof_thread_active_init; +extern size_t opt_lg_prof_sample; /* Mean bytes between samples. */ +extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */ +extern bool opt_prof_gdump; /* High-water memory dumping. */ +extern bool opt_prof_final; /* Final profile dumping. */ +extern bool opt_prof_leak; /* Dump leak summary at exit. */ +extern bool opt_prof_accum; /* Report cumulative bytes. */ +extern char opt_prof_prefix[ + /* Minimize memory bloat for non-prof builds. */ +#ifdef JEMALLOC_PROF + PATH_MAX + +#endif + 1]; + +/* Accessed via prof_active_[gs]et{_unlocked,}(). */ +extern bool prof_active; + +/* Accessed via prof_gdump_[gs]et{_unlocked,}(). */ +extern bool prof_gdump_val; + +/* + * Profile dump interval, measured in bytes allocated. Each arena triggers a + * profile dump when it reaches this threshold. The effect is that the + * interval between profile dumps averages prof_interval, though the actual + * interval between dumps will tend to be sporadic, and the interval will be a + * maximum of approximately (prof_interval * narenas). + */ +extern uint64_t prof_interval; + +/* + * Initialized as opt_lg_prof_sample, and potentially modified during profiling + * resets. 
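+ * With the default LG_PROF_SAMPLE_DEFAULT value of 19, allocations are
+ * sampled on average once per 2^19 bytes (512 KiB) of allocation activity.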
+ */ +extern size_t lg_prof_sample; + +void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated); +void prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize, + prof_tctx_t *tctx); +void prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx); +void bt_init(prof_bt_t *bt, void **vec); +void prof_backtrace(prof_bt_t *bt); +prof_tctx_t *prof_lookup(tsd_t *tsd, prof_bt_t *bt); +#ifdef JEMALLOC_JET +size_t prof_tdata_count(void); +size_t prof_bt_count(void); +const prof_cnt_t *prof_cnt_all(void); +typedef int (prof_dump_open_t)(bool, const char *); +extern prof_dump_open_t *prof_dump_open; +typedef bool (prof_dump_header_t)(tsdn_t *, bool, const prof_cnt_t *); +extern prof_dump_header_t *prof_dump_header; +#endif +void prof_idump(tsdn_t *tsdn); +bool prof_mdump(tsd_t *tsd, const char *filename); +void prof_gdump(tsdn_t *tsdn); +prof_tdata_t *prof_tdata_init(tsd_t *tsd); +prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata); +void prof_reset(tsd_t *tsd, size_t lg_sample); +void prof_tdata_cleanup(tsd_t *tsd); +bool prof_active_get(tsdn_t *tsdn); +bool prof_active_set(tsdn_t *tsdn, bool active); +const char *prof_thread_name_get(tsd_t *tsd); +int prof_thread_name_set(tsd_t *tsd, const char *thread_name); +bool prof_thread_active_get(tsd_t *tsd); +bool prof_thread_active_set(tsd_t *tsd, bool active); +bool prof_thread_active_init_get(tsdn_t *tsdn); +bool prof_thread_active_init_set(tsdn_t *tsdn, bool active_init); +bool prof_gdump_get(tsdn_t *tsdn); +bool prof_gdump_set(tsdn_t *tsdn, bool active); +void prof_boot0(void); +void prof_boot1(void); +bool prof_boot2(tsd_t *tsd); +void prof_prefork0(tsdn_t *tsdn); +void prof_prefork1(tsdn_t *tsdn); +void prof_postfork_parent(tsdn_t *tsdn); +void prof_postfork_child(tsdn_t *tsdn); +void prof_sample_threshold_update(prof_tdata_t *tdata); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +bool prof_active_get_unlocked(void); +bool prof_gdump_get_unlocked(void); +prof_tdata_t *prof_tdata_get(tsd_t *tsd, bool create); +prof_tctx_t *prof_tctx_get(tsdn_t *tsdn, const void *ptr); +void prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize, + prof_tctx_t *tctx); +void prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize, + const void *old_ptr, prof_tctx_t *tctx); +bool prof_sample_accum_update(tsd_t *tsd, size_t usize, bool commit, + prof_tdata_t **tdata_out); +prof_tctx_t *prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, + bool update); +void prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize, + prof_tctx_t *tctx); +void prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, + prof_tctx_t *tctx, bool prof_active, bool updated, const void *old_ptr, + size_t old_usize, prof_tctx_t *old_tctx); +void prof_free(tsd_t *tsd, const void *ptr, size_t usize); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PROF_C_)) +JEMALLOC_ALWAYS_INLINE bool +prof_active_get_unlocked(void) +{ + + /* + * Even if opt_prof is true, sampling can be temporarily disabled by + * setting prof_active to false. No locking is used when reading + * prof_active in the fast path, so there are no guarantees regarding + * how long it will take for all threads to notice state changes. 
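+ * A stale read here only delays the point at which a thread starts or
+ * stops sampling; it cannot corrupt profiling state.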
+ */ + return (prof_active); +} + +JEMALLOC_ALWAYS_INLINE bool +prof_gdump_get_unlocked(void) +{ + + /* + * No locking is used when reading prof_gdump_val in the fast path, so + * there are no guarantees regarding how long it will take for all + * threads to notice state changes. + */ + return (prof_gdump_val); +} + +JEMALLOC_ALWAYS_INLINE prof_tdata_t * +prof_tdata_get(tsd_t *tsd, bool create) +{ + prof_tdata_t *tdata; + + cassert(config_prof); + + tdata = tsd_prof_tdata_get(tsd); + if (create) { + if (unlikely(tdata == NULL)) { + if (tsd_nominal(tsd)) { + tdata = prof_tdata_init(tsd); + tsd_prof_tdata_set(tsd, tdata); + } + } else if (unlikely(tdata->expired)) { + tdata = prof_tdata_reinit(tsd, tdata); + tsd_prof_tdata_set(tsd, tdata); + } + assert(tdata == NULL || tdata->attached); + } + + return (tdata); +} + +JEMALLOC_ALWAYS_INLINE prof_tctx_t * +prof_tctx_get(tsdn_t *tsdn, const void *ptr) +{ + + cassert(config_prof); + assert(ptr != NULL); + + return (arena_prof_tctx_get(tsdn, ptr)); +} + +JEMALLOC_ALWAYS_INLINE void +prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx) +{ + + cassert(config_prof); + assert(ptr != NULL); + + arena_prof_tctx_set(tsdn, ptr, usize, tctx); +} + +JEMALLOC_ALWAYS_INLINE void +prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize, const void *old_ptr, + prof_tctx_t *old_tctx) +{ + + cassert(config_prof); + assert(ptr != NULL); + + arena_prof_tctx_reset(tsdn, ptr, usize, old_ptr, old_tctx); +} + +JEMALLOC_ALWAYS_INLINE bool +prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update, + prof_tdata_t **tdata_out) +{ + prof_tdata_t *tdata; + + cassert(config_prof); + + tdata = prof_tdata_get(tsd, true); + if (unlikely((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)) + tdata = NULL; + + if (tdata_out != NULL) + *tdata_out = tdata; + + if (unlikely(tdata == NULL)) + return (true); + + if (likely(tdata->bytes_until_sample >= usize)) { + if (update) + tdata->bytes_until_sample -= usize; + return (true); + } else { + /* Compute new sample threshold. */ + if (update) + prof_sample_threshold_update(tdata); + return (!tdata->active); + } +} + +JEMALLOC_ALWAYS_INLINE prof_tctx_t * +prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update) +{ + prof_tctx_t *ret; + prof_tdata_t *tdata; + prof_bt_t bt; + + assert(usize == s2u(usize)); + + if (!prof_active || likely(prof_sample_accum_update(tsd, usize, update, + &tdata))) + ret = (prof_tctx_t *)(uintptr_t)1U; + else { + bt_init(&bt, tdata->vec); + prof_backtrace(&bt); + ret = prof_lookup(tsd, &bt); + } + + return (ret); +} + +JEMALLOC_ALWAYS_INLINE void +prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx) +{ + + cassert(config_prof); + assert(ptr != NULL); + assert(usize == isalloc(tsdn, ptr, true)); + + if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) + prof_malloc_sample_object(tsdn, ptr, usize, tctx); + else + prof_tctx_set(tsdn, ptr, usize, (prof_tctx_t *)(uintptr_t)1U); +} + +JEMALLOC_ALWAYS_INLINE void +prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx, + bool prof_active, bool updated, const void *old_ptr, size_t old_usize, + prof_tctx_t *old_tctx) +{ + bool sampled, old_sampled; + + cassert(config_prof); + assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U); + + if (prof_active && !updated && ptr != NULL) { + assert(usize == isalloc(tsd_tsdn(tsd), ptr, true)); + if (prof_sample_accum_update(tsd, usize, true, NULL)) { + /* + * Don't sample. 
The usize passed to prof_alloc_prep() + * was larger than what actually got allocated, so a + * backtrace was captured for this allocation, even + * though its actual usize was insufficient to cross the + * sample threshold. + */ + prof_alloc_rollback(tsd, tctx, true); + tctx = (prof_tctx_t *)(uintptr_t)1U; + } + } + + sampled = ((uintptr_t)tctx > (uintptr_t)1U); + old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U); + + if (unlikely(sampled)) + prof_malloc_sample_object(tsd_tsdn(tsd), ptr, usize, tctx); + else + prof_tctx_reset(tsd_tsdn(tsd), ptr, usize, old_ptr, old_tctx); + + if (unlikely(old_sampled)) + prof_free_sampled_object(tsd, old_usize, old_tctx); +} + +JEMALLOC_ALWAYS_INLINE void +prof_free(tsd_t *tsd, const void *ptr, size_t usize) +{ + prof_tctx_t *tctx = prof_tctx_get(tsd_tsdn(tsd), ptr); + + cassert(config_prof); + assert(usize == isalloc(tsd_tsdn(tsd), ptr, true)); + + if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) + prof_free_sampled_object(tsd, usize, tctx); +} +#endif + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ diff --git a/sources/jemalloc/include-win/jemalloc/internal/public_namespace.h b/sources/jemalloc/include-win/jemalloc/internal/public_namespace.h new file mode 100755 index 00000000..edc48198 --- /dev/null +++ b/sources/jemalloc/include-win/jemalloc/internal/public_namespace.h @@ -0,0 +1,20 @@ +#define je_malloc_conf JEMALLOC_N(malloc_conf) +#define je_malloc_message JEMALLOC_N(malloc_message) +#define je_malloc JEMALLOC_N(malloc) +#define je_calloc JEMALLOC_N(calloc) +#define je_posix_memalign JEMALLOC_N(posix_memalign) +#define je_aligned_alloc JEMALLOC_N(aligned_alloc) +#define je_realloc JEMALLOC_N(realloc) +#define je_free JEMALLOC_N(free) +#define je_mallocx JEMALLOC_N(mallocx) +#define je_rallocx JEMALLOC_N(rallocx) +#define je_xallocx JEMALLOC_N(xallocx) +#define je_sallocx JEMALLOC_N(sallocx) +#define je_dallocx JEMALLOC_N(dallocx) +#define je_sdallocx JEMALLOC_N(sdallocx) +#define je_nallocx JEMALLOC_N(nallocx) +#define je_mallctl JEMALLOC_N(mallctl) +#define je_mallctlnametomib JEMALLOC_N(mallctlnametomib) +#define je_mallctlbymib JEMALLOC_N(mallctlbymib) +#define je_malloc_stats_print JEMALLOC_N(malloc_stats_print) +#define je_malloc_usable_size JEMALLOC_N(malloc_usable_size) diff --git a/sources/jemalloc/include-win/jemalloc/internal/public_namespace.sh b/sources/jemalloc/include-win/jemalloc/internal/public_namespace.sh new file mode 100755 index 00000000..362109f7 --- /dev/null +++ b/sources/jemalloc/include-win/jemalloc/internal/public_namespace.sh @@ -0,0 +1,6 @@ +#!/bin/sh + +for nm in `cat $1` ; do + n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'` + echo "#define je_${n} JEMALLOC_N(${n})" +done diff --git a/sources/jemalloc/include-win/jemalloc/internal/public_symbols.txt b/sources/jemalloc/include-win/jemalloc/internal/public_symbols.txt new file mode 100755 index 00000000..9f6b1d2b --- /dev/null +++ b/sources/jemalloc/include-win/jemalloc/internal/public_symbols.txt @@ -0,0 +1,20 @@ +malloc_conf:je_malloc_conf +malloc_message:je_malloc_message +malloc:je_malloc +calloc:je_calloc +posix_memalign:je_posix_memalign +aligned_alloc:je_aligned_alloc +realloc:je_realloc +free:je_free +mallocx:je_mallocx +rallocx:je_rallocx +xallocx:je_xallocx +sallocx:je_sallocx +dallocx:je_dallocx +sdallocx:je_sdallocx +nallocx:je_nallocx +mallctl:je_mallctl +mallctlnametomib:je_mallctlnametomib +mallctlbymib:je_mallctlbymib +malloc_stats_print:je_malloc_stats_print 
+malloc_usable_size:je_malloc_usable_size diff --git a/sources/jemalloc/include-win/jemalloc/internal/public_unnamespace.h b/sources/jemalloc/include-win/jemalloc/internal/public_unnamespace.h new file mode 100755 index 00000000..4d56cedd --- /dev/null +++ b/sources/jemalloc/include-win/jemalloc/internal/public_unnamespace.h @@ -0,0 +1,20 @@ +#undef je_malloc_conf +#undef je_malloc_message +#undef je_malloc +#undef je_calloc +#undef je_posix_memalign +#undef je_aligned_alloc +#undef je_realloc +#undef je_free +#undef je_mallocx +#undef je_rallocx +#undef je_xallocx +#undef je_sallocx +#undef je_dallocx +#undef je_sdallocx +#undef je_nallocx +#undef je_mallctl +#undef je_mallctlnametomib +#undef je_mallctlbymib +#undef je_malloc_stats_print +#undef je_malloc_usable_size diff --git a/sources/jemalloc/include-win/jemalloc/internal/public_unnamespace.sh b/sources/jemalloc/include-win/jemalloc/internal/public_unnamespace.sh new file mode 100755 index 00000000..4239d177 --- /dev/null +++ b/sources/jemalloc/include-win/jemalloc/internal/public_unnamespace.sh @@ -0,0 +1,6 @@ +#!/bin/sh + +for nm in `cat $1` ; do + n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'` + echo "#undef je_${n}" +done diff --git a/sources/jemalloc/include-win/jemalloc/internal/ql.h b/sources/jemalloc/include-win/jemalloc/internal/ql.h new file mode 100755 index 00000000..1834bb85 --- /dev/null +++ b/sources/jemalloc/include-win/jemalloc/internal/ql.h @@ -0,0 +1,81 @@ +/* List definitions. */ +#define ql_head(a_type) \ +struct { \ + a_type *qlh_first; \ +} + +#define ql_head_initializer(a_head) {NULL} + +#define ql_elm(a_type) qr(a_type) + +/* List functions. */ +#define ql_new(a_head) do { \ + (a_head)->qlh_first = NULL; \ +} while (0) + +#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field) + +#define ql_first(a_head) ((a_head)->qlh_first) + +#define ql_last(a_head, a_field) \ + ((ql_first(a_head) != NULL) \ + ? qr_prev(ql_first(a_head), a_field) : NULL) + +#define ql_next(a_head, a_elm, a_field) \ + ((ql_last(a_head, a_field) != (a_elm)) \ + ? qr_next((a_elm), a_field) : NULL) + +#define ql_prev(a_head, a_elm, a_field) \ + ((ql_first(a_head) != (a_elm)) ? 
qr_prev((a_elm), a_field) \ + : NULL) + +#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \ + qr_before_insert((a_qlelm), (a_elm), a_field); \ + if (ql_first(a_head) == (a_qlelm)) { \ + ql_first(a_head) = (a_elm); \ + } \ +} while (0) + +#define ql_after_insert(a_qlelm, a_elm, a_field) \ + qr_after_insert((a_qlelm), (a_elm), a_field) + +#define ql_head_insert(a_head, a_elm, a_field) do { \ + if (ql_first(a_head) != NULL) { \ + qr_before_insert(ql_first(a_head), (a_elm), a_field); \ + } \ + ql_first(a_head) = (a_elm); \ +} while (0) + +#define ql_tail_insert(a_head, a_elm, a_field) do { \ + if (ql_first(a_head) != NULL) { \ + qr_before_insert(ql_first(a_head), (a_elm), a_field); \ + } \ + ql_first(a_head) = qr_next((a_elm), a_field); \ +} while (0) + +#define ql_remove(a_head, a_elm, a_field) do { \ + if (ql_first(a_head) == (a_elm)) { \ + ql_first(a_head) = qr_next(ql_first(a_head), a_field); \ + } \ + if (ql_first(a_head) != (a_elm)) { \ + qr_remove((a_elm), a_field); \ + } else { \ + ql_first(a_head) = NULL; \ + } \ +} while (0) + +#define ql_head_remove(a_head, a_type, a_field) do { \ + a_type *t = ql_first(a_head); \ + ql_remove((a_head), t, a_field); \ +} while (0) + +#define ql_tail_remove(a_head, a_type, a_field) do { \ + a_type *t = ql_last(a_head, a_field); \ + ql_remove((a_head), t, a_field); \ +} while (0) + +#define ql_foreach(a_var, a_head, a_field) \ + qr_foreach((a_var), ql_first(a_head), a_field) + +#define ql_reverse_foreach(a_var, a_head, a_field) \ + qr_reverse_foreach((a_var), ql_first(a_head), a_field) diff --git a/sources/jemalloc/include-win/jemalloc/internal/qr.h b/sources/jemalloc/include-win/jemalloc/internal/qr.h new file mode 100755 index 00000000..0fbaec25 --- /dev/null +++ b/sources/jemalloc/include-win/jemalloc/internal/qr.h @@ -0,0 +1,69 @@ +/* Ring definitions. */ +#define qr(a_type) \ +struct { \ + a_type *qre_next; \ + a_type *qre_prev; \ +} + +/* Ring functions. */ +#define qr_new(a_qr, a_field) do { \ + (a_qr)->a_field.qre_next = (a_qr); \ + (a_qr)->a_field.qre_prev = (a_qr); \ +} while (0) + +#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next) + +#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev) + +#define qr_before_insert(a_qrelm, a_qr, a_field) do { \ + (a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev; \ + (a_qr)->a_field.qre_next = (a_qrelm); \ + (a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr); \ + (a_qrelm)->a_field.qre_prev = (a_qr); \ +} while (0) + +#define qr_after_insert(a_qrelm, a_qr, a_field) \ + do \ + { \ + (a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \ + (a_qr)->a_field.qre_prev = (a_qrelm); \ + (a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr); \ + (a_qrelm)->a_field.qre_next = (a_qr); \ + } while (0) + +#define qr_meld(a_qr_a, a_qr_b, a_field) do { \ + void *t; \ + (a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \ + (a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \ + t = (a_qr_a)->a_field.qre_prev; \ + (a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev; \ + (a_qr_b)->a_field.qre_prev = t; \ +} while (0) + +/* + * qr_meld() and qr_split() are functionally equivalent, so there's no need to + * have two copies of the code. 
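+ * The prev/next pointer exchange performed by qr_meld() joins two disjoint
+ * rings, or splits one ring in two when a_qr_a and a_qr_b already belong to
+ * the same ring.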
+ */
+#define qr_split(a_qr_a, a_qr_b, a_field) \
+	qr_meld((a_qr_a), (a_qr_b), a_field)
+
+#define qr_remove(a_qr, a_field) do { \
+	(a_qr)->a_field.qre_prev->a_field.qre_next \
+	    = (a_qr)->a_field.qre_next; \
+	(a_qr)->a_field.qre_next->a_field.qre_prev \
+	    = (a_qr)->a_field.qre_prev; \
+	(a_qr)->a_field.qre_next = (a_qr); \
+	(a_qr)->a_field.qre_prev = (a_qr); \
+} while (0)
+
+#define qr_foreach(var, a_qr, a_field) \
+	for ((var) = (a_qr); \
+	    (var) != NULL; \
+	    (var) = (((var)->a_field.qre_next != (a_qr)) \
+	    ? (var)->a_field.qre_next : NULL))
+
+#define qr_reverse_foreach(var, a_qr, a_field) \
+	for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \
+	    (var) != NULL; \
+	    (var) = (((var) != (a_qr)) \
+	    ? (var)->a_field.qre_prev : NULL))
diff --git a/sources/jemalloc/include-win/jemalloc/internal/quarantine.h b/sources/jemalloc/include-win/jemalloc/internal/quarantine.h
new file mode 100755
index 00000000..ae607399
--- /dev/null
+++ b/sources/jemalloc/include-win/jemalloc/internal/quarantine.h
@@ -0,0 +1,60 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+typedef struct quarantine_obj_s quarantine_obj_t;
+typedef struct quarantine_s quarantine_t;
+
+/* Default per thread quarantine size if valgrind is enabled. */
+#define JEMALLOC_VALGRIND_QUARANTINE_DEFAULT (ZU(1) << 24)
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+struct quarantine_obj_s {
+	void *ptr;
+	size_t usize;
+};
+
+struct quarantine_s {
+	size_t curbytes;
+	size_t curobjs;
+	size_t first;
+#define LG_MAXOBJS_INIT 10
+	size_t lg_maxobjs;
+	quarantine_obj_t objs[1]; /* Dynamically sized ring buffer. */
+};
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+void quarantine_alloc_hook_work(tsd_t *tsd);
+void quarantine(tsd_t *tsd, void *ptr);
+void quarantine_cleanup(tsd_t *tsd);
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#ifndef JEMALLOC_ENABLE_INLINE
+void quarantine_alloc_hook(void);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_QUARANTINE_C_))
+JEMALLOC_ALWAYS_INLINE void
+quarantine_alloc_hook(void)
+{
+	tsd_t *tsd;
+
+	assert(config_fill && opt_quarantine);
+
+	tsd = tsd_fetch();
+	if (tsd_quarantine_get(tsd) == NULL)
+		quarantine_alloc_hook_work(tsd);
+}
+#endif
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
+
diff --git a/sources/jemalloc/include-win/jemalloc/internal/rb.h b/sources/jemalloc/include-win/jemalloc/internal/rb.h
new file mode 100755
index 00000000..3770342f
--- /dev/null
+++ b/sources/jemalloc/include-win/jemalloc/internal/rb.h
@@ -0,0 +1,1003 @@
+/*-
+ *******************************************************************************
+ *
+ * cpp macro implementation of left-leaning 2-3 red-black trees.  Parent
+ * pointers are not used, and color bits are stored in the least significant
+ * bit of right-child pointers (if RB_COMPACT is defined), thus making node
+ * linkage as compact as is possible for red-black trees.
+ *
+ * Usage:
+ *
+ *   #include <stdbool.h>
+ *   #include <stddef.h>
+ *   #define NDEBUG // (Optional, see assert(3).)
+ *   #include <assert.h>
+ *   #define RB_COMPACT // (Optional, embed color bits in right-child pointers.)
+ *   #include <rb.h>
+ *   ...
+ * + ******************************************************************************* + */ + +#ifndef RB_H_ +#define RB_H_ + +#ifdef RB_COMPACT +/* Node structure. */ +#define rb_node(a_type) \ +struct { \ + a_type *rbn_left; \ + a_type *rbn_right_red; \ +} +#else +#define rb_node(a_type) \ +struct { \ + a_type *rbn_left; \ + a_type *rbn_right; \ + bool rbn_red; \ +} +#endif + +/* Root structure. */ +#define rb_tree(a_type) \ +struct { \ + a_type *rbt_root; \ +} + +/* Left accessors. */ +#define rbtn_left_get(a_type, a_field, a_node) \ + ((a_node)->a_field.rbn_left) +#define rbtn_left_set(a_type, a_field, a_node, a_left) do { \ + (a_node)->a_field.rbn_left = a_left; \ +} while (0) + +#ifdef RB_COMPACT +/* Right accessors. */ +#define rbtn_right_get(a_type, a_field, a_node) \ + ((a_type *) (((intptr_t) (a_node)->a_field.rbn_right_red) \ + & ((ssize_t)-2))) +#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \ + (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) a_right) \ + | (((uintptr_t) (a_node)->a_field.rbn_right_red) & ((size_t)1))); \ +} while (0) + +/* Color accessors. */ +#define rbtn_red_get(a_type, a_field, a_node) \ + ((bool) (((uintptr_t) (a_node)->a_field.rbn_right_red) \ + & ((size_t)1))) +#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \ + (a_node)->a_field.rbn_right_red = (a_type *) ((((intptr_t) \ + (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)) \ + | ((ssize_t)a_red)); \ +} while (0) +#define rbtn_red_set(a_type, a_field, a_node) do { \ + (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) \ + (a_node)->a_field.rbn_right_red) | ((size_t)1)); \ +} while (0) +#define rbtn_black_set(a_type, a_field, a_node) do { \ + (a_node)->a_field.rbn_right_red = (a_type *) (((intptr_t) \ + (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)); \ +} while (0) + +/* Node initializer. */ +#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \ + /* Bookkeeping bit cannot be used by node pointer. */ \ + assert(((uintptr_t)(a_node) & 0x1) == 0); \ + rbtn_left_set(a_type, a_field, (a_node), NULL); \ + rbtn_right_set(a_type, a_field, (a_node), NULL); \ + rbtn_red_set(a_type, a_field, (a_node)); \ +} while (0) +#else +/* Right accessors. */ +#define rbtn_right_get(a_type, a_field, a_node) \ + ((a_node)->a_field.rbn_right) +#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \ + (a_node)->a_field.rbn_right = a_right; \ +} while (0) + +/* Color accessors. */ +#define rbtn_red_get(a_type, a_field, a_node) \ + ((a_node)->a_field.rbn_red) +#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \ + (a_node)->a_field.rbn_red = (a_red); \ +} while (0) +#define rbtn_red_set(a_type, a_field, a_node) do { \ + (a_node)->a_field.rbn_red = true; \ +} while (0) +#define rbtn_black_set(a_type, a_field, a_node) do { \ + (a_node)->a_field.rbn_red = false; \ +} while (0) + +/* Node initializer. */ +#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \ + rbtn_left_set(a_type, a_field, (a_node), NULL); \ + rbtn_right_set(a_type, a_field, (a_node), NULL); \ + rbtn_red_set(a_type, a_field, (a_node)); \ +} while (0) +#endif + +/* Tree initializer. */ +#define rb_new(a_type, a_field, a_rbt) do { \ + (a_rbt)->rbt_root = NULL; \ +} while (0) + +/* Internal utility macros. 
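+ * rbtn_first()/rbtn_last() walk the left/right spine of a subtree;
+ * rbtn_rotate_left()/rbtn_rotate_right() perform single rotations and
+ * return the new subtree root via r_node.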
*/ +#define rbtn_first(a_type, a_field, a_rbt, a_root, r_node) do { \ + (r_node) = (a_root); \ + if ((r_node) != NULL) { \ + for (; \ + rbtn_left_get(a_type, a_field, (r_node)) != NULL; \ + (r_node) = rbtn_left_get(a_type, a_field, (r_node))) { \ + } \ + } \ +} while (0) + +#define rbtn_last(a_type, a_field, a_rbt, a_root, r_node) do { \ + (r_node) = (a_root); \ + if ((r_node) != NULL) { \ + for (; rbtn_right_get(a_type, a_field, (r_node)) != NULL; \ + (r_node) = rbtn_right_get(a_type, a_field, (r_node))) { \ + } \ + } \ +} while (0) + +#define rbtn_rotate_left(a_type, a_field, a_node, r_node) do { \ + (r_node) = rbtn_right_get(a_type, a_field, (a_node)); \ + rbtn_right_set(a_type, a_field, (a_node), \ + rbtn_left_get(a_type, a_field, (r_node))); \ + rbtn_left_set(a_type, a_field, (r_node), (a_node)); \ +} while (0) + +#define rbtn_rotate_right(a_type, a_field, a_node, r_node) do { \ + (r_node) = rbtn_left_get(a_type, a_field, (a_node)); \ + rbtn_left_set(a_type, a_field, (a_node), \ + rbtn_right_get(a_type, a_field, (r_node))); \ + rbtn_right_set(a_type, a_field, (r_node), (a_node)); \ +} while (0) + +/* + * The rb_proto() macro generates function prototypes that correspond to the + * functions generated by an equivalently parameterized call to rb_gen(). + */ + +#define rb_proto(a_attr, a_prefix, a_rbt_type, a_type) \ +a_attr void \ +a_prefix##new(a_rbt_type *rbtree); \ +a_attr bool \ +a_prefix##empty(a_rbt_type *rbtree); \ +a_attr a_type * \ +a_prefix##first(a_rbt_type *rbtree); \ +a_attr a_type * \ +a_prefix##last(a_rbt_type *rbtree); \ +a_attr a_type * \ +a_prefix##next(a_rbt_type *rbtree, a_type *node); \ +a_attr a_type * \ +a_prefix##prev(a_rbt_type *rbtree, a_type *node); \ +a_attr a_type * \ +a_prefix##search(a_rbt_type *rbtree, const a_type *key); \ +a_attr a_type * \ +a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key); \ +a_attr a_type * \ +a_prefix##psearch(a_rbt_type *rbtree, const a_type *key); \ +a_attr void \ +a_prefix##insert(a_rbt_type *rbtree, a_type *node); \ +a_attr void \ +a_prefix##remove(a_rbt_type *rbtree, a_type *node); \ +a_attr a_type * \ +a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \ + a_rbt_type *, a_type *, void *), void *arg); \ +a_attr a_type * \ +a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ + a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg); \ +a_attr void \ +a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \ + void *arg); + +/* + * The rb_gen() macro generates a type-specific red-black tree implementation, + * based on the above cpp macros. + * + * Arguments: + * + * a_attr : Function attribute for generated functions (ex: static). + * a_prefix : Prefix for generated functions (ex: ex_). + * a_rb_type : Type for red-black tree data structure (ex: ex_t). + * a_type : Type for red-black tree node data structure (ex: ex_node_t). + * a_field : Name of red-black tree node linkage (ex: ex_link). + * a_cmp : Node comparison function name, with the following prototype: + * int (a_cmp *)(a_type *a_node, a_type *a_other); + * ^^^^^^ + * or a_key + * Interpretation of comparison function return values: + * -1 : a_node < a_other + * 0 : a_node == a_other + * 1 : a_node > a_other + * In all cases, the a_node or a_key macro argument is the first + * argument to the comparison function, which makes it possible + * to write comparison functions that treat the first argument + * specially. 
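+ * For illustration, a conforming comparison on a hypothetical key field
+ * (using the example types defined below) might be written as:
+ *
+ *   static int
+ *   ex_cmp(ex_node_t *a, ex_node_t *b)
+ *   {
+ *       return ((a->key > b->key) - (a->key < b->key));
+ *   }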
+ * + * Assuming the following setup: + * + * typedef struct ex_node_s ex_node_t; + * struct ex_node_s { + * rb_node(ex_node_t) ex_link; + * }; + * typedef rb_tree(ex_node_t) ex_t; + * rb_gen(static, ex_, ex_t, ex_node_t, ex_link, ex_cmp) + * + * The following API is generated: + * + * static void + * ex_new(ex_t *tree); + * Description: Initialize a red-black tree structure. + * Args: + * tree: Pointer to an uninitialized red-black tree object. + * + * static bool + * ex_empty(ex_t *tree); + * Description: Determine whether tree is empty. + * Args: + * tree: Pointer to an initialized red-black tree object. + * Ret: True if tree is empty, false otherwise. + * + * static ex_node_t * + * ex_first(ex_t *tree); + * static ex_node_t * + * ex_last(ex_t *tree); + * Description: Get the first/last node in tree. + * Args: + * tree: Pointer to an initialized red-black tree object. + * Ret: First/last node in tree, or NULL if tree is empty. + * + * static ex_node_t * + * ex_next(ex_t *tree, ex_node_t *node); + * static ex_node_t * + * ex_prev(ex_t *tree, ex_node_t *node); + * Description: Get node's successor/predecessor. + * Args: + * tree: Pointer to an initialized red-black tree object. + * node: A node in tree. + * Ret: node's successor/predecessor in tree, or NULL if node is + * last/first. + * + * static ex_node_t * + * ex_search(ex_t *tree, const ex_node_t *key); + * Description: Search for node that matches key. + * Args: + * tree: Pointer to an initialized red-black tree object. + * key : Search key. + * Ret: Node in tree that matches key, or NULL if no match. + * + * static ex_node_t * + * ex_nsearch(ex_t *tree, const ex_node_t *key); + * static ex_node_t * + * ex_psearch(ex_t *tree, const ex_node_t *key); + * Description: Search for node that matches key. If no match is found, + * return what would be key's successor/predecessor, were + * key in tree. + * Args: + * tree: Pointer to an initialized red-black tree object. + * key : Search key. + * Ret: Node in tree that matches key, or if no match, hypothetical node's + * successor/predecessor (NULL if no successor/predecessor). + * + * static void + * ex_insert(ex_t *tree, ex_node_t *node); + * Description: Insert node into tree. + * Args: + * tree: Pointer to an initialized red-black tree object. + * node: Node to be inserted into tree. + * + * static void + * ex_remove(ex_t *tree, ex_node_t *node); + * Description: Remove node from tree. + * Args: + * tree: Pointer to an initialized red-black tree object. + * node: Node in tree to be removed. + * + * static ex_node_t * + * ex_iter(ex_t *tree, ex_node_t *start, ex_node_t *(*cb)(ex_t *, + * ex_node_t *, void *), void *arg); + * static ex_node_t * + * ex_reverse_iter(ex_t *tree, ex_node_t *start, ex_node *(*cb)(ex_t *, + * ex_node_t *, void *), void *arg); + * Description: Iterate forward/backward over tree, starting at node. If + * tree is modified, iteration must be immediately + * terminated by the callback function that causes the + * modification. + * Args: + * tree : Pointer to an initialized red-black tree object. + * start: Node at which to start iteration, or NULL to start at + * first/last node. + * cb : Callback function, which is called for each node during + * iteration. Under normal circumstances the callback function + * should return NULL, which causes iteration to continue. If a + * callback function returns non-NULL, iteration is immediately + * terminated and the non-NULL return value is returned by the + * iterator. 
This is useful for re-starting iteration after + * modifying tree. + * arg : Opaque pointer passed to cb(). + * Ret: NULL if iteration completed, or the non-NULL callback return value + * that caused termination of the iteration. + * + * static void + * ex_destroy(ex_t *tree, void (*cb)(ex_node_t *, void *), void *arg); + * Description: Iterate over the tree with post-order traversal, remove + * each node, and run the callback if non-null. This is + * used for destroying a tree without paying the cost to + * rebalance it. The tree must not be otherwise altered + * during traversal. + * Args: + * tree: Pointer to an initialized red-black tree object. + * cb : Callback function, which, if non-null, is called for each node + * during iteration. There is no way to stop iteration once it + * has begun. + * arg : Opaque pointer passed to cb(). + */ +#define rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp) \ +a_attr void \ +a_prefix##new(a_rbt_type *rbtree) { \ + rb_new(a_type, a_field, rbtree); \ +} \ +a_attr bool \ +a_prefix##empty(a_rbt_type *rbtree) { \ + return (rbtree->rbt_root == NULL); \ +} \ +a_attr a_type * \ +a_prefix##first(a_rbt_type *rbtree) { \ + a_type *ret; \ + rbtn_first(a_type, a_field, rbtree, rbtree->rbt_root, ret); \ + return (ret); \ +} \ +a_attr a_type * \ +a_prefix##last(a_rbt_type *rbtree) { \ + a_type *ret; \ + rbtn_last(a_type, a_field, rbtree, rbtree->rbt_root, ret); \ + return (ret); \ +} \ +a_attr a_type * \ +a_prefix##next(a_rbt_type *rbtree, a_type *node) { \ + a_type *ret; \ + if (rbtn_right_get(a_type, a_field, node) != NULL) { \ + rbtn_first(a_type, a_field, rbtree, rbtn_right_get(a_type, \ + a_field, node), ret); \ + } else { \ + a_type *tnode = rbtree->rbt_root; \ + assert(tnode != NULL); \ + ret = NULL; \ + while (true) { \ + int cmp = (a_cmp)(node, tnode); \ + if (cmp < 0) { \ + ret = tnode; \ + tnode = rbtn_left_get(a_type, a_field, tnode); \ + } else if (cmp > 0) { \ + tnode = rbtn_right_get(a_type, a_field, tnode); \ + } else { \ + break; \ + } \ + assert(tnode != NULL); \ + } \ + } \ + return (ret); \ +} \ +a_attr a_type * \ +a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \ + a_type *ret; \ + if (rbtn_left_get(a_type, a_field, node) != NULL) { \ + rbtn_last(a_type, a_field, rbtree, rbtn_left_get(a_type, \ + a_field, node), ret); \ + } else { \ + a_type *tnode = rbtree->rbt_root; \ + assert(tnode != NULL); \ + ret = NULL; \ + while (true) { \ + int cmp = (a_cmp)(node, tnode); \ + if (cmp < 0) { \ + tnode = rbtn_left_get(a_type, a_field, tnode); \ + } else if (cmp > 0) { \ + ret = tnode; \ + tnode = rbtn_right_get(a_type, a_field, tnode); \ + } else { \ + break; \ + } \ + assert(tnode != NULL); \ + } \ + } \ + return (ret); \ +} \ +a_attr a_type * \ +a_prefix##search(a_rbt_type *rbtree, const a_type *key) { \ + a_type *ret; \ + int cmp; \ + ret = rbtree->rbt_root; \ + while (ret != NULL \ + && (cmp = (a_cmp)(key, ret)) != 0) { \ + if (cmp < 0) { \ + ret = rbtn_left_get(a_type, a_field, ret); \ + } else { \ + ret = rbtn_right_get(a_type, a_field, ret); \ + } \ + } \ + return (ret); \ +} \ +a_attr a_type * \ +a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key) { \ + a_type *ret; \ + a_type *tnode = rbtree->rbt_root; \ + ret = NULL; \ + while (tnode != NULL) { \ + int cmp = (a_cmp)(key, tnode); \ + if (cmp < 0) { \ + ret = tnode; \ + tnode = rbtn_left_get(a_type, a_field, tnode); \ + } else if (cmp > 0) { \ + tnode = rbtn_right_get(a_type, a_field, tnode); \ + } else { \ + ret = tnode; \ + break; \ + } \ + } \ + return (ret); \ +} \ 
+a_attr a_type * \ +a_prefix##psearch(a_rbt_type *rbtree, const a_type *key) { \ + a_type *ret; \ + a_type *tnode = rbtree->rbt_root; \ + ret = NULL; \ + while (tnode != NULL) { \ + int cmp = (a_cmp)(key, tnode); \ + if (cmp < 0) { \ + tnode = rbtn_left_get(a_type, a_field, tnode); \ + } else if (cmp > 0) { \ + ret = tnode; \ + tnode = rbtn_right_get(a_type, a_field, tnode); \ + } else { \ + ret = tnode; \ + break; \ + } \ + } \ + return (ret); \ +} \ +a_attr void \ +a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \ + struct { \ + a_type *node; \ + int cmp; \ + } path[sizeof(void *) << 4], *pathp; \ + rbt_node_new(a_type, a_field, rbtree, node); \ + /* Wind. */ \ + path->node = rbtree->rbt_root; \ + for (pathp = path; pathp->node != NULL; pathp++) { \ + int cmp = pathp->cmp = a_cmp(node, pathp->node); \ + assert(cmp != 0); \ + if (cmp < 0) { \ + pathp[1].node = rbtn_left_get(a_type, a_field, \ + pathp->node); \ + } else { \ + pathp[1].node = rbtn_right_get(a_type, a_field, \ + pathp->node); \ + } \ + } \ + pathp->node = node; \ + /* Unwind. */ \ + for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \ + a_type *cnode = pathp->node; \ + if (pathp->cmp < 0) { \ + a_type *left = pathp[1].node; \ + rbtn_left_set(a_type, a_field, cnode, left); \ + if (rbtn_red_get(a_type, a_field, left)) { \ + a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ + if (leftleft != NULL && rbtn_red_get(a_type, a_field, \ + leftleft)) { \ + /* Fix up 4-node. */ \ + a_type *tnode; \ + rbtn_black_set(a_type, a_field, leftleft); \ + rbtn_rotate_right(a_type, a_field, cnode, tnode); \ + cnode = tnode; \ + } \ + } else { \ + return; \ + } \ + } else { \ + a_type *right = pathp[1].node; \ + rbtn_right_set(a_type, a_field, cnode, right); \ + if (rbtn_red_get(a_type, a_field, right)) { \ + a_type *left = rbtn_left_get(a_type, a_field, cnode); \ + if (left != NULL && rbtn_red_get(a_type, a_field, \ + left)) { \ + /* Split 4-node. */ \ + rbtn_black_set(a_type, a_field, left); \ + rbtn_black_set(a_type, a_field, right); \ + rbtn_red_set(a_type, a_field, cnode); \ + } else { \ + /* Lean left. */ \ + a_type *tnode; \ + bool tred = rbtn_red_get(a_type, a_field, cnode); \ + rbtn_rotate_left(a_type, a_field, cnode, tnode); \ + rbtn_color_set(a_type, a_field, tnode, tred); \ + rbtn_red_set(a_type, a_field, cnode); \ + cnode = tnode; \ + } \ + } else { \ + return; \ + } \ + } \ + pathp->node = cnode; \ + } \ + /* Set root, and make it black. */ \ + rbtree->rbt_root = path->node; \ + rbtn_black_set(a_type, a_field, rbtree->rbt_root); \ +} \ +a_attr void \ +a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ + struct { \ + a_type *node; \ + int cmp; \ + } *pathp, *nodep, path[sizeof(void *) << 4]; \ + /* Wind. */ \ + nodep = NULL; /* Silence compiler warning. */ \ + path->node = rbtree->rbt_root; \ + for (pathp = path; pathp->node != NULL; pathp++) { \ + int cmp = pathp->cmp = a_cmp(node, pathp->node); \ + if (cmp < 0) { \ + pathp[1].node = rbtn_left_get(a_type, a_field, \ + pathp->node); \ + } else { \ + pathp[1].node = rbtn_right_get(a_type, a_field, \ + pathp->node); \ + if (cmp == 0) { \ + /* Find node's successor, in preparation for swap. */ \ + pathp->cmp = 1; \ + nodep = pathp; \ + for (pathp++; pathp->node != NULL; \ + pathp++) { \ + pathp->cmp = -1; \ + pathp[1].node = rbtn_left_get(a_type, a_field, \ + pathp->node); \ + } \ + break; \ + } \ + } \ + } \ + assert(nodep->node == node); \ + pathp--; \ + if (pathp->node != node) { \ + /* Swap node with its successor. 
*/ \ + bool tred = rbtn_red_get(a_type, a_field, pathp->node); \ + rbtn_color_set(a_type, a_field, pathp->node, \ + rbtn_red_get(a_type, a_field, node)); \ + rbtn_left_set(a_type, a_field, pathp->node, \ + rbtn_left_get(a_type, a_field, node)); \ + /* If node's successor is its right child, the following code */\ + /* will do the wrong thing for the right child pointer. */\ + /* However, it doesn't matter, because the pointer will be */\ + /* properly set when the successor is pruned. */\ + rbtn_right_set(a_type, a_field, pathp->node, \ + rbtn_right_get(a_type, a_field, node)); \ + rbtn_color_set(a_type, a_field, node, tred); \ + /* The pruned leaf node's child pointers are never accessed */\ + /* again, so don't bother setting them to nil. */\ + nodep->node = pathp->node; \ + pathp->node = node; \ + if (nodep == path) { \ + rbtree->rbt_root = nodep->node; \ + } else { \ + if (nodep[-1].cmp < 0) { \ + rbtn_left_set(a_type, a_field, nodep[-1].node, \ + nodep->node); \ + } else { \ + rbtn_right_set(a_type, a_field, nodep[-1].node, \ + nodep->node); \ + } \ + } \ + } else { \ + a_type *left = rbtn_left_get(a_type, a_field, node); \ + if (left != NULL) { \ + /* node has no successor, but it has a left child. */\ + /* Splice node out, without losing the left child. */\ + assert(!rbtn_red_get(a_type, a_field, node)); \ + assert(rbtn_red_get(a_type, a_field, left)); \ + rbtn_black_set(a_type, a_field, left); \ + if (pathp == path) { \ + rbtree->rbt_root = left; \ + } else { \ + if (pathp[-1].cmp < 0) { \ + rbtn_left_set(a_type, a_field, pathp[-1].node, \ + left); \ + } else { \ + rbtn_right_set(a_type, a_field, pathp[-1].node, \ + left); \ + } \ + } \ + return; \ + } else if (pathp == path) { \ + /* The tree only contained one node. */ \ + rbtree->rbt_root = NULL; \ + return; \ + } \ + } \ + if (rbtn_red_get(a_type, a_field, pathp->node)) { \ + /* Prune red node, which requires no fixup. */ \ + assert(pathp[-1].cmp < 0); \ + rbtn_left_set(a_type, a_field, pathp[-1].node, NULL); \ + return; \ + } \ + /* The node to be pruned is black, so unwind until balance is */\ + /* restored. */\ + pathp->node = NULL; \ + for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \ + assert(pathp->cmp != 0); \ + if (pathp->cmp < 0) { \ + rbtn_left_set(a_type, a_field, pathp->node, \ + pathp[1].node); \ + if (rbtn_red_get(a_type, a_field, pathp->node)) { \ + a_type *right = rbtn_right_get(a_type, a_field, \ + pathp->node); \ + a_type *rightleft = rbtn_left_get(a_type, a_field, \ + right); \ + a_type *tnode; \ + if (rightleft != NULL && rbtn_red_get(a_type, a_field, \ + rightleft)) { \ + /* In the following diagrams, ||, //, and \\ */\ + /* indicate the path to the removed node. */\ + /* */\ + /* || */\ + /* pathp(r) */\ + /* // \ */\ + /* (b) (b) */\ + /* / */\ + /* (r) */\ + /* */\ + rbtn_black_set(a_type, a_field, pathp->node); \ + rbtn_rotate_right(a_type, a_field, right, tnode); \ + rbtn_right_set(a_type, a_field, pathp->node, tnode);\ + rbtn_rotate_left(a_type, a_field, pathp->node, \ + tnode); \ + } else { \ + /* || */\ + /* pathp(r) */\ + /* // \ */\ + /* (b) (b) */\ + /* / */\ + /* (b) */\ + /* */\ + rbtn_rotate_left(a_type, a_field, pathp->node, \ + tnode); \ + } \ + /* Balance restored, but rotation modified subtree */\ + /* root. 
*/\ + assert((uintptr_t)pathp > (uintptr_t)path); \ + if (pathp[-1].cmp < 0) { \ + rbtn_left_set(a_type, a_field, pathp[-1].node, \ + tnode); \ + } else { \ + rbtn_right_set(a_type, a_field, pathp[-1].node, \ + tnode); \ + } \ + return; \ + } else { \ + a_type *right = rbtn_right_get(a_type, a_field, \ + pathp->node); \ + a_type *rightleft = rbtn_left_get(a_type, a_field, \ + right); \ + if (rightleft != NULL && rbtn_red_get(a_type, a_field, \ + rightleft)) { \ + /* || */\ + /* pathp(b) */\ + /* // \ */\ + /* (b) (b) */\ + /* / */\ + /* (r) */\ + a_type *tnode; \ + rbtn_black_set(a_type, a_field, rightleft); \ + rbtn_rotate_right(a_type, a_field, right, tnode); \ + rbtn_right_set(a_type, a_field, pathp->node, tnode);\ + rbtn_rotate_left(a_type, a_field, pathp->node, \ + tnode); \ + /* Balance restored, but rotation modified */\ + /* subtree root, which may actually be the tree */\ + /* root. */\ + if (pathp == path) { \ + /* Set root. */ \ + rbtree->rbt_root = tnode; \ + } else { \ + if (pathp[-1].cmp < 0) { \ + rbtn_left_set(a_type, a_field, \ + pathp[-1].node, tnode); \ + } else { \ + rbtn_right_set(a_type, a_field, \ + pathp[-1].node, tnode); \ + } \ + } \ + return; \ + } else { \ + /* || */\ + /* pathp(b) */\ + /* // \ */\ + /* (b) (b) */\ + /* / */\ + /* (b) */\ + a_type *tnode; \ + rbtn_red_set(a_type, a_field, pathp->node); \ + rbtn_rotate_left(a_type, a_field, pathp->node, \ + tnode); \ + pathp->node = tnode; \ + } \ + } \ + } else { \ + a_type *left; \ + rbtn_right_set(a_type, a_field, pathp->node, \ + pathp[1].node); \ + left = rbtn_left_get(a_type, a_field, pathp->node); \ + if (rbtn_red_get(a_type, a_field, left)) { \ + a_type *tnode; \ + a_type *leftright = rbtn_right_get(a_type, a_field, \ + left); \ + a_type *leftrightleft = rbtn_left_get(a_type, a_field, \ + leftright); \ + if (leftrightleft != NULL && rbtn_red_get(a_type, \ + a_field, leftrightleft)) { \ + /* || */\ + /* pathp(b) */\ + /* / \\ */\ + /* (r) (b) */\ + /* \ */\ + /* (b) */\ + /* / */\ + /* (r) */\ + a_type *unode; \ + rbtn_black_set(a_type, a_field, leftrightleft); \ + rbtn_rotate_right(a_type, a_field, pathp->node, \ + unode); \ + rbtn_rotate_right(a_type, a_field, pathp->node, \ + tnode); \ + rbtn_right_set(a_type, a_field, unode, tnode); \ + rbtn_rotate_left(a_type, a_field, unode, tnode); \ + } else { \ + /* || */\ + /* pathp(b) */\ + /* / \\ */\ + /* (r) (b) */\ + /* \ */\ + /* (b) */\ + /* / */\ + /* (b) */\ + assert(leftright != NULL); \ + rbtn_red_set(a_type, a_field, leftright); \ + rbtn_rotate_right(a_type, a_field, pathp->node, \ + tnode); \ + rbtn_black_set(a_type, a_field, tnode); \ + } \ + /* Balance restored, but rotation modified subtree */\ + /* root, which may actually be the tree root. */\ + if (pathp == path) { \ + /* Set root. 
*/ \ + rbtree->rbt_root = tnode; \ + } else { \ + if (pathp[-1].cmp < 0) { \ + rbtn_left_set(a_type, a_field, pathp[-1].node, \ + tnode); \ + } else { \ + rbtn_right_set(a_type, a_field, pathp[-1].node, \ + tnode); \ + } \ + } \ + return; \ + } else if (rbtn_red_get(a_type, a_field, pathp->node)) { \ + a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ + if (leftleft != NULL && rbtn_red_get(a_type, a_field, \ + leftleft)) { \ + /* || */\ + /* pathp(r) */\ + /* / \\ */\ + /* (b) (b) */\ + /* / */\ + /* (r) */\ + a_type *tnode; \ + rbtn_black_set(a_type, a_field, pathp->node); \ + rbtn_red_set(a_type, a_field, left); \ + rbtn_black_set(a_type, a_field, leftleft); \ + rbtn_rotate_right(a_type, a_field, pathp->node, \ + tnode); \ + /* Balance restored, but rotation modified */\ + /* subtree root. */\ + assert((uintptr_t)pathp > (uintptr_t)path); \ + if (pathp[-1].cmp < 0) { \ + rbtn_left_set(a_type, a_field, pathp[-1].node, \ + tnode); \ + } else { \ + rbtn_right_set(a_type, a_field, pathp[-1].node, \ + tnode); \ + } \ + return; \ + } else { \ + /* || */\ + /* pathp(r) */\ + /* / \\ */\ + /* (b) (b) */\ + /* / */\ + /* (b) */\ + rbtn_red_set(a_type, a_field, left); \ + rbtn_black_set(a_type, a_field, pathp->node); \ + /* Balance restored. */ \ + return; \ + } \ + } else { \ + a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ + if (leftleft != NULL && rbtn_red_get(a_type, a_field, \ + leftleft)) { \ + /* || */\ + /* pathp(b) */\ + /* / \\ */\ + /* (b) (b) */\ + /* / */\ + /* (r) */\ + a_type *tnode; \ + rbtn_black_set(a_type, a_field, leftleft); \ + rbtn_rotate_right(a_type, a_field, pathp->node, \ + tnode); \ + /* Balance restored, but rotation modified */\ + /* subtree root, which may actually be the tree */\ + /* root. */\ + if (pathp == path) { \ + /* Set root. */ \ + rbtree->rbt_root = tnode; \ + } else { \ + if (pathp[-1].cmp < 0) { \ + rbtn_left_set(a_type, a_field, \ + pathp[-1].node, tnode); \ + } else { \ + rbtn_right_set(a_type, a_field, \ + pathp[-1].node, tnode); \ + } \ + } \ + return; \ + } else { \ + /* || */\ + /* pathp(b) */\ + /* / \\ */\ + /* (b) (b) */\ + /* / */\ + /* (b) */\ + rbtn_red_set(a_type, a_field, left); \ + } \ + } \ + } \ + } \ + /* Set root. 
*/ \ + rbtree->rbt_root = path->node; \ + assert(!rbtn_red_get(a_type, a_field, rbtree->rbt_root)); \ +} \ +a_attr a_type * \ +a_prefix##iter_recurse(a_rbt_type *rbtree, a_type *node, \ + a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ + if (node == NULL) { \ + return (NULL); \ + } else { \ + a_type *ret; \ + if ((ret = a_prefix##iter_recurse(rbtree, rbtn_left_get(a_type, \ + a_field, node), cb, arg)) != NULL || (ret = cb(rbtree, node, \ + arg)) != NULL) { \ + return (ret); \ + } \ + return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ + a_field, node), cb, arg)); \ + } \ +} \ +a_attr a_type * \ +a_prefix##iter_start(a_rbt_type *rbtree, a_type *start, a_type *node, \ + a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ + int cmp = a_cmp(start, node); \ + if (cmp < 0) { \ + a_type *ret; \ + if ((ret = a_prefix##iter_start(rbtree, start, \ + rbtn_left_get(a_type, a_field, node), cb, arg)) != NULL || \ + (ret = cb(rbtree, node, arg)) != NULL) { \ + return (ret); \ + } \ + return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ + a_field, node), cb, arg)); \ + } else if (cmp > 0) { \ + return (a_prefix##iter_start(rbtree, start, \ + rbtn_right_get(a_type, a_field, node), cb, arg)); \ + } else { \ + a_type *ret; \ + if ((ret = cb(rbtree, node, arg)) != NULL) { \ + return (ret); \ + } \ + return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ + a_field, node), cb, arg)); \ + } \ +} \ +a_attr a_type * \ +a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \ + a_rbt_type *, a_type *, void *), void *arg) { \ + a_type *ret; \ + if (start != NULL) { \ + ret = a_prefix##iter_start(rbtree, start, rbtree->rbt_root, \ + cb, arg); \ + } else { \ + ret = a_prefix##iter_recurse(rbtree, rbtree->rbt_root, cb, arg);\ + } \ + return (ret); \ +} \ +a_attr a_type * \ +a_prefix##reverse_iter_recurse(a_rbt_type *rbtree, a_type *node, \ + a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ + if (node == NULL) { \ + return (NULL); \ + } else { \ + a_type *ret; \ + if ((ret = a_prefix##reverse_iter_recurse(rbtree, \ + rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \ + (ret = cb(rbtree, node, arg)) != NULL) { \ + return (ret); \ + } \ + return (a_prefix##reverse_iter_recurse(rbtree, \ + rbtn_left_get(a_type, a_field, node), cb, arg)); \ + } \ +} \ +a_attr a_type * \ +a_prefix##reverse_iter_start(a_rbt_type *rbtree, a_type *start, \ + a_type *node, a_type *(*cb)(a_rbt_type *, a_type *, void *), \ + void *arg) { \ + int cmp = a_cmp(start, node); \ + if (cmp > 0) { \ + a_type *ret; \ + if ((ret = a_prefix##reverse_iter_start(rbtree, start, \ + rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \ + (ret = cb(rbtree, node, arg)) != NULL) { \ + return (ret); \ + } \ + return (a_prefix##reverse_iter_recurse(rbtree, \ + rbtn_left_get(a_type, a_field, node), cb, arg)); \ + } else if (cmp < 0) { \ + return (a_prefix##reverse_iter_start(rbtree, start, \ + rbtn_left_get(a_type, a_field, node), cb, arg)); \ + } else { \ + a_type *ret; \ + if ((ret = cb(rbtree, node, arg)) != NULL) { \ + return (ret); \ + } \ + return (a_prefix##reverse_iter_recurse(rbtree, \ + rbtn_left_get(a_type, a_field, node), cb, arg)); \ + } \ +} \ +a_attr a_type * \ +a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ + a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ + a_type *ret; \ + if (start != NULL) { \ + ret = a_prefix##reverse_iter_start(rbtree, start, \ + rbtree->rbt_root, cb, arg); \ + } else { \ + ret = 
a_prefix##reverse_iter_recurse(rbtree, rbtree->rbt_root, \ + cb, arg); \ + } \ + return (ret); \ +} \ +a_attr void \ +a_prefix##destroy_recurse(a_rbt_type *rbtree, a_type *node, void (*cb)( \ + a_type *, void *), void *arg) { \ + if (node == NULL) { \ + return; \ + } \ + a_prefix##destroy_recurse(rbtree, rbtn_left_get(a_type, a_field, \ + node), cb, arg); \ + rbtn_left_set(a_type, a_field, (node), NULL); \ + a_prefix##destroy_recurse(rbtree, rbtn_right_get(a_type, a_field, \ + node), cb, arg); \ + rbtn_right_set(a_type, a_field, (node), NULL); \ + if (cb) { \ + cb(node, arg); \ + } \ +} \ +a_attr void \ +a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \ + void *arg) { \ + a_prefix##destroy_recurse(rbtree, rbtree->rbt_root, cb, arg); \ + rbtree->rbt_root = NULL; \ +} + +#endif /* RB_H_ */ diff --git a/sources/jemalloc/include-win/jemalloc/internal/rtree.h b/sources/jemalloc/include-win/jemalloc/internal/rtree.h new file mode 100755 index 00000000..8d0c584d --- /dev/null +++ b/sources/jemalloc/include-win/jemalloc/internal/rtree.h @@ -0,0 +1,366 @@ +/* + * This radix tree implementation is tailored to the singular purpose of + * associating metadata with chunks that are currently owned by jemalloc. + * + ******************************************************************************* + */ +#ifdef JEMALLOC_H_TYPES + +typedef struct rtree_node_elm_s rtree_node_elm_t; +typedef struct rtree_level_s rtree_level_t; +typedef struct rtree_s rtree_t; + +/* + * RTREE_BITS_PER_LEVEL must be a power of two that is no larger than the + * machine address width. + */ +#define LG_RTREE_BITS_PER_LEVEL 4 +#define RTREE_BITS_PER_LEVEL (1U << LG_RTREE_BITS_PER_LEVEL) +/* Maximum rtree height. */ +#define RTREE_HEIGHT_MAX \ + ((1U << (LG_SIZEOF_PTR+3)) / RTREE_BITS_PER_LEVEL) + +/* Used for two-stage lock-free node initialization. */ +#define RTREE_NODE_INITIALIZING ((rtree_node_elm_t *)0x1) + +/* + * The node allocation callback function's argument is the number of contiguous + * rtree_node_elm_t structures to allocate, and the resulting memory must be + * zeroed. + */ +typedef rtree_node_elm_t *(rtree_node_alloc_t)(size_t); +typedef void (rtree_node_dalloc_t)(rtree_node_elm_t *); + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +struct rtree_node_elm_s { + union { + void *pun; + rtree_node_elm_t *child; + extent_node_t *val; + }; +}; + +struct rtree_level_s { + /* + * A non-NULL subtree points to a subtree rooted along the hypothetical + * path to the leaf node corresponding to key 0. Depending on what keys + * have been used to store to the tree, an arbitrary combination of + * subtree pointers may remain NULL. + * + * Suppose keys comprise 48 bits, and LG_RTREE_BITS_PER_LEVEL is 4. + * This results in a 3-level tree, and the leftmost leaf can be directly + * accessed via subtrees[2], the subtree prefixed by 0x0000 (excluding + * 0x00000000) can be accessed via subtrees[1], and the remainder of the + * tree can be accessed via subtrees[0]. + * + * levels[0] : [ | 0x0001******** | 0x0002******** | ...] + * + * levels[1] : [ | 0x00000001**** | 0x00000002**** | ... ] + * + * levels[2] : [val(0x000000000000) | val(0x000000000001) | ...] + * + * This has practical implications on x64, which currently uses only the + * lower 47 bits of virtual address space in userland, thus leaving + * subtrees[0] unused and avoiding a level of tree traversal. 
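+	 *
+	 * As a concrete illustration (hypothetical key, annotation only):
+	 * for key 0x000012345678 the leading 16 bits are zero, so a lookup
+	 * can start at subtrees[1]; subkey 0x1234 then selects the level-1
+	 * child, and subkey 0x5678 selects the leaf element that holds
+	 * val(0x000012345678).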
+ */ + union { + void *subtree_pun; + rtree_node_elm_t *subtree; + }; + /* Number of key bits distinguished by this level. */ + unsigned bits; + /* + * Cumulative number of key bits distinguished by traversing to + * corresponding tree level. + */ + unsigned cumbits; +}; + +struct rtree_s { + rtree_node_alloc_t *alloc; + rtree_node_dalloc_t *dalloc; + unsigned height; + /* + * Precomputed table used to convert from the number of leading 0 key + * bits to which subtree level to start at. + */ + unsigned start_level[RTREE_HEIGHT_MAX]; + rtree_level_t levels[RTREE_HEIGHT_MAX]; +}; + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +bool rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc, + rtree_node_dalloc_t *dalloc); +void rtree_delete(rtree_t *rtree); +rtree_node_elm_t *rtree_subtree_read_hard(rtree_t *rtree, + unsigned level); +rtree_node_elm_t *rtree_child_read_hard(rtree_t *rtree, + rtree_node_elm_t *elm, unsigned level); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +unsigned rtree_start_level(rtree_t *rtree, uintptr_t key); +uintptr_t rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level); + +bool rtree_node_valid(rtree_node_elm_t *node); +rtree_node_elm_t *rtree_child_tryread(rtree_node_elm_t *elm, + bool dependent); +rtree_node_elm_t *rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm, + unsigned level, bool dependent); +extent_node_t *rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm, + bool dependent); +void rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm, + const extent_node_t *val); +rtree_node_elm_t *rtree_subtree_tryread(rtree_t *rtree, unsigned level, + bool dependent); +rtree_node_elm_t *rtree_subtree_read(rtree_t *rtree, unsigned level, + bool dependent); + +extent_node_t *rtree_get(rtree_t *rtree, uintptr_t key, bool dependent); +bool rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_)) +JEMALLOC_ALWAYS_INLINE unsigned +rtree_start_level(rtree_t *rtree, uintptr_t key) +{ + unsigned start_level; + + if (unlikely(key == 0)) + return (rtree->height - 1); + + start_level = rtree->start_level[lg_floor(key) >> + LG_RTREE_BITS_PER_LEVEL]; + assert(start_level < rtree->height); + return (start_level); +} + +JEMALLOC_ALWAYS_INLINE uintptr_t +rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level) +{ + + return ((key >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - + rtree->levels[level].cumbits)) & ((ZU(1) << + rtree->levels[level].bits) - 1)); +} + +JEMALLOC_ALWAYS_INLINE bool +rtree_node_valid(rtree_node_elm_t *node) +{ + + return ((uintptr_t)node > (uintptr_t)RTREE_NODE_INITIALIZING); +} + +JEMALLOC_ALWAYS_INLINE rtree_node_elm_t * +rtree_child_tryread(rtree_node_elm_t *elm, bool dependent) +{ + rtree_node_elm_t *child; + + /* Double-checked read (first read may be stale. 
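+	 * That is, the first, unsynchronized read below may observe a stale
+	 * value; only when it does not look like a valid node is it re-read
+	 * with an atomic read.  Dependent lookups can trust the first read,
+	 * for the reasons given in rtree_val_read().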
*/ + child = elm->child; + if (!dependent && !rtree_node_valid(child)) + child = atomic_read_p(&elm->pun); + assert(!dependent || child != NULL); + return (child); +} + +JEMALLOC_ALWAYS_INLINE rtree_node_elm_t * +rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level, + bool dependent) +{ + rtree_node_elm_t *child; + + child = rtree_child_tryread(elm, dependent); + if (!dependent && unlikely(!rtree_node_valid(child))) + child = rtree_child_read_hard(rtree, elm, level); + assert(!dependent || child != NULL); + return (child); +} + +JEMALLOC_ALWAYS_INLINE extent_node_t * +rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm, bool dependent) +{ + + if (dependent) { + /* + * Reading a val on behalf of a pointer to a valid allocation is + * guaranteed to be a clean read even without synchronization, + * because the rtree update became visible in memory before the + * pointer came into existence. + */ + return (elm->val); + } else { + /* + * An arbitrary read, e.g. on behalf of ivsalloc(), may not be + * dependent on a previous rtree write, which means a stale read + * could result if synchronization were omitted here. + */ + return (atomic_read_p(&elm->pun)); + } +} + +JEMALLOC_INLINE void +rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm, const extent_node_t *val) +{ + + atomic_write_p(&elm->pun, val); +} + +JEMALLOC_ALWAYS_INLINE rtree_node_elm_t * +rtree_subtree_tryread(rtree_t *rtree, unsigned level, bool dependent) +{ + rtree_node_elm_t *subtree; + + /* Double-checked read (first read may be stale. */ + subtree = rtree->levels[level].subtree; + if (!dependent && unlikely(!rtree_node_valid(subtree))) + subtree = atomic_read_p(&rtree->levels[level].subtree_pun); + assert(!dependent || subtree != NULL); + return (subtree); +} + +JEMALLOC_ALWAYS_INLINE rtree_node_elm_t * +rtree_subtree_read(rtree_t *rtree, unsigned level, bool dependent) +{ + rtree_node_elm_t *subtree; + + subtree = rtree_subtree_tryread(rtree, level, dependent); + if (!dependent && unlikely(!rtree_node_valid(subtree))) + subtree = rtree_subtree_read_hard(rtree, level); + assert(!dependent || subtree != NULL); + return (subtree); +} + +JEMALLOC_ALWAYS_INLINE extent_node_t * +rtree_get(rtree_t *rtree, uintptr_t key, bool dependent) +{ + uintptr_t subkey; + unsigned start_level; + rtree_node_elm_t *node; + + start_level = rtree_start_level(rtree, key); + + node = rtree_subtree_tryread(rtree, start_level, dependent); +#define RTREE_GET_BIAS (RTREE_HEIGHT_MAX - rtree->height) + switch (start_level + RTREE_GET_BIAS) { +#define RTREE_GET_SUBTREE(level) \ + case level: \ + assert(level < (RTREE_HEIGHT_MAX-1)); \ + if (!dependent && unlikely(!rtree_node_valid(node))) \ + return (NULL); \ + subkey = rtree_subkey(rtree, key, level - \ + RTREE_GET_BIAS); \ + node = rtree_child_tryread(&node[subkey], dependent); \ + /* Fall through. */ +#define RTREE_GET_LEAF(level) \ + case level: \ + assert(level == (RTREE_HEIGHT_MAX-1)); \ + if (!dependent && unlikely(!rtree_node_valid(node))) \ + return (NULL); \ + subkey = rtree_subkey(rtree, key, level - \ + RTREE_GET_BIAS); \ + /* \ + * node is a leaf, so it contains values rather than \ + * child pointers. 
\ + */ \ + return (rtree_val_read(rtree, &node[subkey], \ + dependent)); +#if RTREE_HEIGHT_MAX > 1 + RTREE_GET_SUBTREE(0) +#endif +#if RTREE_HEIGHT_MAX > 2 + RTREE_GET_SUBTREE(1) +#endif +#if RTREE_HEIGHT_MAX > 3 + RTREE_GET_SUBTREE(2) +#endif +#if RTREE_HEIGHT_MAX > 4 + RTREE_GET_SUBTREE(3) +#endif +#if RTREE_HEIGHT_MAX > 5 + RTREE_GET_SUBTREE(4) +#endif +#if RTREE_HEIGHT_MAX > 6 + RTREE_GET_SUBTREE(5) +#endif +#if RTREE_HEIGHT_MAX > 7 + RTREE_GET_SUBTREE(6) +#endif +#if RTREE_HEIGHT_MAX > 8 + RTREE_GET_SUBTREE(7) +#endif +#if RTREE_HEIGHT_MAX > 9 + RTREE_GET_SUBTREE(8) +#endif +#if RTREE_HEIGHT_MAX > 10 + RTREE_GET_SUBTREE(9) +#endif +#if RTREE_HEIGHT_MAX > 11 + RTREE_GET_SUBTREE(10) +#endif +#if RTREE_HEIGHT_MAX > 12 + RTREE_GET_SUBTREE(11) +#endif +#if RTREE_HEIGHT_MAX > 13 + RTREE_GET_SUBTREE(12) +#endif +#if RTREE_HEIGHT_MAX > 14 + RTREE_GET_SUBTREE(13) +#endif +#if RTREE_HEIGHT_MAX > 15 + RTREE_GET_SUBTREE(14) +#endif +#if RTREE_HEIGHT_MAX > 16 +# error Unsupported RTREE_HEIGHT_MAX +#endif + RTREE_GET_LEAF(RTREE_HEIGHT_MAX-1) +#undef RTREE_GET_SUBTREE +#undef RTREE_GET_LEAF + default: not_reached(); + } +#undef RTREE_GET_BIAS + not_reached(); +} + +JEMALLOC_INLINE bool +rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val) +{ + uintptr_t subkey; + unsigned i, start_level; + rtree_node_elm_t *node, *child; + + start_level = rtree_start_level(rtree, key); + + node = rtree_subtree_read(rtree, start_level, false); + if (node == NULL) + return (true); + for (i = start_level; /**/; i++, node = child) { + subkey = rtree_subkey(rtree, key, i); + if (i == rtree->height - 1) { + /* + * node is a leaf, so it contains values rather than + * child pointers. + */ + rtree_val_write(rtree, &node[subkey], val); + return (false); + } + assert(i + 1 < rtree->height); + child = rtree_child_read(rtree, &node[subkey], i, false); + if (child == NULL) + return (true); + } + not_reached(); +} +#endif + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ diff --git a/sources/jemalloc/include-win/jemalloc/internal/size_classes.h b/sources/jemalloc/include-win/jemalloc/internal/size_classes.h new file mode 100755 index 00000000..e4edc4bc --- /dev/null +++ b/sources/jemalloc/include-win/jemalloc/internal/size_classes.h @@ -0,0 +1,1428 @@ +/* This file was automatically generated by size_classes.sh. */ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +/* + * This header requires LG_SIZEOF_PTR, LG_TINY_MIN, LG_QUANTUM, and LG_PAGE to + * be defined prior to inclusion, and it in turn defines: + * + * LG_SIZE_CLASS_GROUP: Lg of size class count for each size doubling. + * SIZE_CLASSES: Complete table of SC(index, lg_grp, lg_delta, ndelta, psz, + * bin, lg_delta_lookup) tuples. + * index: Size class index. + * lg_grp: Lg group base size (no deltas added). + * lg_delta: Lg delta to previous size class. + * ndelta: Delta multiplier. 
size == 1<<lg_grp + ndelta<<lg_delta
+ *   psz: 'yes' if a multiple of the page size, 'no' otherwise.
+ *   bin: 'yes' if a small bin size class, 'no' otherwise.
+ *   lg_delta_lookup: Same as lg_delta if a lookup table size class, 'no'
+ *                    otherwise.
+ * - SIZE_CLASSES_DEFINED: Immediately after the SIZE_CLASSES definition.
+ * - NTBINS: Number of tiny bins.
+ * - NLBINS: Number of bins supported by the lookup table.
+ * - NBINS: Number of small size class bins.
+ * - NSIZES: Number of size classes.
+ * - NPSIZES: Number of size classes that are a multiple of (1U << LG_PAGE).
+ * - LG_TINY_MAXCLASS: Lg of maximum tiny size class.
+ * - LOOKUP_MAXCLASS: Maximum size class included in lookup table.
+ * - SMALL_MAXCLASS: Maximum small size class.
+ * - LG_LARGE_MINCLASS: Lg of minimum large size class.
+ * - HUGE_MAXCLASS: Maximum (huge) size class.
+ */
+
+#if (NBINS > 255)
+# error "Too many small size classes"
+#endif
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
diff --git a/sources/jemalloc/include-win/jemalloc/internal/size_classes.sh b/sources/jemalloc/include-win/jemalloc/internal/size_classes.sh
new file mode 100755
index 00000000..f6fbce4e
--- /dev/null
+++ b/sources/jemalloc/include-win/jemalloc/internal/size_classes.sh
@@ -0,0 +1,318 @@
+#!/bin/sh
+#
+# Usage: size_classes.sh <lg_qarr> <lg_tmin> <lg_parr> <lg_g>
+
+# The following limits are chosen such that they cover all supported platforms.
+
+# Pointer sizes.
+lg_zarr="2 3"
+
+# Quanta.
+lg_qarr=$1
+
+# The range of tiny size classes is [2^lg_tmin..2^(lg_q-1)].
+lg_tmin=$2
+
+# Maximum lookup size.
+lg_kmax=12
+
+# Page sizes.
+lg_parr=`echo $3 | tr ',' ' '`
+
+# Size class group size (number of size classes for each size doubling).
+lg_g=$4
+
+pow2() {
+  e=$1
+  pow2_result=1
+  while [ ${e} -gt 0 ] ; do
+    pow2_result=$((${pow2_result} + ${pow2_result}))
+    e=$((${e} - 1))
+  done
+}
+
+lg() {
+  x=$1
+  lg_result=0
+  while [ ${x} -gt 1 ] ; do
+    lg_result=$((${lg_result} + 1))
+    x=$((${x} / 2))
+  done
+}
+
+size_class() {
+  index=$1
+  lg_grp=$2
+  lg_delta=$3
+  ndelta=$4
+  lg_p=$5
+  lg_kmax=$6
+
+  if [ ${lg_delta} -ge ${lg_p} ] ; then
+    psz="yes"
+  else
+    pow2 ${lg_p}; p=${pow2_result}
+    pow2 ${lg_grp}; grp=${pow2_result}
+    pow2 ${lg_delta}; delta=${pow2_result}
+    sz=$((${grp} + ${delta} * ${ndelta}))
+    npgs=$((${sz} / ${p}))
+    if [ ${sz} -eq $((${npgs} * ${p})) ] ; then
+      psz="yes"
+    else
+      psz="no"
+    fi
+  fi
+
+  lg ${ndelta}; lg_ndelta=${lg_result}; pow2 ${lg_ndelta}
+  if [ ${pow2_result} -lt ${ndelta} ] ; then
+    rem="yes"
+  else
+    rem="no"
+  fi
+
+  lg_size=${lg_grp}
+  if [ $((${lg_delta} + ${lg_ndelta})) -eq ${lg_grp} ] ; then
+    lg_size=$((${lg_grp} + 1))
+  else
+    lg_size=${lg_grp}
+    rem="yes"
+  fi
+
+  if [ ${lg_size} -lt $((${lg_p} + ${lg_g})) ] ; then
+    bin="yes"
+  else
+    bin="no"
+  fi
+  if [ ${lg_size} -lt ${lg_kmax} \
+      -o ${lg_size} -eq ${lg_kmax} -a ${rem} = "no" ] ; then
+    lg_delta_lookup=${lg_delta}
+  else
+    lg_delta_lookup="no"
+  fi
+  printf ' SC(%3d, %6d, %8d, %6d, %3s, %3s, %2s) \\\n' ${index} ${lg_grp} ${lg_delta} ${ndelta} ${psz} ${bin} ${lg_delta_lookup}
+  # Defined upon return:
+  # - psz ("yes" or "no")
+  # - bin ("yes" or "no")
+  # - lg_delta_lookup (${lg_delta} or "no")
+}
+
+sep_line() {
+  echo " \\"
+}
+
+size_classes() {
+  lg_z=$1
+  lg_q=$2
+  lg_t=$3
+  lg_p=$4
+  lg_g=$5
+
+  pow2 $((${lg_z} + 3)); ptr_bits=${pow2_result}
+  pow2 ${lg_g}; g=${pow2_result}
+
+  echo "#define SIZE_CLASSES \\"
+  echo " /* index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup */ \\"
+
+  ntbins=0
+  nlbins=0
+  lg_tiny_maxclass='"NA"'
+  nbins=0
+  npsizes=0
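+
+  # Worked example of size_class() (hypothetical inputs; annotation, not
+  # part of the original script): lg_grp=13, lg_delta=11, ndelta=3 and
+  # lg_p=12 describe the class 2^13 + 3*2^11 = 14336 bytes; 14336 is not a
+  # multiple of the 4096-byte page, so psz="no", while lg_size=13 is less
+  # than lg_p+lg_g=14 (taking lg_g=2), so bin="yes".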
+
+  # Tiny size classes.
+  ndelta=0
+  index=0
+  lg_grp=${lg_t}
+  lg_delta=${lg_grp}
+  while [ ${lg_grp} -lt ${lg_q} ] ; do
+    size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax}
+    if [ ${lg_delta_lookup} != "no" ] ; then
+      nlbins=$((${index} + 1))
+    fi
+    if [ ${psz} = "yes" ] ; then
+      npsizes=$((${npsizes} + 1))
+    fi
+    if [ ${bin} != "no" ] ; then
+      nbins=$((${index} + 1))
+    fi
+    ntbins=$((${ntbins} + 1))
+    lg_tiny_maxclass=${lg_grp} # Final written value is correct.
+    index=$((${index} + 1))
+    lg_delta=${lg_grp}
+    lg_grp=$((${lg_grp} + 1))
+  done
+
+  # First non-tiny group.
+  if [ ${ntbins} -gt 0 ] ; then
+    sep_line
+    # The first size class has an unusual encoding, because the size has to be
+    # split between grp and delta*ndelta.
+    lg_grp=$((${lg_grp} - 1))
+    ndelta=1
+    size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax}
+    index=$((${index} + 1))
+    lg_grp=$((${lg_grp} + 1))
+    lg_delta=$((${lg_delta} + 1))
+    if [ ${psz} = "yes" ] ; then
+      npsizes=$((${npsizes} + 1))
+    fi
+  fi
+  while [ ${ndelta} -lt ${g} ] ; do
+    size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax}
+    index=$((${index} + 1))
+    ndelta=$((${ndelta} + 1))
+    if [ ${psz} = "yes" ] ; then
+      npsizes=$((${npsizes} + 1))
+    fi
+  done
+
+  # All remaining groups.
+  lg_grp=$((${lg_grp} + ${lg_g}))
+  while [ ${lg_grp} -lt $((${ptr_bits} - 1)) ] ; do
+    sep_line
+    ndelta=1
+    if [ ${lg_grp} -eq $((${ptr_bits} - 2)) ] ; then
+      ndelta_limit=$((${g} - 1))
+    else
+      ndelta_limit=${g}
+    fi
+    while [ ${ndelta} -le ${ndelta_limit} ] ; do
+      size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax}
+      if [ ${lg_delta_lookup} != "no" ] ; then
+        nlbins=$((${index} + 1))
+        # Final written value is correct:
+        lookup_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))"
+      fi
+      if [ ${psz} = "yes" ] ; then
+        npsizes=$((${npsizes} + 1))
+      fi
+      if [ ${bin} != "no" ] ; then
+        nbins=$((${index} + 1))
+        # Final written value is correct:
+        small_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))"
+        if [ ${lg_g} -gt 0 ] ; then
+          lg_large_minclass=$((${lg_grp} + 1))
+        else
+          lg_large_minclass=$((${lg_grp} + 2))
+        fi
+      fi
+      # Final written value is correct:
+      huge_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))"
+      index=$((${index} + 1))
+      ndelta=$((${ndelta} + 1))
+    done
+    lg_grp=$((${lg_grp} + 1))
+    lg_delta=$((${lg_delta} + 1))
+  done
+  echo
+  nsizes=${index}
+
+  # Defined upon completion:
+  # - ntbins
+  # - nlbins
+  # - nbins
+  # - nsizes
+  # - npsizes
+  # - lg_tiny_maxclass
+  # - lookup_maxclass
+  # - small_maxclass
+  # - lg_large_minclass
+  # - huge_maxclass
+}
+
+cat <<EOF
+#if (NBINS > 255)
+# error "Too many small size classes"
+#endif
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
+EOF
diff --git a/sources/jemalloc/include-win/jemalloc/internal/smoothstep.h b/sources/jemalloc/include-win/jemalloc/internal/smoothstep.h
new file mode 100755
index 00000000..c5333cca
--- /dev/null
+++ b/sources/jemalloc/include-win/jemalloc/internal/smoothstep.h
@@ -0,0 
+1,246 @@ +/* + * This file was generated by the following command: + * sh smoothstep.sh smoother 200 24 3 15 + */ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +/* + * This header defines a precomputed table based on the smoothstep family of + * sigmoidal curves (https://en.wikipedia.org/wiki/Smoothstep) that grow from 0 + * to 1 in 0 <= x <= 1. The table is stored as integer fixed point values so + * that floating point math can be avoided. + * + * 3 2 + * smoothstep(x) = -2x + 3x + * + * 5 4 3 + * smootherstep(x) = 6x - 15x + 10x + * + * 7 6 5 4 + * smootheststep(x) = -20x + 70x - 84x + 35x + */ + +#define SMOOTHSTEP_VARIANT "smoother" +#define SMOOTHSTEP_NSTEPS 200 +#define SMOOTHSTEP_BFP 24 +#define SMOOTHSTEP \ + /* STEP(step, h, x, y) */ \ + STEP( 1, UINT64_C(0x0000000000000014), 0.005, 0.000001240643750) \ + STEP( 2, UINT64_C(0x00000000000000a5), 0.010, 0.000009850600000) \ + STEP( 3, UINT64_C(0x0000000000000229), 0.015, 0.000032995181250) \ + STEP( 4, UINT64_C(0x0000000000000516), 0.020, 0.000077619200000) \ + STEP( 5, UINT64_C(0x00000000000009dc), 0.025, 0.000150449218750) \ + STEP( 6, UINT64_C(0x00000000000010e8), 0.030, 0.000257995800000) \ + STEP( 7, UINT64_C(0x0000000000001aa4), 0.035, 0.000406555756250) \ + STEP( 8, UINT64_C(0x0000000000002777), 0.040, 0.000602214400000) \ + STEP( 9, UINT64_C(0x00000000000037c2), 0.045, 0.000850847793750) \ + STEP( 10, UINT64_C(0x0000000000004be6), 0.050, 0.001158125000000) \ + STEP( 11, UINT64_C(0x000000000000643c), 0.055, 0.001529510331250) \ + STEP( 12, UINT64_C(0x000000000000811f), 0.060, 0.001970265600000) \ + STEP( 13, UINT64_C(0x000000000000a2e2), 0.065, 0.002485452368750) \ + STEP( 14, UINT64_C(0x000000000000c9d8), 0.070, 0.003079934200000) \ + STEP( 15, UINT64_C(0x000000000000f64f), 0.075, 0.003758378906250) \ + STEP( 16, UINT64_C(0x0000000000012891), 0.080, 0.004525260800000) \ + STEP( 17, UINT64_C(0x00000000000160e7), 0.085, 0.005384862943750) \ + STEP( 18, UINT64_C(0x0000000000019f95), 0.090, 0.006341279400000) \ + STEP( 19, UINT64_C(0x000000000001e4dc), 0.095, 0.007398417481250) \ + STEP( 20, UINT64_C(0x00000000000230fc), 0.100, 0.008560000000000) \ + STEP( 21, UINT64_C(0x0000000000028430), 0.105, 0.009829567518750) \ + STEP( 22, UINT64_C(0x000000000002deb0), 0.110, 0.011210480600000) \ + STEP( 23, UINT64_C(0x00000000000340b1), 0.115, 0.012705922056250) \ + STEP( 24, UINT64_C(0x000000000003aa67), 0.120, 0.014318899200000) \ + STEP( 25, UINT64_C(0x0000000000041c00), 0.125, 0.016052246093750) \ + STEP( 26, UINT64_C(0x00000000000495a8), 0.130, 0.017908625800000) \ + STEP( 27, UINT64_C(0x000000000005178b), 0.135, 0.019890532631250) \ + STEP( 28, UINT64_C(0x000000000005a1cf), 0.140, 0.022000294400000) \ + STEP( 29, UINT64_C(0x0000000000063498), 0.145, 0.024240074668750) \ + STEP( 30, UINT64_C(0x000000000006d009), 0.150, 0.026611875000000) \ + STEP( 31, UINT64_C(0x000000000007743f), 0.155, 0.029117537206250) \ + STEP( 32, UINT64_C(0x0000000000082157), 0.160, 0.031758745600000) \ + STEP( 33, UINT64_C(0x000000000008d76b), 0.165, 0.034537029243750) \ + STEP( 34, UINT64_C(0x0000000000099691), 0.170, 0.037453764200000) \ + STEP( 35, UINT64_C(0x00000000000a5edf), 0.175, 0.040510175781250) \ + STEP( 36, UINT64_C(0x00000000000b3067), 0.180, 0.043707340800000) \ + STEP( 37, UINT64_C(0x00000000000c0b38), 0.185, 0.047046189818750) \ + STEP( 38, UINT64_C(0x00000000000cef5e), 0.190, 0.050527509400000) \ + STEP( 39, UINT64_C(0x00000000000ddce6), 0.195, 0.054151944356250) \ + STEP( 40, 
UINT64_C(0x00000000000ed3d8), 0.200, 0.057920000000000) \ + STEP( 41, UINT64_C(0x00000000000fd439), 0.205, 0.061832044393750) \ + STEP( 42, UINT64_C(0x000000000010de0e), 0.210, 0.065888310600000) \ + STEP( 43, UINT64_C(0x000000000011f158), 0.215, 0.070088898931250) \ + STEP( 44, UINT64_C(0x0000000000130e17), 0.220, 0.074433779200000) \ + STEP( 45, UINT64_C(0x0000000000143448), 0.225, 0.078922792968750) \ + STEP( 46, UINT64_C(0x00000000001563e7), 0.230, 0.083555655800000) \ + STEP( 47, UINT64_C(0x0000000000169cec), 0.235, 0.088331959506250) \ + STEP( 48, UINT64_C(0x000000000017df4f), 0.240, 0.093251174400000) \ + STEP( 49, UINT64_C(0x0000000000192b04), 0.245, 0.098312651543750) \ + STEP( 50, UINT64_C(0x00000000001a8000), 0.250, 0.103515625000000) \ + STEP( 51, UINT64_C(0x00000000001bde32), 0.255, 0.108859214081250) \ + STEP( 52, UINT64_C(0x00000000001d458b), 0.260, 0.114342425600000) \ + STEP( 53, UINT64_C(0x00000000001eb5f8), 0.265, 0.119964156118750) \ + STEP( 54, UINT64_C(0x0000000000202f65), 0.270, 0.125723194200000) \ + STEP( 55, UINT64_C(0x000000000021b1bb), 0.275, 0.131618222656250) \ + STEP( 56, UINT64_C(0x0000000000233ce3), 0.280, 0.137647820800000) \ + STEP( 57, UINT64_C(0x000000000024d0c3), 0.285, 0.143810466693750) \ + STEP( 58, UINT64_C(0x0000000000266d40), 0.290, 0.150104539400000) \ + STEP( 59, UINT64_C(0x000000000028123d), 0.295, 0.156528321231250) \ + STEP( 60, UINT64_C(0x000000000029bf9c), 0.300, 0.163080000000000) \ + STEP( 61, UINT64_C(0x00000000002b753d), 0.305, 0.169757671268750) \ + STEP( 62, UINT64_C(0x00000000002d32fe), 0.310, 0.176559340600000) \ + STEP( 63, UINT64_C(0x00000000002ef8bc), 0.315, 0.183482925806250) \ + STEP( 64, UINT64_C(0x000000000030c654), 0.320, 0.190526259200000) \ + STEP( 65, UINT64_C(0x0000000000329b9f), 0.325, 0.197687089843750) \ + STEP( 66, UINT64_C(0x0000000000347875), 0.330, 0.204963085800000) \ + STEP( 67, UINT64_C(0x0000000000365cb0), 0.335, 0.212351836381250) \ + STEP( 68, UINT64_C(0x0000000000384825), 0.340, 0.219850854400000) \ + STEP( 69, UINT64_C(0x00000000003a3aa8), 0.345, 0.227457578418750) \ + STEP( 70, UINT64_C(0x00000000003c340f), 0.350, 0.235169375000000) \ + STEP( 71, UINT64_C(0x00000000003e342b), 0.355, 0.242983540956250) \ + STEP( 72, UINT64_C(0x0000000000403ace), 0.360, 0.250897305600000) \ + STEP( 73, UINT64_C(0x00000000004247c8), 0.365, 0.258907832993750) \ + STEP( 74, UINT64_C(0x0000000000445ae9), 0.370, 0.267012224200000) \ + STEP( 75, UINT64_C(0x0000000000467400), 0.375, 0.275207519531250) \ + STEP( 76, UINT64_C(0x00000000004892d8), 0.380, 0.283490700800000) \ + STEP( 77, UINT64_C(0x00000000004ab740), 0.385, 0.291858693568750) \ + STEP( 78, UINT64_C(0x00000000004ce102), 0.390, 0.300308369400000) \ + STEP( 79, UINT64_C(0x00000000004f0fe9), 0.395, 0.308836548106250) \ + STEP( 80, UINT64_C(0x00000000005143bf), 0.400, 0.317440000000000) \ + STEP( 81, UINT64_C(0x0000000000537c4d), 0.405, 0.326115448143750) \ + STEP( 82, UINT64_C(0x000000000055b95b), 0.410, 0.334859570600000) \ + STEP( 83, UINT64_C(0x000000000057fab1), 0.415, 0.343669002681250) \ + STEP( 84, UINT64_C(0x00000000005a4015), 0.420, 0.352540339200000) \ + STEP( 85, UINT64_C(0x00000000005c894e), 0.425, 0.361470136718750) \ + STEP( 86, UINT64_C(0x00000000005ed622), 0.430, 0.370454915800000) \ + STEP( 87, UINT64_C(0x0000000000612655), 0.435, 0.379491163256250) \ + STEP( 88, UINT64_C(0x00000000006379ac), 0.440, 0.388575334400000) \ + STEP( 89, UINT64_C(0x000000000065cfeb), 0.445, 0.397703855293750) \ + STEP( 90, UINT64_C(0x00000000006828d6), 0.450, 
0.406873125000000) \ + STEP( 91, UINT64_C(0x00000000006a842f), 0.455, 0.416079517831250) \ + STEP( 92, UINT64_C(0x00000000006ce1bb), 0.460, 0.425319385600000) \ + STEP( 93, UINT64_C(0x00000000006f413a), 0.465, 0.434589059868750) \ + STEP( 94, UINT64_C(0x000000000071a270), 0.470, 0.443884854200000) \ + STEP( 95, UINT64_C(0x000000000074051d), 0.475, 0.453203066406250) \ + STEP( 96, UINT64_C(0x0000000000766905), 0.480, 0.462539980800000) \ + STEP( 97, UINT64_C(0x000000000078cde7), 0.485, 0.471891870443750) \ + STEP( 98, UINT64_C(0x00000000007b3387), 0.490, 0.481254999400000) \ + STEP( 99, UINT64_C(0x00000000007d99a4), 0.495, 0.490625624981250) \ + STEP( 100, UINT64_C(0x0000000000800000), 0.500, 0.500000000000000) \ + STEP( 101, UINT64_C(0x000000000082665b), 0.505, 0.509374375018750) \ + STEP( 102, UINT64_C(0x000000000084cc78), 0.510, 0.518745000600000) \ + STEP( 103, UINT64_C(0x0000000000873218), 0.515, 0.528108129556250) \ + STEP( 104, UINT64_C(0x00000000008996fa), 0.520, 0.537460019200000) \ + STEP( 105, UINT64_C(0x00000000008bfae2), 0.525, 0.546796933593750) \ + STEP( 106, UINT64_C(0x00000000008e5d8f), 0.530, 0.556115145800000) \ + STEP( 107, UINT64_C(0x000000000090bec5), 0.535, 0.565410940131250) \ + STEP( 108, UINT64_C(0x0000000000931e44), 0.540, 0.574680614400000) \ + STEP( 109, UINT64_C(0x0000000000957bd0), 0.545, 0.583920482168750) \ + STEP( 110, UINT64_C(0x000000000097d729), 0.550, 0.593126875000000) \ + STEP( 111, UINT64_C(0x00000000009a3014), 0.555, 0.602296144706250) \ + STEP( 112, UINT64_C(0x00000000009c8653), 0.560, 0.611424665600000) \ + STEP( 113, UINT64_C(0x00000000009ed9aa), 0.565, 0.620508836743750) \ + STEP( 114, UINT64_C(0x0000000000a129dd), 0.570, 0.629545084200000) \ + STEP( 115, UINT64_C(0x0000000000a376b1), 0.575, 0.638529863281250) \ + STEP( 116, UINT64_C(0x0000000000a5bfea), 0.580, 0.647459660800000) \ + STEP( 117, UINT64_C(0x0000000000a8054e), 0.585, 0.656330997318750) \ + STEP( 118, UINT64_C(0x0000000000aa46a4), 0.590, 0.665140429400000) \ + STEP( 119, UINT64_C(0x0000000000ac83b2), 0.595, 0.673884551856250) \ + STEP( 120, UINT64_C(0x0000000000aebc40), 0.600, 0.682560000000000) \ + STEP( 121, UINT64_C(0x0000000000b0f016), 0.605, 0.691163451893750) \ + STEP( 122, UINT64_C(0x0000000000b31efd), 0.610, 0.699691630600000) \ + STEP( 123, UINT64_C(0x0000000000b548bf), 0.615, 0.708141306431250) \ + STEP( 124, UINT64_C(0x0000000000b76d27), 0.620, 0.716509299200000) \ + STEP( 125, UINT64_C(0x0000000000b98c00), 0.625, 0.724792480468750) \ + STEP( 126, UINT64_C(0x0000000000bba516), 0.630, 0.732987775800000) \ + STEP( 127, UINT64_C(0x0000000000bdb837), 0.635, 0.741092167006250) \ + STEP( 128, UINT64_C(0x0000000000bfc531), 0.640, 0.749102694400000) \ + STEP( 129, UINT64_C(0x0000000000c1cbd4), 0.645, 0.757016459043750) \ + STEP( 130, UINT64_C(0x0000000000c3cbf0), 0.650, 0.764830625000000) \ + STEP( 131, UINT64_C(0x0000000000c5c557), 0.655, 0.772542421581250) \ + STEP( 132, UINT64_C(0x0000000000c7b7da), 0.660, 0.780149145600000) \ + STEP( 133, UINT64_C(0x0000000000c9a34f), 0.665, 0.787648163618750) \ + STEP( 134, UINT64_C(0x0000000000cb878a), 0.670, 0.795036914200000) \ + STEP( 135, UINT64_C(0x0000000000cd6460), 0.675, 0.802312910156250) \ + STEP( 136, UINT64_C(0x0000000000cf39ab), 0.680, 0.809473740800000) \ + STEP( 137, UINT64_C(0x0000000000d10743), 0.685, 0.816517074193750) \ + STEP( 138, UINT64_C(0x0000000000d2cd01), 0.690, 0.823440659400000) \ + STEP( 139, UINT64_C(0x0000000000d48ac2), 0.695, 0.830242328731250) \ + STEP( 140, UINT64_C(0x0000000000d64063), 0.700, 
0.836920000000000) \ + STEP( 141, UINT64_C(0x0000000000d7edc2), 0.705, 0.843471678768750) \ + STEP( 142, UINT64_C(0x0000000000d992bf), 0.710, 0.849895460600000) \ + STEP( 143, UINT64_C(0x0000000000db2f3c), 0.715, 0.856189533306250) \ + STEP( 144, UINT64_C(0x0000000000dcc31c), 0.720, 0.862352179200000) \ + STEP( 145, UINT64_C(0x0000000000de4e44), 0.725, 0.868381777343750) \ + STEP( 146, UINT64_C(0x0000000000dfd09a), 0.730, 0.874276805800000) \ + STEP( 147, UINT64_C(0x0000000000e14a07), 0.735, 0.880035843881250) \ + STEP( 148, UINT64_C(0x0000000000e2ba74), 0.740, 0.885657574400000) \ + STEP( 149, UINT64_C(0x0000000000e421cd), 0.745, 0.891140785918750) \ + STEP( 150, UINT64_C(0x0000000000e58000), 0.750, 0.896484375000000) \ + STEP( 151, UINT64_C(0x0000000000e6d4fb), 0.755, 0.901687348456250) \ + STEP( 152, UINT64_C(0x0000000000e820b0), 0.760, 0.906748825600000) \ + STEP( 153, UINT64_C(0x0000000000e96313), 0.765, 0.911668040493750) \ + STEP( 154, UINT64_C(0x0000000000ea9c18), 0.770, 0.916444344200000) \ + STEP( 155, UINT64_C(0x0000000000ebcbb7), 0.775, 0.921077207031250) \ + STEP( 156, UINT64_C(0x0000000000ecf1e8), 0.780, 0.925566220800000) \ + STEP( 157, UINT64_C(0x0000000000ee0ea7), 0.785, 0.929911101068750) \ + STEP( 158, UINT64_C(0x0000000000ef21f1), 0.790, 0.934111689400000) \ + STEP( 159, UINT64_C(0x0000000000f02bc6), 0.795, 0.938167955606250) \ + STEP( 160, UINT64_C(0x0000000000f12c27), 0.800, 0.942080000000000) \ + STEP( 161, UINT64_C(0x0000000000f22319), 0.805, 0.945848055643750) \ + STEP( 162, UINT64_C(0x0000000000f310a1), 0.810, 0.949472490600000) \ + STEP( 163, UINT64_C(0x0000000000f3f4c7), 0.815, 0.952953810181250) \ + STEP( 164, UINT64_C(0x0000000000f4cf98), 0.820, 0.956292659200000) \ + STEP( 165, UINT64_C(0x0000000000f5a120), 0.825, 0.959489824218750) \ + STEP( 166, UINT64_C(0x0000000000f6696e), 0.830, 0.962546235800000) \ + STEP( 167, UINT64_C(0x0000000000f72894), 0.835, 0.965462970756250) \ + STEP( 168, UINT64_C(0x0000000000f7dea8), 0.840, 0.968241254400000) \ + STEP( 169, UINT64_C(0x0000000000f88bc0), 0.845, 0.970882462793750) \ + STEP( 170, UINT64_C(0x0000000000f92ff6), 0.850, 0.973388125000000) \ + STEP( 171, UINT64_C(0x0000000000f9cb67), 0.855, 0.975759925331250) \ + STEP( 172, UINT64_C(0x0000000000fa5e30), 0.860, 0.977999705600000) \ + STEP( 173, UINT64_C(0x0000000000fae874), 0.865, 0.980109467368750) \ + STEP( 174, UINT64_C(0x0000000000fb6a57), 0.870, 0.982091374200000) \ + STEP( 175, UINT64_C(0x0000000000fbe400), 0.875, 0.983947753906250) \ + STEP( 176, UINT64_C(0x0000000000fc5598), 0.880, 0.985681100800000) \ + STEP( 177, UINT64_C(0x0000000000fcbf4e), 0.885, 0.987294077943750) \ + STEP( 178, UINT64_C(0x0000000000fd214f), 0.890, 0.988789519400000) \ + STEP( 179, UINT64_C(0x0000000000fd7bcf), 0.895, 0.990170432481250) \ + STEP( 180, UINT64_C(0x0000000000fdcf03), 0.900, 0.991440000000000) \ + STEP( 181, UINT64_C(0x0000000000fe1b23), 0.905, 0.992601582518750) \ + STEP( 182, UINT64_C(0x0000000000fe606a), 0.910, 0.993658720600000) \ + STEP( 183, UINT64_C(0x0000000000fe9f18), 0.915, 0.994615137056250) \ + STEP( 184, UINT64_C(0x0000000000fed76e), 0.920, 0.995474739200000) \ + STEP( 185, UINT64_C(0x0000000000ff09b0), 0.925, 0.996241621093750) \ + STEP( 186, UINT64_C(0x0000000000ff3627), 0.930, 0.996920065800000) \ + STEP( 187, UINT64_C(0x0000000000ff5d1d), 0.935, 0.997514547631250) \ + STEP( 188, UINT64_C(0x0000000000ff7ee0), 0.940, 0.998029734400000) \ + STEP( 189, UINT64_C(0x0000000000ff9bc3), 0.945, 0.998470489668750) \ + STEP( 190, UINT64_C(0x0000000000ffb419), 0.950, 
0.998841875000000) \
+	STEP( 191, UINT64_C(0x0000000000ffc83d), 0.955, 0.999149152206250) \
+	STEP( 192, UINT64_C(0x0000000000ffd888), 0.960, 0.999397785600000) \
+	STEP( 193, UINT64_C(0x0000000000ffe55b), 0.965, 0.999593444243750) \
+	STEP( 194, UINT64_C(0x0000000000ffef17), 0.970, 0.999742004200000) \
+	STEP( 195, UINT64_C(0x0000000000fff623), 0.975, 0.999849550781250) \
+	STEP( 196, UINT64_C(0x0000000000fffae9), 0.980, 0.999922380800000) \
+	STEP( 197, UINT64_C(0x0000000000fffdd6), 0.985, 0.999967004818750) \
+	STEP( 198, UINT64_C(0x0000000000ffff5a), 0.990, 0.999990149400000) \
+	STEP( 199, UINT64_C(0x0000000000ffffeb), 0.995, 0.999998759356250) \
+	STEP( 200, UINT64_C(0x0000000001000000), 1.000, 1.000000000000000) \
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
diff --git a/sources/jemalloc/include-win/jemalloc/internal/smoothstep.sh b/sources/jemalloc/include-win/jemalloc/internal/smoothstep.sh
new file mode 100755
index 00000000..8124693f
--- /dev/null
+++ b/sources/jemalloc/include-win/jemalloc/internal/smoothstep.sh
@@ -0,0 +1,115 @@
+#!/bin/sh
+#
+# Generate a discrete lookup table for a sigmoid function in the smoothstep
+# family (https://en.wikipedia.org/wiki/Smoothstep), where the lookup table
+# entries correspond to x in [1/nsteps, 2/nsteps, ..., nsteps/nsteps].  Encode
+# the entries using a binary fixed point representation.
+#
+# Usage: smoothstep.sh <variant> <nsteps> <bfp> <xprec> <yprec>
+#
+# <variant> is in {smooth, smoother, smoothest}.
+# <nsteps> must be greater than zero.
+# <bfp> must be in [0..62]; reasonable values are roughly [10..30].
+# <xprec> is x decimal precision.
+# <yprec> is y decimal precision.
+
+#set -x
+
+cmd="sh smoothstep.sh $*"
+variant=$1
+nsteps=$2
+bfp=$3
+xprec=$4
+yprec=$5
+
+case "${variant}" in
+  smooth)
+    ;;
+  smoother)
+    ;;
+  smoothest)
+    ;;
+  *)
+    echo "Unsupported variant"
+    exit 1
+    ;;
+esac
+
+smooth() {
+  step=$1
+  y=`echo ${yprec} k ${step} ${nsteps} / sx _2 lx 3 ^ '*' 3 lx 2 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'`
+  h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}' `
+}
+
+smoother() {
+  step=$1
+  y=`echo ${yprec} k ${step} ${nsteps} / sx 6 lx 5 ^ '*' _15 lx 4 ^ '*' + 10 lx 3 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'`
+  h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}' `
+}
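+
+# Worked example (annotation, not part of the original script): with the
+# parameters recorded in smoothstep.h above (variant=smoother, nsteps=200,
+# bfp=24), step 100 gives x = 100/200 = 0.5, so
+#   y = 6*0.5^5 - 15*0.5^4 + 10*0.5^3 = 0.1875 - 0.9375 + 1.25 = 0.5
+# and h = floor(y * 2^24) = 0x800000, which matches the STEP(100, ...) entry.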
+
+smoothest() {
+  step=$1
+  y=`echo ${yprec} k ${step} ${nsteps} / sx _20 lx 7 ^ '*' 70 lx 6 ^ '*' + _84 lx 5 ^ '*' + 35 lx 4 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'`
+  h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}' `
+}
+
+cat <<EOF
+/*
+ * This file was generated by the following command:
+ *   $cmd
+ */
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+/*
+ * This header defines a precomputed table based on the smoothstep family of
+ * sigmoidal curves (https://en.wikipedia.org/wiki/Smoothstep) that grow from 0
+ * to 1 in 0 <= x <= 1.  The table is stored as integer fixed point values so
+ * that floating point math can be avoided.
+ *
+ *                      3     2
+ *   smoothstep(x) = -2x  + 3x
+ *
+ *                       5      4      3
+ *   smootherstep(x) = 6x  - 15x  + 10x
+ *
+ *                        7      6      5      4
+ *   smootheststep(x) = -20x  + 70x  - 84x  + 35x
+ */
+
+#define SMOOTHSTEP_VARIANT "${variant}"
+#define SMOOTHSTEP_NSTEPS ${nsteps}
+#define SMOOTHSTEP_BFP ${bfp}
+#define SMOOTHSTEP \\
+	/* STEP(step, h, x, y) */ \\
+EOF
+
+s=1
+while [ ${s} -le ${nsteps} ] ; do
+  case "${variant}" in
+    smooth)
+      smooth ${s}
+      ;;
+    smoother)
+      smoother ${s}
+      ;;
+    smoothest)
+      smoothest ${s}
+      ;;
+  esac
+  x=`echo ${xprec} k ${s} ${nsteps} / p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'`
+  printf '	STEP(%4d, UINT64_C(0x%016x), %s, %s) \\\\\n' ${s} ${h} ${x} ${y}
+  s=$((${s} + 1))
+done
+echo
+
+cat <<EOF
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
+EOF
diff --git a/sources/jemalloc/include-win/jemalloc/internal/spin.h b/sources/jemalloc/include-win/jemalloc/internal/spin.h
new file mode 100755
--- /dev/null
+++ b/sources/jemalloc/include-win/jemalloc/internal/spin.h
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+typedef struct spin_s spin_t;
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+struct spin_s {
+	unsigned iteration;
+};
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#ifndef JEMALLOC_ENABLE_INLINE
+void spin_init(spin_t *spin);
+void spin_adaptive(spin_t *spin);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_SPIN_C_))
+JEMALLOC_INLINE void
+spin_init(spin_t *spin)
+{
+
+	spin->iteration = 0;
+}
+
+JEMALLOC_INLINE void
+spin_adaptive(spin_t *spin)
+{
+	volatile uint64_t i;
+
+	for (i = 0; i < (KQU(1) << spin->iteration); i++)
+		CPU_SPINWAIT;
+
+	if (spin->iteration < 63)
+		spin->iteration++;
+}
+
+#endif
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
+
diff --git a/sources/jemalloc/include-win/jemalloc/internal/stats.h b/sources/jemalloc/include-win/jemalloc/internal/stats.h
new file mode 100755
index 00000000..04e7dae1
--- /dev/null
+++ b/sources/jemalloc/include-win/jemalloc/internal/stats.h
@@ -0,0 +1,197 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+typedef struct tcache_bin_stats_s tcache_bin_stats_t;
+typedef struct malloc_bin_stats_s malloc_bin_stats_t;
+typedef struct malloc_large_stats_s malloc_large_stats_t;
+typedef struct malloc_huge_stats_s malloc_huge_stats_t;
+typedef struct arena_stats_s arena_stats_t;
+typedef struct chunk_stats_s chunk_stats_t;
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+struct tcache_bin_stats_s {
+	/*
+	 * Number of allocation requests that corresponded to the size of this
+	 * bin.
+	 */
+	uint64_t	nrequests;
+};
+
+struct malloc_bin_stats_s {
+	/*
+	 * Total number of allocation/deallocation requests served directly by
+	 * the bin.  Note that tcache may allocate an object, then recycle it
+	 * many times, resulting in many increments to nrequests, but only one
+	 * each to nmalloc and ndalloc.
+	 */
+	uint64_t	nmalloc;
+	uint64_t	ndalloc;
+
+	/*
+	 * Number of allocation requests that correspond to the size of this
+	 * bin.  This includes requests served by tcache, though tcache only
+	 * periodically merges into this counter.
+	 */
+	uint64_t	nrequests;
+
+	/*
+	 * Current number of regions of this size class, including regions
+	 * currently cached by tcache.
+	 */
+	size_t	curregs;
+
+	/* Number of tcache fills from this bin. */
+	uint64_t	nfills;
+
+	/* Number of tcache flushes to this bin. */
+	uint64_t	nflushes;
+
+	/* Total number of runs created for this bin's size class. */
+	uint64_t	nruns;
+
+	/*
+	 * Total number of runs reused by extracting them from the runs tree for
+	 * this bin's size class.
+	 */
+	uint64_t	reruns;
+
+	/* Current number of runs in this bin. */
+	size_t	curruns;
+};
+
+struct malloc_large_stats_s {
+	/*
+	 * Total number of allocation/deallocation requests served directly by
+	 * the arena.  Note that tcache may allocate an object, then recycle it
+	 * many times, resulting in many increments to nrequests, but only one
+	 * each to nmalloc and ndalloc.
+	 */
+	uint64_t	nmalloc;
+	uint64_t	ndalloc;
+
+	/*
+	 * Number of allocation requests that correspond to this size class.
+	 * This includes requests served by tcache, though tcache only
+	 * periodically merges into this counter.
+	 */
+	uint64_t	nrequests;
+
+	/*
+	 * Current number of runs of this size class, including runs currently
+	 * cached by tcache.
+	 */
+	size_t	curruns;
+};
+
+struct malloc_huge_stats_s {
+	/*
+	 * Total number of allocation/deallocation requests served directly by
+	 * the arena.
+	 */
+	uint64_t	nmalloc;
+	uint64_t	ndalloc;
+
+	/* Current number of (multi-)chunk allocations of this size class. */
+	size_t	curhchunks;
+};
+
+struct arena_stats_s {
+	/* Number of bytes currently mapped. 
*/ + size_t mapped; + + /* + * Number of bytes currently retained as a side effect of munmap() being + * disabled/bypassed. Retained bytes are technically mapped (though + * always decommitted or purged), but they are excluded from the mapped + * statistic (above). + */ + size_t retained; + + /* + * Total number of purge sweeps, total number of madvise calls made, + * and total pages purged in order to keep dirty unused memory under + * control. + */ + uint64_t npurge; + uint64_t nmadvise; + uint64_t purged; + + /* + * Number of bytes currently mapped purely for metadata purposes, and + * number of bytes currently allocated for internal metadata. + */ + size_t metadata_mapped; + size_t metadata_allocated; /* Protected via atomic_*_z(). */ + + /* Per-size-category statistics. */ + size_t allocated_large; + uint64_t nmalloc_large; + uint64_t ndalloc_large; + uint64_t nrequests_large; + + size_t allocated_huge; + uint64_t nmalloc_huge; + uint64_t ndalloc_huge; + + /* One element for each large size class. */ + malloc_large_stats_t *lstats; + + /* One element for each huge size class. */ + malloc_huge_stats_t *hstats; +}; + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +extern bool opt_stats_print; + +extern size_t stats_cactive; + +void stats_print(void (*write)(void *, const char *), void *cbopaque, + const char *opts); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +size_t stats_cactive_get(void); +void stats_cactive_add(size_t size); +void stats_cactive_sub(size_t size); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_STATS_C_)) +JEMALLOC_INLINE size_t +stats_cactive_get(void) +{ + + return (atomic_read_z(&stats_cactive)); +} + +JEMALLOC_INLINE void +stats_cactive_add(size_t size) +{ + + assert(size > 0); + assert((size & chunksize_mask) == 0); + + atomic_add_z(&stats_cactive, size); +} + +JEMALLOC_INLINE void +stats_cactive_sub(size_t size) +{ + + assert(size > 0); + assert((size & chunksize_mask) == 0); + + atomic_sub_z(&stats_cactive, size); +} +#endif + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ diff --git a/sources/jemalloc/include-win/jemalloc/internal/tcache.h b/sources/jemalloc/include-win/jemalloc/internal/tcache.h new file mode 100755 index 00000000..5fe5ebfa --- /dev/null +++ b/sources/jemalloc/include-win/jemalloc/internal/tcache.h @@ -0,0 +1,472 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +typedef struct tcache_bin_info_s tcache_bin_info_t; +typedef struct tcache_bin_s tcache_bin_t; +typedef struct tcache_s tcache_t; +typedef struct tcaches_s tcaches_t; + +/* + * tcache pointers close to NULL are used to encode state information that is + * used for two purposes: preventing thread caching on a per thread basis and + * cleaning up during thread shutdown. + */ +#define TCACHE_STATE_DISABLED ((tcache_t *)(uintptr_t)1) +#define TCACHE_STATE_REINCARNATED ((tcache_t *)(uintptr_t)2) +#define TCACHE_STATE_PURGATORY ((tcache_t *)(uintptr_t)3) +#define TCACHE_STATE_MAX TCACHE_STATE_PURGATORY + +/* + * Absolute minimum number of cache slots for each small bin. + */ +#define TCACHE_NSLOTS_SMALL_MIN 20 + +/* + * Absolute maximum number of cache slots for each small bin in the thread + * cache. 
This is an additional constraint beyond that imposed as: twice the + * number of regions per run for this size class. + * + * This constant must be an even number. + */ +#define TCACHE_NSLOTS_SMALL_MAX 200 + +/* Number of cache slots for large size classes. */ +#define TCACHE_NSLOTS_LARGE 20 + +/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */ +#define LG_TCACHE_MAXCLASS_DEFAULT 15 + +/* + * TCACHE_GC_SWEEP is the approximate number of allocation events between + * full GC sweeps. Integer rounding may cause the actual number to be + * slightly higher, since GC is performed incrementally. + */ +#define TCACHE_GC_SWEEP 8192 + +/* Number of tcache allocation/deallocation events between incremental GCs. */ +#define TCACHE_GC_INCR \ + ((TCACHE_GC_SWEEP / NBINS) + ((TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1)) + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +typedef enum { + tcache_enabled_false = 0, /* Enable cast to/from bool. */ + tcache_enabled_true = 1, + tcache_enabled_default = 2 +} tcache_enabled_t; + +/* + * Read-only information associated with each element of tcache_t's tbins array + * is stored separately, mainly to reduce memory usage. + */ +struct tcache_bin_info_s { + unsigned ncached_max; /* Upper limit on ncached. */ +}; + +struct tcache_bin_s { + tcache_bin_stats_t tstats; + int low_water; /* Min # cached since last GC. */ + unsigned lg_fill_div; /* Fill (ncached_max >> lg_fill_div). */ + unsigned ncached; /* # of cached objects. */ + /* + * To make use of adjacent cacheline prefetch, the items in the avail + * stack goes to higher address for newer allocations. avail points + * just above the available space, which means that + * avail[-ncached, ... -1] are available items and the lowest item will + * be allocated first. + */ + void **avail; /* Stack of available objects. */ +}; + +struct tcache_s { + ql_elm(tcache_t) link; /* Used for aggregating stats. */ + uint64_t prof_accumbytes;/* Cleared after arena_prof_accum(). */ + ticker_t gc_ticker; /* Drives incremental GC. */ + szind_t next_gc_bin; /* Next bin to GC. */ + tcache_bin_t tbins[1]; /* Dynamically sized. */ + /* + * The pointer stacks associated with tbins follow as a contiguous + * array. During tcache initialization, the avail pointer in each + * element of tbins is initialized to point to the proper offset within + * this array. + */ +}; + +/* Linkage for list of available (previously used) explicit tcache IDs. */ +struct tcaches_s { + union { + tcache_t *tcache; + tcaches_t *next; + }; +}; + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +extern bool opt_tcache; +extern ssize_t opt_lg_tcache_max; + +extern tcache_bin_info_t *tcache_bin_info; + +/* + * Number of tcache bins. There are NBINS small-object bins, plus 0 or more + * large-object bins. + */ +extern unsigned nhbins; + +/* Maximum cached size class. */ +extern size_t tcache_maxclass; + +/* + * Explicit tcaches, managed via the tcache.{create,flush,destroy} mallctls and + * usable via the MALLOCX_TCACHE() flag. The automatic per thread tcaches are + * completely disjoint from this data structure. tcaches starts off as a sparse + * array, so it has no physical memory footprint until individual pages are + * touched. 
This allows the entire array to be allocated the first time an + * explicit tcache is created without a disproportionate impact on memory usage. + */ +extern tcaches_t *tcaches; + +size_t tcache_salloc(tsdn_t *tsdn, const void *ptr); +void tcache_event_hard(tsd_t *tsd, tcache_t *tcache); +void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, + tcache_bin_t *tbin, szind_t binind, bool *tcache_success); +void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin, + szind_t binind, unsigned rem); +void tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind, + unsigned rem, tcache_t *tcache); +void tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, + arena_t *oldarena, arena_t *newarena); +tcache_t *tcache_get_hard(tsd_t *tsd); +tcache_t *tcache_create(tsdn_t *tsdn, arena_t *arena); +void tcache_cleanup(tsd_t *tsd); +void tcache_enabled_cleanup(tsd_t *tsd); +void tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena); +bool tcaches_create(tsd_t *tsd, unsigned *r_ind); +void tcaches_flush(tsd_t *tsd, unsigned ind); +void tcaches_destroy(tsd_t *tsd, unsigned ind); +bool tcache_boot(tsdn_t *tsdn); +void tcache_prefork(tsdn_t *tsdn); +void tcache_postfork_parent(tsdn_t *tsdn); +void tcache_postfork_child(tsdn_t *tsdn); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +void tcache_event(tsd_t *tsd, tcache_t *tcache); +void tcache_flush(void); +bool tcache_enabled_get(void); +tcache_t *tcache_get(tsd_t *tsd, bool create); +void tcache_enabled_set(bool enabled); +void *tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success); +void *tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, + size_t size, szind_t ind, bool zero, bool slow_path); +void *tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, + size_t size, szind_t ind, bool zero, bool slow_path); +void tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, + szind_t binind, bool slow_path); +void tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, + size_t size, bool slow_path); +tcache_t *tcaches_get(tsd_t *tsd, unsigned ind); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_)) +JEMALLOC_INLINE void +tcache_flush(void) +{ + tsd_t *tsd; + + cassert(config_tcache); + + tsd = tsd_fetch(); + tcache_cleanup(tsd); +} + +JEMALLOC_INLINE bool +tcache_enabled_get(void) +{ + tsd_t *tsd; + tcache_enabled_t tcache_enabled; + + cassert(config_tcache); + + tsd = tsd_fetch(); + tcache_enabled = tsd_tcache_enabled_get(tsd); + if (tcache_enabled == tcache_enabled_default) { + tcache_enabled = (tcache_enabled_t)opt_tcache; + tsd_tcache_enabled_set(tsd, tcache_enabled); + } + + return ((bool)tcache_enabled); +} + +JEMALLOC_INLINE void +tcache_enabled_set(bool enabled) +{ + tsd_t *tsd; + tcache_enabled_t tcache_enabled; + + cassert(config_tcache); + + tsd = tsd_fetch(); + + tcache_enabled = (tcache_enabled_t)enabled; + tsd_tcache_enabled_set(tsd, tcache_enabled); + + if (!enabled) + tcache_cleanup(tsd); +} + +JEMALLOC_ALWAYS_INLINE tcache_t * +tcache_get(tsd_t *tsd, bool create) +{ + tcache_t *tcache; + + if (!config_tcache) + return (NULL); + + tcache = tsd_tcache_get(tsd); + if (!create) + return (tcache); + if (unlikely(tcache == NULL) && tsd_nominal(tsd)) { + tcache = tcache_get_hard(tsd); + tsd_tcache_set(tsd, tcache); + } + + return (tcache); +} + +JEMALLOC_ALWAYS_INLINE void 
+tcache_event(tsd_t *tsd, tcache_t *tcache) +{ + + if (TCACHE_GC_INCR == 0) + return; + + if (unlikely(ticker_tick(&tcache->gc_ticker))) + tcache_event_hard(tsd, tcache); +} + +JEMALLOC_ALWAYS_INLINE void * +tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success) +{ + void *ret; + + if (unlikely(tbin->ncached == 0)) { + tbin->low_water = -1; + *tcache_success = false; + return (NULL); + } + /* + * tcache_success (instead of ret) should be checked upon the return of + * this function. We avoid checking (ret == NULL) because there is + * never a null stored on the avail stack (which is unknown to the + * compiler), and eagerly checking ret would cause pipeline stall + * (waiting for the cacheline). + */ + *tcache_success = true; + ret = *(tbin->avail - tbin->ncached); + tbin->ncached--; + + if (unlikely((int)tbin->ncached < tbin->low_water)) + tbin->low_water = tbin->ncached; + + return (ret); +} + +JEMALLOC_ALWAYS_INLINE void * +tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size, + szind_t binind, bool zero, bool slow_path) +{ + void *ret; + tcache_bin_t *tbin; + bool tcache_success; + size_t usize JEMALLOC_CC_SILENCE_INIT(0); + + assert(binind < NBINS); + tbin = &tcache->tbins[binind]; + ret = tcache_alloc_easy(tbin, &tcache_success); + assert(tcache_success == (ret != NULL)); + if (unlikely(!tcache_success)) { + bool tcache_hard_success; + arena = arena_choose(tsd, arena); + if (unlikely(arena == NULL)) + return (NULL); + + ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache, + tbin, binind, &tcache_hard_success); + if (tcache_hard_success == false) + return (NULL); + } + + assert(ret); + /* + * Only compute usize if required. The checks in the following if + * statement are all static. + */ + if (config_prof || (slow_path && config_fill) || unlikely(zero)) { + usize = index2size(binind); + assert(tcache_salloc(tsd_tsdn(tsd), ret) == usize); + } + + if (likely(!zero)) { + if (slow_path && config_fill) { + if (unlikely(opt_junk_alloc)) { + arena_alloc_junk_small(ret, + &arena_bin_info[binind], false); + } else if (unlikely(opt_zero)) + memset(ret, 0, usize); + } + } else { + if (slow_path && config_fill && unlikely(opt_junk_alloc)) { + arena_alloc_junk_small(ret, &arena_bin_info[binind], + true); + } + memset(ret, 0, usize); + } + + if (config_stats) + tbin->tstats.nrequests++; + if (config_prof) + tcache->prof_accumbytes += usize; + tcache_event(tsd, tcache); + return (ret); +} + +JEMALLOC_ALWAYS_INLINE void * +tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size, + szind_t binind, bool zero, bool slow_path) +{ + void *ret; + tcache_bin_t *tbin; + bool tcache_success; + + assert(binind < nhbins); + tbin = &tcache->tbins[binind]; + ret = tcache_alloc_easy(tbin, &tcache_success); + assert(tcache_success == (ret != NULL)); + if (unlikely(!tcache_success)) { + /* + * Only allocate one large object at a time, because it's quite + * expensive to create one and not use it. 
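+		 * Unlike the small-object path above, where a cache miss makes
+		 * tcache_alloc_small_hard() refill the bin with a batch of
+		 * objects, a large-object miss falls through to a single
+		 * arena_malloc_large() call.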
+ */ + arena = arena_choose(tsd, arena); + if (unlikely(arena == NULL)) + return (NULL); + + ret = arena_malloc_large(tsd_tsdn(tsd), arena, binind, zero); + if (ret == NULL) + return (NULL); + } else { + size_t usize JEMALLOC_CC_SILENCE_INIT(0); + + /* Only compute usize on demand */ + if (config_prof || (slow_path && config_fill) || + unlikely(zero)) { + usize = index2size(binind); + assert(usize <= tcache_maxclass); + } + + if (config_prof && usize == LARGE_MINCLASS) { + arena_chunk_t *chunk = + (arena_chunk_t *)CHUNK_ADDR2BASE(ret); + size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >> + LG_PAGE); + arena_mapbits_large_binind_set(chunk, pageind, + BININD_INVALID); + } + if (likely(!zero)) { + if (slow_path && config_fill) { + if (unlikely(opt_junk_alloc)) { + memset(ret, JEMALLOC_ALLOC_JUNK, + usize); + } else if (unlikely(opt_zero)) + memset(ret, 0, usize); + } + } else + memset(ret, 0, usize); + + if (config_stats) + tbin->tstats.nrequests++; + if (config_prof) + tcache->prof_accumbytes += usize; + } + + tcache_event(tsd, tcache); + return (ret); +} + +JEMALLOC_ALWAYS_INLINE void +tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind, + bool slow_path) +{ + tcache_bin_t *tbin; + tcache_bin_info_t *tbin_info; + + assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= SMALL_MAXCLASS); + + if (slow_path && config_fill && unlikely(opt_junk_free)) + arena_dalloc_junk_small(ptr, &arena_bin_info[binind]); + + tbin = &tcache->tbins[binind]; + tbin_info = &tcache_bin_info[binind]; + if (unlikely(tbin->ncached == tbin_info->ncached_max)) { + tcache_bin_flush_small(tsd, tcache, tbin, binind, + (tbin_info->ncached_max >> 1)); + } + assert(tbin->ncached < tbin_info->ncached_max); + tbin->ncached++; + *(tbin->avail - tbin->ncached) = ptr; + + tcache_event(tsd, tcache); +} + +JEMALLOC_ALWAYS_INLINE void +tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size, + bool slow_path) +{ + szind_t binind; + tcache_bin_t *tbin; + tcache_bin_info_t *tbin_info; + + assert((size & PAGE_MASK) == 0); + assert(tcache_salloc(tsd_tsdn(tsd), ptr) > SMALL_MAXCLASS); + assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass); + + binind = size2index(size); + + if (slow_path && config_fill && unlikely(opt_junk_free)) + arena_dalloc_junk_large(ptr, size); + + tbin = &tcache->tbins[binind]; + tbin_info = &tcache_bin_info[binind]; + if (unlikely(tbin->ncached == tbin_info->ncached_max)) { + tcache_bin_flush_large(tsd, tbin, binind, + (tbin_info->ncached_max >> 1), tcache); + } + assert(tbin->ncached < tbin_info->ncached_max); + tbin->ncached++; + *(tbin->avail - tbin->ncached) = ptr; + + tcache_event(tsd, tcache); +} + +JEMALLOC_ALWAYS_INLINE tcache_t * +tcaches_get(tsd_t *tsd, unsigned ind) +{ + tcaches_t *elm = &tcaches[ind]; + if (unlikely(elm->tcache == NULL)) { + elm->tcache = tcache_create(tsd_tsdn(tsd), arena_choose(tsd, + NULL)); + } + return (elm->tcache); +} +#endif + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ diff --git a/sources/jemalloc/include-win/jemalloc/internal/ticker.h b/sources/jemalloc/include-win/jemalloc/internal/ticker.h new file mode 100755 index 00000000..4696e56d --- /dev/null +++ b/sources/jemalloc/include-win/jemalloc/internal/ticker.h @@ -0,0 +1,75 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +typedef struct ticker_s ticker_t; + +#endif /* JEMALLOC_H_TYPES */ 
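+
+/*
+ * Usage sketch (annotation, not part of the original header): a ticker
+ * counts down from nticks, and ticker_tick() returns true roughly once per
+ * nticks calls; tcache_event() above drives incremental GC this way.
+ * do_gc_step() below is hypothetical:
+ *
+ *   ticker_t t;
+ *   ticker_init(&t, 64);
+ *   ...
+ *   if (ticker_tick(&t))   // true about once per 64 calls
+ *       do_gc_step();      // periodic work
+ */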
+/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +struct ticker_s { + int32_t tick; + int32_t nticks; +}; + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +void ticker_init(ticker_t *ticker, int32_t nticks); +void ticker_copy(ticker_t *ticker, const ticker_t *other); +int32_t ticker_read(const ticker_t *ticker); +bool ticker_ticks(ticker_t *ticker, int32_t nticks); +bool ticker_tick(ticker_t *ticker); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TICKER_C_)) +JEMALLOC_INLINE void +ticker_init(ticker_t *ticker, int32_t nticks) +{ + + ticker->tick = nticks; + ticker->nticks = nticks; +} + +JEMALLOC_INLINE void +ticker_copy(ticker_t *ticker, const ticker_t *other) +{ + + *ticker = *other; +} + +JEMALLOC_INLINE int32_t +ticker_read(const ticker_t *ticker) +{ + + return (ticker->tick); +} + +JEMALLOC_INLINE bool +ticker_ticks(ticker_t *ticker, int32_t nticks) +{ + + if (unlikely(ticker->tick < nticks)) { + ticker->tick = ticker->nticks; + return (true); + } + ticker->tick -= nticks; + return(false); +} + +JEMALLOC_INLINE bool +ticker_tick(ticker_t *ticker) +{ + + return (ticker_ticks(ticker, 1)); +} +#endif + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ diff --git a/sources/jemalloc/include-win/jemalloc/internal/tsd.h b/sources/jemalloc/include-win/jemalloc/internal/tsd.h new file mode 100755 index 00000000..9f374335 --- /dev/null +++ b/sources/jemalloc/include-win/jemalloc/internal/tsd.h @@ -0,0 +1,788 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +/* Maximum number of malloc_tsd users with cleanup functions. */ +#define MALLOC_TSD_CLEANUPS_MAX 2 + +typedef bool (*malloc_tsd_cleanup_t)(void); + +#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ + !defined(_WIN32)) +typedef struct tsd_init_block_s tsd_init_block_t; +typedef struct tsd_init_head_s tsd_init_head_t; +#endif + +typedef struct tsd_s tsd_t; +typedef struct tsdn_s tsdn_t; + +#define TSDN_NULL ((tsdn_t *)0) + +typedef enum { + tsd_state_uninitialized, + tsd_state_nominal, + tsd_state_purgatory, + tsd_state_reincarnated +} tsd_state_t; + +/* + * TLS/TSD-agnostic macro-based implementation of thread-specific data. There + * are five macros that support (at least) three use cases: file-private, + * library-private, and library-private inlined. 
Following is an example + * library-private tsd variable: + * + * In example.h: + * typedef struct { + * int x; + * int y; + * } example_t; + * #define EX_INITIALIZER JEMALLOC_CONCAT({0, 0}) + * malloc_tsd_types(example_, example_t) + * malloc_tsd_protos(, example_, example_t) + * malloc_tsd_externs(example_, example_t) + * In example.c: + * malloc_tsd_data(, example_, example_t, EX_INITIALIZER) + * malloc_tsd_funcs(, example_, example_t, EX_INITIALIZER, + * example_tsd_cleanup) + * + * The result is a set of generated functions, e.g.: + * + * bool example_tsd_boot(void) {...} + * bool example_tsd_booted_get(void) {...} + * example_t *example_tsd_get(bool init) {...} + * void example_tsd_set(example_t *val) {...} + * + * Note that all of the functions deal in terms of (a_type *) rather than + * (a_type) so that it is possible to support non-pointer types (unlike + * pthreads TSD). example_tsd_cleanup() is passed an (a_type *) pointer that is + * cast to (void *). This means that the cleanup function needs to cast the + * function argument to (a_type *), then dereference the resulting pointer to + * access fields, e.g. + * + * void + * example_tsd_cleanup(void *arg) + * { + * example_t *example = (example_t *)arg; + * + * example->x = 42; + * [...] + * if ([want the cleanup function to be called again]) + * example_tsd_set(example); + * } + * + * If example_tsd_set() is called within example_tsd_cleanup(), it will be + * called again. This is similar to how pthreads TSD destruction works, except + * that pthreads only calls the cleanup function again if the value was set to + * non-NULL. + */ + +/* malloc_tsd_types(). */ +#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP +#define malloc_tsd_types(a_name, a_type) +#elif (defined(JEMALLOC_TLS)) +#define malloc_tsd_types(a_name, a_type) +#elif (defined(_WIN32)) +#define malloc_tsd_types(a_name, a_type) \ +typedef struct { \ + bool initialized; \ + a_type val; \ +} a_name##tsd_wrapper_t; +#else +#define malloc_tsd_types(a_name, a_type) \ +typedef struct { \ + bool initialized; \ + a_type val; \ +} a_name##tsd_wrapper_t; +#endif + +/* malloc_tsd_protos(). */ +#define malloc_tsd_protos(a_attr, a_name, a_type) \ +a_attr bool \ +a_name##tsd_boot0(void); \ +a_attr void \ +a_name##tsd_boot1(void); \ +a_attr bool \ +a_name##tsd_boot(void); \ +a_attr bool \ +a_name##tsd_booted_get(void); \ +a_attr a_type * \ +a_name##tsd_get(bool init); \ +a_attr void \ +a_name##tsd_set(a_type *val); + +/* malloc_tsd_externs(). */ +#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP +#define malloc_tsd_externs(a_name, a_type) \ +extern __thread a_type a_name##tsd_tls; \ +extern __thread bool a_name##tsd_initialized; \ +extern bool a_name##tsd_booted; +#elif (defined(JEMALLOC_TLS)) +#define malloc_tsd_externs(a_name, a_type) \ +extern __thread a_type a_name##tsd_tls; \ +extern pthread_key_t a_name##tsd_tsd; \ +extern bool a_name##tsd_booted; +#elif (defined(_WIN32)) +#define malloc_tsd_externs(a_name, a_type) \ +extern DWORD a_name##tsd_tsd; \ +extern a_name##tsd_wrapper_t a_name##tsd_boot_wrapper; \ +extern bool a_name##tsd_booted; +#else +#define malloc_tsd_externs(a_name, a_type) \ +extern pthread_key_t a_name##tsd_tsd; \ +extern tsd_init_head_t a_name##tsd_init_head; \ +extern a_name##tsd_wrapper_t a_name##tsd_boot_wrapper; \ +extern bool a_name##tsd_booted; +#endif + +/* malloc_tsd_data(). 
*/ +#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP +#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ +a_attr __thread a_type JEMALLOC_TLS_MODEL \ + a_name##tsd_tls = a_initializer; \ +a_attr __thread bool JEMALLOC_TLS_MODEL \ + a_name##tsd_initialized = false; \ +a_attr bool a_name##tsd_booted = false; +#elif (defined(JEMALLOC_TLS)) +#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ +a_attr __thread a_type JEMALLOC_TLS_MODEL \ + a_name##tsd_tls = a_initializer; \ +a_attr pthread_key_t a_name##tsd_tsd; \ +a_attr bool a_name##tsd_booted = false; +#elif (defined(_WIN32)) +#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ +a_attr DWORD a_name##tsd_tsd; \ +a_attr a_name##tsd_wrapper_t a_name##tsd_boot_wrapper = { \ + false, \ + a_initializer \ +}; \ +a_attr bool a_name##tsd_booted = false; +#else +#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ +a_attr pthread_key_t a_name##tsd_tsd; \ +a_attr tsd_init_head_t a_name##tsd_init_head = { \ + ql_head_initializer(blocks), \ + MALLOC_MUTEX_INITIALIZER \ +}; \ +a_attr a_name##tsd_wrapper_t a_name##tsd_boot_wrapper = { \ + false, \ + a_initializer \ +}; \ +a_attr bool a_name##tsd_booted = false; +#endif + +/* malloc_tsd_funcs(). */ +#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP +#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ + a_cleanup) \ +/* Initialization/cleanup. */ \ +a_attr bool \ +a_name##tsd_cleanup_wrapper(void) \ +{ \ + \ + if (a_name##tsd_initialized) { \ + a_name##tsd_initialized = false; \ + a_cleanup(&a_name##tsd_tls); \ + } \ + return (a_name##tsd_initialized); \ +} \ +a_attr bool \ +a_name##tsd_boot0(void) \ +{ \ + \ + if (a_cleanup != malloc_tsd_no_cleanup) { \ + malloc_tsd_cleanup_register( \ + &a_name##tsd_cleanup_wrapper); \ + } \ + a_name##tsd_booted = true; \ + return (false); \ +} \ +a_attr void \ +a_name##tsd_boot1(void) \ +{ \ + \ + /* Do nothing. */ \ +} \ +a_attr bool \ +a_name##tsd_boot(void) \ +{ \ + \ + return (a_name##tsd_boot0()); \ +} \ +a_attr bool \ +a_name##tsd_booted_get(void) \ +{ \ + \ + return (a_name##tsd_booted); \ +} \ +a_attr bool \ +a_name##tsd_get_allocates(void) \ +{ \ + \ + return (false); \ +} \ +/* Get/set. */ \ +a_attr a_type * \ +a_name##tsd_get(bool init) \ +{ \ + \ + assert(a_name##tsd_booted); \ + return (&a_name##tsd_tls); \ +} \ +a_attr void \ +a_name##tsd_set(a_type *val) \ +{ \ + \ + assert(a_name##tsd_booted); \ + a_name##tsd_tls = (*val); \ + if (a_cleanup != malloc_tsd_no_cleanup) \ + a_name##tsd_initialized = true; \ +} +#elif (defined(JEMALLOC_TLS)) +#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ + a_cleanup) \ +/* Initialization/cleanup. */ \ +a_attr bool \ +a_name##tsd_boot0(void) \ +{ \ + \ + if (a_cleanup != malloc_tsd_no_cleanup) { \ + if (pthread_key_create(&a_name##tsd_tsd, a_cleanup) != \ + 0) \ + return (true); \ + } \ + a_name##tsd_booted = true; \ + return (false); \ +} \ +a_attr void \ +a_name##tsd_boot1(void) \ +{ \ + \ + /* Do nothing. */ \ +} \ +a_attr bool \ +a_name##tsd_boot(void) \ +{ \ + \ + return (a_name##tsd_boot0()); \ +} \ +a_attr bool \ +a_name##tsd_booted_get(void) \ +{ \ + \ + return (a_name##tsd_booted); \ +} \ +a_attr bool \ +a_name##tsd_get_allocates(void) \ +{ \ + \ + return (false); \ +} \ +/* Get/set. 
 */ \
+a_attr a_type * \
+a_name##tsd_get(bool init) \
+{ \
+ \
+	assert(a_name##tsd_booted); \
+	return (&a_name##tsd_tls); \
+} \
+a_attr void \
+a_name##tsd_set(a_type *val) \
+{ \
+ \
+	assert(a_name##tsd_booted); \
+	a_name##tsd_tls = (*val); \
+	if (a_cleanup != malloc_tsd_no_cleanup) { \
+		if (pthread_setspecific(a_name##tsd_tsd, \
+		    (void *)(&a_name##tsd_tls))) { \
+			malloc_write("<jemalloc>: Error" \
+			    " setting TSD for "#a_name"\n"); \
+			if (opt_abort) \
+				abort(); \
+		} \
+	} \
+}
+#elif (defined(_WIN32))
+#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
+    a_cleanup) \
+/* Initialization/cleanup. */ \
+a_attr bool \
+a_name##tsd_cleanup_wrapper(void) \
+{ \
+	DWORD error = GetLastError(); \
+	a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \
+	    TlsGetValue(a_name##tsd_tsd); \
+	SetLastError(error); \
+ \
+	if (wrapper == NULL) \
+		return (false); \
+	if (a_cleanup != malloc_tsd_no_cleanup && \
+	    wrapper->initialized) { \
+		wrapper->initialized = false; \
+		a_cleanup(&wrapper->val); \
+		if (wrapper->initialized) { \
+			/* Trigger another cleanup round. */ \
+			return (true); \
+		} \
+	} \
+	malloc_tsd_dalloc(wrapper); \
+	return (false); \
+} \
+a_attr void \
+a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) \
+{ \
+ \
+	if (!TlsSetValue(a_name##tsd_tsd, (void *)wrapper)) { \
+		malloc_write("<jemalloc>: Error setting" \
+		    " TSD for "#a_name"\n"); \
+		abort(); \
+	} \
+} \
+a_attr a_name##tsd_wrapper_t * \
+a_name##tsd_wrapper_get(bool init) \
+{ \
+	DWORD error = GetLastError(); \
+	a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \
+	    TlsGetValue(a_name##tsd_tsd); \
+	SetLastError(error); \
+ \
+	if (init && unlikely(wrapper == NULL)) { \
+		wrapper = (a_name##tsd_wrapper_t *) \
+		    malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
+		if (wrapper == NULL) { \
+			malloc_write("<jemalloc>: Error allocating" \
+			    " TSD for "#a_name"\n"); \
+			abort(); \
+		} else { \
+			wrapper->initialized = false; \
+			wrapper->val = a_initializer; \
+		} \
+		a_name##tsd_wrapper_set(wrapper); \
+	} \
+	return (wrapper); \
+} \
+a_attr bool \
+a_name##tsd_boot0(void) \
+{ \
+ \
+	a_name##tsd_tsd = TlsAlloc(); \
+	if (a_name##tsd_tsd == TLS_OUT_OF_INDEXES) \
+		return (true); \
+	if (a_cleanup != malloc_tsd_no_cleanup) { \
+		malloc_tsd_cleanup_register( \
+		    &a_name##tsd_cleanup_wrapper); \
+	} \
+	a_name##tsd_wrapper_set(&a_name##tsd_boot_wrapper); \
+	a_name##tsd_booted = true; \
+	return (false); \
+} \
+a_attr void \
+a_name##tsd_boot1(void) \
+{ \
+	a_name##tsd_wrapper_t *wrapper; \
+	wrapper = (a_name##tsd_wrapper_t *) \
+	    malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
+	if (wrapper == NULL) { \
+		malloc_write("<jemalloc>: Error allocating" \
+		    " TSD for "#a_name"\n"); \
+		abort(); \
+	} \
+	memcpy(wrapper, &a_name##tsd_boot_wrapper, \
+	    sizeof(a_name##tsd_wrapper_t)); \
+	a_name##tsd_wrapper_set(wrapper); \
+} \
+a_attr bool \
+a_name##tsd_boot(void) \
+{ \
+ \
+	if (a_name##tsd_boot0()) \
+		return (true); \
+	a_name##tsd_boot1(); \
+	return (false); \
+} \
+a_attr bool \
+a_name##tsd_booted_get(void) \
+{ \
+ \
+	return (a_name##tsd_booted); \
+} \
+a_attr bool \
+a_name##tsd_get_allocates(void) \
+{ \
+ \
+	return (true); \
+} \
+/* Get/set.
 */ \
+a_attr a_type * \
+a_name##tsd_get(bool init) \
+{ \
+	a_name##tsd_wrapper_t *wrapper; \
+ \
+	assert(a_name##tsd_booted); \
+	wrapper = a_name##tsd_wrapper_get(init); \
+	if (a_name##tsd_get_allocates() && !init && wrapper == NULL) \
+		return (NULL); \
+	return (&wrapper->val); \
+} \
+a_attr void \
+a_name##tsd_set(a_type *val) \
+{ \
+	a_name##tsd_wrapper_t *wrapper; \
+ \
+	assert(a_name##tsd_booted); \
+	wrapper = a_name##tsd_wrapper_get(true); \
+	wrapper->val = *(val); \
+	if (a_cleanup != malloc_tsd_no_cleanup) \
+		wrapper->initialized = true; \
+}
+#else
+#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
+    a_cleanup) \
+/* Initialization/cleanup. */ \
+a_attr void \
+a_name##tsd_cleanup_wrapper(void *arg) \
+{ \
+	a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *)arg; \
+ \
+	if (a_cleanup != malloc_tsd_no_cleanup && \
+	    wrapper->initialized) { \
+		wrapper->initialized = false; \
+		a_cleanup(&wrapper->val); \
+		if (wrapper->initialized) { \
+			/* Trigger another cleanup round. */ \
+			if (pthread_setspecific(a_name##tsd_tsd, \
+			    (void *)wrapper)) { \
+				malloc_write("<jemalloc>: Error" \
+				    " setting TSD for "#a_name"\n"); \
+				if (opt_abort) \
+					abort(); \
+			} \
+			return; \
+		} \
+	} \
+	malloc_tsd_dalloc(wrapper); \
+} \
+a_attr void \
+a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) \
+{ \
+ \
+	if (pthread_setspecific(a_name##tsd_tsd, \
+	    (void *)wrapper)) { \
+		malloc_write("<jemalloc>: Error setting" \
+		    " TSD for "#a_name"\n"); \
+		abort(); \
+	} \
+} \
+a_attr a_name##tsd_wrapper_t * \
+a_name##tsd_wrapper_get(bool init) \
+{ \
+	a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \
+	    pthread_getspecific(a_name##tsd_tsd); \
+ \
+	if (init && unlikely(wrapper == NULL)) { \
+		tsd_init_block_t block; \
+		wrapper = (a_name##tsd_wrapper_t *) \
+		    tsd_init_check_recursion(&a_name##tsd_init_head, \
+		    &block); \
+		if (wrapper) \
+			return (wrapper); \
+		wrapper = (a_name##tsd_wrapper_t *) \
+		    malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
+		block.data = (void *)wrapper; \
+		if (wrapper == NULL) { \
+			malloc_write("<jemalloc>: Error allocating" \
+			    " TSD for "#a_name"\n"); \
+			abort(); \
+		} else { \
+			wrapper->initialized = false; \
+			wrapper->val = a_initializer; \
+		} \
+		a_name##tsd_wrapper_set(wrapper); \
+		tsd_init_finish(&a_name##tsd_init_head, &block); \
+	} \
+	return (wrapper); \
+} \
+a_attr bool \
+a_name##tsd_boot0(void) \
+{ \
+ \
+	if (pthread_key_create(&a_name##tsd_tsd, \
+	    a_name##tsd_cleanup_wrapper) != 0) \
+		return (true); \
+	a_name##tsd_wrapper_set(&a_name##tsd_boot_wrapper); \
+	a_name##tsd_booted = true; \
+	return (false); \
+} \
+a_attr void \
+a_name##tsd_boot1(void) \
+{ \
+	a_name##tsd_wrapper_t *wrapper; \
+	wrapper = (a_name##tsd_wrapper_t *) \
+	    malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
+	if (wrapper == NULL) { \
+		malloc_write("<jemalloc>: Error allocating" \
+		    " TSD for "#a_name"\n"); \
+		abort(); \
+	} \
+	memcpy(wrapper, &a_name##tsd_boot_wrapper, \
+	    sizeof(a_name##tsd_wrapper_t)); \
+	a_name##tsd_wrapper_set(wrapper); \
+} \
+a_attr bool \
+a_name##tsd_boot(void) \
+{ \
+ \
+	if (a_name##tsd_boot0()) \
+		return (true); \
+	a_name##tsd_boot1(); \
+	return (false); \
+} \
+a_attr bool \
+a_name##tsd_booted_get(void) \
+{ \
+ \
+	return (a_name##tsd_booted); \
+} \
+a_attr bool \
+a_name##tsd_get_allocates(void) \
+{ \
+ \
+	return (true); \
+} \
+/* Get/set.
*/ \ +a_attr a_type * \ +a_name##tsd_get(bool init) \ +{ \ + a_name##tsd_wrapper_t *wrapper; \ + \ + assert(a_name##tsd_booted); \ + wrapper = a_name##tsd_wrapper_get(init); \ + if (a_name##tsd_get_allocates() && !init && wrapper == NULL) \ + return (NULL); \ + return (&wrapper->val); \ +} \ +a_attr void \ +a_name##tsd_set(a_type *val) \ +{ \ + a_name##tsd_wrapper_t *wrapper; \ + \ + assert(a_name##tsd_booted); \ + wrapper = a_name##tsd_wrapper_get(true); \ + wrapper->val = *(val); \ + if (a_cleanup != malloc_tsd_no_cleanup) \ + wrapper->initialized = true; \ +} +#endif + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ + !defined(_WIN32)) +struct tsd_init_block_s { + ql_elm(tsd_init_block_t) link; + pthread_t thread; + void *data; +}; +struct tsd_init_head_s { + ql_head(tsd_init_block_t) blocks; + malloc_mutex_t lock; +}; +#endif + +#define MALLOC_TSD \ +/* O(name, type) */ \ + O(tcache, tcache_t *) \ + O(thread_allocated, uint64_t) \ + O(thread_deallocated, uint64_t) \ + O(prof_tdata, prof_tdata_t *) \ + O(iarena, arena_t *) \ + O(arena, arena_t *) \ + O(arenas_tdata, arena_tdata_t *) \ + O(narenas_tdata, unsigned) \ + O(arenas_tdata_bypass, bool) \ + O(tcache_enabled, tcache_enabled_t) \ + O(quarantine, quarantine_t *) \ + O(witnesses, witness_list_t) \ + O(witness_fork, bool) \ + +#define TSD_INITIALIZER { \ + tsd_state_uninitialized, \ + NULL, \ + 0, \ + 0, \ + NULL, \ + NULL, \ + NULL, \ + NULL, \ + 0, \ + false, \ + tcache_enabled_default, \ + NULL, \ + ql_head_initializer(witnesses), \ + false \ +} + +struct tsd_s { + tsd_state_t state; +#define O(n, t) \ + t n; +MALLOC_TSD +#undef O +}; + +/* + * Wrapper around tsd_t that makes it possible to avoid implicit conversion + * between tsd_t and tsdn_t, where tsdn_t is "nullable" and has to be + * explicitly converted to tsd_t, which is non-nullable. 
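+ *
+ * An illustrative (non-upstream) usage sketch: code that may run before TSD
+ * is booted obtains a tsdn_t * and checks it for NULL before converting,
+ * using the helpers defined later in this header:
+ *
+ *	tsdn_t *tsdn = tsdn_fetch();
+ *	if (!tsdn_null(tsdn)) {
+ *		tsd_t *tsd = tsdn_tsd(tsdn);
+ *		... use tsd ...
+ *	}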
+ */ +struct tsdn_s { + tsd_t tsd; +}; + +static const tsd_t tsd_initializer = TSD_INITIALIZER; + +malloc_tsd_types(, tsd_t) + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +void *malloc_tsd_malloc(size_t size); +void malloc_tsd_dalloc(void *wrapper); +void malloc_tsd_no_cleanup(void *arg); +void malloc_tsd_cleanup_register(bool (*f)(void)); +tsd_t *malloc_tsd_boot0(void); +void malloc_tsd_boot1(void); +#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ + !defined(_WIN32)) +void *tsd_init_check_recursion(tsd_init_head_t *head, + tsd_init_block_t *block); +void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block); +#endif +void tsd_cleanup(void *arg); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +malloc_tsd_protos(JEMALLOC_ATTR(unused), , tsd_t) + +tsd_t *tsd_fetch_impl(bool init); +tsd_t *tsd_fetch(void); +tsdn_t *tsd_tsdn(tsd_t *tsd); +bool tsd_nominal(tsd_t *tsd); +#define O(n, t) \ +t *tsd_##n##p_get(tsd_t *tsd); \ +t tsd_##n##_get(tsd_t *tsd); \ +void tsd_##n##_set(tsd_t *tsd, t n); +MALLOC_TSD +#undef O +tsdn_t *tsdn_fetch(void); +bool tsdn_null(const tsdn_t *tsdn); +tsd_t *tsdn_tsd(tsdn_t *tsdn); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TSD_C_)) +malloc_tsd_externs(, tsd_t) +malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, , tsd_t, tsd_initializer, tsd_cleanup) + +JEMALLOC_ALWAYS_INLINE tsd_t * +tsd_fetch_impl(bool init) +{ + tsd_t *tsd = tsd_get(init); + + if (!init && tsd_get_allocates() && tsd == NULL) + return (NULL); + assert(tsd != NULL); + + if (unlikely(tsd->state != tsd_state_nominal)) { + if (tsd->state == tsd_state_uninitialized) { + tsd->state = tsd_state_nominal; + /* Trigger cleanup handler registration. 
 */
+		tsd_set(tsd);
+	} else if (tsd->state == tsd_state_purgatory) {
+		tsd->state = tsd_state_reincarnated;
+		tsd_set(tsd);
+	} else
+		assert(tsd->state == tsd_state_reincarnated);
+	}
+
+	return (tsd);
+}
+
+JEMALLOC_ALWAYS_INLINE tsd_t *
+tsd_fetch(void)
+{
+
+	return (tsd_fetch_impl(true));
+}
+
+JEMALLOC_ALWAYS_INLINE tsdn_t *
+tsd_tsdn(tsd_t *tsd)
+{
+
+	return ((tsdn_t *)tsd);
+}
+
+JEMALLOC_INLINE bool
+tsd_nominal(tsd_t *tsd)
+{
+
+	return (tsd->state == tsd_state_nominal);
+}
+
+#define O(n, t) \
+JEMALLOC_ALWAYS_INLINE t * \
+tsd_##n##p_get(tsd_t *tsd) \
+{ \
+ \
+	return (&tsd->n); \
+} \
+ \
+JEMALLOC_ALWAYS_INLINE t \
+tsd_##n##_get(tsd_t *tsd) \
+{ \
+ \
+	return (*tsd_##n##p_get(tsd)); \
+} \
+ \
+JEMALLOC_ALWAYS_INLINE void \
+tsd_##n##_set(tsd_t *tsd, t n) \
+{ \
+ \
+	assert(tsd->state == tsd_state_nominal); \
+	tsd->n = n; \
+}
+MALLOC_TSD
+#undef O
+
+JEMALLOC_ALWAYS_INLINE tsdn_t *
+tsdn_fetch(void)
+{
+
+	if (!tsd_booted_get())
+		return (NULL);
+
+	return (tsd_tsdn(tsd_fetch_impl(false)));
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+tsdn_null(const tsdn_t *tsdn)
+{
+
+	return (tsdn == NULL);
+}
+
+JEMALLOC_ALWAYS_INLINE tsd_t *
+tsdn_tsd(tsdn_t *tsdn)
+{
+
+	assert(!tsdn_null(tsdn));
+
+	return (&tsdn->tsd);
+}
+#endif
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
diff --git a/sources/jemalloc/include-win/jemalloc/internal/util.h b/sources/jemalloc/include-win/jemalloc/internal/util.h
new file mode 100755
index 00000000..4b56d652
--- /dev/null
+++ b/sources/jemalloc/include-win/jemalloc/internal/util.h
@@ -0,0 +1,342 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+#ifdef _WIN32
+# ifdef _WIN64
+# define FMT64_PREFIX "ll"
+# define FMTPTR_PREFIX "ll"
+# else
+# define FMT64_PREFIX "ll"
+# define FMTPTR_PREFIX ""
+# endif
+# define FMTd32 "d"
+# define FMTu32 "u"
+# define FMTx32 "x"
+# define FMTd64 FMT64_PREFIX "d"
+# define FMTu64 FMT64_PREFIX "u"
+# define FMTx64 FMT64_PREFIX "x"
+# define FMTdPTR FMTPTR_PREFIX "d"
+# define FMTuPTR FMTPTR_PREFIX "u"
+# define FMTxPTR FMTPTR_PREFIX "x"
+#else
+# include <inttypes.h>
+# define FMTd32 PRId32
+# define FMTu32 PRIu32
+# define FMTx32 PRIx32
+# define FMTd64 PRId64
+# define FMTu64 PRIu64
+# define FMTx64 PRIx64
+# define FMTdPTR PRIdPTR
+# define FMTuPTR PRIuPTR
+# define FMTxPTR PRIxPTR
+#endif
+
+/* Size of stack-allocated buffer passed to buferror(). */
+#define BUFERROR_BUF 64
+
+/*
+ * Size of stack-allocated buffer used by malloc_{,v,vc}printf().  This must be
+ * large enough for all possible uses within jemalloc.
+ */
+#define MALLOC_PRINTF_BUFSIZE 4096
+
+/* Junk fill patterns. */
+#ifndef JEMALLOC_ALLOC_JUNK
+# define JEMALLOC_ALLOC_JUNK ((uint8_t)0xa5)
+#endif
+#ifndef JEMALLOC_FREE_JUNK
+# define JEMALLOC_FREE_JUNK ((uint8_t)0x5a)
+#endif
+
+/*
+ * Wrap a cpp argument that contains commas such that it isn't broken up into
+ * multiple arguments.
+ */
+#define JEMALLOC_ARG_CONCAT(...) __VA_ARGS__
+
+/*
+ * Silence compiler warnings due to uninitialized values.  This is used
+ * wherever the compiler fails to recognize that the variable is never used
+ * uninitialized.
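+ *
+ * For example, tcache_alloc_small() earlier in this diff declares
+ *
+ *	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
+ *
+ * which expands to "size_t usize = 0;" when JEMALLOC_CC_SILENCE is defined,
+ * and to a plain uninitialized declaration otherwise.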
+ */ +#ifdef JEMALLOC_CC_SILENCE +# define JEMALLOC_CC_SILENCE_INIT(v) = v +#else +# define JEMALLOC_CC_SILENCE_INIT(v) +#endif + +#ifdef __GNUC__ +# define likely(x) __builtin_expect(!!(x), 1) +# define unlikely(x) __builtin_expect(!!(x), 0) +#else +# define likely(x) !!(x) +# define unlikely(x) !!(x) +#endif + +#if !defined(JEMALLOC_INTERNAL_UNREACHABLE) +# error JEMALLOC_INTERNAL_UNREACHABLE should have been defined by configure +#endif + +#define unreachable() JEMALLOC_INTERNAL_UNREACHABLE() + +#include "jemalloc/internal/assert.h" + +/* Use to assert a particular configuration, e.g., cassert(config_debug). */ +#define cassert(c) do { \ + if (unlikely(!(c))) \ + not_reached(); \ +} while (0) + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +int buferror(int err, char *buf, size_t buflen); +uintmax_t malloc_strtoumax(const char *restrict nptr, + char **restrict endptr, int base); +void malloc_write(const char *s); + +/* + * malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating + * point math. + */ +size_t malloc_vsnprintf(char *str, size_t size, const char *format, + va_list ap); +size_t malloc_snprintf(char *str, size_t size, const char *format, ...) + JEMALLOC_FORMAT_PRINTF(3, 4); +void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque, + const char *format, va_list ap); +void malloc_cprintf(void (*write)(void *, const char *), void *cbopaque, + const char *format, ...) JEMALLOC_FORMAT_PRINTF(3, 4); +void malloc_printf(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +unsigned ffs_llu(unsigned long long bitmap); +unsigned ffs_lu(unsigned long bitmap); +unsigned ffs_u(unsigned bitmap); +unsigned ffs_zu(size_t bitmap); +unsigned ffs_u64(uint64_t bitmap); +unsigned ffs_u32(uint32_t bitmap); +uint64_t pow2_ceil_u64(uint64_t x); +uint32_t pow2_ceil_u32(uint32_t x); +size_t pow2_ceil_zu(size_t x); +unsigned lg_floor(size_t x); +void set_errno(int errnum); +int get_errno(void); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_UTIL_C_)) + +/* Sanity check. 
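+ * The JEMALLOC_INTERNAL_FFS{,L,LL} macros checked below are expected to come
+ * from configure, typically mapping to compiler builtins such as
+ * __builtin_ffs{,l,ll} on GCC-compatible toolchains (an assumption, not
+ * something this header verifies).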
*/ +#if !defined(JEMALLOC_INTERNAL_FFSLL) || !defined(JEMALLOC_INTERNAL_FFSL) \ + || !defined(JEMALLOC_INTERNAL_FFS) +# error JEMALLOC_INTERNAL_FFS{,L,LL} should have been defined by configure +#endif + +JEMALLOC_ALWAYS_INLINE unsigned +ffs_llu(unsigned long long bitmap) +{ + + return (JEMALLOC_INTERNAL_FFSLL(bitmap)); +} + +JEMALLOC_ALWAYS_INLINE unsigned +ffs_lu(unsigned long bitmap) +{ + + return (JEMALLOC_INTERNAL_FFSL(bitmap)); +} + +JEMALLOC_ALWAYS_INLINE unsigned +ffs_u(unsigned bitmap) +{ + + return (JEMALLOC_INTERNAL_FFS(bitmap)); +} + +JEMALLOC_ALWAYS_INLINE unsigned +ffs_zu(size_t bitmap) +{ + +#if LG_SIZEOF_PTR == LG_SIZEOF_INT + return (ffs_u(bitmap)); +#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG + return (ffs_lu(bitmap)); +#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG_LONG + return (ffs_llu(bitmap)); +#else +#error No implementation for size_t ffs() +#endif +} + +JEMALLOC_ALWAYS_INLINE unsigned +ffs_u64(uint64_t bitmap) +{ + +#if LG_SIZEOF_LONG == 3 + return (ffs_lu(bitmap)); +#elif LG_SIZEOF_LONG_LONG == 3 + return (ffs_llu(bitmap)); +#else +#error No implementation for 64-bit ffs() +#endif +} + +JEMALLOC_ALWAYS_INLINE unsigned +ffs_u32(uint32_t bitmap) +{ + +#if LG_SIZEOF_INT == 2 + return (ffs_u(bitmap)); +#else +#error No implementation for 32-bit ffs() +#endif + return (ffs_u(bitmap)); +} + +JEMALLOC_INLINE uint64_t +pow2_ceil_u64(uint64_t x) +{ + + x--; + x |= x >> 1; + x |= x >> 2; + x |= x >> 4; + x |= x >> 8; + x |= x >> 16; + x |= x >> 32; + x++; + return (x); +} + +JEMALLOC_INLINE uint32_t +pow2_ceil_u32(uint32_t x) +{ + + x--; + x |= x >> 1; + x |= x >> 2; + x |= x >> 4; + x |= x >> 8; + x |= x >> 16; + x++; + return (x); +} + +/* Compute the smallest power of 2 that is >= x. */ +JEMALLOC_INLINE size_t +pow2_ceil_zu(size_t x) +{ + +#if (LG_SIZEOF_PTR == 3) + return (pow2_ceil_u64(x)); +#else + return (pow2_ceil_u32(x)); +#endif +} + +#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__)) +JEMALLOC_INLINE unsigned +lg_floor(size_t x) +{ + size_t ret; + + assert(x != 0); + + asm ("bsr %1, %0" + : "=r"(ret) // Outputs. + : "r"(x) // Inputs. + ); + assert(ret < UINT_MAX); + return ((unsigned)ret); +} +#elif (defined(_MSC_VER)) +JEMALLOC_INLINE unsigned +lg_floor(size_t x) +{ + unsigned long ret; + + assert(x != 0); + +#if (LG_SIZEOF_PTR == 3) + _BitScanReverse64(&ret, x); +#elif (LG_SIZEOF_PTR == 2) + _BitScanReverse(&ret, x); +#else +# error "Unsupported type size for lg_floor()" +#endif + assert(ret < UINT_MAX); + return ((unsigned)ret); +} +#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ)) +JEMALLOC_INLINE unsigned +lg_floor(size_t x) +{ + + assert(x != 0); + +#if (LG_SIZEOF_PTR == LG_SIZEOF_INT) + return (((8 << LG_SIZEOF_PTR) - 1) - __builtin_clz(x)); +#elif (LG_SIZEOF_PTR == LG_SIZEOF_LONG) + return (((8 << LG_SIZEOF_PTR) - 1) - __builtin_clzl(x)); +#else +# error "Unsupported type size for lg_floor()" +#endif +} +#else +JEMALLOC_INLINE unsigned +lg_floor(size_t x) +{ + + assert(x != 0); + + x |= (x >> 1); + x |= (x >> 2); + x |= (x >> 4); + x |= (x >> 8); + x |= (x >> 16); +#if (LG_SIZEOF_PTR == 3) + x |= (x >> 32); +#endif + if (x == SIZE_T_MAX) + return ((8 << LG_SIZEOF_PTR) - 1); + x++; + return (ffs_zu(x) - 2); +} +#endif + +/* Set error code. */ +JEMALLOC_INLINE void +set_errno(int errnum) +{ + +#ifdef _WIN32 + SetLastError(errnum); +#else + errno = errnum; +#endif +} + +/* Get last error code. 
 */
+JEMALLOC_INLINE int
+get_errno(void)
+{
+
+#ifdef _WIN32
+	return (GetLastError());
+#else
+	return (errno);
+#endif
+}
+#endif
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
diff --git a/sources/jemalloc/include-win/jemalloc/internal/valgrind.h b/sources/jemalloc/include-win/jemalloc/internal/valgrind.h
new file mode 100755
index 00000000..877a142b
--- /dev/null
+++ b/sources/jemalloc/include-win/jemalloc/internal/valgrind.h
@@ -0,0 +1,128 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+#ifdef JEMALLOC_VALGRIND
+#include <valgrind/valgrind.h>
+
+/*
+ * The size that is reported to Valgrind must be consistent through a chain of
+ * malloc..realloc..realloc calls.  Request size isn't recorded anywhere in
+ * jemalloc, so it is critical that all callers of these macros provide usize
+ * rather than request size.  As a result, buffer overflow detection is
+ * technically weakened for the standard API, though it is generally accepted
+ * practice to consider any extra bytes reported by malloc_usable_size() as
+ * usable space.
+ */
+#define JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(ptr, usize) do { \
+	if (unlikely(in_valgrind)) \
+		valgrind_make_mem_noaccess(ptr, usize); \
+} while (0)
+#define JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize) do { \
+	if (unlikely(in_valgrind)) \
+		valgrind_make_mem_undefined(ptr, usize); \
+} while (0)
+#define JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ptr, usize) do { \
+	if (unlikely(in_valgrind)) \
+		valgrind_make_mem_defined(ptr, usize); \
+} while (0)
+/*
+ * The VALGRIND_MALLOCLIKE_BLOCK() and VALGRIND_RESIZEINPLACE_BLOCK() macro
+ * calls must be embedded in macros rather than in functions so that when
+ * Valgrind reports errors, there are no extra stack frames in the backtraces.
+ */
+#define JEMALLOC_VALGRIND_MALLOC(cond, tsdn, ptr, usize, zero) do { \
+	if (unlikely(in_valgrind && cond)) { \
+		VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(tsdn, ptr), \
+		    zero); \
+	} \
+} while (0)
+#define JEMALLOC_VALGRIND_REALLOC_MOVED_no(ptr, old_ptr) \
+    (false)
+#define JEMALLOC_VALGRIND_REALLOC_MOVED_maybe(ptr, old_ptr) \
+    ((ptr) != (old_ptr))
+#define JEMALLOC_VALGRIND_REALLOC_PTR_NULL_no(ptr) \
+    (false)
+#define JEMALLOC_VALGRIND_REALLOC_PTR_NULL_maybe(ptr) \
+    (ptr == NULL)
+#define JEMALLOC_VALGRIND_REALLOC_OLD_PTR_NULL_no(old_ptr) \
+    (false)
+#define JEMALLOC_VALGRIND_REALLOC_OLD_PTR_NULL_maybe(old_ptr) \
+    (old_ptr == NULL)
+#define JEMALLOC_VALGRIND_REALLOC(moved, tsdn, ptr, usize, ptr_null, \
+    old_ptr, old_usize, old_rzsize, old_ptr_null, zero) do { \
+	if (unlikely(in_valgrind)) { \
+		size_t rzsize = p2rz(tsdn, ptr); \
+ \
+		if (!JEMALLOC_VALGRIND_REALLOC_MOVED_##moved(ptr, \
+		    old_ptr)) { \
+			VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize, \
+			    usize, rzsize); \
+			if (zero && old_usize < usize) { \
+				valgrind_make_mem_defined( \
+				    (void *)((uintptr_t)ptr + \
+				    old_usize), usize - old_usize); \
+			} \
+		} else { \
+			if (!JEMALLOC_VALGRIND_REALLOC_OLD_PTR_NULL_## \
+			    old_ptr_null(old_ptr)) { \
+				valgrind_freelike_block(old_ptr, \
+				    old_rzsize); \
+			} \
+			if (!JEMALLOC_VALGRIND_REALLOC_PTR_NULL_## \
+			    ptr_null(ptr)) { \
+				size_t copy_size = (old_usize < usize) \
+				    ?
old_usize : usize; \ + size_t tail_size = usize - copy_size; \ + VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, \ + rzsize, false); \ + if (copy_size > 0) { \ + valgrind_make_mem_defined(ptr, \ + copy_size); \ + } \ + if (zero && tail_size > 0) { \ + valgrind_make_mem_defined( \ + (void *)((uintptr_t)ptr + \ + copy_size), tail_size); \ + } \ + } \ + } \ + } \ +} while (0) +#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do { \ + if (unlikely(in_valgrind)) \ + valgrind_freelike_block(ptr, rzsize); \ +} while (0) +#else +#define RUNNING_ON_VALGRIND ((unsigned)0) +#define JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(ptr, usize) do {} while (0) +#define JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize) do {} while (0) +#define JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ptr, usize) do {} while (0) +#define JEMALLOC_VALGRIND_MALLOC(cond, tsdn, ptr, usize, zero) do {} while (0) +#define JEMALLOC_VALGRIND_REALLOC(maybe_moved, tsdn, ptr, usize, \ + ptr_maybe_null, old_ptr, old_usize, old_rzsize, old_ptr_maybe_null, \ + zero) do {} while (0) +#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {} while (0) +#endif + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +#ifdef JEMALLOC_VALGRIND +void valgrind_make_mem_noaccess(void *ptr, size_t usize); +void valgrind_make_mem_undefined(void *ptr, size_t usize); +void valgrind_make_mem_defined(void *ptr, size_t usize); +void valgrind_freelike_block(void *ptr, size_t usize); +#endif + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ + diff --git a/sources/jemalloc/include-win/jemalloc/internal/witness.h b/sources/jemalloc/include-win/jemalloc/internal/witness.h new file mode 100755 index 00000000..30d8c7e9 --- /dev/null +++ b/sources/jemalloc/include-win/jemalloc/internal/witness.h @@ -0,0 +1,304 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +typedef struct witness_s witness_t; +typedef unsigned witness_rank_t; +typedef ql_head(witness_t) witness_list_t; +typedef int witness_comp_t (const witness_t *, const witness_t *); + +/* + * Lock ranks. Witnesses with rank WITNESS_RANK_OMIT are completely ignored by + * the witness machinery. + */ +#define WITNESS_RANK_OMIT 0U + +#define WITNESS_RANK_MIN 1U + +#define WITNESS_RANK_INIT 1U +#define WITNESS_RANK_CTL 1U +#define WITNESS_RANK_TCACHES 2U +#define WITNESS_RANK_ARENAS 3U + +#define WITNESS_RANK_PROF_DUMP 4U +#define WITNESS_RANK_PROF_BT2GCTX 5U +#define WITNESS_RANK_PROF_TDATAS 6U +#define WITNESS_RANK_PROF_TDATA 7U +#define WITNESS_RANK_PROF_GCTX 8U + +/* + * Used as an argument to witness_assert_depth_to_rank() in order to validate + * depth excluding non-core locks with lower ranks. Since the rank argument to + * witness_assert_depth_to_rank() is inclusive rather than exclusive, this + * definition can have the same value as the minimally ranked core lock. 
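+ *
+ * For example (illustrative): witness_assert_depth_to_rank(tsdn,
+ * WITNESS_RANK_CORE, 0) asserts that no witnesses of rank WITNESS_RANK_CORE
+ * or higher are currently owned, while tolerating lower-ranked
+ * initialization locks.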
+ */
+#define WITNESS_RANK_CORE 9U
+
+#define WITNESS_RANK_ARENA 9U
+#define WITNESS_RANK_ARENA_CHUNKS 10U
+#define WITNESS_RANK_ARENA_NODE_CACHE 11U
+
+#define WITNESS_RANK_BASE 12U
+
+#define WITNESS_RANK_LEAF 0xffffffffU
+#define WITNESS_RANK_ARENA_BIN WITNESS_RANK_LEAF
+#define WITNESS_RANK_ARENA_HUGE WITNESS_RANK_LEAF
+#define WITNESS_RANK_DSS WITNESS_RANK_LEAF
+#define WITNESS_RANK_PROF_ACTIVE WITNESS_RANK_LEAF
+#define WITNESS_RANK_PROF_DUMP_SEQ WITNESS_RANK_LEAF
+#define WITNESS_RANK_PROF_GDUMP WITNESS_RANK_LEAF
+#define WITNESS_RANK_PROF_NEXT_THR_UID WITNESS_RANK_LEAF
+#define WITNESS_RANK_PROF_THREAD_ACTIVE_INIT WITNESS_RANK_LEAF
+
+#define WITNESS_INITIALIZER(rank) {"initializer", rank, NULL, {NULL, NULL}}
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+struct witness_s {
+	/* Name, used for printing lock order reversal messages. */
+	const char *name;
+
+	/*
+	 * Witness rank, where 0 is lowest and UINT_MAX is highest.  Witnesses
+	 * must be acquired in order of increasing rank.
+	 */
+	witness_rank_t rank;
+
+	/*
+	 * If two witnesses are of equal rank and they have the same comp
+	 * function pointer, it is called as a last attempt to differentiate
+	 * between witnesses of equal rank.
+	 */
+	witness_comp_t *comp;
+
+	/* Linkage for thread's currently owned locks. */
+	ql_elm(witness_t) link;
+};
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+void witness_init(witness_t *witness, const char *name, witness_rank_t rank,
+    witness_comp_t *comp);
+#ifdef JEMALLOC_JET
+typedef void (witness_lock_error_t)(const witness_list_t *, const witness_t *);
+extern witness_lock_error_t *witness_lock_error;
+#else
+void witness_lock_error(const witness_list_t *witnesses,
+    const witness_t *witness);
+#endif
+#ifdef JEMALLOC_JET
+typedef void (witness_owner_error_t)(const witness_t *);
+extern witness_owner_error_t *witness_owner_error;
+#else
+void witness_owner_error(const witness_t *witness);
+#endif
+#ifdef JEMALLOC_JET
+typedef void (witness_not_owner_error_t)(const witness_t *);
+extern witness_not_owner_error_t *witness_not_owner_error;
+#else
+void witness_not_owner_error(const witness_t *witness);
+#endif
+#ifdef JEMALLOC_JET
+typedef void (witness_depth_error_t)(const witness_list_t *,
+    witness_rank_t rank_inclusive, unsigned depth);
+extern witness_depth_error_t *witness_depth_error;
+#else
+void witness_depth_error(const witness_list_t *witnesses,
+    witness_rank_t rank_inclusive, unsigned depth);
+#endif
+
+void witnesses_cleanup(tsd_t *tsd);
+void witness_fork_cleanup(tsd_t *tsd);
+void witness_prefork(tsd_t *tsd);
+void witness_postfork_parent(tsd_t *tsd);
+void witness_postfork_child(tsd_t *tsd);
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#ifndef JEMALLOC_ENABLE_INLINE
+bool witness_owner(tsd_t *tsd, const witness_t *witness);
+void witness_assert_owner(tsdn_t *tsdn, const witness_t *witness);
+void witness_assert_not_owner(tsdn_t *tsdn, const witness_t *witness);
+void witness_assert_depth_to_rank(tsdn_t *tsdn, witness_rank_t rank_inclusive,
+    unsigned depth);
+void witness_assert_depth(tsdn_t *tsdn, unsigned depth);
+void witness_assert_lockless(tsdn_t *tsdn);
+void witness_lock(tsdn_t *tsdn, witness_t *witness);
+void witness_unlock(tsdn_t *tsdn, witness_t *witness);
+#endif
+
+#if
(defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_)) +JEMALLOC_INLINE bool +witness_owner(tsd_t *tsd, const witness_t *witness) +{ + witness_list_t *witnesses; + witness_t *w; + + cassert(config_debug); + + witnesses = tsd_witnessesp_get(tsd); + ql_foreach(w, witnesses, link) { + if (w == witness) + return (true); + } + + return (false); +} + +JEMALLOC_INLINE void +witness_assert_owner(tsdn_t *tsdn, const witness_t *witness) +{ + tsd_t *tsd; + + if (!config_debug) + return; + + if (tsdn_null(tsdn)) + return; + tsd = tsdn_tsd(tsdn); + if (witness->rank == WITNESS_RANK_OMIT) + return; + + if (witness_owner(tsd, witness)) + return; + witness_owner_error(witness); +} + +JEMALLOC_INLINE void +witness_assert_not_owner(tsdn_t *tsdn, const witness_t *witness) +{ + tsd_t *tsd; + witness_list_t *witnesses; + witness_t *w; + + if (!config_debug) + return; + + if (tsdn_null(tsdn)) + return; + tsd = tsdn_tsd(tsdn); + if (witness->rank == WITNESS_RANK_OMIT) + return; + + witnesses = tsd_witnessesp_get(tsd); + ql_foreach(w, witnesses, link) { + if (w == witness) + witness_not_owner_error(witness); + } +} + +JEMALLOC_INLINE void +witness_assert_depth_to_rank(tsdn_t *tsdn, witness_rank_t rank_inclusive, + unsigned depth) { + tsd_t *tsd; + unsigned d; + witness_list_t *witnesses; + witness_t *w; + + if (!config_debug) + return; + + if (tsdn_null(tsdn)) + return; + tsd = tsdn_tsd(tsdn); + + d = 0; + witnesses = tsd_witnessesp_get(tsd); + w = ql_last(witnesses, link); + if (w != NULL) { + ql_reverse_foreach(w, witnesses, link) { + if (w->rank < rank_inclusive) { + break; + } + d++; + } + } + if (d != depth) + witness_depth_error(witnesses, rank_inclusive, depth); +} + +JEMALLOC_INLINE void +witness_assert_depth(tsdn_t *tsdn, unsigned depth) { + witness_assert_depth_to_rank(tsdn, WITNESS_RANK_MIN, depth); +} + +JEMALLOC_INLINE void +witness_assert_lockless(tsdn_t *tsdn) { + witness_assert_depth(tsdn, 0); +} + +JEMALLOC_INLINE void +witness_lock(tsdn_t *tsdn, witness_t *witness) +{ + tsd_t *tsd; + witness_list_t *witnesses; + witness_t *w; + + if (!config_debug) + return; + + if (tsdn_null(tsdn)) + return; + tsd = tsdn_tsd(tsdn); + if (witness->rank == WITNESS_RANK_OMIT) + return; + + witness_assert_not_owner(tsdn, witness); + + witnesses = tsd_witnessesp_get(tsd); + w = ql_last(witnesses, link); + if (w == NULL) { + /* No other locks; do nothing. */ + } else if (tsd_witness_fork_get(tsd) && w->rank <= witness->rank) { + /* Forking, and relaxed ranking satisfied. */ + } else if (w->rank > witness->rank) { + /* Not forking, rank order reversal. */ + witness_lock_error(witnesses, witness); + } else if (w->rank == witness->rank && (w->comp == NULL || w->comp != + witness->comp || w->comp(w, witness) > 0)) { + /* + * Missing/incompatible comparison function, or comparison + * function indicates rank order reversal. + */ + witness_lock_error(witnesses, witness); + } + + ql_elm_new(witness, link); + ql_tail_insert(witnesses, witness, link); +} + +JEMALLOC_INLINE void +witness_unlock(tsdn_t *tsdn, witness_t *witness) +{ + tsd_t *tsd; + witness_list_t *witnesses; + + if (!config_debug) + return; + + if (tsdn_null(tsdn)) + return; + tsd = tsdn_tsd(tsdn); + if (witness->rank == WITNESS_RANK_OMIT) + return; + + /* + * Check whether owner before removal, rather than relying on + * witness_assert_owner() to abort, so that unit tests can test this + * function's failure mode without causing undefined behavior. 
+	 */
+	if (witness_owner(tsd, witness)) {
+		witnesses = tsd_witnessesp_get(tsd);
+		ql_remove(witnesses, witness, link);
+	} else
+		witness_assert_owner(tsdn, witness);
+}
+#endif
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
diff --git a/sources/jemalloc/include-win/jemalloc/jemalloc.h b/sources/jemalloc/include-win/jemalloc/jemalloc.h
new file mode 100755
index 00000000..eaa16270
--- /dev/null
+++ b/sources/jemalloc/include-win/jemalloc/jemalloc.h
@@ -0,0 +1,376 @@
+#ifndef JEMALLOC_H_
+#define JEMALLOC_H_
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Defined if __attribute__((...)) syntax is supported. */
+/* #undef JEMALLOC_HAVE_ATTR */
+
+/* Defined if alloc_size attribute is supported. */
+/* #undef JEMALLOC_HAVE_ATTR_ALLOC_SIZE */
+
+/* Defined if format(gnu_printf, ...) attribute is supported. */
+/* #undef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF */
+
+/* Defined if format(printf, ...) attribute is supported. */
+/* #undef JEMALLOC_HAVE_ATTR_FORMAT_PRINTF */
+
+/*
+ * Define overrides for non-standard allocator-related functions if they are
+ * present on the system.
+ */
+/* #undef JEMALLOC_OVERRIDE_MEMALIGN */
+/* #undef JEMALLOC_OVERRIDE_VALLOC */
+
+/*
+ * At least Linux omits the "const" in:
+ *
+ *   size_t malloc_usable_size(const void *ptr);
+ *
+ * Match the operating system's prototype.
+ */
+#define JEMALLOC_USABLE_SIZE_CONST const
+
+/*
+ * If defined, specify throw() for the public function prototypes when compiling
+ * with C++.  The only justification for this is to match the prototypes that
+ * glibc defines.
+ */
+/* #undef JEMALLOC_USE_CXX_THROW */
+
+#ifdef _MSC_VER
+# ifdef _WIN64
+# define LG_SIZEOF_PTR_WIN 3
+# else
+# define LG_SIZEOF_PTR_WIN 2
+# endif
+#endif
+
+/* sizeof(void *) == 2^LG_SIZEOF_PTR. */
+#define LG_SIZEOF_PTR LG_SIZEOF_PTR_WIN
+
+/*
+ * Name mangling for public symbols is controlled by --with-mangling and
+ * --with-jemalloc-prefix.  With default settings the je_ prefix is stripped by
+ * these macro definitions.
+ */
+#ifndef JEMALLOC_NO_RENAME
+# define je_malloc_conf je_malloc_conf
+# define je_malloc_message je_malloc_message
+# define je_malloc je_malloc
+# define je_calloc je_calloc
+# define je_posix_memalign je_posix_memalign
+# define je_aligned_alloc je_aligned_alloc
+# define je_realloc je_realloc
+# define je_free je_free
+# define je_mallocx je_mallocx
+# define je_rallocx je_rallocx
+# define je_xallocx je_xallocx
+# define je_sallocx je_sallocx
+# define je_dallocx je_dallocx
+# define je_sdallocx je_sdallocx
+# define je_nallocx je_nallocx
+# define je_mallctl je_mallctl
+# define je_mallctlnametomib je_mallctlnametomib
+# define je_mallctlbymib je_mallctlbymib
+# define je_malloc_stats_print je_malloc_stats_print
+# define je_malloc_usable_size je_malloc_usable_size
+#endif
+
+#include <stdlib.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <limits.h>
+#include <strings.h>
+
+#define JEMALLOC_VERSION "0.0.0-0-g0000000000000000000000000000000000000000"
+#define JEMALLOC_VERSION_MAJOR 0
+#define JEMALLOC_VERSION_MINOR 0
+#define JEMALLOC_VERSION_BUGFIX 0
+#define JEMALLOC_VERSION_NREV 0
+#define JEMALLOC_VERSION_GID "0000000000000000000000000000000000000000"
+
+# define MALLOCX_LG_ALIGN(la) ((int)(la))
+# if LG_SIZEOF_PTR == 2
+# define MALLOCX_ALIGN(a) ((int)(ffs((int)(a))-1))
+# else
+# define MALLOCX_ALIGN(a) \
+    ((int)(((size_t)(a) < (size_t)INT_MAX) ?
ffs((int)(a))-1 : \ + ffs((int)(((size_t)(a))>>32))+31)) +# endif +# define MALLOCX_ZERO ((int)0x40) +/* + * Bias tcache index bits so that 0 encodes "automatic tcache management", and 1 + * encodes MALLOCX_TCACHE_NONE. + */ +# define MALLOCX_TCACHE(tc) ((int)(((tc)+2) << 8)) +# define MALLOCX_TCACHE_NONE MALLOCX_TCACHE(-1) +/* + * Bias arena index bits so that 0 encodes "use an automatically chosen arena". + */ +# define MALLOCX_ARENA(a) ((((int)(a))+1) << 20) + +#if defined(__cplusplus) && defined(JEMALLOC_USE_CXX_THROW) +# define JEMALLOC_CXX_THROW throw() +#else +# define JEMALLOC_CXX_THROW +#endif + +#if _MSC_VER +# define JEMALLOC_ATTR(s) +# define JEMALLOC_ALIGNED(s) __declspec(align(s)) +# define JEMALLOC_ALLOC_SIZE(s) +# define JEMALLOC_ALLOC_SIZE2(s1, s2) +# ifndef JEMALLOC_EXPORT +# ifdef DLLEXPORT +# define JEMALLOC_EXPORT +# else +# define JEMALLOC_EXPORT +# endif +# endif +# define JEMALLOC_FORMAT_PRINTF(s, i) +# define JEMALLOC_NOINLINE __declspec(noinline) +# ifdef __cplusplus +# define JEMALLOC_NOTHROW __declspec(nothrow) +# else +# define JEMALLOC_NOTHROW +# endif +# define JEMALLOC_SECTION(s) __declspec(allocate(s)) +# define JEMALLOC_RESTRICT_RETURN __declspec(restrict) +# if _MSC_VER >= 1900 && !defined(__EDG__) +# define JEMALLOC_ALLOCATOR __declspec(allocator) +# else +# define JEMALLOC_ALLOCATOR +# endif +#elif defined(JEMALLOC_HAVE_ATTR) +# define JEMALLOC_ATTR(s) __attribute__((s)) +# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s)) +# ifdef JEMALLOC_HAVE_ATTR_ALLOC_SIZE +# define JEMALLOC_ALLOC_SIZE(s) JEMALLOC_ATTR(alloc_size(s)) +# define JEMALLOC_ALLOC_SIZE2(s1, s2) JEMALLOC_ATTR(alloc_size(s1, s2)) +# else +# define JEMALLOC_ALLOC_SIZE(s) +# define JEMALLOC_ALLOC_SIZE2(s1, s2) +# endif +# ifndef JEMALLOC_EXPORT +# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default")) +# endif +# ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF +# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i)) +# elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF) +# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(printf, s, i)) +# else +# define JEMALLOC_FORMAT_PRINTF(s, i) +# endif +# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline) +# define JEMALLOC_NOTHROW JEMALLOC_ATTR(nothrow) +# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s)) +# define JEMALLOC_RESTRICT_RETURN +# define JEMALLOC_ALLOCATOR +#else +# define JEMALLOC_ATTR(s) +# define JEMALLOC_ALIGNED(s) +# define JEMALLOC_ALLOC_SIZE(s) +# define JEMALLOC_ALLOC_SIZE2(s1, s2) +# define JEMALLOC_EXPORT +# define JEMALLOC_FORMAT_PRINTF(s, i) +# define JEMALLOC_NOINLINE +# define JEMALLOC_NOTHROW +# define JEMALLOC_SECTION(s) +# define JEMALLOC_RESTRICT_RETURN +# define JEMALLOC_ALLOCATOR +#endif + +/* + * The je_ prefix on the following public symbol declarations is an artifact + * of namespace management, and should be omitted in application code unless + * JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle.h). 
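+ *
+ * An illustrative (non-normative) call sequence using the demangled names and
+ * the MALLOCX_* flags defined above:
+ *
+ *	void *p = mallocx(4096, MALLOCX_ALIGN(64) | MALLOCX_ZERO);
+ *	if (p != NULL)
+ *		dallocx(p, 0);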
+ */ +extern JEMALLOC_EXPORT const char *je_malloc_conf; +extern JEMALLOC_EXPORT void (*je_malloc_message)(void *cbopaque, + const char *s); + +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_malloc(size_t size) + JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1); +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_calloc(size_t num, size_t size) + JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2); +JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_posix_memalign(void **memptr, + size_t alignment, size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(nonnull(1)); +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_aligned_alloc(size_t alignment, + size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) + JEMALLOC_ALLOC_SIZE(2); +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_realloc(void *ptr, size_t size) + JEMALLOC_CXX_THROW JEMALLOC_ALLOC_SIZE(2); +JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_free(void *ptr) + JEMALLOC_CXX_THROW; + +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_mallocx(size_t size, int flags) + JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1); +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_rallocx(void *ptr, size_t size, + int flags) JEMALLOC_ALLOC_SIZE(2); +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_xallocx(void *ptr, size_t size, + size_t extra, int flags); +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_sallocx(const void *ptr, + int flags) JEMALLOC_ATTR(pure); +JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_dallocx(void *ptr, int flags); +JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_sdallocx(void *ptr, size_t size, + int flags); +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_nallocx(size_t size, int flags) + JEMALLOC_ATTR(pure); + +JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctl(const char *name, + void *oldp, size_t *oldlenp, void *newp, size_t newlen); +JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlnametomib(const char *name, + size_t *mibp, size_t *miblenp); +JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlbymib(const size_t *mib, + size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen); +JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_malloc_stats_print( + void (*write_cb)(void *, const char *), void *je_cbopaque, + const char *opts); +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_malloc_usable_size( + JEMALLOC_USABLE_SIZE_CONST void *ptr) JEMALLOC_CXX_THROW; + +#ifdef JEMALLOC_OVERRIDE_MEMALIGN +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_memalign(size_t alignment, size_t size) + JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc); +#endif + +#ifdef JEMALLOC_OVERRIDE_VALLOC +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_valloc(size_t size) JEMALLOC_CXX_THROW + JEMALLOC_ATTR(malloc); +#endif + +/* + * void * + * chunk_alloc(void *new_addr, size_t size, size_t alignment, bool *zero, + * bool *commit, unsigned arena_ind); + */ +typedef void *(chunk_alloc_t)(void *, size_t, size_t, bool *, bool *, unsigned); + +/* + * bool + * chunk_dalloc(void *chunk, size_t size, bool committed, unsigned arena_ind); + */ +typedef bool (chunk_dalloc_t)(void *, size_t, bool, unsigned); + +/* + * bool + * chunk_commit(void *chunk, size_t size, size_t offset, size_t length, + * unsigned arena_ind); + */ +typedef bool (chunk_commit_t)(void *, size_t, size_t, size_t, unsigned); + +/* 
+ * bool + * chunk_decommit(void *chunk, size_t size, size_t offset, size_t length, + * unsigned arena_ind); + */ +typedef bool (chunk_decommit_t)(void *, size_t, size_t, size_t, unsigned); + +/* + * bool + * chunk_purge(void *chunk, size_t size, size_t offset, size_t length, + * unsigned arena_ind); + */ +typedef bool (chunk_purge_t)(void *, size_t, size_t, size_t, unsigned); + +/* + * bool + * chunk_split(void *chunk, size_t size, size_t size_a, size_t size_b, + * bool committed, unsigned arena_ind); + */ +typedef bool (chunk_split_t)(void *, size_t, size_t, size_t, bool, unsigned); + +/* + * bool + * chunk_merge(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b, + * bool committed, unsigned arena_ind); + */ +typedef bool (chunk_merge_t)(void *, size_t, void *, size_t, bool, unsigned); + +typedef struct { + chunk_alloc_t *alloc; + chunk_dalloc_t *dalloc; + chunk_commit_t *commit; + chunk_decommit_t *decommit; + chunk_purge_t *purge; + chunk_split_t *split; + chunk_merge_t *merge; +} chunk_hooks_t; + +/* + * By default application code must explicitly refer to mangled symbol names, + * so that it is possible to use jemalloc in conjunction with another allocator + * in the same application. Define JEMALLOC_MANGLE in order to cause automatic + * name mangling that matches the API prefixing that happened as a result of + * --with-mangling and/or --with-jemalloc-prefix configuration settings. + */ +#ifdef JEMALLOC_MANGLE +# ifndef JEMALLOC_NO_DEMANGLE +# define JEMALLOC_NO_DEMANGLE +# endif +# define malloc_conf je_malloc_conf +# define malloc_message je_malloc_message +# define malloc je_malloc +# define calloc je_calloc +# define posix_memalign je_posix_memalign +# define aligned_alloc je_aligned_alloc +# define realloc je_realloc +# define free je_free +# define mallocx je_mallocx +# define rallocx je_rallocx +# define xallocx je_xallocx +# define sallocx je_sallocx +# define dallocx je_dallocx +# define sdallocx je_sdallocx +# define nallocx je_nallocx +# define mallctl je_mallctl +# define mallctlnametomib je_mallctlnametomib +# define mallctlbymib je_mallctlbymib +# define malloc_stats_print je_malloc_stats_print +# define malloc_usable_size je_malloc_usable_size +#endif + +/* + * The je_* macros can be used as stable alternative names for the + * public jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily + * meant for use in jemalloc itself, but it can be used by application code to + * provide isolation from the name mangling specified via --with-mangling + * and/or --with-jemalloc-prefix. + */ +#ifndef JEMALLOC_NO_DEMANGLE +# undef je_malloc_conf +# undef je_malloc_message +# undef je_malloc +# undef je_calloc +# undef je_posix_memalign +# undef je_aligned_alloc +# undef je_realloc +# undef je_free +# undef je_mallocx +# undef je_rallocx +# undef je_xallocx +# undef je_sallocx +# undef je_dallocx +# undef je_sdallocx +# undef je_nallocx +# undef je_mallctl +# undef je_mallctlnametomib +# undef je_mallctlbymib +# undef je_malloc_stats_print +# undef je_malloc_usable_size +#endif + +#ifdef __cplusplus +} +#endif +#endif /* JEMALLOC_H_ */ diff --git a/sources/jemalloc/include-win/jemalloc/jemalloc_defs.h b/sources/jemalloc/include-win/jemalloc/jemalloc_defs.h new file mode 100755 index 00000000..f6a0102c --- /dev/null +++ b/sources/jemalloc/include-win/jemalloc/jemalloc_defs.h @@ -0,0 +1,46 @@ +/* include/jemalloc/jemalloc_defs.h. Generated from jemalloc_defs.h.in by configure. */ +/* Defined if __attribute__((...)) syntax is supported. 
 */
+/* #undef JEMALLOC_HAVE_ATTR */
+
+/* Defined if alloc_size attribute is supported. */
+/* #undef JEMALLOC_HAVE_ATTR_ALLOC_SIZE */
+
+/* Defined if format(gnu_printf, ...) attribute is supported. */
+/* #undef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF */
+
+/* Defined if format(printf, ...) attribute is supported. */
+/* #undef JEMALLOC_HAVE_ATTR_FORMAT_PRINTF */
+
+/*
+ * Define overrides for non-standard allocator-related functions if they are
+ * present on the system.
+ */
+/* #undef JEMALLOC_OVERRIDE_MEMALIGN */
+/* #undef JEMALLOC_OVERRIDE_VALLOC */
+
+/*
+ * At least Linux omits the "const" in:
+ *
+ *   size_t malloc_usable_size(const void *ptr);
+ *
+ * Match the operating system's prototype.
+ */
+#define JEMALLOC_USABLE_SIZE_CONST const
+
+/*
+ * If defined, specify throw() for the public function prototypes when compiling
+ * with C++.  The only justification for this is to match the prototypes that
+ * glibc defines.
+ */
+/* #undef JEMALLOC_USE_CXX_THROW */
+
+#ifdef _MSC_VER
+# ifdef _WIN64
+# define LG_SIZEOF_PTR_WIN 3
+# else
+# define LG_SIZEOF_PTR_WIN 2
+# endif
+#endif
+
+/* sizeof(void *) == 2^LG_SIZEOF_PTR. */
+#define LG_SIZEOF_PTR LG_SIZEOF_PTR_WIN
diff --git a/sources/jemalloc/include-win/jemalloc/jemalloc_macros.h b/sources/jemalloc/include-win/jemalloc/jemalloc_macros.h
new file mode 100755
index 00000000..fdc318ef
--- /dev/null
+++ b/sources/jemalloc/include-win/jemalloc/jemalloc_macros.h
@@ -0,0 +1,103 @@
+#include <stdlib.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <limits.h>
+#include <strings.h>
+
+#define JEMALLOC_VERSION "0.0.0-0-g0000000000000000000000000000000000000000"
+#define JEMALLOC_VERSION_MAJOR 0
+#define JEMALLOC_VERSION_MINOR 0
+#define JEMALLOC_VERSION_BUGFIX 0
+#define JEMALLOC_VERSION_NREV 0
+#define JEMALLOC_VERSION_GID "0000000000000000000000000000000000000000"
+
+# define MALLOCX_LG_ALIGN(la) ((int)(la))
+# if LG_SIZEOF_PTR == 2
+# define MALLOCX_ALIGN(a) ((int)(ffs((int)(a))-1))
+# else
+# define MALLOCX_ALIGN(a) \
+    ((int)(((size_t)(a) < (size_t)INT_MAX) ? ffs((int)(a))-1 : \
+    ffs((int)(((size_t)(a))>>32))+31))
+# endif
+# define MALLOCX_ZERO ((int)0x40)
+/*
+ * Bias tcache index bits so that 0 encodes "automatic tcache management", and 1
+ * encodes MALLOCX_TCACHE_NONE.
+ */
+# define MALLOCX_TCACHE(tc) ((int)(((tc)+2) << 8))
+# define MALLOCX_TCACHE_NONE MALLOCX_TCACHE(-1)
+/*
+ * Bias arena index bits so that 0 encodes "use an automatically chosen arena".
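+ * For example, MALLOCX_ARENA(0) evaluates to 1 << 20, so a flags value with
+ * those bits zero means "let jemalloc choose an arena".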
+ */ +# define MALLOCX_ARENA(a) ((((int)(a))+1) << 20) + +#if defined(__cplusplus) && defined(JEMALLOC_USE_CXX_THROW) +# define JEMALLOC_CXX_THROW throw() +#else +# define JEMALLOC_CXX_THROW +#endif + +#if _MSC_VER +# define JEMALLOC_ATTR(s) +# define JEMALLOC_ALIGNED(s) __declspec(align(s)) +# define JEMALLOC_ALLOC_SIZE(s) +# define JEMALLOC_ALLOC_SIZE2(s1, s2) +# ifndef JEMALLOC_EXPORT +# ifdef DLLEXPORT +# define JEMALLOC_EXPORT __declspec(dllexport) +# else +# define JEMALLOC_EXPORT __declspec(dllimport) +# endif +# endif +# define JEMALLOC_FORMAT_PRINTF(s, i) +# define JEMALLOC_NOINLINE __declspec(noinline) +# ifdef __cplusplus +# define JEMALLOC_NOTHROW __declspec(nothrow) +# else +# define JEMALLOC_NOTHROW +# endif +# define JEMALLOC_SECTION(s) __declspec(allocate(s)) +# define JEMALLOC_RESTRICT_RETURN __declspec(restrict) +# if _MSC_VER >= 1900 && !defined(__EDG__) +# define JEMALLOC_ALLOCATOR __declspec(allocator) +# else +# define JEMALLOC_ALLOCATOR +# endif +#elif defined(JEMALLOC_HAVE_ATTR) +# define JEMALLOC_ATTR(s) __attribute__((s)) +# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s)) +# ifdef JEMALLOC_HAVE_ATTR_ALLOC_SIZE +# define JEMALLOC_ALLOC_SIZE(s) JEMALLOC_ATTR(alloc_size(s)) +# define JEMALLOC_ALLOC_SIZE2(s1, s2) JEMALLOC_ATTR(alloc_size(s1, s2)) +# else +# define JEMALLOC_ALLOC_SIZE(s) +# define JEMALLOC_ALLOC_SIZE2(s1, s2) +# endif +# ifndef JEMALLOC_EXPORT +# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default")) +# endif +# ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF +# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i)) +# elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF) +# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(printf, s, i)) +# else +# define JEMALLOC_FORMAT_PRINTF(s, i) +# endif +# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline) +# define JEMALLOC_NOTHROW JEMALLOC_ATTR(nothrow) +# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s)) +# define JEMALLOC_RESTRICT_RETURN +# define JEMALLOC_ALLOCATOR +#else +# define JEMALLOC_ATTR(s) +# define JEMALLOC_ALIGNED(s) +# define JEMALLOC_ALLOC_SIZE(s) +# define JEMALLOC_ALLOC_SIZE2(s1, s2) +# define JEMALLOC_EXPORT +# define JEMALLOC_FORMAT_PRINTF(s, i) +# define JEMALLOC_NOINLINE +# define JEMALLOC_NOTHROW +# define JEMALLOC_SECTION(s) +# define JEMALLOC_RESTRICT_RETURN +# define JEMALLOC_ALLOCATOR +#endif diff --git a/sources/jemalloc/include-win/jemalloc/jemalloc_mangle.h b/sources/jemalloc/include-win/jemalloc/jemalloc_mangle.h new file mode 100755 index 00000000..f6b02d80 --- /dev/null +++ b/sources/jemalloc/include-win/jemalloc/jemalloc_mangle.h @@ -0,0 +1,62 @@ +/* + * By default application code must explicitly refer to mangled symbol names, + * so that it is possible to use jemalloc in conjunction with another allocator + * in the same application. Define JEMALLOC_MANGLE in order to cause automatic + * name mangling that matches the API prefixing that happened as a result of + * --with-mangling and/or --with-jemalloc-prefix configuration settings. 
+ */ +#ifdef JEMALLOC_MANGLE +# ifndef JEMALLOC_NO_DEMANGLE +# define JEMALLOC_NO_DEMANGLE +# endif +# define malloc_conf je_malloc_conf +# define malloc_message je_malloc_message +# define malloc je_malloc +# define calloc je_calloc +# define posix_memalign je_posix_memalign +# define aligned_alloc je_aligned_alloc +# define realloc je_realloc +# define free je_free +# define mallocx je_mallocx +# define rallocx je_rallocx +# define xallocx je_xallocx +# define sallocx je_sallocx +# define dallocx je_dallocx +# define sdallocx je_sdallocx +# define nallocx je_nallocx +# define mallctl je_mallctl +# define mallctlnametomib je_mallctlnametomib +# define mallctlbymib je_mallctlbymib +# define malloc_stats_print je_malloc_stats_print +# define malloc_usable_size je_malloc_usable_size +#endif + +/* + * The je_* macros can be used as stable alternative names for the + * public jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily + * meant for use in jemalloc itself, but it can be used by application code to + * provide isolation from the name mangling specified via --with-mangling + * and/or --with-jemalloc-prefix. + */ +#ifndef JEMALLOC_NO_DEMANGLE +# undef je_malloc_conf +# undef je_malloc_message +# undef je_malloc +# undef je_calloc +# undef je_posix_memalign +# undef je_aligned_alloc +# undef je_realloc +# undef je_free +# undef je_mallocx +# undef je_rallocx +# undef je_xallocx +# undef je_sallocx +# undef je_dallocx +# undef je_sdallocx +# undef je_nallocx +# undef je_mallctl +# undef je_mallctlnametomib +# undef je_mallctlbymib +# undef je_malloc_stats_print +# undef je_malloc_usable_size +#endif diff --git a/sources/jemalloc/include-win/jemalloc/jemalloc_mangle_jet.h b/sources/jemalloc/include-win/jemalloc/jemalloc_mangle_jet.h new file mode 100755 index 00000000..a89087ac --- /dev/null +++ b/sources/jemalloc/include-win/jemalloc/jemalloc_mangle_jet.h @@ -0,0 +1,62 @@ +/* + * By default application code must explicitly refer to mangled symbol names, + * so that it is possible to use jemalloc in conjunction with another allocator + * in the same application. Define JEMALLOC_MANGLE in order to cause automatic + * name mangling that matches the API prefixing that happened as a result of + * --with-mangling and/or --with-jemalloc-prefix configuration settings. + */ +#ifdef JEMALLOC_MANGLE +# ifndef JEMALLOC_NO_DEMANGLE +# define JEMALLOC_NO_DEMANGLE +# endif +# define malloc_conf jet_malloc_conf +# define malloc_message jet_malloc_message +# define malloc jet_malloc +# define calloc jet_calloc +# define posix_memalign jet_posix_memalign +# define aligned_alloc jet_aligned_alloc +# define realloc jet_realloc +# define free jet_free +# define mallocx jet_mallocx +# define rallocx jet_rallocx +# define xallocx jet_xallocx +# define sallocx jet_sallocx +# define dallocx jet_dallocx +# define sdallocx jet_sdallocx +# define nallocx jet_nallocx +# define mallctl jet_mallctl +# define mallctlnametomib jet_mallctlnametomib +# define mallctlbymib jet_mallctlbymib +# define malloc_stats_print jet_malloc_stats_print +# define malloc_usable_size jet_malloc_usable_size +#endif + +/* + * The jet_* macros can be used as stable alternative names for the + * public jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily + * meant for use in jemalloc itself, but it can be used by application code to + * provide isolation from the name mangling specified via --with-mangling + * and/or --with-jemalloc-prefix. 
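A short sketch of the mangling switch described above: defining JEMALLOC_MANGLE before inclusion redirects the standard allocator names to the je_-prefixed symbols in that translation unit only (mangle_example is hypothetical).

#define JEMALLOC_MANGLE
#include <jemalloc/jemalloc.h>

void
mangle_example(void)
{
	void *p = malloc(64);	/* Preprocesses to je_malloc(64). */

	free(p);		/* Preprocesses to je_free(p). */
}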
+ */ +#ifndef JEMALLOC_NO_DEMANGLE +# undef jet_malloc_conf +# undef jet_malloc_message +# undef jet_malloc +# undef jet_calloc +# undef jet_posix_memalign +# undef jet_aligned_alloc +# undef jet_realloc +# undef jet_free +# undef jet_mallocx +# undef jet_rallocx +# undef jet_xallocx +# undef jet_sallocx +# undef jet_dallocx +# undef jet_sdallocx +# undef jet_nallocx +# undef jet_mallctl +# undef jet_mallctlnametomib +# undef jet_mallctlbymib +# undef jet_malloc_stats_print +# undef jet_malloc_usable_size +#endif diff --git a/sources/jemalloc/include-win/jemalloc/jemalloc_protos.h b/sources/jemalloc/include-win/jemalloc/jemalloc_protos.h new file mode 100755 index 00000000..ff025e30 --- /dev/null +++ b/sources/jemalloc/include-win/jemalloc/jemalloc_protos.h @@ -0,0 +1,66 @@ +/* + * The je_ prefix on the following public symbol declarations is an artifact + * of namespace management, and should be omitted in application code unless + * JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle.h). + */ +extern JEMALLOC_EXPORT const char *je_malloc_conf; +extern JEMALLOC_EXPORT void (*je_malloc_message)(void *cbopaque, + const char *s); + +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_malloc(size_t size) + JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1); +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_calloc(size_t num, size_t size) + JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2); +JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_posix_memalign(void **memptr, + size_t alignment, size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(nonnull(1)); +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_aligned_alloc(size_t alignment, + size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) + JEMALLOC_ALLOC_SIZE(2); +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_realloc(void *ptr, size_t size) + JEMALLOC_CXX_THROW JEMALLOC_ALLOC_SIZE(2); +JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_free(void *ptr) + JEMALLOC_CXX_THROW; + +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_mallocx(size_t size, int flags) + JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1); +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_rallocx(void *ptr, size_t size, + int flags) JEMALLOC_ALLOC_SIZE(2); +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_xallocx(void *ptr, size_t size, + size_t extra, int flags); +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_sallocx(const void *ptr, + int flags) JEMALLOC_ATTR(pure); +JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_dallocx(void *ptr, int flags); +JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_sdallocx(void *ptr, size_t size, + int flags); +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_nallocx(size_t size, int flags) + JEMALLOC_ATTR(pure); + +JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctl(const char *name, + void *oldp, size_t *oldlenp, void *newp, size_t newlen); +JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlnametomib(const char *name, + size_t *mibp, size_t *miblenp); +JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlbymib(const size_t *mib, + size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen); +JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_malloc_stats_print( + void (*write_cb)(void *, const char *), void *je_cbopaque, + const char *opts); +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_malloc_usable_size( + JEMALLOC_USABLE_SIZE_CONST void *ptr) 
JEMALLOC_CXX_THROW; + +#ifdef JEMALLOC_OVERRIDE_MEMALIGN +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_memalign(size_t alignment, size_t size) + JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc); +#endif + +#ifdef JEMALLOC_OVERRIDE_VALLOC +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_valloc(size_t size) JEMALLOC_CXX_THROW + JEMALLOC_ATTR(malloc); +#endif diff --git a/sources/jemalloc/include-win/jemalloc/jemalloc_protos_jet.h b/sources/jemalloc/include-win/jemalloc/jemalloc_protos_jet.h new file mode 100755 index 00000000..f71efef0 --- /dev/null +++ b/sources/jemalloc/include-win/jemalloc/jemalloc_protos_jet.h @@ -0,0 +1,66 @@ +/* + * The jet_ prefix on the following public symbol declarations is an artifact + * of namespace management, and should be omitted in application code unless + * JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle@install_suffix@.h). + */ +extern JEMALLOC_EXPORT const char *jet_malloc_conf; +extern JEMALLOC_EXPORT void (*jet_malloc_message)(void *cbopaque, + const char *s); + +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *jet_malloc(size_t size) + JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1); +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *jet_calloc(size_t num, size_t size) + JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2); +JEMALLOC_EXPORT int JEMALLOC_NOTHROW jet_posix_memalign(void **memptr, + size_t alignment, size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(nonnull(1)); +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *jet_aligned_alloc(size_t alignment, + size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) + JEMALLOC_ALLOC_SIZE(2); +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *jet_realloc(void *ptr, size_t size) + JEMALLOC_CXX_THROW JEMALLOC_ALLOC_SIZE(2); +JEMALLOC_EXPORT void JEMALLOC_NOTHROW jet_free(void *ptr) + JEMALLOC_CXX_THROW; + +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *jet_mallocx(size_t size, int flags) + JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1); +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *jet_rallocx(void *ptr, size_t size, + int flags) JEMALLOC_ALLOC_SIZE(2); +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW jet_xallocx(void *ptr, size_t size, + size_t extra, int flags); +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW jet_sallocx(const void *ptr, + int flags) JEMALLOC_ATTR(pure); +JEMALLOC_EXPORT void JEMALLOC_NOTHROW jet_dallocx(void *ptr, int flags); +JEMALLOC_EXPORT void JEMALLOC_NOTHROW jet_sdallocx(void *ptr, size_t size, + int flags); +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW jet_nallocx(size_t size, int flags) + JEMALLOC_ATTR(pure); + +JEMALLOC_EXPORT int JEMALLOC_NOTHROW jet_mallctl(const char *name, + void *oldp, size_t *oldlenp, void *newp, size_t newlen); +JEMALLOC_EXPORT int JEMALLOC_NOTHROW jet_mallctlnametomib(const char *name, + size_t *mibp, size_t *miblenp); +JEMALLOC_EXPORT int JEMALLOC_NOTHROW jet_mallctlbymib(const size_t *mib, + size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen); +JEMALLOC_EXPORT void JEMALLOC_NOTHROW jet_malloc_stats_print( + void (*write_cb)(void *, const char *), void *jet_cbopaque, + const char *opts); +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW jet_malloc_usable_size( + JEMALLOC_USABLE_SIZE_CONST void *ptr) JEMALLOC_CXX_THROW; + +#ifdef 
JEMALLOC_OVERRIDE_MEMALIGN +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *jet_memalign(size_t alignment, size_t size) + JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc); +#endif + +#ifdef JEMALLOC_OVERRIDE_VALLOC +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *jet_valloc(size_t size) JEMALLOC_CXX_THROW + JEMALLOC_ATTR(malloc); +#endif diff --git a/sources/jemalloc/include-win/jemalloc/jemalloc_rename.h b/sources/jemalloc/include-win/jemalloc/jemalloc_rename.h new file mode 100755 index 00000000..6c915bde --- /dev/null +++ b/sources/jemalloc/include-win/jemalloc/jemalloc_rename.h @@ -0,0 +1,27 @@ +/* + * Name mangling for public symbols is controlled by --with-mangling and + * --with-jemalloc-prefix. With default settings the je_ prefix is stripped by + * these macro definitions. + */ +#ifndef JEMALLOC_NO_RENAME +# define je_malloc_conf je_malloc_conf +# define je_malloc_message je_malloc_message +# define je_malloc je_malloc +# define je_calloc je_calloc +# define je_posix_memalign je_posix_memalign +# define je_aligned_alloc je_aligned_alloc +# define je_realloc je_realloc +# define je_free je_free +# define je_mallocx je_mallocx +# define je_rallocx je_rallocx +# define je_xallocx je_xallocx +# define je_sallocx je_sallocx +# define je_dallocx je_dallocx +# define je_sdallocx je_sdallocx +# define je_nallocx je_nallocx +# define je_mallctl je_mallctl +# define je_mallctlnametomib je_mallctlnametomib +# define je_mallctlbymib je_mallctlbymib +# define je_malloc_stats_print je_malloc_stats_print +# define je_malloc_usable_size je_malloc_usable_size +#endif diff --git a/sources/jemalloc/include-win/jemalloc/jemalloc_typedefs.h b/sources/jemalloc/include-win/jemalloc/jemalloc_typedefs.h new file mode 100755 index 00000000..fa7b350a --- /dev/null +++ b/sources/jemalloc/include-win/jemalloc/jemalloc_typedefs.h @@ -0,0 +1,57 @@ +/* + * void * + * chunk_alloc(void *new_addr, size_t size, size_t alignment, bool *zero, + * bool *commit, unsigned arena_ind); + */ +typedef void *(chunk_alloc_t)(void *, size_t, size_t, bool *, bool *, unsigned); + +/* + * bool + * chunk_dalloc(void *chunk, size_t size, bool committed, unsigned arena_ind); + */ +typedef bool (chunk_dalloc_t)(void *, size_t, bool, unsigned); + +/* + * bool + * chunk_commit(void *chunk, size_t size, size_t offset, size_t length, + * unsigned arena_ind); + */ +typedef bool (chunk_commit_t)(void *, size_t, size_t, size_t, unsigned); + +/* + * bool + * chunk_decommit(void *chunk, size_t size, size_t offset, size_t length, + * unsigned arena_ind); + */ +typedef bool (chunk_decommit_t)(void *, size_t, size_t, size_t, unsigned); + +/* + * bool + * chunk_purge(void *chunk, size_t size, size_t offset, size_t length, + * unsigned arena_ind); + */ +typedef bool (chunk_purge_t)(void *, size_t, size_t, size_t, unsigned); + +/* + * bool + * chunk_split(void *chunk, size_t size, size_t size_a, size_t size_b, + * bool committed, unsigned arena_ind); + */ +typedef bool (chunk_split_t)(void *, size_t, size_t, size_t, bool, unsigned); + +/* + * bool + * chunk_merge(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b, + * bool committed, unsigned arena_ind); + */ +typedef bool (chunk_merge_t)(void *, size_t, void *, size_t, bool, unsigned); + +typedef struct { + chunk_alloc_t *alloc; + chunk_dalloc_t *dalloc; + chunk_commit_t *commit; + chunk_decommit_t *decommit; + chunk_purge_t *purge; + chunk_split_t *split; + chunk_merge_t *merge; +} chunk_hooks_t; diff --git 
a/sources/jemalloc/include-win/msvc_compat/C99/stdbool.h b/sources/jemalloc/include-win/msvc_compat/C99/stdbool.h new file mode 100755 index 00000000..d92160eb --- /dev/null +++ b/sources/jemalloc/include-win/msvc_compat/C99/stdbool.h @@ -0,0 +1,20 @@ +#ifndef stdbool_h +#define stdbool_h + +#include <wtypes.h> + +/* MSVC doesn't define _Bool or bool in C, but does have BOOL */ +/* Note this doesn't pass autoconf's test because (bool) 0.5 != true */ +/* Clang-cl uses MSVC headers, so needs msvc_compat, but has _Bool as + * a built-in type. */ +#ifndef __clang__ +typedef BOOL _Bool; +#endif + +#define bool _Bool +#define true 1 +#define false 0 + +#define __bool_true_false_are_defined 1 + +#endif /* stdbool_h */ diff --git a/sources/jemalloc/include-win/msvc_compat/C99/stdint.h b/sources/jemalloc/include-win/msvc_compat/C99/stdint.h new file mode 100755 index 00000000..d02608a5 --- /dev/null +++ b/sources/jemalloc/include-win/msvc_compat/C99/stdint.h @@ -0,0 +1,247 @@ +// ISO C9x compliant stdint.h for Microsoft Visual Studio +// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 +// +// Copyright (c) 2006-2008 Alexander Chemeris +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// 3. The name of the author may be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED +// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO +// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; +// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +/////////////////////////////////////////////////////////////////////////////// + +#ifndef _MSC_VER // [ +#error "Use this header only with Microsoft Visual C++ compilers!" +#endif // _MSC_VER ] + +#ifndef _MSC_STDINT_H_ // [ +#define _MSC_STDINT_H_ + +#if _MSC_VER > 1000 +#pragma once +#endif + +#include <limits.h> + +// For Visual Studio 6 in C++ mode and for many Visual Studio versions when +// compiling for ARM we should wrap <wchar.h> include with 'extern "C++" {}' +// or compiler give many errors like this: +// error C2733: second C linkage of overloaded function 'wmemchr' not allowed +#ifdef __cplusplus +extern "C" { +#endif +# include <wchar.h> +#ifdef __cplusplus +} +#endif + +// Define _W64 macros to mark types changing their size, like intptr_t.
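The stdbool.h shim in msvc_compat/C99 above notes that it fails autoconf's (bool) 0.5 != true check. A hedged illustration of why, with FAKE_BOOL standing in for the BOOL typedef so the snippet compiles with any C99 compiler:

typedef int FAKE_BOOL;	/* Stands in for BOOL from <wtypes.h>. */

int
stdbool_caveat(void)
{
	FAKE_BOOL shim = 0.5;	/* Plain integer conversion truncates to 0. */
	_Bool real = 0.5;	/* A true _Bool collapses any nonzero value to 1. */

	return (shim == real);	/* 0: the shim is not a conforming _Bool. */
}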
+#ifndef _W64 +# if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300 +# define _W64 __w64 +# else +# define _W64 +# endif +#endif + + +// 7.18.1 Integer types + +// 7.18.1.1 Exact-width integer types + +// Visual Studio 6 and Embedded Visual C++ 4 doesn't +// realize that, e.g. char has the same size as __int8 +// so we give up on __intX for them. +#if (_MSC_VER < 1300) + typedef signed char int8_t; + typedef signed short int16_t; + typedef signed int int32_t; + typedef unsigned char uint8_t; + typedef unsigned short uint16_t; + typedef unsigned int uint32_t; +#else + typedef signed __int8 int8_t; + typedef signed __int16 int16_t; + typedef signed __int32 int32_t; + typedef unsigned __int8 uint8_t; + typedef unsigned __int16 uint16_t; + typedef unsigned __int32 uint32_t; +#endif +typedef signed __int64 int64_t; +typedef unsigned __int64 uint64_t; + + +// 7.18.1.2 Minimum-width integer types +typedef int8_t int_least8_t; +typedef int16_t int_least16_t; +typedef int32_t int_least32_t; +typedef int64_t int_least64_t; +typedef uint8_t uint_least8_t; +typedef uint16_t uint_least16_t; +typedef uint32_t uint_least32_t; +typedef uint64_t uint_least64_t; + +// 7.18.1.3 Fastest minimum-width integer types +typedef int8_t int_fast8_t; +typedef int16_t int_fast16_t; +typedef int32_t int_fast32_t; +typedef int64_t int_fast64_t; +typedef uint8_t uint_fast8_t; +typedef uint16_t uint_fast16_t; +typedef uint32_t uint_fast32_t; +typedef uint64_t uint_fast64_t; + +// 7.18.1.4 Integer types capable of holding object pointers +#ifdef _WIN64 // [ + typedef signed __int64 intptr_t; + typedef unsigned __int64 uintptr_t; +#else // _WIN64 ][ + typedef _W64 signed int intptr_t; + typedef _W64 unsigned int uintptr_t; +#endif // _WIN64 ] + +// 7.18.1.5 Greatest-width integer types +typedef int64_t intmax_t; +typedef uint64_t uintmax_t; + + +// 7.18.2 Limits of specified-width integer types + +#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259 + +// 7.18.2.1 Limits of exact-width integer types +#define INT8_MIN ((int8_t)_I8_MIN) +#define INT8_MAX _I8_MAX +#define INT16_MIN ((int16_t)_I16_MIN) +#define INT16_MAX _I16_MAX +#define INT32_MIN ((int32_t)_I32_MIN) +#define INT32_MAX _I32_MAX +#define INT64_MIN ((int64_t)_I64_MIN) +#define INT64_MAX _I64_MAX +#define UINT8_MAX _UI8_MAX +#define UINT16_MAX _UI16_MAX +#define UINT32_MAX _UI32_MAX +#define UINT64_MAX _UI64_MAX + +// 7.18.2.2 Limits of minimum-width integer types +#define INT_LEAST8_MIN INT8_MIN +#define INT_LEAST8_MAX INT8_MAX +#define INT_LEAST16_MIN INT16_MIN +#define INT_LEAST16_MAX INT16_MAX +#define INT_LEAST32_MIN INT32_MIN +#define INT_LEAST32_MAX INT32_MAX +#define INT_LEAST64_MIN INT64_MIN +#define INT_LEAST64_MAX INT64_MAX +#define UINT_LEAST8_MAX UINT8_MAX +#define UINT_LEAST16_MAX UINT16_MAX +#define UINT_LEAST32_MAX UINT32_MAX +#define UINT_LEAST64_MAX UINT64_MAX + +// 7.18.2.3 Limits of fastest minimum-width integer types +#define INT_FAST8_MIN INT8_MIN +#define INT_FAST8_MAX INT8_MAX +#define INT_FAST16_MIN INT16_MIN +#define INT_FAST16_MAX INT16_MAX +#define INT_FAST32_MIN INT32_MIN +#define INT_FAST32_MAX INT32_MAX +#define INT_FAST64_MIN INT64_MIN +#define INT_FAST64_MAX INT64_MAX +#define UINT_FAST8_MAX UINT8_MAX +#define UINT_FAST16_MAX UINT16_MAX +#define UINT_FAST32_MAX UINT32_MAX +#define UINT_FAST64_MAX UINT64_MAX + +// 7.18.2.4 Limits of integer types capable of holding object pointers +#ifdef _WIN64 // [ +# define INTPTR_MIN INT64_MIN +# 
define INTPTR_MAX INT64_MAX +# define UINTPTR_MAX UINT64_MAX +#else // _WIN64 ][ +# define INTPTR_MIN INT32_MIN +# define INTPTR_MAX INT32_MAX +# define UINTPTR_MAX UINT32_MAX +#endif // _WIN64 ] + +// 7.18.2.5 Limits of greatest-width integer types +#define INTMAX_MIN INT64_MIN +#define INTMAX_MAX INT64_MAX +#define UINTMAX_MAX UINT64_MAX + +// 7.18.3 Limits of other integer types + +#ifdef _WIN64 // [ +# define PTRDIFF_MIN _I64_MIN +# define PTRDIFF_MAX _I64_MAX +#else // _WIN64 ][ +# define PTRDIFF_MIN _I32_MIN +# define PTRDIFF_MAX _I32_MAX +#endif // _WIN64 ] + +#define SIG_ATOMIC_MIN INT_MIN +#define SIG_ATOMIC_MAX INT_MAX + +#ifndef SIZE_MAX // [ +# ifdef _WIN64 // [ +# define SIZE_MAX _UI64_MAX +# else // _WIN64 ][ +# define SIZE_MAX _UI32_MAX +# endif // _WIN64 ] +#endif // SIZE_MAX ] + +// WCHAR_MIN and WCHAR_MAX are also defined in <wchar.h> +#ifndef WCHAR_MIN // [ +# define WCHAR_MIN 0 +#endif // WCHAR_MIN ] +#ifndef WCHAR_MAX // [ +# define WCHAR_MAX _UI16_MAX +#endif // WCHAR_MAX ] + +#define WINT_MIN 0 +#define WINT_MAX _UI16_MAX + +#endif // __STDC_LIMIT_MACROS ] + + +// 7.18.4 Limits of other integer types + +#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260 + +// 7.18.4.1 Macros for minimum-width integer constants + +#define INT8_C(val) val##i8 +#define INT16_C(val) val##i16 +#define INT32_C(val) val##i32 +#define INT64_C(val) val##i64 + +#define UINT8_C(val) val##ui8 +#define UINT16_C(val) val##ui16 +#define UINT32_C(val) val##ui32 +#define UINT64_C(val) val##ui64 + +// 7.18.4.2 Macros for greatest-width integer constants +#define INTMAX_C INT64_C +#define UINTMAX_C UINT64_C + +#endif // __STDC_CONSTANT_MACROS ] + + +#endif // _MSC_STDINT_H_ ] diff --git a/sources/jemalloc/include-win/msvc_compat/strings.h b/sources/jemalloc/include-win/msvc_compat/strings.h new file mode 100755 index 00000000..a3ee2506 --- /dev/null +++ b/sources/jemalloc/include-win/msvc_compat/strings.h @@ -0,0 +1,59 @@ +#ifndef strings_h +#define strings_h + +/* MSVC doesn't define ffs/ffsl.
This dummy strings.h header is provided + * for both */ +#ifdef _MSC_VER +# include <intrin.h> +# pragma intrinsic(_BitScanForward) +static __forceinline int ffsl(long x) +{ + unsigned long i; + + if (_BitScanForward(&i, x)) + return (i + 1); + return (0); +} + +static __forceinline int ffs(int x) +{ + + return (ffsl(x)); +} + +# ifdef _M_X64 +# pragma intrinsic(_BitScanForward64) +# endif + +static __forceinline int ffsll(unsigned __int64 x) +{ + unsigned long i; +#ifdef _M_X64 + if (_BitScanForward64(&i, x)) + return (i + 1); + return (0); +#else +// Fallback for 32-bit build where 64-bit version not available +// assuming little endian + union { + unsigned __int64 ll; + unsigned long l[2]; + } s; + + s.ll = x; + + if (_BitScanForward(&i, s.l[0])) + return (i + 1); + else if(_BitScanForward(&i, s.l[1])) + return (i + 33); + return (0); +#endif +} + +#else +# define ffsll(x) __builtin_ffsll(x) +# define ffsl(x) __builtin_ffsl(x) +# define ffs(x) __builtin_ffs(x) +#endif + +#endif /* strings_h */ diff --git a/sources/jemalloc/include-win/msvc_compat/windows_extra.h b/sources/jemalloc/include-win/msvc_compat/windows_extra.h new file mode 100755 index 00000000..3008faa3 --- /dev/null +++ b/sources/jemalloc/include-win/msvc_compat/windows_extra.h @@ -0,0 +1,6 @@ +#ifndef MSVC_COMPAT_WINDOWS_EXTRA_H +#define MSVC_COMPAT_WINDOWS_EXTRA_H + +#include <errno.h> + +#endif /* MSVC_COMPAT_WINDOWS_EXTRA_H */ diff --git a/sources/jemalloc/src/je_arena.c b/sources/jemalloc/src/je_arena.c new file mode 100755 index 00000000..a9dff0b0 --- /dev/null +++ b/sources/jemalloc/src/je_arena.c @@ -0,0 +1,3950 @@ +#define JEMALLOC_ARENA_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +/******************************************************************************/ +/* Data. */ + +bool opt_thp = true; +static bool thp_initially_huge; +purge_mode_t opt_purge = PURGE_DEFAULT; +const char *purge_mode_names[] = { + "ratio", + "decay", + "N/A" +}; +ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT; +static ssize_t lg_dirty_mult_default; +ssize_t opt_decay_time = DECAY_TIME_DEFAULT; +static ssize_t decay_time_default; + +arena_bin_info_t arena_bin_info[NBINS]; + +size_t map_bias; +size_t map_misc_offset; +size_t arena_maxrun; /* Max run size for arenas. */ +size_t large_maxclass; /* Max large size class. */ +unsigned nlclasses; /* Number of large size classes. */ +unsigned nhclasses; /* Number of huge size classes. */ + +/******************************************************************************/ +/* + * Function prototypes for static functions that are referenced prior to + * definition.
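The ffs/ffsl/ffsll shims in msvc_compat/strings.h above reproduce the POSIX contract: the result is the 1-based index of the least significant set bit, or 0 when no bit is set. A small sketch, assuming the shim (or a platform equivalent providing ffsll) is in scope:

#include <assert.h>
#include <strings.h>	/* Resolves to the msvc_compat shim on Windows. */

void
ffs_example(void)
{
	assert(ffs(0) == 0);	/* No bit set yields 0, not an index. */
	assert(ffs(1) == 1);	/* Indices are 1-based. */
	assert(ffs(8) == 4);
	/* The 32-bit fallback scans the low word first, then the high
	 * word with a 33 offset (32 bits plus 1-based indexing). */
	assert(ffsll(1ULL << 40) == 41);
}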
+ */ + +static void arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena, + arena_chunk_t *chunk); +static void arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, + size_t ndirty_limit); +static void arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run, + bool dirty, bool cleaned, bool decommitted); +static void arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena, + arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin); +static void arena_bin_lower_run(arena_t *arena, arena_run_t *run, + arena_bin_t *bin); + +/******************************************************************************/ + +JEMALLOC_INLINE_C size_t +arena_miscelm_size_get(const arena_chunk_map_misc_t *miscelm) +{ + arena_chunk_t *chunk; + size_t pageind, mapbits; + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm); + pageind = arena_miscelm_to_pageind(miscelm); + mapbits = arena_mapbits_get(chunk, pageind); + return (arena_mapbits_size_decode(mapbits)); +} + +JEMALLOC_INLINE_C const extent_node_t * +arena_miscelm_extent_get(const arena_chunk_map_misc_t *miscelm) +{ + arena_chunk_t *chunk; + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm); + return (&chunk->node); +} + +JEMALLOC_INLINE_C int +arena_sn_comp(const arena_chunk_map_misc_t *a, const arena_chunk_map_misc_t *b) +{ + size_t a_sn, b_sn; + + assert(a != NULL); + assert(b != NULL); + + a_sn = extent_node_sn_get(arena_miscelm_extent_get(a)); + b_sn = extent_node_sn_get(arena_miscelm_extent_get(b)); + + return ((a_sn > b_sn) - (a_sn < b_sn)); +} + +JEMALLOC_INLINE_C int +arena_ad_comp(const arena_chunk_map_misc_t *a, + const arena_chunk_map_misc_t *b) +{ + uintptr_t a_miscelm = (uintptr_t)a; + uintptr_t b_miscelm = (uintptr_t)b; + + assert(a != NULL); + assert(b != NULL); + + return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm)); +} + +JEMALLOC_INLINE_C int +arena_snad_comp(const arena_chunk_map_misc_t *a, + const arena_chunk_map_misc_t *b) +{ + int ret; + + assert(a != NULL); + assert(b != NULL); + + ret = arena_sn_comp(a, b); + if (ret != 0) + return (ret); + + ret = arena_ad_comp(a, b); + return (ret); +} + +/* Generate pairing heap functions. */ +ph_gen(static UNUSED, arena_run_heap_, arena_run_heap_t, arena_chunk_map_misc_t, + ph_link, arena_snad_comp) + +#ifdef JEMALLOC_JET +#undef run_quantize_floor +#define run_quantize_floor JEMALLOC_N(n_run_quantize_floor) +#endif +static size_t +run_quantize_floor(size_t size) +{ + size_t ret; + pszind_t pind; + + assert(size > 0); + assert(size <= HUGE_MAXCLASS); + assert((size & PAGE_MASK) == 0); + + assert(size != 0); + assert(size == PAGE_CEILING(size)); + + pind = psz2ind(size - large_pad + 1); + if (pind == 0) { + /* + * Avoid underflow. This short-circuit would also do the right + * thing for all sizes in the range for which there are + * PAGE-spaced size classes, but it's simplest to just handle + * the one case that would cause erroneous results. 
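arena_sn_comp() and arena_ad_comp() above use the branchless three-way comparison idiom (a > b) - (a < b), which yields -1, 0, or 1 and, unlike returning a - b, cannot overflow or wrap. A self-contained sketch (cmp_size and cmp_example are hypothetical):

#include <assert.h>
#include <stddef.h>

static int
cmp_size(size_t a, size_t b)
{
	return ((a > b) - (a < b));
}

void
cmp_example(void)
{
	assert(cmp_size(1, 2) == -1);
	assert(cmp_size(2, 2) == 0);
	/* Subtraction would wrap here; the idiom stays correct. */
	assert(cmp_size((size_t)-1, 0) == 1);
}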
+ */ + return (size); + } + ret = pind2sz(pind - 1) + large_pad; + assert(ret <= size); + return (ret); +} +#ifdef JEMALLOC_JET +#undef run_quantize_floor +#define run_quantize_floor JEMALLOC_N(run_quantize_floor) +run_quantize_t *run_quantize_floor = JEMALLOC_N(n_run_quantize_floor); +#endif + +#ifdef JEMALLOC_JET +#undef run_quantize_ceil +#define run_quantize_ceil JEMALLOC_N(n_run_quantize_ceil) +#endif +static size_t +run_quantize_ceil(size_t size) +{ + size_t ret; + + assert(size > 0); + assert(size <= HUGE_MAXCLASS); + assert((size & PAGE_MASK) == 0); + + ret = run_quantize_floor(size); + if (ret < size) { + /* + * Skip a quantization that may have an adequately large run, + * because under-sized runs may be mixed in. This only happens + * when an unusual size is requested, i.e. for aligned + * allocation, and is just one of several places where linear + * search would potentially find sufficiently aligned available + * memory somewhere lower. + */ + ret = pind2sz(psz2ind(ret - large_pad + 1)) + large_pad; + } + return (ret); +} +#ifdef JEMALLOC_JET +#undef run_quantize_ceil +#define run_quantize_ceil JEMALLOC_N(run_quantize_ceil) +run_quantize_t *run_quantize_ceil = JEMALLOC_N(n_run_quantize_ceil); +#endif + +static void +arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind, + size_t npages) +{ + pszind_t pind = psz2ind(run_quantize_floor(arena_miscelm_size_get( + arena_miscelm_get_const(chunk, pageind)))); + assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> + LG_PAGE)); + assert((npages << LG_PAGE) < chunksize); + assert(pind2sz(pind) <= chunksize); + arena_run_heap_insert(&arena->runs_avail[pind], + arena_miscelm_get_mutable(chunk, pageind)); +} + +static void +arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind, + size_t npages) +{ + pszind_t pind = psz2ind(run_quantize_floor(arena_miscelm_size_get( + arena_miscelm_get_const(chunk, pageind)))); + assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> + LG_PAGE)); + assert((npages << LG_PAGE) < chunksize); + assert(pind2sz(pind) <= chunksize); + arena_run_heap_remove(&arena->runs_avail[pind], + arena_miscelm_get_mutable(chunk, pageind)); +} + +static void +arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind, + size_t npages) +{ + arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk, + pageind); + + assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> + LG_PAGE)); + assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY); + assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) == + CHUNK_MAP_DIRTY); + + qr_new(&miscelm->rd, rd_link); + qr_meld(&arena->runs_dirty, &miscelm->rd, rd_link); + arena->ndirty += npages; +} + +static void +arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind, + size_t npages) +{ + arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk, + pageind); + + assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> + LG_PAGE)); + assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY); + assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) == + CHUNK_MAP_DIRTY); + + qr_remove(&miscelm->rd, rd_link); + assert(arena->ndirty >= npages); + arena->ndirty -= npages; +} + +static size_t +arena_chunk_dirty_npages(const extent_node_t *node) +{ + + return (extent_node_size_get(node) >> LG_PAGE); +} + +void +arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node, bool cache) +{ + + if (cache) { + 
extent_node_dirty_linkage_init(node); + extent_node_dirty_insert(node, &arena->runs_dirty, + &arena->chunks_cache); + arena->ndirty += arena_chunk_dirty_npages(node); + } +} + +void +arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node, bool dirty) +{ + + if (dirty) { + extent_node_dirty_remove(node); + assert(arena->ndirty >= arena_chunk_dirty_npages(node)); + arena->ndirty -= arena_chunk_dirty_npages(node); + } +} + +JEMALLOC_INLINE_C void * +arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info) +{ + void *ret; + size_t regind; + arena_chunk_map_misc_t *miscelm; + void *rpages; + + assert(run->nfree > 0); + assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info)); + + regind = (unsigned)bitmap_sfu(run->bitmap, &bin_info->bitmap_info); + miscelm = arena_run_to_miscelm(run); + rpages = arena_miscelm_to_rpages(miscelm); + ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset + + (uintptr_t)(bin_info->reg_interval * regind)); + run->nfree--; + return (ret); +} + +JEMALLOC_INLINE_C void +arena_run_reg_dalloc(arena_run_t *run, void *ptr) +{ + arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); + size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; + size_t mapbits = arena_mapbits_get(chunk, pageind); + szind_t binind = arena_ptr_small_binind_get(ptr, mapbits); + arena_bin_info_t *bin_info = &arena_bin_info[binind]; + size_t regind = arena_run_regind(run, bin_info, ptr); + + assert(run->nfree < bin_info->nregs); + /* Freeing an interior pointer can cause assertion failure. */ + assert(((uintptr_t)ptr - + ((uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) + + (uintptr_t)bin_info->reg0_offset)) % + (uintptr_t)bin_info->reg_interval == 0); + assert((uintptr_t)ptr >= + (uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) + + (uintptr_t)bin_info->reg0_offset); + /* Freeing an unallocated pointer can cause assertion failure. 
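arena_run_reg_alloc() above treats a run as a slab of equally sized regions: bitmap_sfu() hands back the first free region index, and the region address is reg0_offset plus that index times reg_interval from the run's first page. A toy illustration with made-up numbers (all values here are hypothetical):

#include <assert.h>
#include <stdint.h>

void
region_addr_example(void)
{
	uintptr_t rpages = 0x100000;	/* Hypothetical run base address. */
	uintptr_t reg0_offset = 0;	/* No redzone in this sketch. */
	uintptr_t reg_interval = 32;	/* Hypothetical region size. */
	unsigned regind = 3;		/* As if returned by bitmap_sfu(). */

	void *ret = (void *)(rpages + reg0_offset + reg_interval * regind);
	assert((uintptr_t)ret == 0x100060);
}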
*/ + assert(bitmap_get(run->bitmap, &bin_info->bitmap_info, regind)); + + bitmap_unset(run->bitmap, &bin_info->bitmap_info, regind); + run->nfree++; +} + +JEMALLOC_INLINE_C void +arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages) +{ + + JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + + (run_ind << LG_PAGE)), (npages << LG_PAGE)); + memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0, + (npages << LG_PAGE)); +} + +JEMALLOC_INLINE_C void +arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind) +{ + + JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind + << LG_PAGE)), PAGE); +} + +JEMALLOC_INLINE_C void +arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind) +{ + size_t i; + UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE)); + + arena_run_page_mark_zeroed(chunk, run_ind); + for (i = 0; i < PAGE / sizeof(size_t); i++) + assert(p[i] == 0); +} + +static void +arena_nactive_add(arena_t *arena, size_t add_pages) +{ + + if (config_stats) { + size_t cactive_add = CHUNK_CEILING((arena->nactive + + add_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive << + LG_PAGE); + if (cactive_add != 0) + stats_cactive_add(cactive_add); + } + arena->nactive += add_pages; +} + +static void +arena_nactive_sub(arena_t *arena, size_t sub_pages) +{ + + if (config_stats) { + size_t cactive_sub = CHUNK_CEILING(arena->nactive << LG_PAGE) - + CHUNK_CEILING((arena->nactive - sub_pages) << LG_PAGE); + if (cactive_sub != 0) + stats_cactive_sub(cactive_sub); + } + arena->nactive -= sub_pages; +} + +static void +arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind, + size_t flag_dirty, size_t flag_decommitted, size_t need_pages) +{ + size_t total_pages, rem_pages; + + assert(flag_dirty == 0 || flag_decommitted == 0); + + total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >> + LG_PAGE; + assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) == + flag_dirty); + assert(need_pages <= total_pages); + rem_pages = total_pages - need_pages; + + arena_avail_remove(arena, chunk, run_ind, total_pages); + if (flag_dirty != 0) + arena_run_dirty_remove(arena, chunk, run_ind, total_pages); + arena_nactive_add(arena, need_pages); + + /* Keep track of trailing unused pages for later use. */ + if (rem_pages > 0) { + size_t flags = flag_dirty | flag_decommitted; + size_t flag_unzeroed_mask = (flags == 0) ? 
CHUNK_MAP_UNZEROED : + 0; + + arena_mapbits_unallocated_set(chunk, run_ind+need_pages, + (rem_pages << LG_PAGE), flags | + (arena_mapbits_unzeroed_get(chunk, run_ind+need_pages) & + flag_unzeroed_mask)); + arena_mapbits_unallocated_set(chunk, run_ind+total_pages-1, + (rem_pages << LG_PAGE), flags | + (arena_mapbits_unzeroed_get(chunk, run_ind+total_pages-1) & + flag_unzeroed_mask)); + if (flag_dirty != 0) { + arena_run_dirty_insert(arena, chunk, run_ind+need_pages, + rem_pages); + } + arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages); + } +} + +static bool +arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size, + bool remove, bool zero) +{ + arena_chunk_t *chunk; + arena_chunk_map_misc_t *miscelm; + size_t flag_dirty, flag_decommitted, run_ind, need_pages; + size_t flag_unzeroed_mask; + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); + miscelm = arena_run_to_miscelm(run); + run_ind = arena_miscelm_to_pageind(miscelm); + flag_dirty = arena_mapbits_dirty_get(chunk, run_ind); + flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind); + need_pages = (size >> LG_PAGE); + assert(need_pages > 0); + + if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize, + run_ind << LG_PAGE, size, arena->ind)) + return (true); + + if (remove) { + arena_run_split_remove(arena, chunk, run_ind, flag_dirty, + flag_decommitted, need_pages); + } + + if (zero) { + if (flag_decommitted != 0) { + /* The run is untouched, and therefore zeroed. */ + JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void + *)((uintptr_t)chunk + (run_ind << LG_PAGE)), + (need_pages << LG_PAGE)); + } else if (flag_dirty != 0) { + /* The run is dirty, so all pages must be zeroed. */ + arena_run_zero(chunk, run_ind, need_pages); + } else { + /* + * The run is clean, so some pages may be zeroed (i.e. + * never before touched). + */ + size_t i; + for (i = 0; i < need_pages; i++) { + if (arena_mapbits_unzeroed_get(chunk, run_ind+i) + != 0) + arena_run_zero(chunk, run_ind+i, 1); + else if (config_debug) { + arena_run_page_validate_zeroed(chunk, + run_ind+i); + } else { + arena_run_page_mark_zeroed(chunk, + run_ind+i); + } + } + } + } else { + JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + + (run_ind << LG_PAGE)), (need_pages << LG_PAGE)); + } + + /* + * Set the last element first, in case the run only contains one page + * (i.e. both statements set the same element). + */ + flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ? 
+ CHUNK_MAP_UNZEROED : 0; + arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty | + (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, + run_ind+need_pages-1))); + arena_mapbits_large_set(chunk, run_ind, size, flag_dirty | + (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, run_ind))); + return (false); +} + +static bool +arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero) +{ + + return (arena_run_split_large_helper(arena, run, size, true, zero)); +} + +static bool +arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero) +{ + + return (arena_run_split_large_helper(arena, run, size, false, zero)); +} + +static bool +arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size, + szind_t binind) +{ + arena_chunk_t *chunk; + arena_chunk_map_misc_t *miscelm; + size_t flag_dirty, flag_decommitted, run_ind, need_pages, i; + + assert(binind != BININD_INVALID); + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); + miscelm = arena_run_to_miscelm(run); + run_ind = arena_miscelm_to_pageind(miscelm); + flag_dirty = arena_mapbits_dirty_get(chunk, run_ind); + flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind); + need_pages = (size >> LG_PAGE); + assert(need_pages > 0); + + if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize, + run_ind << LG_PAGE, size, arena->ind)) + return (true); + + arena_run_split_remove(arena, chunk, run_ind, flag_dirty, + flag_decommitted, need_pages); + + for (i = 0; i < need_pages; i++) { + size_t flag_unzeroed = arena_mapbits_unzeroed_get(chunk, + run_ind+i); + arena_mapbits_small_set(chunk, run_ind+i, i, binind, + flag_unzeroed); + if (config_debug && flag_dirty == 0 && flag_unzeroed == 0) + arena_run_page_validate_zeroed(chunk, run_ind+i); + } + JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + + (run_ind << LG_PAGE)), (need_pages << LG_PAGE)); + return (false); +} + +static arena_chunk_t * +arena_chunk_init_spare(arena_t *arena) +{ + arena_chunk_t *chunk; + + assert(arena->spare != NULL); + + chunk = arena->spare; + arena->spare = NULL; + + assert(arena_mapbits_allocated_get(chunk, map_bias) == 0); + assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0); + assert(arena_mapbits_unallocated_size_get(chunk, map_bias) == + arena_maxrun); + assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) == + arena_maxrun); + assert(arena_mapbits_dirty_get(chunk, map_bias) == + arena_mapbits_dirty_get(chunk, chunk_npages-1)); + + return (chunk); +} + +static bool +arena_chunk_register(arena_t *arena, arena_chunk_t *chunk, size_t sn, bool zero, + bool *gdump) +{ + + /* + * The extent node notion of "committed" doesn't directly apply to + * arena chunks. Arbitrarily mark them as committed. The commit state + * of runs is tracked individually, and upon chunk deallocation the + * entire chunk is in a consistent commit state. + */ + extent_node_init(&chunk->node, arena, chunk, chunksize, sn, zero, true); + extent_node_achunk_set(&chunk->node, true); + return (chunk_register(chunk, &chunk->node, gdump)); +} + +static arena_chunk_t * +arena_chunk_alloc_internal_hard(tsdn_t *tsdn, arena_t *arena, + chunk_hooks_t *chunk_hooks, bool *zero, bool *commit) +{ + arena_chunk_t *chunk; + size_t sn; + + malloc_mutex_unlock(tsdn, &arena->lock); + /* prof_gdump() requirement. 
*/ + witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0); + + chunk = (arena_chunk_t *)chunk_alloc_wrapper(tsdn, arena, chunk_hooks, + NULL, chunksize, chunksize, &sn, zero, commit); + if (chunk != NULL && !*commit) { + /* Commit header. */ + if (chunk_hooks->commit(chunk, chunksize, 0, map_bias << + LG_PAGE, arena->ind)) { + chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, + (void *)chunk, chunksize, sn, *zero, *commit); + chunk = NULL; + } + } + if (chunk != NULL) { + bool gdump; + if (arena_chunk_register(arena, chunk, sn, *zero, &gdump)) { + if (!*commit) { + /* Undo commit of header. */ + chunk_hooks->decommit(chunk, chunksize, 0, + map_bias << LG_PAGE, arena->ind); + } + chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, + (void *)chunk, chunksize, sn, *zero, *commit); + chunk = NULL; + } + if (config_prof && opt_prof && gdump) + prof_gdump(tsdn); + } + + malloc_mutex_lock(tsdn, &arena->lock); + return (chunk); +} + +static arena_chunk_t * +arena_chunk_alloc_internal(tsdn_t *tsdn, arena_t *arena, bool *zero, + bool *commit) +{ + arena_chunk_t *chunk; + chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; + size_t sn; + + /* prof_gdump() requirement. */ + witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 1); + malloc_mutex_assert_owner(tsdn, &arena->lock); + + chunk = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, chunksize, + chunksize, &sn, zero, commit, true); + if (chunk != NULL) { + bool gdump; + if (arena_chunk_register(arena, chunk, sn, *zero, &gdump)) { + chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk, + chunksize, sn, true); + return (NULL); + } + if (config_prof && opt_prof && gdump) { + malloc_mutex_unlock(tsdn, &arena->lock); + prof_gdump(tsdn); + malloc_mutex_lock(tsdn, &arena->lock); + } + } + if (chunk == NULL) { + chunk = arena_chunk_alloc_internal_hard(tsdn, arena, + &chunk_hooks, zero, commit); + } + + if (config_stats && chunk != NULL) { + arena->stats.mapped += chunksize; + arena->stats.metadata_mapped += (map_bias << LG_PAGE); + } + + return (chunk); +} + +static arena_chunk_t * +arena_chunk_init_hard(tsdn_t *tsdn, arena_t *arena) +{ + arena_chunk_t *chunk; + bool zero, commit; + size_t flag_unzeroed, flag_decommitted, i; + + assert(arena->spare == NULL); + + zero = false; + commit = false; + chunk = arena_chunk_alloc_internal(tsdn, arena, &zero, &commit); + if (chunk == NULL) + return (NULL); + + if (config_thp && opt_thp) { + chunk->hugepage = thp_initially_huge; + } + + /* + * Initialize the map to contain one maximal free untouched run. Mark + * the pages as zeroed if arena_chunk_alloc_internal() returned a zeroed + * or decommitted chunk. + */ + flag_unzeroed = (zero || !commit) ? 0 : CHUNK_MAP_UNZEROED; + flag_decommitted = commit ? 0 : CHUNK_MAP_DECOMMITTED; + arena_mapbits_unallocated_set(chunk, map_bias, arena_maxrun, + flag_unzeroed | flag_decommitted); + /* + * There is no need to initialize the internal page map entries unless + * the chunk is not zeroed. 
+ */ + if (!zero) { + JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED( + (void *)arena_bitselm_get_const(chunk, map_bias+1), + (size_t)((uintptr_t)arena_bitselm_get_const(chunk, + chunk_npages-1) - + (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1))); + for (i = map_bias+1; i < chunk_npages-1; i++) + arena_mapbits_internal_set(chunk, i, flag_unzeroed); + } else { + JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void + *)arena_bitselm_get_const(chunk, map_bias+1), + (size_t)((uintptr_t)arena_bitselm_get_const(chunk, + chunk_npages-1) - + (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1))); + if (config_debug) { + for (i = map_bias+1; i < chunk_npages-1; i++) { + assert(arena_mapbits_unzeroed_get(chunk, i) == + flag_unzeroed); + } + } + } + arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxrun, + flag_unzeroed); + + return (chunk); +} + +static arena_chunk_t * +arena_chunk_alloc(tsdn_t *tsdn, arena_t *arena) +{ + arena_chunk_t *chunk; + + if (arena->spare != NULL) + chunk = arena_chunk_init_spare(arena); + else { + chunk = arena_chunk_init_hard(tsdn, arena); + if (chunk == NULL) + return (NULL); + } + + ql_elm_new(&chunk->node, ql_link); + ql_tail_insert(&arena->achunks, &chunk->node, ql_link); + arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias); + + return (chunk); +} + +static void +arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk) +{ + size_t sn; + UNUSED bool hugepage JEMALLOC_CC_SILENCE_INIT(false); + bool committed; + chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; + + chunk_deregister(chunk, &chunk->node); + + sn = extent_node_sn_get(&chunk->node); + if (config_thp && opt_thp) { + hugepage = chunk->hugepage; + } + committed = (arena_mapbits_decommitted_get(chunk, map_bias) == 0); + if (!committed) { + /* + * Decommit the header. Mark the chunk as decommitted even if + * header decommit fails, since treating a partially committed + * chunk as committed has a high potential for causing later + * access of decommitted memory. + */ + chunk_hooks = chunk_hooks_get(tsdn, arena); + chunk_hooks.decommit(chunk, chunksize, 0, map_bias << LG_PAGE, + arena->ind); + } + if (config_thp && opt_thp && hugepage != thp_initially_huge) { + /* + * Convert chunk back to initial THP state, so that all + * subsequent chunk allocations start out in a consistent state. 
+ */ + if (thp_initially_huge) { + pages_huge(chunk, chunksize); + } else { + pages_nohuge(chunk, chunksize); + } + } + + chunk_dalloc_cache(tsdn, arena, &chunk_hooks, (void *)chunk, chunksize, + sn, committed); + + if (config_stats) { + arena->stats.mapped -= chunksize; + arena->stats.metadata_mapped -= (map_bias << LG_PAGE); + } +} + +static void +arena_spare_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *spare) +{ + + assert(arena->spare != spare); + + if (arena_mapbits_dirty_get(spare, map_bias) != 0) { + arena_run_dirty_remove(arena, spare, map_bias, + chunk_npages-map_bias); + } + + arena_chunk_discard(tsdn, arena, spare); +} + +static void +arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk) +{ + arena_chunk_t *spare; + + assert(arena_mapbits_allocated_get(chunk, map_bias) == 0); + assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0); + assert(arena_mapbits_unallocated_size_get(chunk, map_bias) == + arena_maxrun); + assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) == + arena_maxrun); + assert(arena_mapbits_dirty_get(chunk, map_bias) == + arena_mapbits_dirty_get(chunk, chunk_npages-1)); + assert(arena_mapbits_decommitted_get(chunk, map_bias) == + arena_mapbits_decommitted_get(chunk, chunk_npages-1)); + + /* Remove run from runs_avail, so that the arena does not use it. */ + arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias); + + ql_remove(&arena->achunks, &chunk->node, ql_link); + spare = arena->spare; + arena->spare = chunk; + if (spare != NULL) + arena_spare_discard(tsdn, arena, spare); +} + +static void +arena_huge_malloc_stats_update(arena_t *arena, size_t usize) +{ + szind_t index = size2index(usize) - nlclasses - NBINS; + + cassert(config_stats); + + arena->stats.nmalloc_huge++; + arena->stats.allocated_huge += usize; + arena->stats.hstats[index].nmalloc++; + arena->stats.hstats[index].curhchunks++; +} + +static void +arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize) +{ + szind_t index = size2index(usize) - nlclasses - NBINS; + + cassert(config_stats); + + arena->stats.nmalloc_huge--; + arena->stats.allocated_huge -= usize; + arena->stats.hstats[index].nmalloc--; + arena->stats.hstats[index].curhchunks--; +} + +static void +arena_huge_dalloc_stats_update(arena_t *arena, size_t usize) +{ + szind_t index = size2index(usize) - nlclasses - NBINS; + + cassert(config_stats); + + arena->stats.ndalloc_huge++; + arena->stats.allocated_huge -= usize; + arena->stats.hstats[index].ndalloc++; + arena->stats.hstats[index].curhchunks--; +} + +static void +arena_huge_reset_stats_cancel(arena_t *arena, size_t usize) +{ + szind_t index = size2index(usize) - nlclasses - NBINS; + + cassert(config_stats); + + arena->stats.ndalloc_huge++; + arena->stats.hstats[index].ndalloc--; +} + +static void +arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize) +{ + szind_t index = size2index(usize) - nlclasses - NBINS; + + cassert(config_stats); + + arena->stats.ndalloc_huge--; + arena->stats.allocated_huge += usize; + arena->stats.hstats[index].ndalloc--; + arena->stats.hstats[index].curhchunks++; +} + +static void +arena_huge_ralloc_stats_update(arena_t *arena, size_t oldsize, size_t usize) +{ + + arena_huge_dalloc_stats_update(arena, oldsize); + arena_huge_malloc_stats_update(arena, usize); +} + +static void +arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize, + size_t usize) +{ + + arena_huge_dalloc_stats_update_undo(arena, oldsize); + arena_huge_malloc_stats_update_undo(arena, usize); +} + 
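The paired *_stats_update()/*_stats_update_undo() helpers above exist because the huge-allocation paths update statistics optimistically while holding the arena lock, drop the lock for the expensive chunk operation, and revert the update if that operation fails. Schematically (huge_stats_t and huge_alloc_sketch are hypothetical stand-ins, with locking reduced to comments):

#include <stdlib.h>

typedef struct {
	size_t nmalloc_huge;
	size_t allocated_huge;
} huge_stats_t;

static void
stats_update(huge_stats_t *s, size_t usize)
{
	s->nmalloc_huge++;
	s->allocated_huge += usize;
}

static void
stats_update_undo(huge_stats_t *s, size_t usize)
{
	s->nmalloc_huge--;
	s->allocated_huge -= usize;
}

static void *
huge_alloc_sketch(huge_stats_t *s, size_t usize)
{
	void *ret;

	/* lock(arena); */
	stats_update(s, usize);		/* Optimistic: assume success. */
	/* unlock(arena); the slow allocation runs without the lock. */
	ret = malloc(usize);		/* Stand-in for chunk allocation. */
	if (ret == NULL) {
		/* lock(arena); */
		stats_update_undo(s, usize);	/* Revert on failure. */
		/* unlock(arena); */
	}
	return (ret);
}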
+extent_node_t * +arena_node_alloc(tsdn_t *tsdn, arena_t *arena) +{ + extent_node_t *node; + + malloc_mutex_lock(tsdn, &arena->node_cache_mtx); + node = ql_last(&arena->node_cache, ql_link); + if (node == NULL) { + malloc_mutex_unlock(tsdn, &arena->node_cache_mtx); + return (base_alloc(tsdn, sizeof(extent_node_t))); + } + ql_tail_remove(&arena->node_cache, extent_node_t, ql_link); + malloc_mutex_unlock(tsdn, &arena->node_cache_mtx); + return (node); +} + +void +arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node) +{ + + malloc_mutex_lock(tsdn, &arena->node_cache_mtx); + ql_elm_new(node, ql_link); + ql_tail_insert(&arena->node_cache, node, ql_link); + malloc_mutex_unlock(tsdn, &arena->node_cache_mtx); +} + +static void * +arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena, + chunk_hooks_t *chunk_hooks, size_t usize, size_t alignment, size_t *sn, + bool *zero, size_t csize) +{ + void *ret; + bool commit = true; + + ret = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, csize, + alignment, sn, zero, &commit); + if (ret == NULL) { + /* Revert optimistic stats updates. */ + malloc_mutex_lock(tsdn, &arena->lock); + if (config_stats) { + arena_huge_malloc_stats_update_undo(arena, usize); + arena->stats.mapped -= usize; + } + arena_nactive_sub(arena, usize >> LG_PAGE); + malloc_mutex_unlock(tsdn, &arena->lock); + } + + return (ret); +} + +void * +arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize, + size_t alignment, size_t *sn, bool *zero) +{ + void *ret; + chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; + size_t csize = CHUNK_CEILING(usize); + bool commit = true; + + malloc_mutex_lock(tsdn, &arena->lock); + + /* Optimistically update stats. */ + if (config_stats) { + arena_huge_malloc_stats_update(arena, usize); + arena->stats.mapped += usize; + } + arena_nactive_add(arena, usize >> LG_PAGE); + + ret = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, csize, + alignment, sn, zero, &commit, true); + malloc_mutex_unlock(tsdn, &arena->lock); + if (ret == NULL) { + ret = arena_chunk_alloc_huge_hard(tsdn, arena, &chunk_hooks, + usize, alignment, sn, zero, csize); + } + + return (ret); +} + +void +arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t usize, + size_t sn) +{ + chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; + size_t csize; + + csize = CHUNK_CEILING(usize); + malloc_mutex_lock(tsdn, &arena->lock); + if (config_stats) { + arena_huge_dalloc_stats_update(arena, usize); + arena->stats.mapped -= usize; + } + arena_nactive_sub(arena, usize >> LG_PAGE); + + chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk, csize, sn, true); + malloc_mutex_unlock(tsdn, &arena->lock); +} + +void +arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena, void *chunk, + size_t oldsize, size_t usize) +{ + + assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize)); + assert(oldsize != usize); + + malloc_mutex_lock(tsdn, &arena->lock); + if (config_stats) + arena_huge_ralloc_stats_update(arena, oldsize, usize); + if (oldsize < usize) + arena_nactive_add(arena, (usize - oldsize) >> LG_PAGE); + else + arena_nactive_sub(arena, (oldsize - usize) >> LG_PAGE); + malloc_mutex_unlock(tsdn, &arena->lock); +} + +void +arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, void *chunk, + size_t oldsize, size_t usize, size_t sn) +{ + size_t udiff = oldsize - usize; + size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize); + + malloc_mutex_lock(tsdn, &arena->lock); + if (config_stats) { + arena_huge_ralloc_stats_update(arena, oldsize, usize); + 
if (cdiff != 0) + arena->stats.mapped -= cdiff; + } + arena_nactive_sub(arena, udiff >> LG_PAGE); + + if (cdiff != 0) { + chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; + void *nchunk = (void *)((uintptr_t)chunk + + CHUNK_CEILING(usize)); + + chunk_dalloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff, + sn, true); + } + malloc_mutex_unlock(tsdn, &arena->lock); +} + +static bool +arena_chunk_ralloc_huge_expand_hard(tsdn_t *tsdn, arena_t *arena, + chunk_hooks_t *chunk_hooks, void *chunk, size_t oldsize, size_t usize, + size_t *sn, bool *zero, void *nchunk, size_t udiff, size_t cdiff) +{ + bool err; + bool commit = true; + + err = (chunk_alloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff, + chunksize, sn, zero, &commit) == NULL); + if (err) { + /* Revert optimistic stats updates. */ + malloc_mutex_lock(tsdn, &arena->lock); + if (config_stats) { + arena_huge_ralloc_stats_update_undo(arena, oldsize, + usize); + arena->stats.mapped -= cdiff; + } + arena_nactive_sub(arena, udiff >> LG_PAGE); + malloc_mutex_unlock(tsdn, &arena->lock); + } else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk, + cdiff, true, arena->ind)) { + chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff, + *sn, *zero, true); + err = true; + } + return (err); +} + +bool +arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, void *chunk, + size_t oldsize, size_t usize, bool *zero) +{ + bool err; + chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena); + void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize)); + size_t udiff = usize - oldsize; + size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize); + size_t sn; + bool commit = true; + + malloc_mutex_lock(tsdn, &arena->lock); + + /* Optimistically update stats. */ + if (config_stats) { + arena_huge_ralloc_stats_update(arena, oldsize, usize); + arena->stats.mapped += cdiff; + } + arena_nactive_add(arena, udiff >> LG_PAGE); + + err = (chunk_alloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff, + chunksize, &sn, zero, &commit, true) == NULL); + malloc_mutex_unlock(tsdn, &arena->lock); + if (err) { + err = arena_chunk_ralloc_huge_expand_hard(tsdn, arena, + &chunk_hooks, chunk, oldsize, usize, &sn, zero, nchunk, + udiff, cdiff); + } else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk, + cdiff, true, arena->ind)) { + chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks, nchunk, cdiff, + sn, *zero, true); + err = true; + } + + return (err); +} + +/* + * Do first-best-fit run selection, i.e. select the lowest run that best fits. + * Run sizes are indexed, so not all candidate runs are necessarily exactly the + * same size. + */ +static arena_run_t * +arena_run_first_best_fit(arena_t *arena, size_t size) +{ + pszind_t pind, i; + + pind = psz2ind(run_quantize_ceil(size)); + + for (i = pind; pind2sz(i) <= chunksize; i++) { + arena_chunk_map_misc_t *miscelm = arena_run_heap_first( + &arena->runs_avail[i]); + if (miscelm != NULL) + return (&miscelm->run); + } + + return (NULL); +} + +static arena_run_t * +arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero) +{ + arena_run_t *run = arena_run_first_best_fit(arena, size); + if (run != NULL) { + if (arena_run_split_large(arena, run, size, zero)) + run = NULL; + } + return (run); +} + +static arena_run_t * +arena_run_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t size, bool zero) +{ + arena_chunk_t *chunk; + arena_run_t *run; + + assert(size <= arena_maxrun); + assert(size == PAGE_CEILING(size)); + + /* Search the arena's chunks for the lowest best fit. 
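+	 * A larger run may be chosen and then split down to the requested
+	 * size.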
*/ + run = arena_run_alloc_large_helper(arena, size, zero); + if (run != NULL) + return (run); + + /* + * No usable runs. Create a new chunk from which to allocate the run. + */ + chunk = arena_chunk_alloc(tsdn, arena); + if (chunk != NULL) { + run = &arena_miscelm_get_mutable(chunk, map_bias)->run; + if (arena_run_split_large(arena, run, size, zero)) + run = NULL; + return (run); + } + + /* + * arena_chunk_alloc() failed, but another thread may have made + * sufficient memory available while this one dropped arena->lock in + * arena_chunk_alloc(), so search one more time. + */ + return (arena_run_alloc_large_helper(arena, size, zero)); +} + +static arena_run_t * +arena_run_alloc_small_helper(arena_t *arena, size_t size, szind_t binind) +{ + arena_run_t *run = arena_run_first_best_fit(arena, size); + if (run != NULL) { + if (arena_run_split_small(arena, run, size, binind)) + run = NULL; + } + return (run); +} + +static arena_run_t * +arena_run_alloc_small(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t binind) +{ + arena_chunk_t *chunk; + arena_run_t *run; + + assert(size <= arena_maxrun); + assert(size == PAGE_CEILING(size)); + assert(binind != BININD_INVALID); + + /* Search the arena's chunks for the lowest best fit. */ + run = arena_run_alloc_small_helper(arena, size, binind); + if (run != NULL) + return (run); + + /* + * No usable runs. Create a new chunk from which to allocate the run. + */ + chunk = arena_chunk_alloc(tsdn, arena); + if (chunk != NULL) { + run = &arena_miscelm_get_mutable(chunk, map_bias)->run; + if (arena_run_split_small(arena, run, size, binind)) + run = NULL; + return (run); + } + + /* + * arena_chunk_alloc() failed, but another thread may have made + * sufficient memory available while this one dropped arena->lock in + * arena_chunk_alloc(), so search one more time. + */ + return (arena_run_alloc_small_helper(arena, size, binind)); +} + +static bool +arena_lg_dirty_mult_valid(ssize_t lg_dirty_mult) +{ + + return (lg_dirty_mult >= -1 && lg_dirty_mult < (ssize_t)(sizeof(size_t) + << 3)); +} + +ssize_t +arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena) +{ + ssize_t lg_dirty_mult; + + malloc_mutex_lock(tsdn, &arena->lock); + lg_dirty_mult = arena->lg_dirty_mult; + malloc_mutex_unlock(tsdn, &arena->lock); + + return (lg_dirty_mult); +} + +bool +arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena, ssize_t lg_dirty_mult) +{ + + if (!arena_lg_dirty_mult_valid(lg_dirty_mult)) + return (true); + + malloc_mutex_lock(tsdn, &arena->lock); + arena->lg_dirty_mult = lg_dirty_mult; + arena_maybe_purge(tsdn, arena); + malloc_mutex_unlock(tsdn, &arena->lock); + + return (false); +} + +static void +arena_decay_deadline_init(arena_t *arena) +{ + + assert(opt_purge == purge_mode_decay); + + /* + * Generate a new deadline that is uniformly random within the next + * epoch after the current one. 
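+	 * Jittering the deadline prevents independent arenas from advancing
+	 * their decay epochs, and therefore purging, in lockstep.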
+ */ + nstime_copy(&arena->decay.deadline, &arena->decay.epoch); + nstime_add(&arena->decay.deadline, &arena->decay.interval); + if (arena->decay.time > 0) { + nstime_t jitter; + + nstime_init(&jitter, prng_range_u64(&arena->decay.jitter_state, + nstime_ns(&arena->decay.interval))); + nstime_add(&arena->decay.deadline, &jitter); + } +} + +static bool +arena_decay_deadline_reached(const arena_t *arena, const nstime_t *time) +{ + + assert(opt_purge == purge_mode_decay); + + return (nstime_compare(&arena->decay.deadline, time) <= 0); +} + +static size_t +arena_decay_backlog_npages_limit(const arena_t *arena) +{ + static const uint64_t h_steps[] = { +#define STEP(step, h, x, y) \ + h, + SMOOTHSTEP +#undef STEP + }; + uint64_t sum; + size_t npages_limit_backlog; + unsigned i; + + assert(opt_purge == purge_mode_decay); + + /* + * For each element of decay_backlog, multiply by the corresponding + * fixed-point smoothstep decay factor. Sum the products, then divide + * to round down to the nearest whole number of pages. + */ + sum = 0; + for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) + sum += arena->decay.backlog[i] * h_steps[i]; + npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP); + + return (npages_limit_backlog); +} + +static void +arena_decay_backlog_update_last(arena_t *arena) +{ + size_t ndirty_delta = (arena->ndirty > arena->decay.ndirty) ? + arena->ndirty - arena->decay.ndirty : 0; + arena->decay.backlog[SMOOTHSTEP_NSTEPS-1] = ndirty_delta; +} + +static void +arena_decay_backlog_update(arena_t *arena, uint64_t nadvance_u64) +{ + + if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) { + memset(arena->decay.backlog, 0, (SMOOTHSTEP_NSTEPS-1) * + sizeof(size_t)); + } else { + size_t nadvance_z = (size_t)nadvance_u64; + + assert((uint64_t)nadvance_z == nadvance_u64); + + memmove(arena->decay.backlog, &arena->decay.backlog[nadvance_z], + (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t)); + if (nadvance_z > 1) { + memset(&arena->decay.backlog[SMOOTHSTEP_NSTEPS - + nadvance_z], 0, (nadvance_z-1) * sizeof(size_t)); + } + } + + arena_decay_backlog_update_last(arena); +} + +static void +arena_decay_epoch_advance_helper(arena_t *arena, const nstime_t *time) +{ + uint64_t nadvance_u64; + nstime_t delta; + + assert(opt_purge == purge_mode_decay); + assert(arena_decay_deadline_reached(arena, time)); + + nstime_copy(&delta, time); + nstime_subtract(&delta, &arena->decay.epoch); + nadvance_u64 = nstime_divide(&delta, &arena->decay.interval); + assert(nadvance_u64 > 0); + + /* Add nadvance_u64 decay intervals to epoch. */ + nstime_copy(&delta, &arena->decay.interval); + nstime_imultiply(&delta, nadvance_u64); + nstime_add(&arena->decay.epoch, &delta); + + /* Set a new deadline. */ + arena_decay_deadline_init(arena); + + /* Update the backlog. 
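+	 * The backlog records the number of pages dirtied during each of the
+	 * most recent SMOOTHSTEP_NSTEPS epochs.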
*/ + arena_decay_backlog_update(arena, nadvance_u64); +} + +static void +arena_decay_epoch_advance_purge(tsdn_t *tsdn, arena_t *arena) +{ + size_t ndirty_limit = arena_decay_backlog_npages_limit(arena); + + if (arena->ndirty > ndirty_limit) + arena_purge_to_limit(tsdn, arena, ndirty_limit); + arena->decay.ndirty = arena->ndirty; +} + +static void +arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, const nstime_t *time) +{ + + arena_decay_epoch_advance_helper(arena, time); + arena_decay_epoch_advance_purge(tsdn, arena); +} + +static void +arena_decay_init(arena_t *arena, ssize_t decay_time) +{ + + arena->decay.time = decay_time; + if (decay_time > 0) { + nstime_init2(&arena->decay.interval, decay_time, 0); + nstime_idivide(&arena->decay.interval, SMOOTHSTEP_NSTEPS); + } + + nstime_init(&arena->decay.epoch, 0); + nstime_update(&arena->decay.epoch); + arena->decay.jitter_state = (uint64_t)(uintptr_t)arena; + arena_decay_deadline_init(arena); + arena->decay.ndirty = arena->ndirty; + memset(arena->decay.backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t)); +} + +static bool +arena_decay_time_valid(ssize_t decay_time) +{ + + if (decay_time < -1) + return (false); + if (decay_time == -1 || (uint64_t)decay_time <= NSTIME_SEC_MAX) + return (true); + return (false); +} + +ssize_t +arena_decay_time_get(tsdn_t *tsdn, arena_t *arena) +{ + ssize_t decay_time; + + malloc_mutex_lock(tsdn, &arena->lock); + decay_time = arena->decay.time; + malloc_mutex_unlock(tsdn, &arena->lock); + + return (decay_time); +} + +bool +arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time) +{ + + if (!arena_decay_time_valid(decay_time)) + return (true); + + malloc_mutex_lock(tsdn, &arena->lock); + /* + * Restart decay backlog from scratch, which may cause many dirty pages + * to be immediately purged. It would conceptually be possible to map + * the old backlog onto the new backlog, but there is no justification + * for such complexity since decay_time changes are intended to be + * infrequent, either between the {-1, 0, >0} states, or a one-time + * arbitrary change during initial arena configuration. + */ + arena_decay_init(arena, decay_time); + arena_maybe_purge(tsdn, arena); + malloc_mutex_unlock(tsdn, &arena->lock); + + return (false); +} + +static void +arena_maybe_purge_ratio(tsdn_t *tsdn, arena_t *arena) +{ + + assert(opt_purge == purge_mode_ratio); + + /* Don't purge if the option is disabled. */ + if (arena->lg_dirty_mult < 0) + return; + + /* + * Iterate, since preventing recursive purging could otherwise leave too + * many dirty pages. + */ + while (true) { + size_t threshold = (arena->nactive >> arena->lg_dirty_mult); + if (threshold < chunk_npages) + threshold = chunk_npages; + /* + * Don't purge unless the number of purgeable pages exceeds the + * threshold. + */ + if (arena->ndirty <= threshold) + return; + arena_purge_to_limit(tsdn, arena, threshold); + } +} + +static void +arena_maybe_purge_decay(tsdn_t *tsdn, arena_t *arena) +{ + nstime_t time; + + assert(opt_purge == purge_mode_decay); + + /* Purge all or nothing if the option is disabled. */ + if (arena->decay.time <= 0) { + if (arena->decay.time == 0) + arena_purge_to_limit(tsdn, arena, 0); + return; + } + + nstime_init(&time, 0); + nstime_update(&time); + if (unlikely(!nstime_monotonic() && nstime_compare(&arena->decay.epoch, + &time) > 0)) { + /* + * Time went backwards. 
Move the epoch back in time and + * generate a new deadline, with the expectation that time + * typically flows forward for long enough periods of time that + * epochs complete. Unfortunately, this strategy is susceptible + * to clock jitter triggering premature epoch advances, but + * clock jitter estimation and compensation isn't feasible here + * because calls into this code are event-driven. + */ + nstime_copy(&arena->decay.epoch, &time); + arena_decay_deadline_init(arena); + } else { + /* Verify that time does not go backwards. */ + assert(nstime_compare(&arena->decay.epoch, &time) <= 0); + } + + /* + * If the deadline has been reached, advance to the current epoch and + * purge to the new limit if necessary. Note that dirty pages created + * during the current epoch are not subject to purge until a future + * epoch, so as a result purging only happens during epoch advances. + */ + if (arena_decay_deadline_reached(arena, &time)) + arena_decay_epoch_advance(tsdn, arena, &time); +} + +void +arena_maybe_purge(tsdn_t *tsdn, arena_t *arena) +{ + + /* Don't recursively purge. */ + if (arena->purging) + return; + + if (opt_purge == purge_mode_ratio) + arena_maybe_purge_ratio(tsdn, arena); + else + arena_maybe_purge_decay(tsdn, arena); +} + +static size_t +arena_dirty_count(arena_t *arena) +{ + size_t ndirty = 0; + arena_runs_dirty_link_t *rdelm; + extent_node_t *chunkselm; + + for (rdelm = qr_next(&arena->runs_dirty, rd_link), + chunkselm = qr_next(&arena->chunks_cache, cc_link); + rdelm != &arena->runs_dirty; rdelm = qr_next(rdelm, rd_link)) { + size_t npages; + + if (rdelm == &chunkselm->rd) { + npages = extent_node_size_get(chunkselm) >> LG_PAGE; + chunkselm = qr_next(chunkselm, cc_link); + } else { + arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE( + rdelm); + arena_chunk_map_misc_t *miscelm = + arena_rd_to_miscelm(rdelm); + size_t pageind = arena_miscelm_to_pageind(miscelm); + assert(arena_mapbits_allocated_get(chunk, pageind) == + 0); + assert(arena_mapbits_large_get(chunk, pageind) == 0); + assert(arena_mapbits_dirty_get(chunk, pageind) != 0); + npages = arena_mapbits_unallocated_size_get(chunk, + pageind) >> LG_PAGE; + } + ndirty += npages; + } + + return (ndirty); +} + +static size_t +arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, + size_t ndirty_limit, arena_runs_dirty_link_t *purge_runs_sentinel, + extent_node_t *purge_chunks_sentinel) +{ + arena_runs_dirty_link_t *rdelm, *rdelm_next; + extent_node_t *chunkselm; + size_t nstashed = 0; + + /* Stash runs/chunks according to ndirty_limit. */ + for (rdelm = qr_next(&arena->runs_dirty, rd_link), + chunkselm = qr_next(&arena->chunks_cache, cc_link); + rdelm != &arena->runs_dirty; rdelm = rdelm_next) { + size_t npages; + rdelm_next = qr_next(rdelm, rd_link); + + if (rdelm == &chunkselm->rd) { + extent_node_t *chunkselm_next; + size_t sn; + bool zero, commit; + UNUSED void *chunk; + + npages = extent_node_size_get(chunkselm) >> LG_PAGE; + if (opt_purge == purge_mode_decay && arena->ndirty - + (nstashed + npages) < ndirty_limit) + break; + + chunkselm_next = qr_next(chunkselm, cc_link); + /* + * Allocate. chunkselm remains valid due to the + * dalloc_node=false argument to chunk_alloc_cache(). 
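+			 * The node is instead migrated to the purge list
+			 * below via extent_node_dirty_insert().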
+ */ + zero = false; + commit = false; + chunk = chunk_alloc_cache(tsdn, arena, chunk_hooks, + extent_node_addr_get(chunkselm), + extent_node_size_get(chunkselm), chunksize, &sn, + &zero, &commit, false); + assert(chunk == extent_node_addr_get(chunkselm)); + assert(zero == extent_node_zeroed_get(chunkselm)); + extent_node_dirty_insert(chunkselm, purge_runs_sentinel, + purge_chunks_sentinel); + assert(npages == (extent_node_size_get(chunkselm) >> + LG_PAGE)); + chunkselm = chunkselm_next; + } else { + arena_chunk_t *chunk = + (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm); + arena_chunk_map_misc_t *miscelm = + arena_rd_to_miscelm(rdelm); + size_t pageind = arena_miscelm_to_pageind(miscelm); + arena_run_t *run = &miscelm->run; + size_t run_size = + arena_mapbits_unallocated_size_get(chunk, pageind); + + npages = run_size >> LG_PAGE; + if (opt_purge == purge_mode_decay && arena->ndirty - + (nstashed + npages) < ndirty_limit) + break; + + assert(pageind + npages <= chunk_npages); + assert(arena_mapbits_dirty_get(chunk, pageind) == + arena_mapbits_dirty_get(chunk, pageind+npages-1)); + + /* + * If purging the spare chunk's run, make it available + * prior to allocation. + */ + if (chunk == arena->spare) + arena_chunk_alloc(tsdn, arena); + + /* Temporarily allocate the free dirty run. */ + arena_run_split_large(arena, run, run_size, false); + /* Stash. */ + if (false) + qr_new(rdelm, rd_link); /* Redundant. */ + else { + assert(qr_next(rdelm, rd_link) == rdelm); + assert(qr_prev(rdelm, rd_link) == rdelm); + } + qr_meld(purge_runs_sentinel, rdelm, rd_link); + } + + nstashed += npages; + if (opt_purge == purge_mode_ratio && arena->ndirty - nstashed <= + ndirty_limit) + break; + } + + return (nstashed); +} + +static size_t +arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, + arena_runs_dirty_link_t *purge_runs_sentinel, + extent_node_t *purge_chunks_sentinel) +{ + size_t npurged, nmadvise; + arena_runs_dirty_link_t *rdelm; + extent_node_t *chunkselm; + + if (config_stats) + nmadvise = 0; + npurged = 0; + + malloc_mutex_unlock(tsdn, &arena->lock); + for (rdelm = qr_next(purge_runs_sentinel, rd_link), + chunkselm = qr_next(purge_chunks_sentinel, cc_link); + rdelm != purge_runs_sentinel; rdelm = qr_next(rdelm, rd_link)) { + size_t npages; + + if (rdelm == &chunkselm->rd) { + /* + * Don't actually purge the chunk here because 1) + * chunkselm is embedded in the chunk and must remain + * valid, and 2) we deallocate the chunk in + * arena_unstash_purged(), where it is destroyed, + * decommitted, or purged, depending on chunk + * deallocation policy. + */ + size_t size = extent_node_size_get(chunkselm); + npages = size >> LG_PAGE; + chunkselm = qr_next(chunkselm, cc_link); + } else { + size_t pageind, run_size, flag_unzeroed, flags, i; + bool decommitted; + arena_chunk_t *chunk = + (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm); + arena_chunk_map_misc_t *miscelm = + arena_rd_to_miscelm(rdelm); + pageind = arena_miscelm_to_pageind(miscelm); + run_size = arena_mapbits_large_size_get(chunk, pageind); + npages = run_size >> LG_PAGE; + + /* + * If this is the first run purged within chunk, mark + * the chunk as non-THP-capable. This will prevent all + * use of THPs for this chunk until the chunk as a whole + * is deallocated. 
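+			 * pages_nohuge() returns true on failure, so the
+			 * hugepage flag remains set only if the madvise(2)
+			 * call fails.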
+ */ + if (config_thp && opt_thp && chunk->hugepage) { + chunk->hugepage = pages_nohuge(chunk, + chunksize); + } + + assert(pageind + npages <= chunk_npages); + assert(!arena_mapbits_decommitted_get(chunk, pageind)); + assert(!arena_mapbits_decommitted_get(chunk, + pageind+npages-1)); + decommitted = !chunk_hooks->decommit(chunk, chunksize, + pageind << LG_PAGE, npages << LG_PAGE, arena->ind); + if (decommitted) { + flag_unzeroed = 0; + flags = CHUNK_MAP_DECOMMITTED; + } else { + flag_unzeroed = chunk_purge_wrapper(tsdn, arena, + chunk_hooks, chunk, chunksize, pageind << + LG_PAGE, run_size) ? CHUNK_MAP_UNZEROED : 0; + flags = flag_unzeroed; + } + arena_mapbits_large_set(chunk, pageind+npages-1, 0, + flags); + arena_mapbits_large_set(chunk, pageind, run_size, + flags); + + /* + * Set the unzeroed flag for internal pages, now that + * chunk_purge_wrapper() has returned whether the pages + * were zeroed as a side effect of purging. This chunk + * map modification is safe even though the arena mutex + * isn't currently owned by this thread, because the run + * is marked as allocated, thus protecting it from being + * modified by any other thread. As long as these + * writes don't perturb the first and last elements' + * CHUNK_MAP_ALLOCATED bits, behavior is well defined. + */ + for (i = 1; i < npages-1; i++) { + arena_mapbits_internal_set(chunk, pageind+i, + flag_unzeroed); + } + } + + npurged += npages; + if (config_stats) + nmadvise++; + } + malloc_mutex_lock(tsdn, &arena->lock); + + if (config_stats) { + arena->stats.nmadvise += nmadvise; + arena->stats.purged += npurged; + } + + return (npurged); +} + +static void +arena_unstash_purged(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, + arena_runs_dirty_link_t *purge_runs_sentinel, + extent_node_t *purge_chunks_sentinel) +{ + arena_runs_dirty_link_t *rdelm, *rdelm_next; + extent_node_t *chunkselm; + + /* Deallocate chunks/runs. 
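+	 * Cached chunks are handed to chunk_dalloc_wrapper(); runs are
+	 * returned to the arena via arena_run_dalloc().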
*/ + for (rdelm = qr_next(purge_runs_sentinel, rd_link), + chunkselm = qr_next(purge_chunks_sentinel, cc_link); + rdelm != purge_runs_sentinel; rdelm = rdelm_next) { + rdelm_next = qr_next(rdelm, rd_link); + if (rdelm == &chunkselm->rd) { + extent_node_t *chunkselm_next = qr_next(chunkselm, + cc_link); + void *addr = extent_node_addr_get(chunkselm); + size_t size = extent_node_size_get(chunkselm); + size_t sn = extent_node_sn_get(chunkselm); + bool zeroed = extent_node_zeroed_get(chunkselm); + bool committed = extent_node_committed_get(chunkselm); + extent_node_dirty_remove(chunkselm); + arena_node_dalloc(tsdn, arena, chunkselm); + chunkselm = chunkselm_next; + chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, addr, + size, sn, zeroed, committed); + } else { + arena_chunk_t *chunk = + (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm); + arena_chunk_map_misc_t *miscelm = + arena_rd_to_miscelm(rdelm); + size_t pageind = arena_miscelm_to_pageind(miscelm); + bool decommitted = (arena_mapbits_decommitted_get(chunk, + pageind) != 0); + arena_run_t *run = &miscelm->run; + qr_remove(rdelm, rd_link); + arena_run_dalloc(tsdn, arena, run, false, true, + decommitted); + } + } +} + +/* + * NB: ndirty_limit is interpreted differently depending on opt_purge: + * - purge_mode_ratio: Purge as few dirty run/chunks as possible to reach the + * desired state: + * (arena->ndirty <= ndirty_limit) + * - purge_mode_decay: Purge as many dirty runs/chunks as possible without + * violating the invariant: + * (arena->ndirty >= ndirty_limit) + */ +static void +arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit) +{ + chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena); + size_t npurge, npurged; + arena_runs_dirty_link_t purge_runs_sentinel; + extent_node_t purge_chunks_sentinel; + + arena->purging = true; + + /* + * Calls to arena_dirty_count() are disabled even for debug builds + * because overhead grows nonlinearly as memory usage increases. + */ + if (false && config_debug) { + size_t ndirty = arena_dirty_count(arena); + assert(ndirty == arena->ndirty); + } + assert(opt_purge != purge_mode_ratio || (arena->nactive >> + arena->lg_dirty_mult) < arena->ndirty || ndirty_limit == 0); + + qr_new(&purge_runs_sentinel, rd_link); + extent_node_dirty_linkage_init(&purge_chunks_sentinel); + + npurge = arena_stash_dirty(tsdn, arena, &chunk_hooks, ndirty_limit, + &purge_runs_sentinel, &purge_chunks_sentinel); + if (npurge == 0) + goto label_return; + npurged = arena_purge_stashed(tsdn, arena, &chunk_hooks, + &purge_runs_sentinel, &purge_chunks_sentinel); + assert(npurged == npurge); + arena_unstash_purged(tsdn, arena, &chunk_hooks, &purge_runs_sentinel, + &purge_chunks_sentinel); + + if (config_stats) + arena->stats.npurge++; + +label_return: + arena->purging = false; +} + +void +arena_purge(tsdn_t *tsdn, arena_t *arena, bool all) +{ + + malloc_mutex_lock(tsdn, &arena->lock); + if (all) + arena_purge_to_limit(tsdn, arena, 0); + else + arena_maybe_purge(tsdn, arena); + malloc_mutex_unlock(tsdn, &arena->lock); +} + +static void +arena_achunk_prof_reset(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk) +{ + size_t pageind, npages; + + cassert(config_prof); + assert(opt_prof); + + /* + * Iterate over the allocated runs and remove profiled allocations from + * the sample set. 
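+	 * Sampled small allocations are promoted to large, so only large
+	 * runs can carry samples here.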
+ */ + for (pageind = map_bias; pageind < chunk_npages; pageind += npages) { + if (arena_mapbits_allocated_get(chunk, pageind) != 0) { + if (arena_mapbits_large_get(chunk, pageind) != 0) { + void *ptr = (void *)((uintptr_t)chunk + (pageind + << LG_PAGE)); + size_t usize = isalloc(tsd_tsdn(tsd), ptr, + config_prof); + + prof_free(tsd, ptr, usize); + npages = arena_mapbits_large_size_get(chunk, + pageind) >> LG_PAGE; + } else { + /* Skip small run. */ + size_t binind = arena_mapbits_binind_get(chunk, + pageind); + arena_bin_info_t *bin_info = + &arena_bin_info[binind]; + npages = bin_info->run_size >> LG_PAGE; + } + } else { + /* Skip unallocated run. */ + npages = arena_mapbits_unallocated_size_get(chunk, + pageind) >> LG_PAGE; + } + assert(pageind + npages <= chunk_npages); + } +} + +void +arena_reset(tsd_t *tsd, arena_t *arena) +{ + unsigned i; + extent_node_t *node; + + /* + * Locking in this function is unintuitive. The caller guarantees that + * no concurrent operations are happening in this arena, but there are + * still reasons that some locking is necessary: + * + * - Some of the functions in the transitive closure of calls assume + * appropriate locks are held, and in some cases these locks are + * temporarily dropped to avoid lock order reversal or deadlock due to + * reentry. + * - mallctl("epoch", ...) may concurrently refresh stats. While + * strictly speaking this is a "concurrent operation", disallowing + * stats refreshes would impose an inconvenient burden. + */ + + /* Remove large allocations from prof sample set. */ + if (config_prof && opt_prof) { + ql_foreach(node, &arena->achunks, ql_link) { + arena_achunk_prof_reset(tsd, arena, + extent_node_addr_get(node)); + } + } + + /* Reset curruns for large size classes. */ + if (config_stats) { + for (i = 0; i < nlclasses; i++) + arena->stats.lstats[i].curruns = 0; + } + + /* Huge allocations. */ + malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx); + for (node = ql_last(&arena->huge, ql_link); node != NULL; node = + ql_last(&arena->huge, ql_link)) { + void *ptr = extent_node_addr_get(node); + size_t usize; + + malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx); + if (config_stats || (config_prof && opt_prof)) + usize = isalloc(tsd_tsdn(tsd), ptr, config_prof); + /* Remove huge allocation from prof sample set. */ + if (config_prof && opt_prof) + prof_free(tsd, ptr, usize); + huge_dalloc(tsd_tsdn(tsd), ptr); + malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx); + /* Cancel out unwanted effects on stats. */ + if (config_stats) + arena_huge_reset_stats_cancel(arena, usize); + } + malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx); + + malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock); + + /* Bins. */ + for (i = 0; i < NBINS; i++) { + arena_bin_t *bin = &arena->bins[i]; + malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); + bin->runcur = NULL; + arena_run_heap_new(&bin->runs); + if (config_stats) { + bin->stats.curregs = 0; + bin->stats.curruns = 0; + } + malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); + } + + /* + * Re-initialize runs_dirty such that the chunks_cache and runs_dirty + * chains directly correspond. + */ + qr_new(&arena->runs_dirty, rd_link); + for (node = qr_next(&arena->chunks_cache, cc_link); + node != &arena->chunks_cache; node = qr_next(node, cc_link)) { + qr_new(&node->rd, rd_link); + qr_meld(&arena->runs_dirty, &node->rd, rd_link); + } + + /* Arena chunks. 
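+	 * Discard every chunk in the achunks list; the spare is discarded
+	 * separately below.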
*/ + for (node = ql_last(&arena->achunks, ql_link); node != NULL; node = + ql_last(&arena->achunks, ql_link)) { + ql_remove(&arena->achunks, node, ql_link); + arena_chunk_discard(tsd_tsdn(tsd), arena, + extent_node_addr_get(node)); + } + + /* Spare. */ + if (arena->spare != NULL) { + arena_chunk_discard(tsd_tsdn(tsd), arena, arena->spare); + arena->spare = NULL; + } + + assert(!arena->purging); + arena->nactive = 0; + + for (i = 0; i < NPSIZES; i++) + arena_run_heap_new(&arena->runs_avail[i]); + + malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock); +} + +static void +arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size, + size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty, + size_t flag_decommitted) +{ + size_t size = *p_size; + size_t run_ind = *p_run_ind; + size_t run_pages = *p_run_pages; + + /* Try to coalesce forward. */ + if (run_ind + run_pages < chunk_npages && + arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 && + arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty && + arena_mapbits_decommitted_get(chunk, run_ind+run_pages) == + flag_decommitted) { + size_t nrun_size = arena_mapbits_unallocated_size_get(chunk, + run_ind+run_pages); + size_t nrun_pages = nrun_size >> LG_PAGE; + + /* + * Remove successor from runs_avail; the coalesced run is + * inserted later. + */ + assert(arena_mapbits_unallocated_size_get(chunk, + run_ind+run_pages+nrun_pages-1) == nrun_size); + assert(arena_mapbits_dirty_get(chunk, + run_ind+run_pages+nrun_pages-1) == flag_dirty); + assert(arena_mapbits_decommitted_get(chunk, + run_ind+run_pages+nrun_pages-1) == flag_decommitted); + arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages); + + /* + * If the successor is dirty, remove it from the set of dirty + * pages. + */ + if (flag_dirty != 0) { + arena_run_dirty_remove(arena, chunk, run_ind+run_pages, + nrun_pages); + } + + size += nrun_size; + run_pages += nrun_pages; + + arena_mapbits_unallocated_size_set(chunk, run_ind, size); + arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1, + size); + } + + /* Try to coalesce backward. */ + if (run_ind > map_bias && arena_mapbits_allocated_get(chunk, + run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) == + flag_dirty && arena_mapbits_decommitted_get(chunk, run_ind-1) == + flag_decommitted) { + size_t prun_size = arena_mapbits_unallocated_size_get(chunk, + run_ind-1); + size_t prun_pages = prun_size >> LG_PAGE; + + run_ind -= prun_pages; + + /* + * Remove predecessor from runs_avail; the coalesced run is + * inserted later. + */ + assert(arena_mapbits_unallocated_size_get(chunk, run_ind) == + prun_size); + assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty); + assert(arena_mapbits_decommitted_get(chunk, run_ind) == + flag_decommitted); + arena_avail_remove(arena, chunk, run_ind, prun_pages); + + /* + * If the predecessor is dirty, remove it from the set of dirty + * pages. 
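+		 * This mirrors the forward-coalescing case above.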
+ */ + if (flag_dirty != 0) { + arena_run_dirty_remove(arena, chunk, run_ind, + prun_pages); + } + + size += prun_size; + run_pages += prun_pages; + + arena_mapbits_unallocated_size_set(chunk, run_ind, size); + arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1, + size); + } + + *p_size = size; + *p_run_ind = run_ind; + *p_run_pages = run_pages; +} + +static size_t +arena_run_size_get(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, + size_t run_ind) +{ + size_t size; + + assert(run_ind >= map_bias); + assert(run_ind < chunk_npages); + + if (arena_mapbits_large_get(chunk, run_ind) != 0) { + size = arena_mapbits_large_size_get(chunk, run_ind); + assert(size == PAGE || arena_mapbits_large_size_get(chunk, + run_ind+(size>>LG_PAGE)-1) == 0); + } else { + arena_bin_info_t *bin_info = &arena_bin_info[run->binind]; + size = bin_info->run_size; + } + + return (size); +} + +static void +arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run, bool dirty, + bool cleaned, bool decommitted) +{ + arena_chunk_t *chunk; + arena_chunk_map_misc_t *miscelm; + size_t size, run_ind, run_pages, flag_dirty, flag_decommitted; + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); + miscelm = arena_run_to_miscelm(run); + run_ind = arena_miscelm_to_pageind(miscelm); + assert(run_ind >= map_bias); + assert(run_ind < chunk_npages); + size = arena_run_size_get(arena, chunk, run, run_ind); + run_pages = (size >> LG_PAGE); + arena_nactive_sub(arena, run_pages); + + /* + * The run is dirty if the caller claims to have dirtied it, as well as + * if it was already dirty before being allocated and the caller + * doesn't claim to have cleaned it. + */ + assert(arena_mapbits_dirty_get(chunk, run_ind) == + arena_mapbits_dirty_get(chunk, run_ind+run_pages-1)); + if (!cleaned && !decommitted && arena_mapbits_dirty_get(chunk, run_ind) + != 0) + dirty = true; + flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0; + flag_decommitted = decommitted ? CHUNK_MAP_DECOMMITTED : 0; + + /* Mark pages as unallocated in the chunk map. */ + if (dirty || decommitted) { + size_t flags = flag_dirty | flag_decommitted; + arena_mapbits_unallocated_set(chunk, run_ind, size, flags); + arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size, + flags); + } else { + arena_mapbits_unallocated_set(chunk, run_ind, size, + arena_mapbits_unzeroed_get(chunk, run_ind)); + arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size, + arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1)); + } + + arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages, + flag_dirty, flag_decommitted); + + /* Insert into runs_avail, now that coalescing is complete. */ + assert(arena_mapbits_unallocated_size_get(chunk, run_ind) == + arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1)); + assert(arena_mapbits_dirty_get(chunk, run_ind) == + arena_mapbits_dirty_get(chunk, run_ind+run_pages-1)); + assert(arena_mapbits_decommitted_get(chunk, run_ind) == + arena_mapbits_decommitted_get(chunk, run_ind+run_pages-1)); + arena_avail_insert(arena, chunk, run_ind, run_pages); + + if (dirty) + arena_run_dirty_insert(arena, chunk, run_ind, run_pages); + + /* Deallocate chunk if it is now completely unused. */ + if (size == arena_maxrun) { + assert(run_ind == map_bias); + assert(run_pages == (arena_maxrun >> LG_PAGE)); + arena_chunk_dalloc(tsdn, arena, chunk); + } + + /* + * It is okay to do dirty page processing here even if the chunk was + * deallocated above, since in that case it is the spare. 
Waiting + * until after possible chunk deallocation to do dirty processing + * allows for an old spare to be fully deallocated, thus decreasing the + * chances of spuriously crossing the dirty page purging threshold. + */ + if (dirty) + arena_maybe_purge(tsdn, arena); +} + +static void +arena_run_trim_head(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, + arena_run_t *run, size_t oldsize, size_t newsize) +{ + arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); + size_t pageind = arena_miscelm_to_pageind(miscelm); + size_t head_npages = (oldsize - newsize) >> LG_PAGE; + size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind); + size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind); + size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ? + CHUNK_MAP_UNZEROED : 0; + + assert(oldsize > newsize); + + /* + * Update the chunk map so that arena_run_dalloc() can treat the + * leading run as separately allocated. Set the last element of each + * run first, in case of single-page runs. + */ + assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize); + arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty | + (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, + pageind+head_npages-1))); + arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty | + (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind))); + + if (config_debug) { + UNUSED size_t tail_npages = newsize >> LG_PAGE; + assert(arena_mapbits_large_size_get(chunk, + pageind+head_npages+tail_npages-1) == 0); + assert(arena_mapbits_dirty_get(chunk, + pageind+head_npages+tail_npages-1) == flag_dirty); + } + arena_mapbits_large_set(chunk, pageind+head_npages, newsize, + flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, + pageind+head_npages))); + + arena_run_dalloc(tsdn, arena, run, false, false, (flag_decommitted != + 0)); +} + +static void +arena_run_trim_tail(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, + arena_run_t *run, size_t oldsize, size_t newsize, bool dirty) +{ + arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); + size_t pageind = arena_miscelm_to_pageind(miscelm); + size_t head_npages = newsize >> LG_PAGE; + size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind); + size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind); + size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ? + CHUNK_MAP_UNZEROED : 0; + arena_chunk_map_misc_t *tail_miscelm; + arena_run_t *tail_run; + + assert(oldsize > newsize); + + /* + * Update the chunk map so that arena_run_dalloc() can treat the + * trailing run as separately allocated. Set the last element of each + * run first, in case of single-page runs. 
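+	 * The trailing run is then freed via arena_run_dalloc(), which may
+	 * coalesce it with neighboring unallocated runs.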
+ */ + assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize); + arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty | + (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, + pageind+head_npages-1))); + arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty | + (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind))); + + if (config_debug) { + UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE; + assert(arena_mapbits_large_size_get(chunk, + pageind+head_npages+tail_npages-1) == 0); + assert(arena_mapbits_dirty_get(chunk, + pageind+head_npages+tail_npages-1) == flag_dirty); + } + arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize, + flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, + pageind+head_npages))); + + tail_miscelm = arena_miscelm_get_mutable(chunk, pageind + head_npages); + tail_run = &tail_miscelm->run; + arena_run_dalloc(tsdn, arena, tail_run, dirty, false, (flag_decommitted + != 0)); +} + +static void +arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run) +{ + arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); + + arena_run_heap_insert(&bin->runs, miscelm); +} + +static arena_run_t * +arena_bin_nonfull_run_tryget(arena_bin_t *bin) +{ + arena_chunk_map_misc_t *miscelm; + + miscelm = arena_run_heap_remove_first(&bin->runs); + if (miscelm == NULL) + return (NULL); + if (config_stats) + bin->stats.reruns++; + + return (&miscelm->run); +} + +static arena_run_t * +arena_bin_nonfull_run_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin) +{ + arena_run_t *run; + szind_t binind; + arena_bin_info_t *bin_info; + + /* Look for a usable run. */ + run = arena_bin_nonfull_run_tryget(bin); + if (run != NULL) + return (run); + /* No existing runs have any space available. */ + + binind = arena_bin_index(arena, bin); + bin_info = &arena_bin_info[binind]; + + /* Allocate a new run. */ + malloc_mutex_unlock(tsdn, &bin->lock); + /******************************/ + malloc_mutex_lock(tsdn, &arena->lock); + run = arena_run_alloc_small(tsdn, arena, bin_info->run_size, binind); + if (run != NULL) { + /* Initialize run internals. */ + run->binind = binind; + run->nfree = bin_info->nregs; + bitmap_init(run->bitmap, &bin_info->bitmap_info); + } + malloc_mutex_unlock(tsdn, &arena->lock); + /********************************/ + malloc_mutex_lock(tsdn, &bin->lock); + if (run != NULL) { + if (config_stats) { + bin->stats.nruns++; + bin->stats.curruns++; + } + return (run); + } + + /* + * arena_run_alloc_small() failed, but another thread may have made + * sufficient memory available while this one dropped bin->lock above, + * so search one more time. + */ + run = arena_bin_nonfull_run_tryget(bin); + if (run != NULL) + return (run); + + return (NULL); +} + +/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */ +static void * +arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin) +{ + szind_t binind; + arena_bin_info_t *bin_info; + arena_run_t *run; + + binind = arena_bin_index(arena, bin); + bin_info = &arena_bin_info[binind]; + bin->runcur = NULL; + run = arena_bin_nonfull_run_get(tsdn, arena, bin); + if (bin->runcur != NULL && bin->runcur->nfree > 0) { + /* + * Another thread updated runcur while this one ran without the + * bin lock in arena_bin_nonfull_run_get(). 
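+		 * Allocate from the new runcur, then release the run
+		 * obtained above (if any).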
+		 */
+		void *ret;
+		assert(bin->runcur->nfree > 0);
+		ret = arena_run_reg_alloc(bin->runcur, bin_info);
+		if (run != NULL) {
+			arena_chunk_t *chunk;
+
+			/*
+			 * arena_run_alloc_small() may have allocated run, or
+			 * it may have pulled run from the bin's run tree.
+			 * Therefore it is unsafe to make any assumptions about
+			 * how run has previously been used, and
+			 * arena_bin_lower_run() must be called, as if a region
+			 * were just deallocated from the run.
+			 */
+			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
+			if (run->nfree == bin_info->nregs) {
+				arena_dalloc_bin_run(tsdn, arena, chunk, run,
+				    bin);
+			} else
+				arena_bin_lower_run(arena, run, bin);
+		}
+		return (ret);
+	}
+
+	if (run == NULL)
+		return (NULL);
+
+	bin->runcur = run;
+
+	assert(bin->runcur->nfree > 0);
+
+	return (arena_run_reg_alloc(bin->runcur, bin_info));
+}
+
+void
+arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_bin_t *tbin,
+    szind_t binind, uint64_t prof_accumbytes)
+{
+	unsigned i, nfill;
+	arena_bin_t *bin;
+
+	assert(tbin->ncached == 0);
+
+	if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes))
+		prof_idump(tsdn);
+	bin = &arena->bins[binind];
+	malloc_mutex_lock(tsdn, &bin->lock);
+	for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
+	    tbin->lg_fill_div); i < nfill; i++) {
+		arena_run_t *run;
+		void *ptr;
+		if ((run = bin->runcur) != NULL && run->nfree > 0)
+			ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
+		else
+			ptr = arena_bin_malloc_hard(tsdn, arena, bin);
+		if (ptr == NULL) {
+			/*
+			 * OOM. tbin->avail isn't yet filled down to its first
+			 * element, so the successful allocations (if any) must
+			 * be moved just before tbin->avail before bailing out.
+			 */
+			if (i > 0) {
+				memmove(tbin->avail - i, tbin->avail - nfill,
+				    i * sizeof(void *));
+			}
+			break;
+		}
+		if (config_fill && unlikely(opt_junk_alloc)) {
+			arena_alloc_junk_small(ptr, &arena_bin_info[binind],
+			    true);
+		}
+		/* Insert such that low regions get used first. */
+		*(tbin->avail - nfill + i) = ptr;
+	}
+	if (config_stats) {
+		bin->stats.nmalloc += i;
+		bin->stats.nrequests += tbin->tstats.nrequests;
+		bin->stats.curregs += i;
+		bin->stats.nfills++;
+		tbin->tstats.nrequests = 0;
+	}
+	malloc_mutex_unlock(tsdn, &bin->lock);
+	tbin->ncached = i;
+	arena_decay_tick(tsdn, arena);
+}
+
+void
+arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
+{
+
+	size_t redzone_size = bin_info->redzone_size;
+
+	if (zero) {
+		memset((void *)((uintptr_t)ptr - redzone_size),
+		    JEMALLOC_ALLOC_JUNK, redzone_size);
+		memset((void *)((uintptr_t)ptr + bin_info->reg_size),
+		    JEMALLOC_ALLOC_JUNK, redzone_size);
+	} else {
+		memset((void *)((uintptr_t)ptr - redzone_size),
+		    JEMALLOC_ALLOC_JUNK, bin_info->reg_interval);
+	}
+}
+
+#ifdef JEMALLOC_JET
+#undef arena_redzone_corruption
+#define arena_redzone_corruption JEMALLOC_N(n_arena_redzone_corruption)
+#endif
+static void
+arena_redzone_corruption(void *ptr, size_t usize, bool after,
+    size_t offset, uint8_t byte)
+{
+
+	malloc_printf("<jemalloc>: Corrupt redzone %zu byte%s %s %p "
+	    "(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s",
+	    after ?
"after" : "before", ptr, usize, byte); +} +#ifdef JEMALLOC_JET +#undef arena_redzone_corruption +#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption) +arena_redzone_corruption_t *arena_redzone_corruption = + JEMALLOC_N(n_arena_redzone_corruption); +#endif + +static void +arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset) +{ + bool error = false; + + if (opt_junk_alloc) { + size_t size = bin_info->reg_size; + size_t redzone_size = bin_info->redzone_size; + size_t i; + + for (i = 1; i <= redzone_size; i++) { + uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i); + if (*byte != JEMALLOC_ALLOC_JUNK) { + error = true; + arena_redzone_corruption(ptr, size, false, i, + *byte); + if (reset) + *byte = JEMALLOC_ALLOC_JUNK; + } + } + for (i = 0; i < redzone_size; i++) { + uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i); + if (*byte != JEMALLOC_ALLOC_JUNK) { + error = true; + arena_redzone_corruption(ptr, size, true, i, + *byte); + if (reset) + *byte = JEMALLOC_ALLOC_JUNK; + } + } + } + + if (opt_abort && error) + abort(); +} + +#ifdef JEMALLOC_JET +#undef arena_dalloc_junk_small +#define arena_dalloc_junk_small JEMALLOC_N(n_arena_dalloc_junk_small) +#endif +void +arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info) +{ + size_t redzone_size = bin_info->redzone_size; + + arena_redzones_validate(ptr, bin_info, false); + memset((void *)((uintptr_t)ptr - redzone_size), JEMALLOC_FREE_JUNK, + bin_info->reg_interval); +} +#ifdef JEMALLOC_JET +#undef arena_dalloc_junk_small +#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small) +arena_dalloc_junk_small_t *arena_dalloc_junk_small = + JEMALLOC_N(n_arena_dalloc_junk_small); +#endif + +void +arena_quarantine_junk_small(void *ptr, size_t usize) +{ + szind_t binind; + arena_bin_info_t *bin_info; + cassert(config_fill); + assert(opt_junk_free); + assert(opt_quarantine); + assert(usize <= SMALL_MAXCLASS); + + binind = size2index(usize); + bin_info = &arena_bin_info[binind]; + arena_redzones_validate(ptr, bin_info, true); +} + +static void * +arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) +{ + void *ret; + arena_bin_t *bin; + size_t usize; + arena_run_t *run; + + assert(binind < NBINS); + bin = &arena->bins[binind]; + usize = index2size(binind); + + malloc_mutex_lock(tsdn, &bin->lock); + if ((run = bin->runcur) != NULL && run->nfree > 0) + ret = arena_run_reg_alloc(run, &arena_bin_info[binind]); + else + ret = arena_bin_malloc_hard(tsdn, arena, bin); + + if (ret == NULL) { + malloc_mutex_unlock(tsdn, &bin->lock); + return (NULL); + } + + if (config_stats) { + bin->stats.nmalloc++; + bin->stats.nrequests++; + bin->stats.curregs++; + } + malloc_mutex_unlock(tsdn, &bin->lock); + if (config_prof && !isthreaded && arena_prof_accum(tsdn, arena, usize)) + prof_idump(tsdn); + + if (!zero) { + if (config_fill) { + if (unlikely(opt_junk_alloc)) { + arena_alloc_junk_small(ret, + &arena_bin_info[binind], false); + } else if (unlikely(opt_zero)) + memset(ret, 0, usize); + } + JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize); + } else { + if (config_fill && unlikely(opt_junk_alloc)) { + arena_alloc_junk_small(ret, &arena_bin_info[binind], + true); + } + JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize); + memset(ret, 0, usize); + } + + arena_decay_tick(tsdn, arena); + return (ret); +} + +void * +arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) +{ + void *ret; + size_t usize; + uintptr_t random_offset; + arena_run_t *run; + arena_chunk_map_misc_t *miscelm; + 
UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false); + + /* Large allocation. */ + usize = index2size(binind); + malloc_mutex_lock(tsdn, &arena->lock); + if (config_cache_oblivious) { + uint64_t r; + + /* + * Compute a uniformly distributed offset within the first page + * that is a multiple of the cacheline size, e.g. [0 .. 63) * 64 + * for 4 KiB pages and 64-byte cachelines. + */ + r = prng_lg_range_zu(&arena->offset_state, LG_PAGE - + LG_CACHELINE, false); + random_offset = ((uintptr_t)r) << LG_CACHELINE; + } else + random_offset = 0; + run = arena_run_alloc_large(tsdn, arena, usize + large_pad, zero); + if (run == NULL) { + malloc_mutex_unlock(tsdn, &arena->lock); + return (NULL); + } + miscelm = arena_run_to_miscelm(run); + ret = (void *)((uintptr_t)arena_miscelm_to_rpages(miscelm) + + random_offset); + if (config_stats) { + szind_t index = binind - NBINS; + + arena->stats.nmalloc_large++; + arena->stats.nrequests_large++; + arena->stats.allocated_large += usize; + arena->stats.lstats[index].nmalloc++; + arena->stats.lstats[index].nrequests++; + arena->stats.lstats[index].curruns++; + } + if (config_prof) + idump = arena_prof_accum_locked(arena, usize); + malloc_mutex_unlock(tsdn, &arena->lock); + if (config_prof && idump) + prof_idump(tsdn); + + if (!zero) { + if (config_fill) { + if (unlikely(opt_junk_alloc)) + memset(ret, JEMALLOC_ALLOC_JUNK, usize); + else if (unlikely(opt_zero)) + memset(ret, 0, usize); + } + } + + arena_decay_tick(tsdn, arena); + return (ret); +} + +void * +arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, + bool zero) +{ + + assert(!tsdn_null(tsdn) || arena != NULL); + + if (likely(!tsdn_null(tsdn))) + arena = arena_choose(tsdn_tsd(tsdn), arena); + if (unlikely(arena == NULL)) + return (NULL); + + if (likely(size <= SMALL_MAXCLASS)) + return (arena_malloc_small(tsdn, arena, ind, zero)); + if (likely(size <= large_maxclass)) + return (arena_malloc_large(tsdn, arena, ind, zero)); + assert(index2size(ind) >= chunksize); + return (huge_malloc(tsdn, arena, index2size(ind), zero)); +} + +/* Only handles large allocations that require more than page alignment. 
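+ * An oversized run is allocated and then trimmed at the head and tail so
+ * that the remaining region satisfies the requested alignment.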
*/ +static void * +arena_palloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, + bool zero) +{ + void *ret; + size_t alloc_size, leadsize, trailsize; + arena_run_t *run; + arena_chunk_t *chunk; + arena_chunk_map_misc_t *miscelm; + void *rpages; + + assert(!tsdn_null(tsdn) || arena != NULL); + assert(usize == PAGE_CEILING(usize)); + + if (likely(!tsdn_null(tsdn))) + arena = arena_choose(tsdn_tsd(tsdn), arena); + if (unlikely(arena == NULL)) + return (NULL); + + alignment = PAGE_CEILING(alignment); + alloc_size = usize + large_pad + alignment - PAGE; + + malloc_mutex_lock(tsdn, &arena->lock); + run = arena_run_alloc_large(tsdn, arena, alloc_size, false); + if (run == NULL) { + malloc_mutex_unlock(tsdn, &arena->lock); + return (NULL); + } + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); + miscelm = arena_run_to_miscelm(run); + rpages = arena_miscelm_to_rpages(miscelm); + + leadsize = ALIGNMENT_CEILING((uintptr_t)rpages, alignment) - + (uintptr_t)rpages; + assert(alloc_size >= leadsize + usize); + trailsize = alloc_size - leadsize - usize - large_pad; + if (leadsize != 0) { + arena_chunk_map_misc_t *head_miscelm = miscelm; + arena_run_t *head_run = run; + + miscelm = arena_miscelm_get_mutable(chunk, + arena_miscelm_to_pageind(head_miscelm) + (leadsize >> + LG_PAGE)); + run = &miscelm->run; + + arena_run_trim_head(tsdn, arena, chunk, head_run, alloc_size, + alloc_size - leadsize); + } + if (trailsize != 0) { + arena_run_trim_tail(tsdn, arena, chunk, run, usize + large_pad + + trailsize, usize + large_pad, false); + } + if (arena_run_init_large(arena, run, usize + large_pad, zero)) { + size_t run_ind = + arena_miscelm_to_pageind(arena_run_to_miscelm(run)); + bool dirty = (arena_mapbits_dirty_get(chunk, run_ind) != 0); + bool decommitted = (arena_mapbits_decommitted_get(chunk, + run_ind) != 0); + + assert(decommitted); /* Cause of OOM. */ + arena_run_dalloc(tsdn, arena, run, dirty, false, decommitted); + malloc_mutex_unlock(tsdn, &arena->lock); + return (NULL); + } + ret = arena_miscelm_to_rpages(miscelm); + + if (config_stats) { + szind_t index = size2index(usize) - NBINS; + + arena->stats.nmalloc_large++; + arena->stats.nrequests_large++; + arena->stats.allocated_large += usize; + arena->stats.lstats[index].nmalloc++; + arena->stats.lstats[index].nrequests++; + arena->stats.lstats[index].curruns++; + } + malloc_mutex_unlock(tsdn, &arena->lock); + + if (config_fill && !zero) { + if (unlikely(opt_junk_alloc)) + memset(ret, JEMALLOC_ALLOC_JUNK, usize); + else if (unlikely(opt_zero)) + memset(ret, 0, usize); + } + arena_decay_tick(tsdn, arena); + return (ret); +} + +void * +arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, + bool zero, tcache_t *tcache) +{ + void *ret; + + if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE + && (usize & PAGE_MASK) == 0))) { + /* Small; alignment doesn't require special run placement. */ + ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero, + tcache, true); + } else if (usize <= large_maxclass && alignment <= PAGE) { + /* + * Large; alignment doesn't require special run placement. + * However, the cached pointer may be at a random offset from + * the base of the run, so do some bit manipulation to retrieve + * the base. 
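+		 * Clearing the low PAGE_MASK bits yields the page-aligned
+		 * base.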
+ */ + ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero, + tcache, true); + if (config_cache_oblivious) + ret = (void *)((uintptr_t)ret & ~PAGE_MASK); + } else { + if (likely(usize <= large_maxclass)) { + ret = arena_palloc_large(tsdn, arena, usize, alignment, + zero); + } else if (likely(alignment <= chunksize)) + ret = huge_malloc(tsdn, arena, usize, zero); + else { + ret = huge_palloc(tsdn, arena, usize, alignment, zero); + } + } + return (ret); +} + +void +arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size) +{ + arena_chunk_t *chunk; + size_t pageind; + szind_t binind; + + cassert(config_prof); + assert(ptr != NULL); + assert(CHUNK_ADDR2BASE(ptr) != ptr); + assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS); + assert(isalloc(tsdn, ptr, true) == LARGE_MINCLASS); + assert(size <= SMALL_MAXCLASS); + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; + binind = size2index(size); + assert(binind < NBINS); + arena_mapbits_large_binind_set(chunk, pageind, binind); + + assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS); + assert(isalloc(tsdn, ptr, true) == size); +} + +static void +arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run, + arena_bin_t *bin) +{ + + /* Dissociate run from bin. */ + if (run == bin->runcur) + bin->runcur = NULL; + else { + szind_t binind = arena_bin_index(extent_node_arena_get( + &chunk->node), bin); + arena_bin_info_t *bin_info = &arena_bin_info[binind]; + + /* + * The following block's conditional is necessary because if the + * run only contains one region, then it never gets inserted + * into the non-full runs tree. + */ + if (bin_info->nregs != 1) { + arena_chunk_map_misc_t *miscelm = + arena_run_to_miscelm(run); + + arena_run_heap_remove(&bin->runs, miscelm); + } + } +} + +static void +arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, + arena_run_t *run, arena_bin_t *bin) +{ + + assert(run != bin->runcur); + + malloc_mutex_unlock(tsdn, &bin->lock); + /******************************/ + malloc_mutex_lock(tsdn, &arena->lock); + arena_run_dalloc(tsdn, arena, run, true, false, false); + malloc_mutex_unlock(tsdn, &arena->lock); + /****************************/ + malloc_mutex_lock(tsdn, &bin->lock); + if (config_stats) + bin->stats.curruns--; +} + +static void +arena_bin_lower_run(arena_t *arena, arena_run_t *run, arena_bin_t *bin) +{ + + /* + * Make sure that if bin->runcur is non-NULL, it refers to the + * oldest/lowest non-full run. It is okay to NULL runcur out rather + * than proactively keeping it pointing at the oldest/lowest non-full + * run. + */ + if (bin->runcur != NULL && + arena_snad_comp(arena_run_to_miscelm(bin->runcur), + arena_run_to_miscelm(run)) > 0) { + /* Switch runcur. 
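+		 * If the old runcur still has free regions, keep it
+		 * available in the runs heap.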
*/ + if (bin->runcur->nfree > 0) + arena_bin_runs_insert(bin, bin->runcur); + bin->runcur = run; + if (config_stats) + bin->stats.reruns++; + } else + arena_bin_runs_insert(bin, run); +} + +static void +arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, + void *ptr, arena_chunk_map_bits_t *bitselm, bool junked) +{ + size_t pageind, rpages_ind; + arena_run_t *run; + arena_bin_t *bin; + arena_bin_info_t *bin_info; + szind_t binind; + + pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; + rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind); + run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run; + binind = run->binind; + bin = &arena->bins[binind]; + bin_info = &arena_bin_info[binind]; + + if (!junked && config_fill && unlikely(opt_junk_free)) + arena_dalloc_junk_small(ptr, bin_info); + + arena_run_reg_dalloc(run, ptr); + if (run->nfree == bin_info->nregs) { + arena_dissociate_bin_run(chunk, run, bin); + arena_dalloc_bin_run(tsdn, arena, chunk, run, bin); + } else if (run->nfree == 1 && run != bin->runcur) + arena_bin_lower_run(arena, run, bin); + + if (config_stats) { + bin->stats.ndalloc++; + bin->stats.curregs--; + } +} + +void +arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, + arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm) +{ + + arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, true); +} + +void +arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, void *ptr, + size_t pageind, arena_chunk_map_bits_t *bitselm) +{ + arena_run_t *run; + arena_bin_t *bin; + size_t rpages_ind; + + rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind); + run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run; + bin = &arena->bins[run->binind]; + malloc_mutex_lock(tsdn, &bin->lock); + arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, false); + malloc_mutex_unlock(tsdn, &bin->lock); +} + +void +arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, + void *ptr, size_t pageind) +{ + arena_chunk_map_bits_t *bitselm; + + if (config_debug) { + /* arena_ptr_small_binind_get() does extra sanity checking. 
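+		 * The assert catches map bits that decode to an invalid
+		 * bin index.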
*/ + assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk, + pageind)) != BININD_INVALID); + } + bitselm = arena_bitselm_get_mutable(chunk, pageind); + arena_dalloc_bin(tsdn, arena, chunk, ptr, pageind, bitselm); + arena_decay_tick(tsdn, arena); +} + +#ifdef JEMALLOC_JET +#undef arena_dalloc_junk_large +#define arena_dalloc_junk_large JEMALLOC_N(n_arena_dalloc_junk_large) +#endif +void +arena_dalloc_junk_large(void *ptr, size_t usize) +{ + + if (config_fill && unlikely(opt_junk_free)) + memset(ptr, JEMALLOC_FREE_JUNK, usize); +} +#ifdef JEMALLOC_JET +#undef arena_dalloc_junk_large +#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large) +arena_dalloc_junk_large_t *arena_dalloc_junk_large = + JEMALLOC_N(n_arena_dalloc_junk_large); +#endif + +static void +arena_dalloc_large_locked_impl(tsdn_t *tsdn, arena_t *arena, + arena_chunk_t *chunk, void *ptr, bool junked) +{ + size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; + arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk, + pageind); + arena_run_t *run = &miscelm->run; + + if (config_fill || config_stats) { + size_t usize = arena_mapbits_large_size_get(chunk, pageind) - + large_pad; + + if (!junked) + arena_dalloc_junk_large(ptr, usize); + if (config_stats) { + szind_t index = size2index(usize) - NBINS; + + arena->stats.ndalloc_large++; + arena->stats.allocated_large -= usize; + arena->stats.lstats[index].ndalloc++; + arena->stats.lstats[index].curruns--; + } + } + + arena_run_dalloc(tsdn, arena, run, true, false, false); +} + +void +arena_dalloc_large_junked_locked(tsdn_t *tsdn, arena_t *arena, + arena_chunk_t *chunk, void *ptr) +{ + + arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, true); +} + +void +arena_dalloc_large(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, + void *ptr) +{ + + malloc_mutex_lock(tsdn, &arena->lock); + arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, false); + malloc_mutex_unlock(tsdn, &arena->lock); + arena_decay_tick(tsdn, arena); +} + +static void +arena_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, + void *ptr, size_t oldsize, size_t size) +{ + size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; + arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk, + pageind); + arena_run_t *run = &miscelm->run; + + assert(size < oldsize); + + /* + * Shrink the run, and make trailing pages available for other + * allocations. 
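+	 * E.g. shrinking a four-page large run down to one page returns the
+	 * three trailing pages to the arena via arena_run_trim_tail() below.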
+ */ + malloc_mutex_lock(tsdn, &arena->lock); + arena_run_trim_tail(tsdn, arena, chunk, run, oldsize + large_pad, size + + large_pad, true); + if (config_stats) { + szind_t oldindex = size2index(oldsize) - NBINS; + szind_t index = size2index(size) - NBINS; + + arena->stats.ndalloc_large++; + arena->stats.allocated_large -= oldsize; + arena->stats.lstats[oldindex].ndalloc++; + arena->stats.lstats[oldindex].curruns--; + + arena->stats.nmalloc_large++; + arena->stats.nrequests_large++; + arena->stats.allocated_large += size; + arena->stats.lstats[index].nmalloc++; + arena->stats.lstats[index].nrequests++; + arena->stats.lstats[index].curruns++; + } + malloc_mutex_unlock(tsdn, &arena->lock); +} + +static bool +arena_ralloc_large_grow(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, + void *ptr, size_t oldsize, size_t usize_min, size_t usize_max, bool zero) +{ + size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; + size_t npages = (oldsize + large_pad) >> LG_PAGE; + size_t followsize; + + assert(oldsize == arena_mapbits_large_size_get(chunk, pageind) - + large_pad); + + /* Try to extend the run. */ + malloc_mutex_lock(tsdn, &arena->lock); + if (pageind+npages >= chunk_npages || arena_mapbits_allocated_get(chunk, + pageind+npages) != 0) + goto label_fail; + followsize = arena_mapbits_unallocated_size_get(chunk, pageind+npages); + if (oldsize + followsize >= usize_min) { + /* + * The next run is available and sufficiently large. Split the + * following run, then merge the first part with the existing + * allocation. + */ + arena_run_t *run; + size_t usize, splitsize, size, flag_dirty, flag_unzeroed_mask; + + usize = usize_max; + while (oldsize + followsize < usize) + usize = index2size(size2index(usize)-1); + assert(usize >= usize_min); + assert(usize >= oldsize); + splitsize = usize - oldsize; + if (splitsize == 0) + goto label_fail; + + run = &arena_miscelm_get_mutable(chunk, pageind+npages)->run; + if (arena_run_split_large(arena, run, splitsize, zero)) + goto label_fail; + + if (config_cache_oblivious && zero) { + /* + * Zero the trailing bytes of the original allocation's + * last page, since they are in an indeterminate state. + * There will always be trailing bytes, because ptr's + * offset from the beginning of the run is a multiple of + * CACHELINE in [0 .. PAGE). + */ + void *zbase = (void *)((uintptr_t)ptr + oldsize); + void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase + + PAGE)); + size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase; + assert(nzero > 0); + memset(zbase, 0, nzero); + } + + size = oldsize + splitsize; + npages = (size + large_pad) >> LG_PAGE; + + /* + * Mark the extended run as dirty if either portion of the run + * was dirty before allocation. This is rather pedantic, + * because there's not actually any sequence of events that + * could cause the resulting run to be passed to + * arena_run_dalloc() with the dirty argument set to false + * (which is when dirty flag consistency would really matter). + */ + flag_dirty = arena_mapbits_dirty_get(chunk, pageind) | + arena_mapbits_dirty_get(chunk, pageind+npages-1); + flag_unzeroed_mask = flag_dirty == 0 ? 
CHUNK_MAP_UNZEROED : 0;
+		arena_mapbits_large_set(chunk, pageind, size + large_pad,
+		    flag_dirty | (flag_unzeroed_mask &
+		    arena_mapbits_unzeroed_get(chunk, pageind)));
+		arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty |
+		    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
+		    pageind+npages-1)));
+
+		if (config_stats) {
+			szind_t oldindex = size2index(oldsize) - NBINS;
+			szind_t index = size2index(size) - NBINS;
+
+			arena->stats.ndalloc_large++;
+			arena->stats.allocated_large -= oldsize;
+			arena->stats.lstats[oldindex].ndalloc++;
+			arena->stats.lstats[oldindex].curruns--;
+
+			arena->stats.nmalloc_large++;
+			arena->stats.nrequests_large++;
+			arena->stats.allocated_large += size;
+			arena->stats.lstats[index].nmalloc++;
+			arena->stats.lstats[index].nrequests++;
+			arena->stats.lstats[index].curruns++;
+		}
+		malloc_mutex_unlock(tsdn, &arena->lock);
+		return (false);
+	}
+label_fail:
+	malloc_mutex_unlock(tsdn, &arena->lock);
+	return (true);
+}
+
+#ifdef JEMALLOC_JET
+#undef arena_ralloc_junk_large
+#define arena_ralloc_junk_large JEMALLOC_N(n_arena_ralloc_junk_large)
+#endif
+static void
+arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
+{
+
+	if (config_fill && unlikely(opt_junk_free)) {
+		memset((void *)((uintptr_t)ptr + usize), JEMALLOC_FREE_JUNK,
+		    old_usize - usize);
+	}
+}
+#ifdef JEMALLOC_JET
+#undef arena_ralloc_junk_large
+#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
+arena_ralloc_junk_large_t *arena_ralloc_junk_large =
+    JEMALLOC_N(n_arena_ralloc_junk_large);
+#endif
+
+/*
+ * Try to resize a large allocation, in order to avoid copying.  This will fail
+ * when growing an object if the following run is already in use.
+ */
+static bool
+arena_ralloc_large(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min,
+    size_t usize_max, bool zero)
+{
+	arena_chunk_t *chunk;
+	arena_t *arena;
+
+	if (oldsize == usize_max) {
+		/* Current size class is compatible and maximal. */
+		return (false);
+	}
+
+	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+	arena = extent_node_arena_get(&chunk->node);
+
+	if (oldsize < usize_max) {
+		bool ret = arena_ralloc_large_grow(tsdn, arena, chunk, ptr,
+		    oldsize, usize_min, usize_max, zero);
+		if (config_fill && !ret && !zero) {
+			if (unlikely(opt_junk_alloc)) {
+				memset((void *)((uintptr_t)ptr + oldsize),
+				    JEMALLOC_ALLOC_JUNK,
+				    isalloc(tsdn, ptr, config_prof) - oldsize);
+			} else if (unlikely(opt_zero)) {
+				memset((void *)((uintptr_t)ptr + oldsize), 0,
+				    isalloc(tsdn, ptr, config_prof) - oldsize);
+			}
+		}
+		return (ret);
+	}
+
+	assert(oldsize > usize_max);
+	/* Fill before shrinking in order to avoid a race. */
+	arena_ralloc_junk_large(ptr, oldsize, usize_max);
+	arena_ralloc_large_shrink(tsdn, arena, chunk, ptr, oldsize, usize_max);
+	return (false);
+}
+
+bool
+arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
+    size_t extra, bool zero)
+{
+	size_t usize_min, usize_max;
+
+	/* Callers with non-zero extra must already have clamped extra. */
+	assert(extra == 0 || size + extra <= HUGE_MAXCLASS);
+
+	if (unlikely(size > HUGE_MAXCLASS))
+		return (true);
+
+	usize_min = s2u(size);
+	usize_max = s2u(size + extra);
+	if (likely(oldsize <= large_maxclass && usize_min <= large_maxclass)) {
+		arena_chunk_t *chunk;
+
+		/*
+		 * Avoid moving the allocation if the size class can be left the
+		 * same.
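+		 * E.g. with the usual small size classes (illustrative), a
+		 * region that served a 40-byte request already occupies the
+		 * 48-byte class, so resizing it to 44 bytes leaves
+		 * size2index() unchanged and no move is needed.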
+ */ + if (oldsize <= SMALL_MAXCLASS) { + assert(arena_bin_info[size2index(oldsize)].reg_size == + oldsize); + if ((usize_max > SMALL_MAXCLASS || + size2index(usize_max) != size2index(oldsize)) && + (size > oldsize || usize_max < oldsize)) + return (true); + } else { + if (usize_max <= SMALL_MAXCLASS) + return (true); + if (arena_ralloc_large(tsdn, ptr, oldsize, usize_min, + usize_max, zero)) + return (true); + } + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + arena_decay_tick(tsdn, extent_node_arena_get(&chunk->node)); + return (false); + } else { + return (huge_ralloc_no_move(tsdn, ptr, oldsize, usize_min, + usize_max, zero)); + } +} + +static void * +arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize, + size_t alignment, bool zero, tcache_t *tcache) +{ + + if (alignment == 0) + return (arena_malloc(tsdn, arena, usize, size2index(usize), + zero, tcache, true)); + usize = sa2u(usize, alignment); + if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) + return (NULL); + return (ipalloct(tsdn, usize, alignment, zero, tcache, arena)); +} + +void * +arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size, + size_t alignment, bool zero, tcache_t *tcache) +{ + void *ret; + size_t usize; + + usize = s2u(size); + if (unlikely(usize == 0 || size > HUGE_MAXCLASS)) + return (NULL); + + if (likely(usize <= large_maxclass)) { + size_t copysize; + + /* Try to avoid moving the allocation. */ + if (!arena_ralloc_no_move(tsd_tsdn(tsd), ptr, oldsize, usize, 0, + zero)) + return (ptr); + + /* + * size and oldsize are different enough that we need to move + * the object. In that case, fall back to allocating new space + * and copying. + */ + ret = arena_ralloc_move_helper(tsd_tsdn(tsd), arena, usize, + alignment, zero, tcache); + if (ret == NULL) + return (NULL); + + /* + * Junk/zero-filling were already done by + * ipalloc()/arena_malloc(). + */ + + copysize = (usize < oldsize) ? 
usize : oldsize; + JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize); + memcpy(ret, ptr, copysize); + isqalloc(tsd, ptr, oldsize, tcache, true); + } else { + ret = huge_ralloc(tsd, arena, ptr, oldsize, usize, alignment, + zero, tcache); + } + return (ret); +} + +dss_prec_t +arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena) +{ + dss_prec_t ret; + + malloc_mutex_lock(tsdn, &arena->lock); + ret = arena->dss_prec; + malloc_mutex_unlock(tsdn, &arena->lock); + return (ret); +} + +bool +arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec) +{ + + if (!have_dss) + return (dss_prec != dss_prec_disabled); + malloc_mutex_lock(tsdn, &arena->lock); + arena->dss_prec = dss_prec; + malloc_mutex_unlock(tsdn, &arena->lock); + return (false); +} + +ssize_t +arena_lg_dirty_mult_default_get(void) +{ + + return ((ssize_t)atomic_read_z((size_t *)&lg_dirty_mult_default)); +} + +bool +arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult) +{ + + if (opt_purge != purge_mode_ratio) + return (true); + if (!arena_lg_dirty_mult_valid(lg_dirty_mult)) + return (true); + atomic_write_z((size_t *)&lg_dirty_mult_default, (size_t)lg_dirty_mult); + return (false); +} + +ssize_t +arena_decay_time_default_get(void) +{ + + return ((ssize_t)atomic_read_z((size_t *)&decay_time_default)); +} + +bool +arena_decay_time_default_set(ssize_t decay_time) +{ + + if (opt_purge != purge_mode_decay) + return (true); + if (!arena_decay_time_valid(decay_time)) + return (true); + atomic_write_z((size_t *)&decay_time_default, (size_t)decay_time); + return (false); +} + +static void +arena_basic_stats_merge_locked(arena_t *arena, unsigned *nthreads, + const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time, + size_t *nactive, size_t *ndirty) +{ + + *nthreads += arena_nthreads_get(arena, false); + *dss = dss_prec_names[arena->dss_prec]; + *lg_dirty_mult = arena->lg_dirty_mult; + *decay_time = arena->decay.time; + *nactive += arena->nactive; + *ndirty += arena->ndirty; +} + +void +arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, + const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time, + size_t *nactive, size_t *ndirty) +{ + + malloc_mutex_lock(tsdn, &arena->lock); + arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult, + decay_time, nactive, ndirty); + malloc_mutex_unlock(tsdn, &arena->lock); +} + +void +arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, + const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time, + size_t *nactive, size_t *ndirty, arena_stats_t *astats, + malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats, + malloc_huge_stats_t *hstats) +{ + unsigned i; + + cassert(config_stats); + + malloc_mutex_lock(tsdn, &arena->lock); + arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult, + decay_time, nactive, ndirty); + + astats->mapped += arena->stats.mapped; + astats->retained += arena->stats.retained; + astats->npurge += arena->stats.npurge; + astats->nmadvise += arena->stats.nmadvise; + astats->purged += arena->stats.purged; + astats->metadata_mapped += arena->stats.metadata_mapped; + astats->metadata_allocated += arena_metadata_allocated_get(arena); + astats->allocated_large += arena->stats.allocated_large; + astats->nmalloc_large += arena->stats.nmalloc_large; + astats->ndalloc_large += arena->stats.ndalloc_large; + astats->nrequests_large += arena->stats.nrequests_large; + astats->allocated_huge += arena->stats.allocated_huge; + astats->nmalloc_huge += arena->stats.nmalloc_huge; + astats->ndalloc_huge += 
arena->stats.ndalloc_huge; + + for (i = 0; i < nlclasses; i++) { + lstats[i].nmalloc += arena->stats.lstats[i].nmalloc; + lstats[i].ndalloc += arena->stats.lstats[i].ndalloc; + lstats[i].nrequests += arena->stats.lstats[i].nrequests; + lstats[i].curruns += arena->stats.lstats[i].curruns; + } + + for (i = 0; i < nhclasses; i++) { + hstats[i].nmalloc += arena->stats.hstats[i].nmalloc; + hstats[i].ndalloc += arena->stats.hstats[i].ndalloc; + hstats[i].curhchunks += arena->stats.hstats[i].curhchunks; + } + malloc_mutex_unlock(tsdn, &arena->lock); + + for (i = 0; i < NBINS; i++) { + arena_bin_t *bin = &arena->bins[i]; + + malloc_mutex_lock(tsdn, &bin->lock); + bstats[i].nmalloc += bin->stats.nmalloc; + bstats[i].ndalloc += bin->stats.ndalloc; + bstats[i].nrequests += bin->stats.nrequests; + bstats[i].curregs += bin->stats.curregs; + if (config_tcache) { + bstats[i].nfills += bin->stats.nfills; + bstats[i].nflushes += bin->stats.nflushes; + } + bstats[i].nruns += bin->stats.nruns; + bstats[i].reruns += bin->stats.reruns; + bstats[i].curruns += bin->stats.curruns; + malloc_mutex_unlock(tsdn, &bin->lock); + } +} + +unsigned +arena_nthreads_get(arena_t *arena, bool internal) +{ + + return (atomic_read_u(&arena->nthreads[internal])); +} + +void +arena_nthreads_inc(arena_t *arena, bool internal) +{ + + atomic_add_u(&arena->nthreads[internal], 1); +} + +void +arena_nthreads_dec(arena_t *arena, bool internal) +{ + + atomic_sub_u(&arena->nthreads[internal], 1); +} + +size_t +arena_extent_sn_next(arena_t *arena) +{ + + return (atomic_add_z(&arena->extent_sn_next, 1) - 1); +} + +arena_t * +arena_new(tsdn_t *tsdn, unsigned ind) +{ + arena_t *arena; + unsigned i; + + /* + * Allocate arena, arena->lstats, and arena->hstats contiguously, mainly + * because there is no way to clean up if base_alloc() OOMs. + */ + if (config_stats) { + arena = (arena_t *)base_alloc(tsdn, + CACHELINE_CEILING(sizeof(arena_t)) + + QUANTUM_CEILING((nlclasses * sizeof(malloc_large_stats_t))) + + (nhclasses * sizeof(malloc_huge_stats_t))); + } else + arena = (arena_t *)base_alloc(tsdn, sizeof(arena_t)); + if (arena == NULL) + return (NULL); + + arena->ind = ind; + arena->nthreads[0] = arena->nthreads[1] = 0; + if (malloc_mutex_init(&arena->lock, "arena", WITNESS_RANK_ARENA)) + return (NULL); + + if (config_stats) { + memset(&arena->stats, 0, sizeof(arena_stats_t)); + arena->stats.lstats = (malloc_large_stats_t *)((uintptr_t)arena + + CACHELINE_CEILING(sizeof(arena_t))); + memset(arena->stats.lstats, 0, nlclasses * + sizeof(malloc_large_stats_t)); + arena->stats.hstats = (malloc_huge_stats_t *)((uintptr_t)arena + + CACHELINE_CEILING(sizeof(arena_t)) + + QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t))); + memset(arena->stats.hstats, 0, nhclasses * + sizeof(malloc_huge_stats_t)); + if (config_tcache) + ql_new(&arena->tcache_ql); + } + + if (config_prof) + arena->prof_accumbytes = 0; + + if (config_cache_oblivious) { + /* + * A nondeterministic seed based on the address of arena reduces + * the likelihood of lockstep non-uniform cache index + * utilization among identical concurrent processes, but at the + * cost of test repeatability. For debug builds, instead use a + * deterministic seed. + */ + arena->offset_state = config_debug ? 
ind : + (size_t)(uintptr_t)arena; + } + + arena->dss_prec = chunk_dss_prec_get(); + + ql_new(&arena->achunks); + + arena->extent_sn_next = 0; + + arena->spare = NULL; + + arena->lg_dirty_mult = arena_lg_dirty_mult_default_get(); + arena->purging = false; + arena->nactive = 0; + arena->ndirty = 0; + + for (i = 0; i < NPSIZES; i++) + arena_run_heap_new(&arena->runs_avail[i]); + + qr_new(&arena->runs_dirty, rd_link); + qr_new(&arena->chunks_cache, cc_link); + + if (opt_purge == purge_mode_decay) + arena_decay_init(arena, arena_decay_time_default_get()); + + ql_new(&arena->huge); + if (malloc_mutex_init(&arena->huge_mtx, "arena_huge", + WITNESS_RANK_ARENA_HUGE)) + return (NULL); + + extent_tree_szsnad_new(&arena->chunks_szsnad_cached); + extent_tree_ad_new(&arena->chunks_ad_cached); + extent_tree_szsnad_new(&arena->chunks_szsnad_retained); + extent_tree_ad_new(&arena->chunks_ad_retained); + if (malloc_mutex_init(&arena->chunks_mtx, "arena_chunks", + WITNESS_RANK_ARENA_CHUNKS)) + return (NULL); + ql_new(&arena->node_cache); + if (malloc_mutex_init(&arena->node_cache_mtx, "arena_node_cache", + WITNESS_RANK_ARENA_NODE_CACHE)) + return (NULL); + + arena->chunk_hooks = chunk_hooks_default; + + /* Initialize bins. */ + for (i = 0; i < NBINS; i++) { + arena_bin_t *bin = &arena->bins[i]; + if (malloc_mutex_init(&bin->lock, "arena_bin", + WITNESS_RANK_ARENA_BIN)) + return (NULL); + bin->runcur = NULL; + arena_run_heap_new(&bin->runs); + if (config_stats) + memset(&bin->stats, 0, sizeof(malloc_bin_stats_t)); + } + + return (arena); +} + +/* + * Calculate bin_info->run_size such that it meets the following constraints: + * + * *) bin_info->run_size <= arena_maxrun + * *) bin_info->nregs <= RUN_MAXREGS + * + * bin_info->nregs and bin_info->reg0_offset are also calculated here, since + * these settings are all interdependent. + */ +static void +bin_info_run_size_calc(arena_bin_info_t *bin_info) +{ + size_t pad_size; + size_t try_run_size, perfect_run_size, actual_run_size; + uint32_t try_nregs, perfect_nregs, actual_nregs; + + /* + * Determine redzone size based on minimum alignment and minimum + * redzone size. Add padding to the end of the run if it is needed to + * align the regions. The padding allows each redzone to be half the + * minimum alignment; without the padding, each redzone would have to + * be twice as large in order to maintain alignment. + */ + if (config_fill && unlikely(opt_redzone)) { + size_t align_min = ZU(1) << (ffs_zu(bin_info->reg_size) - 1); + if (align_min <= REDZONE_MINSIZE) { + bin_info->redzone_size = REDZONE_MINSIZE; + pad_size = 0; + } else { + bin_info->redzone_size = align_min >> 1; + pad_size = bin_info->redzone_size; + } + } else { + bin_info->redzone_size = 0; + pad_size = 0; + } + bin_info->reg_interval = bin_info->reg_size + + (bin_info->redzone_size << 1); + + /* + * Compute run size under ideal conditions (no redzones, no limit on run + * size). 
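+	 * E.g. for reg_size == 48, assuming 4 KiB pages: 4096 and 8192 are
+	 * not multiples of 48, so the loop settles on perfect_run_size ==
+	 * 12288 (three pages) with perfect_nregs == 256.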
+	 */
+	try_run_size = PAGE;
+	try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
+	do {
+		perfect_run_size = try_run_size;
+		perfect_nregs = try_nregs;
+
+		try_run_size += PAGE;
+		try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
+	} while (perfect_run_size != perfect_nregs * bin_info->reg_size);
+	assert(perfect_nregs <= RUN_MAXREGS);
+
+	actual_run_size = perfect_run_size;
+	actual_nregs = (uint32_t)((actual_run_size - pad_size) /
+	    bin_info->reg_interval);
+
+	/*
+	 * Redzones can require enough padding that not even a single region can
+	 * fit within the number of pages that would normally be dedicated to a
+	 * run for this size class.  Increase the run size until at least one
+	 * region fits.
+	 */
+	while (actual_nregs == 0) {
+		assert(config_fill && unlikely(opt_redzone));
+
+		actual_run_size += PAGE;
+		actual_nregs = (uint32_t)((actual_run_size - pad_size) /
+		    bin_info->reg_interval);
+	}
+
+	/*
+	 * Make sure that the run will fit within an arena chunk.
+	 */
+	while (actual_run_size > arena_maxrun) {
+		actual_run_size -= PAGE;
+		actual_nregs = (uint32_t)((actual_run_size - pad_size) /
+		    bin_info->reg_interval);
+	}
+	assert(actual_nregs > 0);
+	assert(actual_run_size == s2u(actual_run_size));
+
+	/* Copy final settings. */
+	bin_info->run_size = actual_run_size;
+	bin_info->nregs = actual_nregs;
+	bin_info->reg0_offset = (uint32_t)(actual_run_size - (actual_nregs *
+	    bin_info->reg_interval) - pad_size + bin_info->redzone_size);
+
+	assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
+	    * bin_info->reg_interval) + pad_size == bin_info->run_size);
+}
+
+static void
+bin_info_init(void)
+{
+	arena_bin_info_t *bin_info;
+
+#define	BIN_INFO_INIT_bin_yes(index, size)				\
+	bin_info = &arena_bin_info[index];				\
+	bin_info->reg_size = size;					\
+	bin_info_run_size_calc(bin_info);				\
+	bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
+#define	BIN_INFO_INIT_bin_no(index, size)
+#define	SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup)	\
+	BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
+	SIZE_CLASSES
+#undef BIN_INFO_INIT_bin_yes
+#undef BIN_INFO_INIT_bin_no
+#undef SC
+}
+
+static void
+init_thp_initially_huge(void)
+{
+	int fd;
+	char buf[sizeof("[always] madvise never\n")];
+	ssize_t nread;
+	static const char *enabled_states[] = {
+		"[always] madvise never\n",
+		"always [madvise] never\n",
+		"always madvise [never]\n"
+	};
+	static const bool thp_initially_huge_states[] = {
+		true,
+		false,
+		false
+	};
+	unsigned i;
+
+	if (config_debug) {
+		for (i = 0; i < sizeof(enabled_states)/sizeof(const char *);
+		    i++) {
+			assert(sizeof(buf) > strlen(enabled_states[i]));
+		}
+	}
+	assert(sizeof(enabled_states)/sizeof(const char *) ==
+	    sizeof(thp_initially_huge_states)/sizeof(bool));
+
+#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open)
+	fd = (int)syscall(SYS_open,
+	    "/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY);
+#else
+	fd = open("/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY);
+#endif
+	if (fd == -1) {
+		goto label_error;
+	}
+
+#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_read)
+	nread = (ssize_t)syscall(SYS_read, fd, &buf, sizeof(buf));
+#else
+	nread = read(fd, &buf, sizeof(buf));
+#endif
+
+#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_close)
+	syscall(SYS_close, fd);
+#else
+	close(fd);
+#endif
+
+	if (nread < 1) {
+		goto label_error;
+	}
+	for (i = 0; i < sizeof(enabled_states)/sizeof(const char *);
+	    i++) {
+		if (strncmp(buf, enabled_states[i], (size_t)nread) == 0) {
+			thp_initially_huge = thp_initially_huge_states[i];
+			return;
+		}
+	}
+
+label_error:
+	thp_initially_huge = false;
+}
+
+void
+arena_boot(void)
+{
+	unsigned i;
+
+	if (config_thp && opt_thp) {
+		init_thp_initially_huge();
+	}
+
+	arena_lg_dirty_mult_default_set(opt_lg_dirty_mult);
+	arena_decay_time_default_set(opt_decay_time);
+
+	/*
+	 * Compute the header size such that it is large enough to contain the
+	 * page map.  The page map is biased to omit entries for the header
+	 * itself, so some iteration is necessary to compute the map bias.
+ * + * 1) Compute safe header_size and map_bias values that include enough + * space for an unbiased page map. + * 2) Refine map_bias based on (1) to omit the header pages in the page + * map. The resulting map_bias may be one too small. + * 3) Refine map_bias based on (2). The result will be >= the result + * from (2), and will always be correct. + */ + map_bias = 0; + for (i = 0; i < 3; i++) { + size_t header_size = offsetof(arena_chunk_t, map_bits) + + ((sizeof(arena_chunk_map_bits_t) + + sizeof(arena_chunk_map_misc_t)) * (chunk_npages-map_bias)); + map_bias = (header_size + PAGE_MASK) >> LG_PAGE; + } + assert(map_bias > 0); + + map_misc_offset = offsetof(arena_chunk_t, map_bits) + + sizeof(arena_chunk_map_bits_t) * (chunk_npages-map_bias); + + arena_maxrun = chunksize - (map_bias << LG_PAGE); + assert(arena_maxrun > 0); + large_maxclass = index2size(size2index(chunksize)-1); + assert(large_maxclass > 0); + assert(large_maxclass + large_pad <= arena_maxrun); + nlclasses = size2index(large_maxclass) - size2index(SMALL_MAXCLASS); + nhclasses = NSIZES - nlclasses - NBINS; + + bin_info_init(); +} + +void +arena_prefork0(tsdn_t *tsdn, arena_t *arena) +{ + + malloc_mutex_prefork(tsdn, &arena->lock); +} + +void +arena_prefork1(tsdn_t *tsdn, arena_t *arena) +{ + + malloc_mutex_prefork(tsdn, &arena->chunks_mtx); +} + +void +arena_prefork2(tsdn_t *tsdn, arena_t *arena) +{ + + malloc_mutex_prefork(tsdn, &arena->node_cache_mtx); +} + +void +arena_prefork3(tsdn_t *tsdn, arena_t *arena) +{ + unsigned i; + + for (i = 0; i < NBINS; i++) + malloc_mutex_prefork(tsdn, &arena->bins[i].lock); + malloc_mutex_prefork(tsdn, &arena->huge_mtx); +} + +void +arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) +{ + unsigned i; + + malloc_mutex_postfork_parent(tsdn, &arena->huge_mtx); + for (i = 0; i < NBINS; i++) + malloc_mutex_postfork_parent(tsdn, &arena->bins[i].lock); + malloc_mutex_postfork_parent(tsdn, &arena->node_cache_mtx); + malloc_mutex_postfork_parent(tsdn, &arena->chunks_mtx); + malloc_mutex_postfork_parent(tsdn, &arena->lock); +} + +void +arena_postfork_child(tsdn_t *tsdn, arena_t *arena) +{ + unsigned i; + + malloc_mutex_postfork_child(tsdn, &arena->huge_mtx); + for (i = 0; i < NBINS; i++) + malloc_mutex_postfork_child(tsdn, &arena->bins[i].lock); + malloc_mutex_postfork_child(tsdn, &arena->node_cache_mtx); + malloc_mutex_postfork_child(tsdn, &arena->chunks_mtx); + malloc_mutex_postfork_child(tsdn, &arena->lock); +} diff --git a/sources/jemalloc/src/je_atomic.c b/sources/jemalloc/src/je_atomic.c new file mode 100755 index 00000000..77ee3131 --- /dev/null +++ b/sources/jemalloc/src/je_atomic.c @@ -0,0 +1,2 @@ +#define JEMALLOC_ATOMIC_C_ +#include "jemalloc/internal/jemalloc_internal.h" diff --git a/sources/jemalloc/src/je_base.c b/sources/jemalloc/src/je_base.c new file mode 100755 index 00000000..5681a3f3 --- /dev/null +++ b/sources/jemalloc/src/je_base.c @@ -0,0 +1,187 @@ +#define JEMALLOC_BASE_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +/******************************************************************************/ +/* Data. 
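+ * (Static state for the base allocator, which carves allocator metadata out
+ * of raw chunks; base_mtx guards it.)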
*/ + +static malloc_mutex_t base_mtx; +static size_t base_extent_sn_next; +static extent_tree_t base_avail_szsnad; +static extent_node_t *base_nodes; +static size_t base_allocated; +static size_t base_resident; +static size_t base_mapped; + +/******************************************************************************/ + +static extent_node_t * +base_node_try_alloc(tsdn_t *tsdn) +{ + extent_node_t *node; + + malloc_mutex_assert_owner(tsdn, &base_mtx); + + if (base_nodes == NULL) + return (NULL); + node = base_nodes; + base_nodes = *(extent_node_t **)node; + JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t)); + return (node); +} + +static void +base_node_dalloc(tsdn_t *tsdn, extent_node_t *node) +{ + + malloc_mutex_assert_owner(tsdn, &base_mtx); + + JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t)); + *(extent_node_t **)node = base_nodes; + base_nodes = node; +} + +static void +base_extent_node_init(extent_node_t *node, void *addr, size_t size) +{ + size_t sn = atomic_add_z(&base_extent_sn_next, 1) - 1; + + extent_node_init(node, NULL, addr, size, sn, true, true); +} + +static extent_node_t * +base_chunk_alloc(tsdn_t *tsdn, size_t minsize) +{ + extent_node_t *node; + size_t csize, nsize; + void *addr; + + malloc_mutex_assert_owner(tsdn, &base_mtx); + assert(minsize != 0); + node = base_node_try_alloc(tsdn); + /* Allocate enough space to also carve a node out if necessary. */ + nsize = (node == NULL) ? CACHELINE_CEILING(sizeof(extent_node_t)) : 0; + csize = CHUNK_CEILING(minsize + nsize); + addr = chunk_alloc_base(csize); + if (addr == NULL) { + if (node != NULL) + base_node_dalloc(tsdn, node); + return (NULL); + } + base_mapped += csize; + if (node == NULL) { + node = (extent_node_t *)addr; + addr = (void *)((uintptr_t)addr + nsize); + csize -= nsize; + if (config_stats) { + base_allocated += nsize; + base_resident += PAGE_CEILING(nsize); + } + } + base_extent_node_init(node, addr, csize); + return (node); +} + +/* + * base_alloc() guarantees demand-zeroed memory, in order to make multi-page + * sparse data structures such as radix tree nodes efficient with respect to + * physical memory usage. + */ +void * +base_alloc(tsdn_t *tsdn, size_t size) +{ + void *ret; + size_t csize, usize; + extent_node_t *node; + extent_node_t key; + + /* + * Round size up to nearest multiple of the cacheline size, so that + * there is no chance of false cache line sharing. + */ + csize = CACHELINE_CEILING(size); + + usize = s2u(csize); + extent_node_init(&key, NULL, NULL, usize, 0, false, false); + malloc_mutex_lock(tsdn, &base_mtx); + node = extent_tree_szsnad_nsearch(&base_avail_szsnad, &key); + if (node != NULL) { + /* Use existing space. */ + extent_tree_szsnad_remove(&base_avail_szsnad, node); + } else { + /* Try to allocate more space. */ + node = base_chunk_alloc(tsdn, csize); + } + if (node == NULL) { + ret = NULL; + goto label_return; + } + + ret = extent_node_addr_get(node); + if (extent_node_size_get(node) > csize) { + extent_node_addr_set(node, (void *)((uintptr_t)ret + csize)); + extent_node_size_set(node, extent_node_size_get(node) - csize); + extent_tree_szsnad_insert(&base_avail_szsnad, node); + } else + base_node_dalloc(tsdn, node); + if (config_stats) { + base_allocated += csize; + /* + * Add one PAGE to base_resident for every page boundary that is + * crossed by the new allocation. 
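+		 * E.g. assuming 4 KiB pages, an allocation that starts 0x80
+		 * bytes before a page boundary with csize == 0x100 crosses
+		 * one boundary and adds one page, whereas an allocation that
+		 * stays within an already-counted page adds nothing.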
+ */ + base_resident += PAGE_CEILING((uintptr_t)ret + csize) - + PAGE_CEILING((uintptr_t)ret); + } + JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, csize); +label_return: + malloc_mutex_unlock(tsdn, &base_mtx); + return (ret); +} + +void +base_stats_get(tsdn_t *tsdn, size_t *allocated, size_t *resident, + size_t *mapped) +{ + + malloc_mutex_lock(tsdn, &base_mtx); + assert(base_allocated <= base_resident); + assert(base_resident <= base_mapped); + *allocated = base_allocated; + *resident = base_resident; + *mapped = base_mapped; + malloc_mutex_unlock(tsdn, &base_mtx); +} + +bool +base_boot(void) +{ + + if (malloc_mutex_init(&base_mtx, "base", WITNESS_RANK_BASE)) + return (true); + base_extent_sn_next = 0; + extent_tree_szsnad_new(&base_avail_szsnad); + base_nodes = NULL; + + return (false); +} + +void +base_prefork(tsdn_t *tsdn) +{ + + malloc_mutex_prefork(tsdn, &base_mtx); +} + +void +base_postfork_parent(tsdn_t *tsdn) +{ + + malloc_mutex_postfork_parent(tsdn, &base_mtx); +} + +void +base_postfork_child(tsdn_t *tsdn) +{ + + malloc_mutex_postfork_child(tsdn, &base_mtx); +} diff --git a/sources/jemalloc/src/je_bitmap.c b/sources/jemalloc/src/je_bitmap.c new file mode 100755 index 00000000..ac0f3b38 --- /dev/null +++ b/sources/jemalloc/src/je_bitmap.c @@ -0,0 +1,111 @@ +#define JEMALLOC_BITMAP_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +/******************************************************************************/ + +#ifdef USE_TREE + +void +bitmap_info_init(bitmap_info_t *binfo, size_t nbits) +{ + unsigned i; + size_t group_count; + + assert(nbits > 0); + assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS)); + + /* + * Compute the number of groups necessary to store nbits bits, and + * progressively work upward through the levels until reaching a level + * that requires only one group. + */ + binfo->levels[0].group_offset = 0; + group_count = BITMAP_BITS2GROUPS(nbits); + for (i = 1; group_count > 1; i++) { + assert(i < BITMAP_MAX_LEVELS); + binfo->levels[i].group_offset = binfo->levels[i-1].group_offset + + group_count; + group_count = BITMAP_BITS2GROUPS(group_count); + } + binfo->levels[i].group_offset = binfo->levels[i-1].group_offset + + group_count; + assert(binfo->levels[i].group_offset <= BITMAP_GROUPS_MAX); + binfo->nlevels = i; + binfo->nbits = nbits; +} + +static size_t +bitmap_info_ngroups(const bitmap_info_t *binfo) +{ + + return (binfo->levels[binfo->nlevels].group_offset); +} + +void +bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo) +{ + size_t extra; + unsigned i; + + /* + * Bits are actually inverted with regard to the external bitmap + * interface, so the bitmap starts out with all 1 bits, except for + * trailing unused bits (if any). Note that each group uses bit 0 to + * correspond to the first logical bit in the group, so extra bits + * are the most significant bits of the last group. 
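+	 * E.g. assuming 64-bit groups, nbits == 10 gives extra ==
+	 * (64 - (10 & 63)) & 63 == 54, so the last group is shifted right by
+	 * 54 bits and only its low 10 bits start out set.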
+ */ + memset(bitmap, 0xffU, bitmap_size(binfo)); + extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK)) + & BITMAP_GROUP_NBITS_MASK; + if (extra != 0) + bitmap[binfo->levels[1].group_offset - 1] >>= extra; + for (i = 1; i < binfo->nlevels; i++) { + size_t group_count = binfo->levels[i].group_offset - + binfo->levels[i-1].group_offset; + extra = (BITMAP_GROUP_NBITS - (group_count & + BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK; + if (extra != 0) + bitmap[binfo->levels[i+1].group_offset - 1] >>= extra; + } +} + +#else /* USE_TREE */ + +void +bitmap_info_init(bitmap_info_t *binfo, size_t nbits) +{ + + assert(nbits > 0); + assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS)); + + binfo->ngroups = BITMAP_BITS2GROUPS(nbits); + binfo->nbits = nbits; +} + +static size_t +bitmap_info_ngroups(const bitmap_info_t *binfo) +{ + + return (binfo->ngroups); +} + +void +bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo) +{ + size_t extra; + + memset(bitmap, 0xffU, bitmap_size(binfo)); + extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK)) + & BITMAP_GROUP_NBITS_MASK; + if (extra != 0) + bitmap[binfo->ngroups - 1] >>= extra; +} + +#endif /* USE_TREE */ + +size_t +bitmap_size(const bitmap_info_t *binfo) +{ + + return (bitmap_info_ngroups(binfo) << LG_SIZEOF_BITMAP); +} diff --git a/sources/jemalloc/src/je_chunk.c b/sources/jemalloc/src/je_chunk.c new file mode 100755 index 00000000..94f28f2d --- /dev/null +++ b/sources/jemalloc/src/je_chunk.c @@ -0,0 +1,799 @@ +#define JEMALLOC_CHUNK_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +/******************************************************************************/ +/* Data. */ + +const char *opt_dss = DSS_DEFAULT; +size_t opt_lg_chunk = 0; + +/* Used exclusively for gdump triggering. */ +static size_t curchunks; +static size_t highchunks; + +rtree_t chunks_rtree; + +/* Various chunk-related settings. */ +size_t chunksize; +size_t chunksize_mask; /* (chunksize - 1). */ +size_t chunk_npages; + +static void *chunk_alloc_default(void *new_addr, size_t size, + size_t alignment, bool *zero, bool *commit, unsigned arena_ind); +static bool chunk_dalloc_default(void *chunk, size_t size, bool committed, + unsigned arena_ind); +static bool chunk_commit_default(void *chunk, size_t size, size_t offset, + size_t length, unsigned arena_ind); +static bool chunk_decommit_default(void *chunk, size_t size, size_t offset, + size_t length, unsigned arena_ind); +static bool chunk_purge_default(void *chunk, size_t size, size_t offset, + size_t length, unsigned arena_ind); +static bool chunk_split_default(void *chunk, size_t size, size_t size_a, + size_t size_b, bool committed, unsigned arena_ind); +static bool chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, + size_t size_b, bool committed, unsigned arena_ind); + +const chunk_hooks_t chunk_hooks_default = { + chunk_alloc_default, + chunk_dalloc_default, + chunk_commit_default, + chunk_decommit_default, + chunk_purge_default, + chunk_split_default, + chunk_merge_default +}; + +/******************************************************************************/ +/* + * Function prototypes for static functions that are referenced prior to + * definition. 
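+ * (chunk_recycle(), which appears above chunk_record() in this file, calls
+ * it on its error paths.)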
+ */ + +static void chunk_record(tsdn_t *tsdn, arena_t *arena, + chunk_hooks_t *chunk_hooks, extent_tree_t *chunks_szsnad, + extent_tree_t *chunks_ad, bool cache, void *chunk, size_t size, size_t sn, + bool zeroed, bool committed); + +/******************************************************************************/ + +static chunk_hooks_t +chunk_hooks_get_locked(arena_t *arena) +{ + + return (arena->chunk_hooks); +} + +chunk_hooks_t +chunk_hooks_get(tsdn_t *tsdn, arena_t *arena) +{ + chunk_hooks_t chunk_hooks; + + malloc_mutex_lock(tsdn, &arena->chunks_mtx); + chunk_hooks = chunk_hooks_get_locked(arena); + malloc_mutex_unlock(tsdn, &arena->chunks_mtx); + + return (chunk_hooks); +} + +chunk_hooks_t +chunk_hooks_set(tsdn_t *tsdn, arena_t *arena, const chunk_hooks_t *chunk_hooks) +{ + chunk_hooks_t old_chunk_hooks; + + malloc_mutex_lock(tsdn, &arena->chunks_mtx); + old_chunk_hooks = arena->chunk_hooks; + /* + * Copy each field atomically so that it is impossible for readers to + * see partially updated pointers. There are places where readers only + * need one hook function pointer (therefore no need to copy the + * entirety of arena->chunk_hooks), and stale reads do not affect + * correctness, so they perform unlocked reads. + */ +#define ATOMIC_COPY_HOOK(n) do { \ + union { \ + chunk_##n##_t **n; \ + void **v; \ + } u; \ + u.n = &arena->chunk_hooks.n; \ + atomic_write_p(u.v, chunk_hooks->n); \ +} while (0) + ATOMIC_COPY_HOOK(alloc); + ATOMIC_COPY_HOOK(dalloc); + ATOMIC_COPY_HOOK(commit); + ATOMIC_COPY_HOOK(decommit); + ATOMIC_COPY_HOOK(purge); + ATOMIC_COPY_HOOK(split); + ATOMIC_COPY_HOOK(merge); +#undef ATOMIC_COPY_HOOK + malloc_mutex_unlock(tsdn, &arena->chunks_mtx); + + return (old_chunk_hooks); +} + +static void +chunk_hooks_assure_initialized_impl(tsdn_t *tsdn, arena_t *arena, + chunk_hooks_t *chunk_hooks, bool locked) +{ + static const chunk_hooks_t uninitialized_hooks = + CHUNK_HOOKS_INITIALIZER; + + if (memcmp(chunk_hooks, &uninitialized_hooks, sizeof(chunk_hooks_t)) == + 0) { + *chunk_hooks = locked ? chunk_hooks_get_locked(arena) : + chunk_hooks_get(tsdn, arena); + } +} + +static void +chunk_hooks_assure_initialized_locked(tsdn_t *tsdn, arena_t *arena, + chunk_hooks_t *chunk_hooks) +{ + + chunk_hooks_assure_initialized_impl(tsdn, arena, chunk_hooks, true); +} + +static void +chunk_hooks_assure_initialized(tsdn_t *tsdn, arena_t *arena, + chunk_hooks_t *chunk_hooks) +{ + + chunk_hooks_assure_initialized_impl(tsdn, arena, chunk_hooks, false); +} + +bool +chunk_register(const void *chunk, const extent_node_t *node, bool *gdump) +{ + + assert(extent_node_addr_get(node) == chunk); + + if (rtree_set(&chunks_rtree, (uintptr_t)chunk, node)) + return (true); + if (config_prof && opt_prof) { + size_t size = extent_node_size_get(node); + size_t nadd = (size == 0) ? 1 : size / chunksize; + size_t cur = atomic_add_z(&curchunks, nadd); + size_t high = atomic_read_z(&highchunks); + while (cur > high && atomic_cas_z(&highchunks, high, cur)) { + /* + * Don't refresh cur, because it may have decreased + * since this thread lost the highchunks update race. + */ + high = atomic_read_z(&highchunks); + } + *gdump = (cur > high && prof_gdump_get_unlocked()); + } + + return (false); +} + +void +chunk_deregister(const void *chunk, const extent_node_t *node) +{ + bool err; + + err = rtree_set(&chunks_rtree, (uintptr_t)chunk, NULL); + assert(!err); + if (config_prof && opt_prof) { + size_t size = extent_node_size_get(node); + size_t nsub = (size == 0) ? 
1 : size / chunksize; + assert(atomic_read_z(&curchunks) >= nsub); + atomic_sub_z(&curchunks, nsub); + } +} + +/* + * Do first-best-fit chunk selection, i.e. select the oldest/lowest chunk that + * best fits. + */ +static extent_node_t * +chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szsnad, size_t size) +{ + extent_node_t *node; + size_t qsize; + extent_node_t key; + + assert(size == CHUNK_CEILING(size)); + + qsize = extent_size_quantize_ceil(size); + extent_node_init(&key, arena, NULL, qsize, 0, false, false); + node = extent_tree_szsnad_nsearch(chunks_szsnad, &key); + assert(node == NULL || extent_node_size_get(node) >= size); + return node; +} + +static void * +chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, + extent_tree_t *chunks_szsnad, extent_tree_t *chunks_ad, bool cache, + void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero, + bool *commit, bool dalloc_node) +{ + void *ret; + extent_node_t *node; + size_t alloc_size, leadsize, trailsize; + bool zeroed, committed; + + assert(CHUNK_CEILING(size) == size); + assert(alignment > 0); + assert(new_addr == NULL || alignment == chunksize); + assert(CHUNK_ADDR2BASE(new_addr) == new_addr); + /* + * Cached chunks use the node linkage embedded in their headers, in + * which case dalloc_node is true, and new_addr is non-NULL because + * we're operating on a specific chunk. + */ + assert(dalloc_node || new_addr != NULL); + + alloc_size = size + CHUNK_CEILING(alignment) - chunksize; + /* Beware size_t wrap-around. */ + if (alloc_size < size) + return (NULL); + malloc_mutex_lock(tsdn, &arena->chunks_mtx); + chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks); + if (new_addr != NULL) { + extent_node_t key; + extent_node_init(&key, arena, new_addr, alloc_size, 0, false, + false); + node = extent_tree_ad_search(chunks_ad, &key); + } else { + node = chunk_first_best_fit(arena, chunks_szsnad, alloc_size); + } + if (node == NULL || (new_addr != NULL && extent_node_size_get(node) < + size)) { + malloc_mutex_unlock(tsdn, &arena->chunks_mtx); + return (NULL); + } + leadsize = ALIGNMENT_CEILING((uintptr_t)extent_node_addr_get(node), + alignment) - (uintptr_t)extent_node_addr_get(node); + assert(new_addr == NULL || leadsize == 0); + assert(extent_node_size_get(node) >= leadsize + size); + trailsize = extent_node_size_get(node) - leadsize - size; + ret = (void *)((uintptr_t)extent_node_addr_get(node) + leadsize); + *sn = extent_node_sn_get(node); + zeroed = extent_node_zeroed_get(node); + if (zeroed) + *zero = true; + committed = extent_node_committed_get(node); + if (committed) + *commit = true; + /* Split the lead. */ + if (leadsize != 0 && + chunk_hooks->split(extent_node_addr_get(node), + extent_node_size_get(node), leadsize, size, false, arena->ind)) { + malloc_mutex_unlock(tsdn, &arena->chunks_mtx); + return (NULL); + } + /* Remove node from the tree. */ + extent_tree_szsnad_remove(chunks_szsnad, node); + extent_tree_ad_remove(chunks_ad, node); + arena_chunk_cache_maybe_remove(arena, node, cache); + if (leadsize != 0) { + /* Insert the leading space as a smaller chunk. */ + extent_node_size_set(node, leadsize); + extent_tree_szsnad_insert(chunks_szsnad, node); + extent_tree_ad_insert(chunks_ad, node); + arena_chunk_cache_maybe_insert(arena, node, cache); + node = NULL; + } + if (trailsize != 0) { + /* Split the trail. 
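+		 * E.g. when a two-chunk cached extent satisfies a one-chunk,
+		 * chunksize-aligned request, leadsize is 0 and trailsize is
+		 * one chunk, which is reinserted below as a smaller extent.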
*/ + if (chunk_hooks->split(ret, size + trailsize, size, + trailsize, false, arena->ind)) { + if (dalloc_node && node != NULL) + arena_node_dalloc(tsdn, arena, node); + malloc_mutex_unlock(tsdn, &arena->chunks_mtx); + chunk_record(tsdn, arena, chunk_hooks, chunks_szsnad, + chunks_ad, cache, ret, size + trailsize, *sn, + zeroed, committed); + return (NULL); + } + /* Insert the trailing space as a smaller chunk. */ + if (node == NULL) { + node = arena_node_alloc(tsdn, arena); + if (node == NULL) { + malloc_mutex_unlock(tsdn, &arena->chunks_mtx); + chunk_record(tsdn, arena, chunk_hooks, + chunks_szsnad, chunks_ad, cache, ret, size + + trailsize, *sn, zeroed, committed); + return (NULL); + } + } + extent_node_init(node, arena, (void *)((uintptr_t)(ret) + size), + trailsize, *sn, zeroed, committed); + extent_tree_szsnad_insert(chunks_szsnad, node); + extent_tree_ad_insert(chunks_ad, node); + arena_chunk_cache_maybe_insert(arena, node, cache); + node = NULL; + } + if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) { + malloc_mutex_unlock(tsdn, &arena->chunks_mtx); + chunk_record(tsdn, arena, chunk_hooks, chunks_szsnad, chunks_ad, + cache, ret, size, *sn, zeroed, committed); + return (NULL); + } + malloc_mutex_unlock(tsdn, &arena->chunks_mtx); + + assert(dalloc_node || node != NULL); + if (dalloc_node && node != NULL) + arena_node_dalloc(tsdn, arena, node); + if (*zero) { + if (!zeroed) + memset(ret, 0, size); + else if (config_debug) { + size_t i; + size_t *p = (size_t *)(uintptr_t)ret; + + for (i = 0; i < size / sizeof(size_t); i++) + assert(p[i] == 0); + } + if (config_valgrind) + JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, size); + } + return (ret); +} + +/* + * If the caller specifies (!*zero), it is still possible to receive zeroed + * memory, in which case *zero is toggled to true. arena_chunk_alloc() takes + * advantage of this to avoid demanding zeroed chunks, but taking advantage of + * them if they are returned. + */ +static void * +chunk_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, + size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) +{ + void *ret; + + assert(size != 0); + assert((size & chunksize_mask) == 0); + assert(alignment != 0); + assert((alignment & chunksize_mask) == 0); + + /* "primary" dss. */ + if (have_dss && dss_prec == dss_prec_primary && (ret = + chunk_alloc_dss(tsdn, arena, new_addr, size, alignment, zero, + commit)) != NULL) + return (ret); + /* mmap. */ + if ((ret = chunk_alloc_mmap(new_addr, size, alignment, zero, commit)) != + NULL) + return (ret); + /* "secondary" dss. */ + if (have_dss && dss_prec == dss_prec_secondary && (ret = + chunk_alloc_dss(tsdn, arena, new_addr, size, alignment, zero, + commit)) != NULL) + return (ret); + + /* All strategies for allocation failed. */ + return (NULL); +} + +void * +chunk_alloc_base(size_t size) +{ + void *ret; + bool zero, commit; + + /* + * Directly call chunk_alloc_mmap() rather than chunk_alloc_core() + * because it's critical that chunk_alloc_base() return untouched + * demand-zeroed virtual memory. 
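+	 * base_alloc() in turn relies on that guarantee when handing out
+	 * demand-zeroed memory for sparse metadata such as radix tree nodes.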
+ */ + zero = true; + commit = true; + ret = chunk_alloc_mmap(NULL, size, chunksize, &zero, &commit); + if (ret == NULL) + return (NULL); + if (config_valgrind) + JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size); + + return (ret); +} + +void * +chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, + void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero, + bool *commit, bool dalloc_node) +{ + void *ret; + + assert(size != 0); + assert((size & chunksize_mask) == 0); + assert(alignment != 0); + assert((alignment & chunksize_mask) == 0); + + ret = chunk_recycle(tsdn, arena, chunk_hooks, + &arena->chunks_szsnad_cached, &arena->chunks_ad_cached, true, + new_addr, size, alignment, sn, zero, commit, dalloc_node); + if (ret == NULL) + return (NULL); + if (config_valgrind) + JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size); + return (ret); +} + +static arena_t * +chunk_arena_get(tsdn_t *tsdn, unsigned arena_ind) +{ + arena_t *arena; + + arena = arena_get(tsdn, arena_ind, false); + /* + * The arena we're allocating on behalf of must have been initialized + * already. + */ + assert(arena != NULL); + return (arena); +} + +static void * +chunk_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr, + size_t size, size_t alignment, bool *zero, bool *commit) +{ + void *ret; + + ret = chunk_alloc_core(tsdn, arena, new_addr, size, alignment, zero, + commit, arena->dss_prec); + if (ret == NULL) + return (NULL); + if (config_valgrind) + JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size); + + return (ret); +} + +static void * +chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero, + bool *commit, unsigned arena_ind) +{ + tsdn_t *tsdn; + arena_t *arena; + + tsdn = tsdn_fetch(); + arena = chunk_arena_get(tsdn, arena_ind); + + return (chunk_alloc_default_impl(tsdn, arena, new_addr, size, alignment, + zero, commit)); +} + +static void * +chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, + void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero, + bool *commit) +{ + void *ret; + + assert(size != 0); + assert((size & chunksize_mask) == 0); + assert(alignment != 0); + assert((alignment & chunksize_mask) == 0); + + ret = chunk_recycle(tsdn, arena, chunk_hooks, + &arena->chunks_szsnad_retained, &arena->chunks_ad_retained, false, + new_addr, size, alignment, sn, zero, commit, true); + + if (config_stats && ret != NULL) + arena->stats.retained -= size; + + return (ret); +} + +void * +chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, + void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero, + bool *commit) +{ + void *ret; + + chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks); + + ret = chunk_alloc_retained(tsdn, arena, chunk_hooks, new_addr, size, + alignment, sn, zero, commit); + if (ret == NULL) { + if (chunk_hooks->alloc == chunk_alloc_default) { + /* Call directly to propagate tsdn. 
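+			 * chunk_alloc_default() would have to reconstruct a
+			 * tsdn via tsdn_fetch(); calling the impl function
+			 * reuses the tsdn this caller already holds.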
*/ + ret = chunk_alloc_default_impl(tsdn, arena, new_addr, + size, alignment, zero, commit); + } else { + ret = chunk_hooks->alloc(new_addr, size, alignment, + zero, commit, arena->ind); + } + + if (ret == NULL) + return (NULL); + + *sn = arena_extent_sn_next(arena); + + if (config_valgrind && chunk_hooks->alloc != + chunk_alloc_default) + JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize); + } + + return (ret); +} + +static void +chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, + extent_tree_t *chunks_szsnad, extent_tree_t *chunks_ad, bool cache, + void *chunk, size_t size, size_t sn, bool zeroed, bool committed) +{ + bool unzeroed; + extent_node_t *node, *prev; + extent_node_t key; + + assert(!cache || !zeroed); + unzeroed = cache || !zeroed; + JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size); + + malloc_mutex_lock(tsdn, &arena->chunks_mtx); + chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks); + extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0, 0, + false, false); + node = extent_tree_ad_nsearch(chunks_ad, &key); + /* Try to coalesce forward. */ + if (node != NULL && extent_node_addr_get(node) == + extent_node_addr_get(&key) && extent_node_committed_get(node) == + committed && !chunk_hooks->merge(chunk, size, + extent_node_addr_get(node), extent_node_size_get(node), false, + arena->ind)) { + /* + * Coalesce chunk with the following address range. This does + * not change the position within chunks_ad, so only + * remove/insert from/into chunks_szsnad. + */ + extent_tree_szsnad_remove(chunks_szsnad, node); + arena_chunk_cache_maybe_remove(arena, node, cache); + extent_node_addr_set(node, chunk); + extent_node_size_set(node, size + extent_node_size_get(node)); + if (sn < extent_node_sn_get(node)) + extent_node_sn_set(node, sn); + extent_node_zeroed_set(node, extent_node_zeroed_get(node) && + !unzeroed); + extent_tree_szsnad_insert(chunks_szsnad, node); + arena_chunk_cache_maybe_insert(arena, node, cache); + } else { + /* Coalescing forward failed, so insert a new node. */ + node = arena_node_alloc(tsdn, arena); + if (node == NULL) { + /* + * Node allocation failed, which is an exceedingly + * unlikely failure. Leak chunk after making sure its + * pages have already been purged, so that this is only + * a virtual memory leak. + */ + if (cache) { + chunk_purge_wrapper(tsdn, arena, chunk_hooks, + chunk, size, 0, size); + } + goto label_return; + } + extent_node_init(node, arena, chunk, size, sn, !unzeroed, + committed); + extent_tree_ad_insert(chunks_ad, node); + extent_tree_szsnad_insert(chunks_szsnad, node); + arena_chunk_cache_maybe_insert(arena, node, cache); + } + + /* Try to coalesce backward. */ + prev = extent_tree_ad_prev(chunks_ad, node); + if (prev != NULL && (void *)((uintptr_t)extent_node_addr_get(prev) + + extent_node_size_get(prev)) == chunk && + extent_node_committed_get(prev) == committed && + !chunk_hooks->merge(extent_node_addr_get(prev), + extent_node_size_get(prev), chunk, size, false, arena->ind)) { + /* + * Coalesce chunk with the previous address range. This does + * not change the position within chunks_ad, so only + * remove/insert node from/into chunks_szsnad. 
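+		 * The node that described prev becomes redundant and is
+		 * released to the arena's node cache via arena_node_dalloc()
+		 * below.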
+ */ + extent_tree_szsnad_remove(chunks_szsnad, prev); + extent_tree_ad_remove(chunks_ad, prev); + arena_chunk_cache_maybe_remove(arena, prev, cache); + extent_tree_szsnad_remove(chunks_szsnad, node); + arena_chunk_cache_maybe_remove(arena, node, cache); + extent_node_addr_set(node, extent_node_addr_get(prev)); + extent_node_size_set(node, extent_node_size_get(prev) + + extent_node_size_get(node)); + if (extent_node_sn_get(prev) < extent_node_sn_get(node)) + extent_node_sn_set(node, extent_node_sn_get(prev)); + extent_node_zeroed_set(node, extent_node_zeroed_get(prev) && + extent_node_zeroed_get(node)); + extent_tree_szsnad_insert(chunks_szsnad, node); + arena_chunk_cache_maybe_insert(arena, node, cache); + + arena_node_dalloc(tsdn, arena, prev); + } + +label_return: + malloc_mutex_unlock(tsdn, &arena->chunks_mtx); +} + +void +chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, + void *chunk, size_t size, size_t sn, bool committed) +{ + + assert(chunk != NULL); + assert(CHUNK_ADDR2BASE(chunk) == chunk); + assert(size != 0); + assert((size & chunksize_mask) == 0); + + chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szsnad_cached, + &arena->chunks_ad_cached, true, chunk, size, sn, false, + committed); + arena_maybe_purge(tsdn, arena); +} + +static bool +chunk_dalloc_default_impl(void *chunk, size_t size) +{ + + if (!have_dss || !chunk_in_dss(chunk)) + return (chunk_dalloc_mmap(chunk, size)); + return (true); +} + +static bool +chunk_dalloc_default(void *chunk, size_t size, bool committed, + unsigned arena_ind) +{ + + return (chunk_dalloc_default_impl(chunk, size)); +} + +void +chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, + void *chunk, size_t size, size_t sn, bool zeroed, bool committed) +{ + bool err; + + assert(chunk != NULL); + assert(CHUNK_ADDR2BASE(chunk) == chunk); + assert(size != 0); + assert((size & chunksize_mask) == 0); + + chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks); + /* Try to deallocate. */ + if (chunk_hooks->dalloc == chunk_dalloc_default) { + /* Call directly to propagate tsdn. */ + err = chunk_dalloc_default_impl(chunk, size); + } else + err = chunk_hooks->dalloc(chunk, size, committed, arena->ind); + + if (!err) + return; + /* Try to decommit; purge if that fails. 
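+	 * Both hooks return false on success, so committed ends up false when
+	 * the decommit succeeds, and zeroed is set when the pages were either
+	 * decommitted or successfully purged.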
*/ + if (committed) { + committed = chunk_hooks->decommit(chunk, size, 0, size, + arena->ind); + } + zeroed = !committed || !chunk_hooks->purge(chunk, size, 0, size, + arena->ind); + chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szsnad_retained, + &arena->chunks_ad_retained, false, chunk, size, sn, zeroed, + committed); + + if (config_stats) + arena->stats.retained += size; +} + +static bool +chunk_commit_default(void *chunk, size_t size, size_t offset, size_t length, + unsigned arena_ind) +{ + + return (pages_commit((void *)((uintptr_t)chunk + (uintptr_t)offset), + length)); +} + +static bool +chunk_decommit_default(void *chunk, size_t size, size_t offset, size_t length, + unsigned arena_ind) +{ + + return (pages_decommit((void *)((uintptr_t)chunk + (uintptr_t)offset), + length)); +} + +static bool +chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length, + unsigned arena_ind) +{ + + assert(chunk != NULL); + assert(CHUNK_ADDR2BASE(chunk) == chunk); + assert((offset & PAGE_MASK) == 0); + assert(length != 0); + assert((length & PAGE_MASK) == 0); + + return (pages_purge((void *)((uintptr_t)chunk + (uintptr_t)offset), + length)); +} + +bool +chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, + void *chunk, size_t size, size_t offset, size_t length) +{ + + chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks); + return (chunk_hooks->purge(chunk, size, offset, length, arena->ind)); +} + +static bool +chunk_split_default(void *chunk, size_t size, size_t size_a, size_t size_b, + bool committed, unsigned arena_ind) +{ + + if (!maps_coalesce) + return (true); + return (false); +} + +static bool +chunk_merge_default_impl(void *chunk_a, void *chunk_b) +{ + + if (!maps_coalesce) + return (true); + if (have_dss && !chunk_dss_mergeable(chunk_a, chunk_b)) + return (true); + + return (false); +} + +static bool +chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b, + bool committed, unsigned arena_ind) +{ + + return (chunk_merge_default_impl(chunk_a, chunk_b)); +} + +static rtree_node_elm_t * +chunks_rtree_node_alloc(size_t nelms) +{ + + return ((rtree_node_elm_t *)base_alloc(TSDN_NULL, nelms * + sizeof(rtree_node_elm_t))); +} + +bool +chunk_boot(void) +{ +#ifdef _WIN32 + SYSTEM_INFO info; + GetSystemInfo(&info); + + /* + * Verify actual page size is equal to or an integral multiple of + * configured page size. + */ + if (info.dwPageSize & ((1U << LG_PAGE) - 1)) + return (true); + + /* + * Configure chunksize (if not set) to match granularity (usually 64K), + * so pages_map will always take fast path. + */ + if (!opt_lg_chunk) { + opt_lg_chunk = ffs_u((unsigned)info.dwAllocationGranularity) + - 1; + } +#else + if (!opt_lg_chunk) + opt_lg_chunk = LG_CHUNK_DEFAULT; +#endif + + /* Set variables according to the value of opt_lg_chunk. 
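+	 * E.g. with opt_lg_chunk == 21 and 4 KiB pages (illustrative,
+	 * configuration-dependent values): chunksize == 2 MiB, chunksize_mask
+	 * == 0x1fffff, and chunk_npages == 512.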
*/ + chunksize = (ZU(1) << opt_lg_chunk); + assert(chunksize >= PAGE); + chunksize_mask = chunksize - 1; + chunk_npages = (chunksize >> LG_PAGE); + + if (have_dss) + chunk_dss_boot(); + if (rtree_new(&chunks_rtree, (unsigned)((ZU(1) << (LG_SIZEOF_PTR+3)) - + opt_lg_chunk), chunks_rtree_node_alloc, NULL)) + return (true); + + return (false); +} diff --git a/sources/jemalloc/src/je_chunk_dss.c b/sources/jemalloc/src/je_chunk_dss.c new file mode 100755 index 00000000..8c679395 --- /dev/null +++ b/sources/jemalloc/src/je_chunk_dss.c @@ -0,0 +1,247 @@ +#define JEMALLOC_CHUNK_DSS_C_ +#include "jemalloc/internal/jemalloc_internal.h" +/******************************************************************************/ +/* Data. */ + +const char *dss_prec_names[] = { + "disabled", + "primary", + "secondary", + "N/A" +}; + +/* + * Current dss precedence default, used when creating new arenas. NB: This is + * stored as unsigned rather than dss_prec_t because in principle there's no + * guarantee that sizeof(dss_prec_t) is the same as sizeof(unsigned), and we use + * atomic operations to synchronize the setting. + */ +static unsigned dss_prec_default = (unsigned)DSS_PREC_DEFAULT; + +/* Base address of the DSS. */ +static void *dss_base; +/* Atomic boolean indicating whether the DSS is exhausted. */ +static unsigned dss_exhausted; +/* Atomic current upper limit on DSS addresses. */ +static void *dss_max; + +/******************************************************************************/ + +static void * +chunk_dss_sbrk(intptr_t increment) +{ + +#ifdef JEMALLOC_DSS + return (sbrk(increment)); +#else + not_implemented(); + return (NULL); +#endif +} + +dss_prec_t +chunk_dss_prec_get(void) +{ + dss_prec_t ret; + + if (!have_dss) + return (dss_prec_disabled); + ret = (dss_prec_t)atomic_read_u(&dss_prec_default); + return (ret); +} + +bool +chunk_dss_prec_set(dss_prec_t dss_prec) +{ + + if (!have_dss) + return (dss_prec != dss_prec_disabled); + atomic_write_u(&dss_prec_default, (unsigned)dss_prec); + return (false); +} + +static void * +chunk_dss_max_update(void *new_addr) +{ + void *max_cur; + spin_t spinner; + + /* + * Get the current end of the DSS as max_cur and assure that dss_max is + * up to date. + */ + spin_init(&spinner); + while (true) { + void *max_prev = atomic_read_p(&dss_max); + + max_cur = chunk_dss_sbrk(0); + if ((uintptr_t)max_prev > (uintptr_t)max_cur) { + /* + * Another thread optimistically updated dss_max. Wait + * for it to finish. + */ + spin_adaptive(&spinner); + continue; + } + if (!atomic_cas_p(&dss_max, max_prev, max_cur)) + break; + } + /* Fixed new_addr can only be supported if it is at the edge of DSS. */ + if (new_addr != NULL && max_cur != new_addr) + return (NULL); + + return (max_cur); +} + +void * +chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, + size_t alignment, bool *zero, bool *commit) +{ + cassert(have_dss); + assert(size > 0 && (size & chunksize_mask) == 0); + assert(alignment > 0 && (alignment & chunksize_mask) == 0); + + /* + * sbrk() uses a signed increment argument, so take care not to + * interpret a huge allocation request as a negative increment. + */ + if ((intptr_t)size < 0) + return (NULL); + + if (!atomic_read_u(&dss_exhausted)) { + /* + * The loop is necessary to recover from races with other + * threads that are using the DSS for something other than + * malloc. 
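+ *
+ * A hypothetical interleaving the loop recovers from: this thread samples
+ * dss_max, an unrelated caller of raw sbrk(2) moves the break, and the
+ * dss_prev == max_cur check below then fails, so the iteration rolls back
+ * its optimistic dss_max update and retries.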
+ */ + while (true) { + void *ret, *max_cur, *dss_next, *dss_prev; + void *gap_addr_chunk, *gap_addr_subchunk; + size_t gap_size_chunk, gap_size_subchunk; + intptr_t incr; + + max_cur = chunk_dss_max_update(new_addr); + if (max_cur == NULL) + goto label_oom; + + /* + * Compute how much chunk-aligned gap space (if any) is + * necessary to satisfy alignment. This space can be + * recycled for later use. + */ + gap_addr_chunk = (void *)(CHUNK_CEILING( + (uintptr_t)max_cur)); + ret = (void *)ALIGNMENT_CEILING( + (uintptr_t)gap_addr_chunk, alignment); + gap_size_chunk = (uintptr_t)ret - + (uintptr_t)gap_addr_chunk; + /* + * Compute the address just past the end of the desired + * allocation space. + */ + dss_next = (void *)((uintptr_t)ret + size); + if ((uintptr_t)ret < (uintptr_t)max_cur || + (uintptr_t)dss_next < (uintptr_t)max_cur) + goto label_oom; /* Wrap-around. */ + /* Compute the increment, including subchunk bytes. */ + gap_addr_subchunk = max_cur; + gap_size_subchunk = (uintptr_t)ret - + (uintptr_t)gap_addr_subchunk; + incr = gap_size_subchunk + size; + + assert((uintptr_t)max_cur + incr == (uintptr_t)ret + + size); + + /* + * Optimistically update dss_max, and roll back below if + * sbrk() fails. No other thread will try to extend the + * DSS while dss_max is greater than the current DSS + * max reported by sbrk(0). + */ + if (atomic_cas_p(&dss_max, max_cur, dss_next)) + continue; + + /* Try to allocate. */ + dss_prev = chunk_dss_sbrk(incr); + if (dss_prev == max_cur) { + /* Success. */ + if (gap_size_chunk != 0) { + chunk_hooks_t chunk_hooks = + CHUNK_HOOKS_INITIALIZER; + chunk_dalloc_wrapper(tsdn, arena, + &chunk_hooks, gap_addr_chunk, + gap_size_chunk, + arena_extent_sn_next(arena), false, + true); + } + if (*zero) { + JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED( + ret, size); + memset(ret, 0, size); + } + if (!*commit) + *commit = pages_decommit(ret, size); + return (ret); + } + + /* + * Failure, whether due to OOM or a race with a raw + * sbrk() call from outside the allocator. Try to roll + * back optimistic dss_max update; if rollback fails, + * it's due to another caller of this function having + * succeeded since this invocation started, in which + * case rollback is not necessary. + */ + atomic_cas_p(&dss_max, dss_next, max_cur); + if (dss_prev == (void *)-1) { + /* OOM. 
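+ * (sbrk(2) reports failure as (void *)-1; setting dss_exhausted makes
+ * later calls fail fast at the check above the loop.)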
*/ + atomic_write_u(&dss_exhausted, (unsigned)true); + goto label_oom; + } + } + } +label_oom: + return (NULL); +} + +static bool +chunk_in_dss_helper(void *chunk, void *max) +{ + + return ((uintptr_t)chunk >= (uintptr_t)dss_base && (uintptr_t)chunk < + (uintptr_t)max); +} + +bool +chunk_in_dss(void *chunk) +{ + + cassert(have_dss); + + return (chunk_in_dss_helper(chunk, atomic_read_p(&dss_max))); +} + +bool +chunk_dss_mergeable(void *chunk_a, void *chunk_b) +{ + void *max; + + cassert(have_dss); + + max = atomic_read_p(&dss_max); + return (chunk_in_dss_helper(chunk_a, max) == + chunk_in_dss_helper(chunk_b, max)); +} + +void +chunk_dss_boot(void) +{ + + cassert(have_dss); + + dss_base = chunk_dss_sbrk(0); + dss_exhausted = (unsigned)(dss_base == (void *)-1); + dss_max = dss_base; +} + +/******************************************************************************/ diff --git a/sources/jemalloc/src/je_chunk_mmap.c b/sources/jemalloc/src/je_chunk_mmap.c new file mode 100755 index 00000000..73fc497a --- /dev/null +++ b/sources/jemalloc/src/je_chunk_mmap.c @@ -0,0 +1,78 @@ +#define JEMALLOC_CHUNK_MMAP_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +/******************************************************************************/ + +static void * +chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit) +{ + void *ret; + size_t alloc_size; + + alloc_size = size + alignment - PAGE; + /* Beware size_t wrap-around. */ + if (alloc_size < size) + return (NULL); + do { + void *pages; + size_t leadsize; + pages = pages_map(NULL, alloc_size, commit); + if (pages == NULL) + return (NULL); + leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) - + (uintptr_t)pages; + ret = pages_trim(pages, alloc_size, leadsize, size, commit); + } while (ret == NULL); + + assert(ret != NULL); + *zero = true; + return (ret); +} + +void * +chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero, + bool *commit) +{ + void *ret; + size_t offset; + + /* + * Ideally, there would be a way to specify alignment to mmap() (like + * NetBSD has), but in the absence of such a feature, we have to work + * hard to efficiently create aligned mappings. The reliable, but + * slow method is to create a mapping that is over-sized, then trim the + * excess. However, that always results in one or two calls to + * pages_unmap(). + * + * Optimistically try mapping precisely the right amount before falling + * back to the slow method, with the expectation that the optimistic + * approach works most of the time. 
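+ *
+ * Worked example (illustrative addresses): with 2 MiB alignment, a
+ * pages_map() result of 0x7f0000201000 has a nonzero
+ * ALIGNMENT_ADDR2OFFSET(), so it is unmapped and the slow path maps
+ * size + alignment - PAGE bytes and trims both ends; a result of
+ * 0x7f0000200000 is already aligned and is returned immediately.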
+ */ + + assert(alignment != 0); + assert((alignment & chunksize_mask) == 0); + + ret = pages_map(new_addr, size, commit); + if (ret == NULL || ret == new_addr) + return (ret); + assert(new_addr == NULL); + offset = ALIGNMENT_ADDR2OFFSET(ret, alignment); + if (offset != 0) { + pages_unmap(ret, size); + return (chunk_alloc_mmap_slow(size, alignment, zero, commit)); + } + + assert(ret != NULL); + *zero = true; + return (ret); +} + +bool +chunk_dalloc_mmap(void *chunk, size_t size) +{ + + if (config_munmap) + pages_unmap(chunk, size); + + return (!config_munmap); +} diff --git a/sources/jemalloc/src/je_ckh.c b/sources/jemalloc/src/je_ckh.c new file mode 100755 index 00000000..159bd8ae --- /dev/null +++ b/sources/jemalloc/src/je_ckh.c @@ -0,0 +1,569 @@ +/* + ******************************************************************************* + * Implementation of (2^1+,2) cuckoo hashing, where 2^1+ indicates that each + * hash bucket contains 2^n cells, for n >= 1, and 2 indicates that two hash + * functions are employed. The original cuckoo hashing algorithm was described + * in: + * + * Pagh, R., F.F. Rodler (2004) Cuckoo Hashing. Journal of Algorithms + * 51(2):122-144. + * + * Generalization of cuckoo hashing was discussed in: + * + * Erlingsson, U., M. Manasse, F. McSherry (2006) A cool and practical + * alternative to traditional hash tables. In Proceedings of the 7th + * Workshop on Distributed Data and Structures (WDAS'06), Santa Clara, CA, + * January 2006. + * + * This implementation uses precisely two hash functions because that is the + * fewest that can work, and supporting multiple hashes is an implementation + * burden. Here is a reproduction of Figure 1 from Erlingsson et al. (2006) + * that shows approximate expected maximum load factors for various + * configurations: + * + * | #cells/bucket | + * #hashes | 1 | 2 | 4 | 8 | + * --------+-------+-------+-------+-------+ + * 1 | 0.006 | 0.006 | 0.03 | 0.12 | + * 2 | 0.49 | 0.86 |>0.93< |>0.96< | + * 3 | 0.91 | 0.97 | 0.98 | 0.999 | + * 4 | 0.97 | 0.99 | 0.999 | | + * + * The number of cells per bucket is chosen such that a bucket fits in one cache + * line. So, on 32- and 64-bit systems, we use (8,2) and (4,2) cuckoo hashing, + * respectively. + * + ******************************************************************************/ +#define JEMALLOC_CKH_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +/******************************************************************************/ +/* Function prototypes for non-inline static functions. */ + +static bool ckh_grow(tsd_t *tsd, ckh_t *ckh); +static void ckh_shrink(tsd_t *tsd, ckh_t *ckh); + +/******************************************************************************/ + +/* + * Search bucket for key and return the cell number if found; SIZE_T_MAX + * otherwise. + */ +JEMALLOC_INLINE_C size_t +ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key) +{ + ckhc_t *cell; + unsigned i; + + for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) { + cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i]; + if (cell->key != NULL && ckh->keycomp(key, cell->key)) + return ((bucket << LG_CKH_BUCKET_CELLS) + i); + } + + return (SIZE_T_MAX); +} + +/* + * Search table for key and return cell number if found; SIZE_T_MAX otherwise. + */ +JEMALLOC_INLINE_C size_t +ckh_isearch(ckh_t *ckh, const void *key) +{ + size_t hashes[2], bucket, cell; + + assert(ckh != NULL); + + ckh->hash(key, hashes); + + /* Search primary bucket. 
*/ + bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1); + cell = ckh_bucket_search(ckh, bucket, key); + if (cell != SIZE_T_MAX) + return (cell); + + /* Search secondary bucket. */ + bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1); + cell = ckh_bucket_search(ckh, bucket, key); + return (cell); +} + +JEMALLOC_INLINE_C bool +ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key, + const void *data) +{ + ckhc_t *cell; + unsigned offset, i; + + /* + * Cycle through the cells in the bucket, starting at a random position. + * The randomness avoids worst-case search overhead as buckets fill up. + */ + offset = (unsigned)prng_lg_range_u64(&ckh->prng_state, + LG_CKH_BUCKET_CELLS); + for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) { + cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + + ((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))]; + if (cell->key == NULL) { + cell->key = key; + cell->data = data; + ckh->count++; + return (false); + } + } + + return (true); +} + +/* + * No space is available in bucket. Randomly evict an item, then try to find an + * alternate location for that item. Iteratively repeat this + * eviction/relocation procedure until either success or detection of an + * eviction/relocation bucket cycle. + */ +JEMALLOC_INLINE_C bool +ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey, + void const **argdata) +{ + const void *key, *data, *tkey, *tdata; + ckhc_t *cell; + size_t hashes[2], bucket, tbucket; + unsigned i; + + bucket = argbucket; + key = *argkey; + data = *argdata; + while (true) { + /* + * Choose a random item within the bucket to evict. This is + * critical to correct function, because without (eventually) + * evicting all items within a bucket during iteration, it + * would be possible to get stuck in an infinite loop if there + * were an item for which both hashes indicated the same + * bucket. + */ + i = (unsigned)prng_lg_range_u64(&ckh->prng_state, + LG_CKH_BUCKET_CELLS); + cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i]; + assert(cell->key != NULL); + + /* Swap cell->{key,data} and {key,data} (evict). */ + tkey = cell->key; tdata = cell->data; + cell->key = key; cell->data = data; + key = tkey; data = tdata; + +#ifdef CKH_COUNT + ckh->nrelocs++; +#endif + + /* Find the alternate bucket for the evicted item. */ + ckh->hash(key, hashes); + tbucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1); + if (tbucket == bucket) { + tbucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) + - 1); + /* + * It may be that (tbucket == bucket) still, if the + * item's hashes both indicate this bucket. However, + * we are guaranteed to eventually escape this bucket + * during iteration, assuming pseudo-random item + * selection (true randomness would make infinite + * looping a remote possibility). The reason we can + * never get trapped forever is that there are two + * cases: + * + * 1) This bucket == argbucket, so we will quickly + * detect an eviction cycle and terminate. + * 2) An item was evicted to this bucket from another, + * which means that at least one item in this bucket + * has hashes that indicate distinct buckets. + */ + } + /* Check for a cycle. 
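+ * (Illustrative walk: if a later eviction hashes back to argbucket, the
+ * relocation chain has looped; returning true lets ckh_insert() fall
+ * back to growing the table.)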
*/ + if (tbucket == argbucket) { + *argkey = key; + *argdata = data; + return (true); + } + + bucket = tbucket; + if (!ckh_try_bucket_insert(ckh, bucket, key, data)) + return (false); + } +} + +JEMALLOC_INLINE_C bool +ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata) +{ + size_t hashes[2], bucket; + const void *key = *argkey; + const void *data = *argdata; + + ckh->hash(key, hashes); + + /* Try to insert in primary bucket. */ + bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1); + if (!ckh_try_bucket_insert(ckh, bucket, key, data)) + return (false); + + /* Try to insert in secondary bucket. */ + bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1); + if (!ckh_try_bucket_insert(ckh, bucket, key, data)) + return (false); + + /* + * Try to find a place for this item via iterative eviction/relocation. + */ + return (ckh_evict_reloc_insert(ckh, bucket, argkey, argdata)); +} + +/* + * Try to rebuild the hash table from scratch by inserting all items from the + * old table into the new. + */ +JEMALLOC_INLINE_C bool +ckh_rebuild(ckh_t *ckh, ckhc_t *aTab) +{ + size_t count, i, nins; + const void *key, *data; + + count = ckh->count; + ckh->count = 0; + for (i = nins = 0; nins < count; i++) { + if (aTab[i].key != NULL) { + key = aTab[i].key; + data = aTab[i].data; + if (ckh_try_insert(ckh, &key, &data)) { + ckh->count = count; + return (true); + } + nins++; + } + } + + return (false); +} + +static bool +ckh_grow(tsd_t *tsd, ckh_t *ckh) +{ + bool ret; + ckhc_t *tab, *ttab; + unsigned lg_prevbuckets, lg_curcells; + +#ifdef CKH_COUNT + ckh->ngrows++; +#endif + + /* + * It is possible (though unlikely, given well behaved hashes) that the + * table will have to be doubled more than once in order to create a + * usable table. + */ + lg_prevbuckets = ckh->lg_curbuckets; + lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS; + while (true) { + size_t usize; + + lg_curcells++; + usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE); + if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) { + ret = true; + goto label_return; + } + tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, + true, NULL, true, arena_ichoose(tsd, NULL)); + if (tab == NULL) { + ret = true; + goto label_return; + } + /* Swap in new table. */ + ttab = ckh->tab; + ckh->tab = tab; + tab = ttab; + ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS; + + if (!ckh_rebuild(ckh, tab)) { + idalloctm(tsd_tsdn(tsd), tab, NULL, true, true); + break; + } + + /* Rebuilding failed, so back out partially rebuilt table. */ + idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, true, true); + ckh->tab = tab; + ckh->lg_curbuckets = lg_prevbuckets; + } + + ret = false; +label_return: + return (ret); +} + +static void +ckh_shrink(tsd_t *tsd, ckh_t *ckh) +{ + ckhc_t *tab, *ttab; + size_t usize; + unsigned lg_prevbuckets, lg_curcells; + + /* + * It is possible (though unlikely, given well behaved hashes) that the + * table rebuild will fail. + */ + lg_prevbuckets = ckh->lg_curbuckets; + lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1; + usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE); + if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) + return; + tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, NULL, + true, arena_ichoose(tsd, NULL)); + if (tab == NULL) { + /* + * An OOM error isn't worth propagating, since it doesn't + * prevent this or future operations from proceeding. + */ + return; + } + /* Swap in new table. 
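+ * (The swap happens before rebuilding because ckh_rebuild() inserts
+ * through ckh->tab; the rollback below restores the old table if the
+ * rebuild fails.)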
*/ + ttab = ckh->tab; + ckh->tab = tab; + tab = ttab; + ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS; + + if (!ckh_rebuild(ckh, tab)) { + idalloctm(tsd_tsdn(tsd), tab, NULL, true, true); +#ifdef CKH_COUNT + ckh->nshrinks++; +#endif + return; + } + + /* Rebuilding failed, so back out partially rebuilt table. */ + idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, true, true); + ckh->tab = tab; + ckh->lg_curbuckets = lg_prevbuckets; +#ifdef CKH_COUNT + ckh->nshrinkfails++; +#endif +} + +bool +ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash, + ckh_keycomp_t *keycomp) +{ + bool ret; + size_t mincells, usize; + unsigned lg_mincells; + + assert(minitems > 0); + assert(hash != NULL); + assert(keycomp != NULL); + +#ifdef CKH_COUNT + ckh->ngrows = 0; + ckh->nshrinks = 0; + ckh->nshrinkfails = 0; + ckh->ninserts = 0; + ckh->nrelocs = 0; +#endif + ckh->prng_state = 42; /* Value doesn't really matter. */ + ckh->count = 0; + + /* + * Find the minimum power of 2 that is large enough to fit minitems + * entries. We are using (2+,2) cuckoo hashing, which has an expected + * maximum load factor of at least ~0.86, so 0.75 is a conservative load + * factor that will typically allow mincells items to fit without ever + * growing the table. + */ + assert(LG_CKH_BUCKET_CELLS > 0); + mincells = ((minitems + (3 - (minitems % 3))) / 3) << 2; + for (lg_mincells = LG_CKH_BUCKET_CELLS; + (ZU(1) << lg_mincells) < mincells; + lg_mincells++) + ; /* Do nothing. */ + ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS; + ckh->lg_curbuckets = lg_mincells - LG_CKH_BUCKET_CELLS; + ckh->hash = hash; + ckh->keycomp = keycomp; + + usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE); + if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) { + ret = true; + goto label_return; + } + ckh->tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, + NULL, true, arena_ichoose(tsd, NULL)); + if (ckh->tab == NULL) { + ret = true; + goto label_return; + } + + ret = false; +label_return: + return (ret); +} + +void +ckh_delete(tsd_t *tsd, ckh_t *ckh) +{ + + assert(ckh != NULL); + +#ifdef CKH_VERBOSE + malloc_printf( + "%s(%p): ngrows: %"FMTu64", nshrinks: %"FMTu64"," + " nshrinkfails: %"FMTu64", ninserts: %"FMTu64"," + " nrelocs: %"FMTu64"\n", __func__, ckh, + (unsigned long long)ckh->ngrows, + (unsigned long long)ckh->nshrinks, + (unsigned long long)ckh->nshrinkfails, + (unsigned long long)ckh->ninserts, + (unsigned long long)ckh->nrelocs); +#endif + + idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, true, true); + if (config_debug) + memset(ckh, JEMALLOC_FREE_JUNK, sizeof(ckh_t)); +} + +size_t +ckh_count(ckh_t *ckh) +{ + + assert(ckh != NULL); + + return (ckh->count); +} + +bool +ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data) +{ + size_t i, ncells; + + for (i = *tabind, ncells = (ZU(1) << (ckh->lg_curbuckets + + LG_CKH_BUCKET_CELLS)); i < ncells; i++) { + if (ckh->tab[i].key != NULL) { + if (key != NULL) + *key = (void *)ckh->tab[i].key; + if (data != NULL) + *data = (void *)ckh->tab[i].data; + *tabind = i + 1; + return (false); + } + } + + return (true); +} + +bool +ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data) +{ + bool ret; + + assert(ckh != NULL); + assert(ckh_search(ckh, key, NULL, NULL)); + +#ifdef CKH_COUNT + ckh->ninserts++; +#endif + + while (ckh_try_insert(ckh, &key, &data)) { + if (ckh_grow(tsd, ckh)) { + ret = true; + goto label_return; + } + } + + ret = false; +label_return: + return (ret); +} + +bool +ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void 
**key, + void **data) +{ + size_t cell; + + assert(ckh != NULL); + + cell = ckh_isearch(ckh, searchkey); + if (cell != SIZE_T_MAX) { + if (key != NULL) + *key = (void *)ckh->tab[cell].key; + if (data != NULL) + *data = (void *)ckh->tab[cell].data; + ckh->tab[cell].key = NULL; + ckh->tab[cell].data = NULL; /* Not necessary. */ + + ckh->count--; + /* Try to halve the table if it is less than 1/4 full. */ + if (ckh->count < (ZU(1) << (ckh->lg_curbuckets + + LG_CKH_BUCKET_CELLS - 2)) && ckh->lg_curbuckets + > ckh->lg_minbuckets) { + /* Ignore error due to OOM. */ + ckh_shrink(tsd, ckh); + } + + return (false); + } + + return (true); +} + +bool +ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data) +{ + size_t cell; + + assert(ckh != NULL); + + cell = ckh_isearch(ckh, searchkey); + if (cell != SIZE_T_MAX) { + if (key != NULL) + *key = (void *)ckh->tab[cell].key; + if (data != NULL) + *data = (void *)ckh->tab[cell].data; + return (false); + } + + return (true); +} + +void +ckh_string_hash(const void *key, size_t r_hash[2]) +{ + + hash(key, strlen((const char *)key), 0x94122f33U, r_hash); +} + +bool +ckh_string_keycomp(const void *k1, const void *k2) +{ + + assert(k1 != NULL); + assert(k2 != NULL); + + return (strcmp((char *)k1, (char *)k2) ? false : true); +} + +void +ckh_pointer_hash(const void *key, size_t r_hash[2]) +{ + union { + const void *v; + size_t i; + } u; + + assert(sizeof(u.v) == sizeof(u.i)); + u.v = key; + hash(&u.i, sizeof(u.i), 0xd983396eU, r_hash); +} + +bool +ckh_pointer_keycomp(const void *k1, const void *k2) +{ + + return ((k1 == k2) ? true : false); +} diff --git a/sources/jemalloc/src/je_ctl.c b/sources/jemalloc/src/je_ctl.c new file mode 100755 index 00000000..56bc4f4c --- /dev/null +++ b/sources/jemalloc/src/je_ctl.c @@ -0,0 +1,2258 @@ +#define JEMALLOC_CTL_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +/******************************************************************************/ +/* Data. */ + +/* + * ctl_mtx protects the following: + * - ctl_stats.* + */ +static malloc_mutex_t ctl_mtx; +static bool ctl_initialized; +static uint64_t ctl_epoch; +static ctl_stats_t ctl_stats; + +/******************************************************************************/ +/* Helpers for named and indexed nodes. */ + +JEMALLOC_INLINE_C const ctl_named_node_t * +ctl_named_node(const ctl_node_t *node) +{ + + return ((node->named) ? (const ctl_named_node_t *)node : NULL); +} + +JEMALLOC_INLINE_C const ctl_named_node_t * +ctl_named_children(const ctl_named_node_t *node, size_t index) +{ + const ctl_named_node_t *children = ctl_named_node(node->children); + + return (children ? &children[index] : NULL); +} + +JEMALLOC_INLINE_C const ctl_indexed_node_t * +ctl_indexed_node(const ctl_node_t *node) +{ + + return (!node->named ? (const ctl_indexed_node_t *)node : NULL); +} + +/******************************************************************************/ +/* Function prototypes for non-inline static functions. 
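+ *
+ * For illustration, CTL_PROTO(version) below expands to the declaration
+ *
+ *   static int version_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ *       void *oldp, size_t *oldlenp, void *newp, size_t newlen);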
*/ + +#define CTL_PROTO(n) \ +static int n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \ + void *oldp, size_t *oldlenp, void *newp, size_t newlen); + +#define INDEX_PROTO(n) \ +static const ctl_named_node_t *n##_index(tsdn_t *tsdn, \ + const size_t *mib, size_t miblen, size_t i); + +static bool ctl_arena_init(ctl_arena_stats_t *astats); +static void ctl_arena_clear(ctl_arena_stats_t *astats); +static void ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_stats_t *cstats, + arena_t *arena); +static void ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, + ctl_arena_stats_t *astats); +static void ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, unsigned i); +static bool ctl_grow(tsdn_t *tsdn); +static void ctl_refresh(tsdn_t *tsdn); +static bool ctl_init(tsdn_t *tsdn); +static int ctl_lookup(tsdn_t *tsdn, const char *name, + ctl_node_t const **nodesp, size_t *mibp, size_t *depthp); + +CTL_PROTO(version) +CTL_PROTO(epoch) +CTL_PROTO(thread_tcache_enabled) +CTL_PROTO(thread_tcache_flush) +CTL_PROTO(thread_prof_name) +CTL_PROTO(thread_prof_active) +CTL_PROTO(thread_arena) +CTL_PROTO(thread_allocated) +CTL_PROTO(thread_allocatedp) +CTL_PROTO(thread_deallocated) +CTL_PROTO(thread_deallocatedp) +CTL_PROTO(config_cache_oblivious) +CTL_PROTO(config_debug) +CTL_PROTO(config_fill) +CTL_PROTO(config_lazy_lock) +CTL_PROTO(config_malloc_conf) +CTL_PROTO(config_munmap) +CTL_PROTO(config_prof) +CTL_PROTO(config_prof_libgcc) +CTL_PROTO(config_prof_libunwind) +CTL_PROTO(config_stats) +CTL_PROTO(config_tcache) +CTL_PROTO(config_thp) +CTL_PROTO(config_tls) +CTL_PROTO(config_utrace) +CTL_PROTO(config_valgrind) +CTL_PROTO(config_xmalloc) +CTL_PROTO(opt_abort) +CTL_PROTO(opt_dss) +CTL_PROTO(opt_lg_chunk) +CTL_PROTO(opt_narenas) +CTL_PROTO(opt_purge) +CTL_PROTO(opt_lg_dirty_mult) +CTL_PROTO(opt_decay_time) +CTL_PROTO(opt_stats_print) +CTL_PROTO(opt_junk) +CTL_PROTO(opt_zero) +CTL_PROTO(opt_quarantine) +CTL_PROTO(opt_redzone) +CTL_PROTO(opt_utrace) +CTL_PROTO(opt_xmalloc) +CTL_PROTO(opt_tcache) +CTL_PROTO(opt_lg_tcache_max) +CTL_PROTO(opt_thp) +CTL_PROTO(opt_prof) +CTL_PROTO(opt_prof_prefix) +CTL_PROTO(opt_prof_active) +CTL_PROTO(opt_prof_thread_active_init) +CTL_PROTO(opt_lg_prof_sample) +CTL_PROTO(opt_lg_prof_interval) +CTL_PROTO(opt_prof_gdump) +CTL_PROTO(opt_prof_final) +CTL_PROTO(opt_prof_leak) +CTL_PROTO(opt_prof_accum) +CTL_PROTO(tcache_create) +CTL_PROTO(tcache_flush) +CTL_PROTO(tcache_destroy) +static void arena_i_purge(tsdn_t *tsdn, unsigned arena_ind, bool all); +CTL_PROTO(arena_i_purge) +CTL_PROTO(arena_i_decay) +CTL_PROTO(arena_i_reset) +CTL_PROTO(arena_i_dss) +CTL_PROTO(arena_i_lg_dirty_mult) +CTL_PROTO(arena_i_decay_time) +CTL_PROTO(arena_i_chunk_hooks) +INDEX_PROTO(arena_i) +CTL_PROTO(arenas_bin_i_size) +CTL_PROTO(arenas_bin_i_nregs) +CTL_PROTO(arenas_bin_i_run_size) +INDEX_PROTO(arenas_bin_i) +CTL_PROTO(arenas_lrun_i_size) +INDEX_PROTO(arenas_lrun_i) +CTL_PROTO(arenas_hchunk_i_size) +INDEX_PROTO(arenas_hchunk_i) +CTL_PROTO(arenas_narenas) +CTL_PROTO(arenas_initialized) +CTL_PROTO(arenas_lg_dirty_mult) +CTL_PROTO(arenas_decay_time) +CTL_PROTO(arenas_quantum) +CTL_PROTO(arenas_page) +CTL_PROTO(arenas_tcache_max) +CTL_PROTO(arenas_nbins) +CTL_PROTO(arenas_nhbins) +CTL_PROTO(arenas_nlruns) +CTL_PROTO(arenas_nhchunks) +CTL_PROTO(arenas_extend) +CTL_PROTO(prof_thread_active_init) +CTL_PROTO(prof_active) +CTL_PROTO(prof_dump) +CTL_PROTO(prof_gdump) +CTL_PROTO(prof_reset) +CTL_PROTO(prof_interval) +CTL_PROTO(lg_prof_sample) +CTL_PROTO(stats_arenas_i_small_allocated) 
+CTL_PROTO(stats_arenas_i_small_nmalloc) +CTL_PROTO(stats_arenas_i_small_ndalloc) +CTL_PROTO(stats_arenas_i_small_nrequests) +CTL_PROTO(stats_arenas_i_large_allocated) +CTL_PROTO(stats_arenas_i_large_nmalloc) +CTL_PROTO(stats_arenas_i_large_ndalloc) +CTL_PROTO(stats_arenas_i_large_nrequests) +CTL_PROTO(stats_arenas_i_huge_allocated) +CTL_PROTO(stats_arenas_i_huge_nmalloc) +CTL_PROTO(stats_arenas_i_huge_ndalloc) +CTL_PROTO(stats_arenas_i_huge_nrequests) +CTL_PROTO(stats_arenas_i_bins_j_nmalloc) +CTL_PROTO(stats_arenas_i_bins_j_ndalloc) +CTL_PROTO(stats_arenas_i_bins_j_nrequests) +CTL_PROTO(stats_arenas_i_bins_j_curregs) +CTL_PROTO(stats_arenas_i_bins_j_nfills) +CTL_PROTO(stats_arenas_i_bins_j_nflushes) +CTL_PROTO(stats_arenas_i_bins_j_nruns) +CTL_PROTO(stats_arenas_i_bins_j_nreruns) +CTL_PROTO(stats_arenas_i_bins_j_curruns) +INDEX_PROTO(stats_arenas_i_bins_j) +CTL_PROTO(stats_arenas_i_lruns_j_nmalloc) +CTL_PROTO(stats_arenas_i_lruns_j_ndalloc) +CTL_PROTO(stats_arenas_i_lruns_j_nrequests) +CTL_PROTO(stats_arenas_i_lruns_j_curruns) +INDEX_PROTO(stats_arenas_i_lruns_j) +CTL_PROTO(stats_arenas_i_hchunks_j_nmalloc) +CTL_PROTO(stats_arenas_i_hchunks_j_ndalloc) +CTL_PROTO(stats_arenas_i_hchunks_j_nrequests) +CTL_PROTO(stats_arenas_i_hchunks_j_curhchunks) +INDEX_PROTO(stats_arenas_i_hchunks_j) +CTL_PROTO(stats_arenas_i_nthreads) +CTL_PROTO(stats_arenas_i_dss) +CTL_PROTO(stats_arenas_i_lg_dirty_mult) +CTL_PROTO(stats_arenas_i_decay_time) +CTL_PROTO(stats_arenas_i_pactive) +CTL_PROTO(stats_arenas_i_pdirty) +CTL_PROTO(stats_arenas_i_mapped) +CTL_PROTO(stats_arenas_i_retained) +CTL_PROTO(stats_arenas_i_npurge) +CTL_PROTO(stats_arenas_i_nmadvise) +CTL_PROTO(stats_arenas_i_purged) +CTL_PROTO(stats_arenas_i_metadata_mapped) +CTL_PROTO(stats_arenas_i_metadata_allocated) +INDEX_PROTO(stats_arenas_i) +CTL_PROTO(stats_cactive) +CTL_PROTO(stats_allocated) +CTL_PROTO(stats_active) +CTL_PROTO(stats_metadata) +CTL_PROTO(stats_resident) +CTL_PROTO(stats_mapped) +CTL_PROTO(stats_retained) + +/******************************************************************************/ +/* mallctl tree. */ + +/* Maximum tree depth. */ +#define CTL_MAX_DEPTH 6 + +#define NAME(n) {true}, n +#define CHILD(t, c) \ + sizeof(c##_node) / sizeof(ctl_##t##_node_t), \ + (ctl_node_t *)c##_node, \ + NULL +#define CTL(c) 0, NULL, c##_ctl + +/* + * Only handles internal indexed nodes, since there are currently no external + * ones. 
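+ *
+ * For illustration, given NAME/CHILD/CTL above and INDEX just below, the
+ * root_node entry {NAME("epoch"), CTL(epoch)} expands to
+ * {{true}, "epoch", 0, NULL, epoch_ctl}, and {INDEX(arena_i)} expands to
+ * {{false}, arena_i_index}.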
+ */ +#define INDEX(i) {false}, i##_index + +static const ctl_named_node_t thread_tcache_node[] = { + {NAME("enabled"), CTL(thread_tcache_enabled)}, + {NAME("flush"), CTL(thread_tcache_flush)} +}; + +static const ctl_named_node_t thread_prof_node[] = { + {NAME("name"), CTL(thread_prof_name)}, + {NAME("active"), CTL(thread_prof_active)} +}; + +static const ctl_named_node_t thread_node[] = { + {NAME("arena"), CTL(thread_arena)}, + {NAME("allocated"), CTL(thread_allocated)}, + {NAME("allocatedp"), CTL(thread_allocatedp)}, + {NAME("deallocated"), CTL(thread_deallocated)}, + {NAME("deallocatedp"), CTL(thread_deallocatedp)}, + {NAME("tcache"), CHILD(named, thread_tcache)}, + {NAME("prof"), CHILD(named, thread_prof)} +}; + +static const ctl_named_node_t config_node[] = { + {NAME("cache_oblivious"), CTL(config_cache_oblivious)}, + {NAME("debug"), CTL(config_debug)}, + {NAME("fill"), CTL(config_fill)}, + {NAME("lazy_lock"), CTL(config_lazy_lock)}, + {NAME("malloc_conf"), CTL(config_malloc_conf)}, + {NAME("munmap"), CTL(config_munmap)}, + {NAME("prof"), CTL(config_prof)}, + {NAME("prof_libgcc"), CTL(config_prof_libgcc)}, + {NAME("prof_libunwind"), CTL(config_prof_libunwind)}, + {NAME("stats"), CTL(config_stats)}, + {NAME("tcache"), CTL(config_tcache)}, + {NAME("thp"), CTL(config_thp)}, + {NAME("tls"), CTL(config_tls)}, + {NAME("utrace"), CTL(config_utrace)}, + {NAME("valgrind"), CTL(config_valgrind)}, + {NAME("xmalloc"), CTL(config_xmalloc)} +}; + +static const ctl_named_node_t opt_node[] = { + {NAME("abort"), CTL(opt_abort)}, + {NAME("dss"), CTL(opt_dss)}, + {NAME("lg_chunk"), CTL(opt_lg_chunk)}, + {NAME("narenas"), CTL(opt_narenas)}, + {NAME("purge"), CTL(opt_purge)}, + {NAME("lg_dirty_mult"), CTL(opt_lg_dirty_mult)}, + {NAME("decay_time"), CTL(opt_decay_time)}, + {NAME("stats_print"), CTL(opt_stats_print)}, + {NAME("junk"), CTL(opt_junk)}, + {NAME("zero"), CTL(opt_zero)}, + {NAME("quarantine"), CTL(opt_quarantine)}, + {NAME("redzone"), CTL(opt_redzone)}, + {NAME("utrace"), CTL(opt_utrace)}, + {NAME("xmalloc"), CTL(opt_xmalloc)}, + {NAME("tcache"), CTL(opt_tcache)}, + {NAME("lg_tcache_max"), CTL(opt_lg_tcache_max)}, + {NAME("thp"), CTL(opt_thp)}, + {NAME("prof"), CTL(opt_prof)}, + {NAME("prof_prefix"), CTL(opt_prof_prefix)}, + {NAME("prof_active"), CTL(opt_prof_active)}, + {NAME("prof_thread_active_init"), CTL(opt_prof_thread_active_init)}, + {NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)}, + {NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)}, + {NAME("prof_gdump"), CTL(opt_prof_gdump)}, + {NAME("prof_final"), CTL(opt_prof_final)}, + {NAME("prof_leak"), CTL(opt_prof_leak)}, + {NAME("prof_accum"), CTL(opt_prof_accum)} +}; + +static const ctl_named_node_t tcache_node[] = { + {NAME("create"), CTL(tcache_create)}, + {NAME("flush"), CTL(tcache_flush)}, + {NAME("destroy"), CTL(tcache_destroy)} +}; + +static const ctl_named_node_t arena_i_node[] = { + {NAME("purge"), CTL(arena_i_purge)}, + {NAME("decay"), CTL(arena_i_decay)}, + {NAME("reset"), CTL(arena_i_reset)}, + {NAME("dss"), CTL(arena_i_dss)}, + {NAME("lg_dirty_mult"), CTL(arena_i_lg_dirty_mult)}, + {NAME("decay_time"), CTL(arena_i_decay_time)}, + {NAME("chunk_hooks"), CTL(arena_i_chunk_hooks)} +}; +static const ctl_named_node_t super_arena_i_node[] = { + {NAME(""), CHILD(named, arena_i)} +}; + +static const ctl_indexed_node_t arena_node[] = { + {INDEX(arena_i)} +}; + +static const ctl_named_node_t arenas_bin_i_node[] = { + {NAME("size"), CTL(arenas_bin_i_size)}, + {NAME("nregs"), CTL(arenas_bin_i_nregs)}, + {NAME("run_size"), 
CTL(arenas_bin_i_run_size)} +}; +static const ctl_named_node_t super_arenas_bin_i_node[] = { + {NAME(""), CHILD(named, arenas_bin_i)} +}; + +static const ctl_indexed_node_t arenas_bin_node[] = { + {INDEX(arenas_bin_i)} +}; + +static const ctl_named_node_t arenas_lrun_i_node[] = { + {NAME("size"), CTL(arenas_lrun_i_size)} +}; +static const ctl_named_node_t super_arenas_lrun_i_node[] = { + {NAME(""), CHILD(named, arenas_lrun_i)} +}; + +static const ctl_indexed_node_t arenas_lrun_node[] = { + {INDEX(arenas_lrun_i)} +}; + +static const ctl_named_node_t arenas_hchunk_i_node[] = { + {NAME("size"), CTL(arenas_hchunk_i_size)} +}; +static const ctl_named_node_t super_arenas_hchunk_i_node[] = { + {NAME(""), CHILD(named, arenas_hchunk_i)} +}; + +static const ctl_indexed_node_t arenas_hchunk_node[] = { + {INDEX(arenas_hchunk_i)} +}; + +static const ctl_named_node_t arenas_node[] = { + {NAME("narenas"), CTL(arenas_narenas)}, + {NAME("initialized"), CTL(arenas_initialized)}, + {NAME("lg_dirty_mult"), CTL(arenas_lg_dirty_mult)}, + {NAME("decay_time"), CTL(arenas_decay_time)}, + {NAME("quantum"), CTL(arenas_quantum)}, + {NAME("page"), CTL(arenas_page)}, + {NAME("tcache_max"), CTL(arenas_tcache_max)}, + {NAME("nbins"), CTL(arenas_nbins)}, + {NAME("nhbins"), CTL(arenas_nhbins)}, + {NAME("bin"), CHILD(indexed, arenas_bin)}, + {NAME("nlruns"), CTL(arenas_nlruns)}, + {NAME("lrun"), CHILD(indexed, arenas_lrun)}, + {NAME("nhchunks"), CTL(arenas_nhchunks)}, + {NAME("hchunk"), CHILD(indexed, arenas_hchunk)}, + {NAME("extend"), CTL(arenas_extend)} +}; + +static const ctl_named_node_t prof_node[] = { + {NAME("thread_active_init"), CTL(prof_thread_active_init)}, + {NAME("active"), CTL(prof_active)}, + {NAME("dump"), CTL(prof_dump)}, + {NAME("gdump"), CTL(prof_gdump)}, + {NAME("reset"), CTL(prof_reset)}, + {NAME("interval"), CTL(prof_interval)}, + {NAME("lg_sample"), CTL(lg_prof_sample)} +}; + +static const ctl_named_node_t stats_arenas_i_metadata_node[] = { + {NAME("mapped"), CTL(stats_arenas_i_metadata_mapped)}, + {NAME("allocated"), CTL(stats_arenas_i_metadata_allocated)} +}; + +static const ctl_named_node_t stats_arenas_i_small_node[] = { + {NAME("allocated"), CTL(stats_arenas_i_small_allocated)}, + {NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)}, + {NAME("ndalloc"), CTL(stats_arenas_i_small_ndalloc)}, + {NAME("nrequests"), CTL(stats_arenas_i_small_nrequests)} +}; + +static const ctl_named_node_t stats_arenas_i_large_node[] = { + {NAME("allocated"), CTL(stats_arenas_i_large_allocated)}, + {NAME("nmalloc"), CTL(stats_arenas_i_large_nmalloc)}, + {NAME("ndalloc"), CTL(stats_arenas_i_large_ndalloc)}, + {NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)} +}; + +static const ctl_named_node_t stats_arenas_i_huge_node[] = { + {NAME("allocated"), CTL(stats_arenas_i_huge_allocated)}, + {NAME("nmalloc"), CTL(stats_arenas_i_huge_nmalloc)}, + {NAME("ndalloc"), CTL(stats_arenas_i_huge_ndalloc)}, + {NAME("nrequests"), CTL(stats_arenas_i_huge_nrequests)} +}; + +static const ctl_named_node_t stats_arenas_i_bins_j_node[] = { + {NAME("nmalloc"), CTL(stats_arenas_i_bins_j_nmalloc)}, + {NAME("ndalloc"), CTL(stats_arenas_i_bins_j_ndalloc)}, + {NAME("nrequests"), CTL(stats_arenas_i_bins_j_nrequests)}, + {NAME("curregs"), CTL(stats_arenas_i_bins_j_curregs)}, + {NAME("nfills"), CTL(stats_arenas_i_bins_j_nfills)}, + {NAME("nflushes"), CTL(stats_arenas_i_bins_j_nflushes)}, + {NAME("nruns"), CTL(stats_arenas_i_bins_j_nruns)}, + {NAME("nreruns"), CTL(stats_arenas_i_bins_j_nreruns)}, + {NAME("curruns"), 
CTL(stats_arenas_i_bins_j_curruns)} +}; +static const ctl_named_node_t super_stats_arenas_i_bins_j_node[] = { + {NAME(""), CHILD(named, stats_arenas_i_bins_j)} +}; + +static const ctl_indexed_node_t stats_arenas_i_bins_node[] = { + {INDEX(stats_arenas_i_bins_j)} +}; + +static const ctl_named_node_t stats_arenas_i_lruns_j_node[] = { + {NAME("nmalloc"), CTL(stats_arenas_i_lruns_j_nmalloc)}, + {NAME("ndalloc"), CTL(stats_arenas_i_lruns_j_ndalloc)}, + {NAME("nrequests"), CTL(stats_arenas_i_lruns_j_nrequests)}, + {NAME("curruns"), CTL(stats_arenas_i_lruns_j_curruns)} +}; +static const ctl_named_node_t super_stats_arenas_i_lruns_j_node[] = { + {NAME(""), CHILD(named, stats_arenas_i_lruns_j)} +}; + +static const ctl_indexed_node_t stats_arenas_i_lruns_node[] = { + {INDEX(stats_arenas_i_lruns_j)} +}; + +static const ctl_named_node_t stats_arenas_i_hchunks_j_node[] = { + {NAME("nmalloc"), CTL(stats_arenas_i_hchunks_j_nmalloc)}, + {NAME("ndalloc"), CTL(stats_arenas_i_hchunks_j_ndalloc)}, + {NAME("nrequests"), CTL(stats_arenas_i_hchunks_j_nrequests)}, + {NAME("curhchunks"), CTL(stats_arenas_i_hchunks_j_curhchunks)} +}; +static const ctl_named_node_t super_stats_arenas_i_hchunks_j_node[] = { + {NAME(""), CHILD(named, stats_arenas_i_hchunks_j)} +}; + +static const ctl_indexed_node_t stats_arenas_i_hchunks_node[] = { + {INDEX(stats_arenas_i_hchunks_j)} +}; + +static const ctl_named_node_t stats_arenas_i_node[] = { + {NAME("nthreads"), CTL(stats_arenas_i_nthreads)}, + {NAME("dss"), CTL(stats_arenas_i_dss)}, + {NAME("lg_dirty_mult"), CTL(stats_arenas_i_lg_dirty_mult)}, + {NAME("decay_time"), CTL(stats_arenas_i_decay_time)}, + {NAME("pactive"), CTL(stats_arenas_i_pactive)}, + {NAME("pdirty"), CTL(stats_arenas_i_pdirty)}, + {NAME("mapped"), CTL(stats_arenas_i_mapped)}, + {NAME("retained"), CTL(stats_arenas_i_retained)}, + {NAME("npurge"), CTL(stats_arenas_i_npurge)}, + {NAME("nmadvise"), CTL(stats_arenas_i_nmadvise)}, + {NAME("purged"), CTL(stats_arenas_i_purged)}, + {NAME("metadata"), CHILD(named, stats_arenas_i_metadata)}, + {NAME("small"), CHILD(named, stats_arenas_i_small)}, + {NAME("large"), CHILD(named, stats_arenas_i_large)}, + {NAME("huge"), CHILD(named, stats_arenas_i_huge)}, + {NAME("bins"), CHILD(indexed, stats_arenas_i_bins)}, + {NAME("lruns"), CHILD(indexed, stats_arenas_i_lruns)}, + {NAME("hchunks"), CHILD(indexed, stats_arenas_i_hchunks)} +}; +static const ctl_named_node_t super_stats_arenas_i_node[] = { + {NAME(""), CHILD(named, stats_arenas_i)} +}; + +static const ctl_indexed_node_t stats_arenas_node[] = { + {INDEX(stats_arenas_i)} +}; + +static const ctl_named_node_t stats_node[] = { + {NAME("cactive"), CTL(stats_cactive)}, + {NAME("allocated"), CTL(stats_allocated)}, + {NAME("active"), CTL(stats_active)}, + {NAME("metadata"), CTL(stats_metadata)}, + {NAME("resident"), CTL(stats_resident)}, + {NAME("mapped"), CTL(stats_mapped)}, + {NAME("retained"), CTL(stats_retained)}, + {NAME("arenas"), CHILD(indexed, stats_arenas)} +}; + +static const ctl_named_node_t root_node[] = { + {NAME("version"), CTL(version)}, + {NAME("epoch"), CTL(epoch)}, + {NAME("thread"), CHILD(named, thread)}, + {NAME("config"), CHILD(named, config)}, + {NAME("opt"), CHILD(named, opt)}, + {NAME("tcache"), CHILD(named, tcache)}, + {NAME("arena"), CHILD(indexed, arena)}, + {NAME("arenas"), CHILD(named, arenas)}, + {NAME("prof"), CHILD(named, prof)}, + {NAME("stats"), CHILD(named, stats)} +}; +static const ctl_named_node_t super_root_node[] = { + {NAME(""), CHILD(named, root)} +}; + +#undef NAME +#undef CHILD +#undef CTL 
+#undef INDEX + +/******************************************************************************/ + +static bool +ctl_arena_init(ctl_arena_stats_t *astats) +{ + + if (astats->lstats == NULL) { + astats->lstats = (malloc_large_stats_t *)a0malloc(nlclasses * + sizeof(malloc_large_stats_t)); + if (astats->lstats == NULL) + return (true); + } + + if (astats->hstats == NULL) { + astats->hstats = (malloc_huge_stats_t *)a0malloc(nhclasses * + sizeof(malloc_huge_stats_t)); + if (astats->hstats == NULL) + return (true); + } + + return (false); +} + +static void +ctl_arena_clear(ctl_arena_stats_t *astats) +{ + + astats->nthreads = 0; + astats->dss = dss_prec_names[dss_prec_limit]; + astats->lg_dirty_mult = -1; + astats->decay_time = -1; + astats->pactive = 0; + astats->pdirty = 0; + if (config_stats) { + memset(&astats->astats, 0, sizeof(arena_stats_t)); + astats->allocated_small = 0; + astats->nmalloc_small = 0; + astats->ndalloc_small = 0; + astats->nrequests_small = 0; + memset(astats->bstats, 0, NBINS * sizeof(malloc_bin_stats_t)); + memset(astats->lstats, 0, nlclasses * + sizeof(malloc_large_stats_t)); + memset(astats->hstats, 0, nhclasses * + sizeof(malloc_huge_stats_t)); + } +} + +static void +ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_stats_t *cstats, arena_t *arena) +{ + unsigned i; + + if (config_stats) { + arena_stats_merge(tsdn, arena, &cstats->nthreads, &cstats->dss, + &cstats->lg_dirty_mult, &cstats->decay_time, + &cstats->pactive, &cstats->pdirty, &cstats->astats, + cstats->bstats, cstats->lstats, cstats->hstats); + + for (i = 0; i < NBINS; i++) { + cstats->allocated_small += cstats->bstats[i].curregs * + index2size(i); + cstats->nmalloc_small += cstats->bstats[i].nmalloc; + cstats->ndalloc_small += cstats->bstats[i].ndalloc; + cstats->nrequests_small += cstats->bstats[i].nrequests; + } + } else { + arena_basic_stats_merge(tsdn, arena, &cstats->nthreads, + &cstats->dss, &cstats->lg_dirty_mult, &cstats->decay_time, + &cstats->pactive, &cstats->pdirty); + } +} + +static void +ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats) +{ + unsigned i; + + sstats->nthreads += astats->nthreads; + sstats->pactive += astats->pactive; + sstats->pdirty += astats->pdirty; + + if (config_stats) { + sstats->astats.mapped += astats->astats.mapped; + sstats->astats.retained += astats->astats.retained; + sstats->astats.npurge += astats->astats.npurge; + sstats->astats.nmadvise += astats->astats.nmadvise; + sstats->astats.purged += astats->astats.purged; + + sstats->astats.metadata_mapped += + astats->astats.metadata_mapped; + sstats->astats.metadata_allocated += + astats->astats.metadata_allocated; + + sstats->allocated_small += astats->allocated_small; + sstats->nmalloc_small += astats->nmalloc_small; + sstats->ndalloc_small += astats->ndalloc_small; + sstats->nrequests_small += astats->nrequests_small; + + sstats->astats.allocated_large += + astats->astats.allocated_large; + sstats->astats.nmalloc_large += astats->astats.nmalloc_large; + sstats->astats.ndalloc_large += astats->astats.ndalloc_large; + sstats->astats.nrequests_large += + astats->astats.nrequests_large; + + sstats->astats.allocated_huge += astats->astats.allocated_huge; + sstats->astats.nmalloc_huge += astats->astats.nmalloc_huge; + sstats->astats.ndalloc_huge += astats->astats.ndalloc_huge; + + for (i = 0; i < NBINS; i++) { + sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc; + sstats->bstats[i].ndalloc += astats->bstats[i].ndalloc; + sstats->bstats[i].nrequests += + astats->bstats[i].nrequests; + 
sstats->bstats[i].curregs += astats->bstats[i].curregs; + if (config_tcache) { + sstats->bstats[i].nfills += + astats->bstats[i].nfills; + sstats->bstats[i].nflushes += + astats->bstats[i].nflushes; + } + sstats->bstats[i].nruns += astats->bstats[i].nruns; + sstats->bstats[i].reruns += astats->bstats[i].reruns; + sstats->bstats[i].curruns += astats->bstats[i].curruns; + } + + for (i = 0; i < nlclasses; i++) { + sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc; + sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc; + sstats->lstats[i].nrequests += + astats->lstats[i].nrequests; + sstats->lstats[i].curruns += astats->lstats[i].curruns; + } + + for (i = 0; i < nhclasses; i++) { + sstats->hstats[i].nmalloc += astats->hstats[i].nmalloc; + sstats->hstats[i].ndalloc += astats->hstats[i].ndalloc; + sstats->hstats[i].curhchunks += + astats->hstats[i].curhchunks; + } + } +} + +static void +ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, unsigned i) +{ + ctl_arena_stats_t *astats = &ctl_stats.arenas[i]; + ctl_arena_stats_t *sstats = &ctl_stats.arenas[ctl_stats.narenas]; + + ctl_arena_clear(astats); + ctl_arena_stats_amerge(tsdn, astats, arena); + /* Merge into sum stats as well. */ + ctl_arena_stats_smerge(sstats, astats); +} + +static bool +ctl_grow(tsdn_t *tsdn) +{ + ctl_arena_stats_t *astats; + + /* Initialize new arena. */ + if (arena_init(tsdn, ctl_stats.narenas) == NULL) + return (true); + + /* Allocate extended arena stats. */ + astats = (ctl_arena_stats_t *)a0malloc((ctl_stats.narenas + 2) * + sizeof(ctl_arena_stats_t)); + if (astats == NULL) + return (true); + + /* Initialize the new astats element. */ + memcpy(astats, ctl_stats.arenas, (ctl_stats.narenas + 1) * + sizeof(ctl_arena_stats_t)); + memset(&astats[ctl_stats.narenas + 1], 0, sizeof(ctl_arena_stats_t)); + if (ctl_arena_init(&astats[ctl_stats.narenas + 1])) { + a0dalloc(astats); + return (true); + } + /* Swap merged stats to their new location. */ + { + ctl_arena_stats_t tstats; + memcpy(&tstats, &astats[ctl_stats.narenas], + sizeof(ctl_arena_stats_t)); + memcpy(&astats[ctl_stats.narenas], + &astats[ctl_stats.narenas + 1], sizeof(ctl_arena_stats_t)); + memcpy(&astats[ctl_stats.narenas + 1], &tstats, + sizeof(ctl_arena_stats_t)); + } + a0dalloc(ctl_stats.arenas); + ctl_stats.arenas = astats; + ctl_stats.narenas++; + + return (false); +} + +static void +ctl_refresh(tsdn_t *tsdn) +{ + unsigned i; + VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas); + + /* + * Clear sum stats, since they will be merged into by + * ctl_arena_refresh(). 
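+ * (ctl_stats.arenas holds narenas + 1 elements; the element at index
+ * narenas carries the summed totals that ctl_refresh() reads back below.)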
+ */ + ctl_arena_clear(&ctl_stats.arenas[ctl_stats.narenas]); + + for (i = 0; i < ctl_stats.narenas; i++) + tarenas[i] = arena_get(tsdn, i, false); + + for (i = 0; i < ctl_stats.narenas; i++) { + bool initialized = (tarenas[i] != NULL); + + ctl_stats.arenas[i].initialized = initialized; + if (initialized) + ctl_arena_refresh(tsdn, tarenas[i], i); + } + + if (config_stats) { + size_t base_allocated, base_resident, base_mapped; + base_stats_get(tsdn, &base_allocated, &base_resident, + &base_mapped); + ctl_stats.allocated = + ctl_stats.arenas[ctl_stats.narenas].allocated_small + + ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large + + ctl_stats.arenas[ctl_stats.narenas].astats.allocated_huge; + ctl_stats.active = + (ctl_stats.arenas[ctl_stats.narenas].pactive << LG_PAGE); + ctl_stats.metadata = base_allocated + + ctl_stats.arenas[ctl_stats.narenas].astats.metadata_mapped + + ctl_stats.arenas[ctl_stats.narenas].astats + .metadata_allocated; + ctl_stats.resident = base_resident + + ctl_stats.arenas[ctl_stats.narenas].astats.metadata_mapped + + ((ctl_stats.arenas[ctl_stats.narenas].pactive + + ctl_stats.arenas[ctl_stats.narenas].pdirty) << LG_PAGE); + ctl_stats.mapped = base_mapped + + ctl_stats.arenas[ctl_stats.narenas].astats.mapped; + ctl_stats.retained = + ctl_stats.arenas[ctl_stats.narenas].astats.retained; + } + + ctl_epoch++; +} + +static bool +ctl_init(tsdn_t *tsdn) +{ + bool ret; + + malloc_mutex_lock(tsdn, &ctl_mtx); + if (!ctl_initialized) { + /* + * Allocate space for one extra arena stats element, which + * contains summed stats across all arenas. + */ + ctl_stats.narenas = narenas_total_get(); + ctl_stats.arenas = (ctl_arena_stats_t *)a0malloc( + (ctl_stats.narenas + 1) * sizeof(ctl_arena_stats_t)); + if (ctl_stats.arenas == NULL) { + ret = true; + goto label_return; + } + memset(ctl_stats.arenas, 0, (ctl_stats.narenas + 1) * + sizeof(ctl_arena_stats_t)); + + /* + * Initialize all stats structures, regardless of whether they + * ever get used. Lazy initialization would allow errors to + * cause inconsistent state to be viewable by the application. + */ + if (config_stats) { + unsigned i; + for (i = 0; i <= ctl_stats.narenas; i++) { + if (ctl_arena_init(&ctl_stats.arenas[i])) { + unsigned j; + for (j = 0; j < i; j++) { + a0dalloc( + ctl_stats.arenas[j].lstats); + a0dalloc( + ctl_stats.arenas[j].hstats); + } + a0dalloc(ctl_stats.arenas); + ctl_stats.arenas = NULL; + ret = true; + goto label_return; + } + } + } + ctl_stats.arenas[ctl_stats.narenas].initialized = true; + + ctl_epoch = 0; + ctl_refresh(tsdn); + ctl_initialized = true; + } + + ret = false; +label_return: + malloc_mutex_unlock(tsdn, &ctl_mtx); + return (ret); +} + +static int +ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp, + size_t *mibp, size_t *depthp) +{ + int ret; + const char *elm, *tdot, *dot; + size_t elen, i, j; + const ctl_named_node_t *node; + + elm = name; + /* Equivalent to strchrnul(). */ + dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : strchr(elm, '\0'); + elen = (size_t)((uintptr_t)dot - (uintptr_t)elm); + if (elen == 0) { + ret = ENOENT; + goto label_return; + } + node = super_root_node; + for (i = 0; i < *depthp; i++) { + assert(node); + assert(node->nchildren > 0); + if (ctl_named_node(node->children) != NULL) { + const ctl_named_node_t *pnode = node; + + /* Children are named. 
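+ * (Worked example: "arenas.nbins" matches "arenas" at index 7 of the
+ * root node and "nbins" at index 7 of arenas_node, so the resulting MIB
+ * is {7, 7}.)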
*/ + for (j = 0; j < node->nchildren; j++) { + const ctl_named_node_t *child = + ctl_named_children(node, j); + if (strlen(child->name) == elen && + strncmp(elm, child->name, elen) == 0) { + node = child; + if (nodesp != NULL) + nodesp[i] = + (const ctl_node_t *)node; + mibp[i] = j; + break; + } + } + if (node == pnode) { + ret = ENOENT; + goto label_return; + } + } else { + uintmax_t index; + const ctl_indexed_node_t *inode; + + /* Children are indexed. */ + index = malloc_strtoumax(elm, NULL, 10); + if (index == UINTMAX_MAX || index > SIZE_T_MAX) { + ret = ENOENT; + goto label_return; + } + + inode = ctl_indexed_node(node->children); + node = inode->index(tsdn, mibp, *depthp, (size_t)index); + if (node == NULL) { + ret = ENOENT; + goto label_return; + } + + if (nodesp != NULL) + nodesp[i] = (const ctl_node_t *)node; + mibp[i] = (size_t)index; + } + + if (node->ctl != NULL) { + /* Terminal node. */ + if (*dot != '\0') { + /* + * The name contains more elements than are + * in this path through the tree. + */ + ret = ENOENT; + goto label_return; + } + /* Complete lookup successful. */ + *depthp = i + 1; + break; + } + + /* Update elm. */ + if (*dot == '\0') { + /* No more elements. */ + ret = ENOENT; + goto label_return; + } + elm = &dot[1]; + dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : + strchr(elm, '\0'); + elen = (size_t)((uintptr_t)dot - (uintptr_t)elm); + } + + ret = 0; +label_return: + return (ret); +} + +int +ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp, + void *newp, size_t newlen) +{ + int ret; + size_t depth; + ctl_node_t const *nodes[CTL_MAX_DEPTH]; + size_t mib[CTL_MAX_DEPTH]; + const ctl_named_node_t *node; + + if (!ctl_initialized && ctl_init(tsd_tsdn(tsd))) { + ret = EAGAIN; + goto label_return; + } + + depth = CTL_MAX_DEPTH; + ret = ctl_lookup(tsd_tsdn(tsd), name, nodes, mib, &depth); + if (ret != 0) + goto label_return; + + node = ctl_named_node(nodes[depth-1]); + if (node != NULL && node->ctl) + ret = node->ctl(tsd, mib, depth, oldp, oldlenp, newp, newlen); + else { + /* The name refers to a partial path through the ctl tree. */ + ret = ENOENT; + } + +label_return: + return(ret); +} + +int +ctl_nametomib(tsdn_t *tsdn, const char *name, size_t *mibp, size_t *miblenp) +{ + int ret; + + if (!ctl_initialized && ctl_init(tsdn)) { + ret = EAGAIN; + goto label_return; + } + + ret = ctl_lookup(tsdn, name, NULL, mibp, miblenp); +label_return: + return(ret); +} + +int +ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) +{ + int ret; + const ctl_named_node_t *node; + size_t i; + + if (!ctl_initialized && ctl_init(tsd_tsdn(tsd))) { + ret = EAGAIN; + goto label_return; + } + + /* Iterate down the tree. */ + node = super_root_node; + for (i = 0; i < miblen; i++) { + assert(node); + assert(node->nchildren > 0); + if (ctl_named_node(node->children) != NULL) { + /* Children are named. */ + if (node->nchildren <= (unsigned)mib[i]) { + ret = ENOENT; + goto label_return; + } + node = ctl_named_children(node, mib[i]); + } else { + const ctl_indexed_node_t *inode; + + /* Indexed element. */ + inode = ctl_indexed_node(node->children); + node = inode->index(tsd_tsdn(tsd), mib, miblen, mib[i]); + if (node == NULL) { + ret = ENOENT; + goto label_return; + } + } + } + + /* Call the ctl function. */ + if (node && node->ctl) + ret = node->ctl(tsd, mib, miblen, oldp, oldlenp, newp, newlen); + else { + /* Partial MIB. 
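+ * (For example, a one-element MIB naming "stats" reaches an interior
+ * node that has children but no handler, so the lookup is rejected.)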
*/ + ret = ENOENT; + } + +label_return: + return(ret); +} + +bool +ctl_boot(void) +{ + + if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL)) + return (true); + + ctl_initialized = false; + + return (false); +} + +void +ctl_prefork(tsdn_t *tsdn) +{ + + malloc_mutex_prefork(tsdn, &ctl_mtx); +} + +void +ctl_postfork_parent(tsdn_t *tsdn) +{ + + malloc_mutex_postfork_parent(tsdn, &ctl_mtx); +} + +void +ctl_postfork_child(tsdn_t *tsdn) +{ + + malloc_mutex_postfork_child(tsdn, &ctl_mtx); +} + +/******************************************************************************/ +/* *_ctl() functions. */ + +#define READONLY() do { \ + if (newp != NULL || newlen != 0) { \ + ret = EPERM; \ + goto label_return; \ + } \ +} while (0) + +#define WRITEONLY() do { \ + if (oldp != NULL || oldlenp != NULL) { \ + ret = EPERM; \ + goto label_return; \ + } \ +} while (0) + +#define READ_XOR_WRITE() do { \ + if ((oldp != NULL && oldlenp != NULL) && (newp != NULL || \ + newlen != 0)) { \ + ret = EPERM; \ + goto label_return; \ + } \ +} while (0) + +#define READ(v, t) do { \ + if (oldp != NULL && oldlenp != NULL) { \ + if (*oldlenp != sizeof(t)) { \ + size_t copylen = (sizeof(t) <= *oldlenp) \ + ? sizeof(t) : *oldlenp; \ + memcpy(oldp, (void *)&(v), copylen); \ + ret = EINVAL; \ + goto label_return; \ + } \ + *(t *)oldp = (v); \ + } \ +} while (0) + +#define WRITE(v, t) do { \ + if (newp != NULL) { \ + if (newlen != sizeof(t)) { \ + ret = EINVAL; \ + goto label_return; \ + } \ + (v) = *(t *)newp; \ + } \ +} while (0) + +/* + * There's a lot of code duplication in the following macros due to limitations + * in how nested cpp macros are expanded. + */ +#define CTL_RO_CLGEN(c, l, n, v, t) \ +static int \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ + size_t *oldlenp, void *newp, size_t newlen) \ +{ \ + int ret; \ + t oldval; \ + \ + if (!(c)) \ + return (ENOENT); \ + if (l) \ + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \ + READONLY(); \ + oldval = (v); \ + READ(oldval, t); \ + \ + ret = 0; \ +label_return: \ + if (l) \ + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \ + return (ret); \ +} + +#define CTL_RO_CGEN(c, n, v, t) \ +static int \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ + size_t *oldlenp, void *newp, size_t newlen) \ +{ \ + int ret; \ + t oldval; \ + \ + if (!(c)) \ + return (ENOENT); \ + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \ + READONLY(); \ + oldval = (v); \ + READ(oldval, t); \ + \ + ret = 0; \ +label_return: \ + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \ + return (ret); \ +} + +#define CTL_RO_GEN(n, v, t) \ +static int \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ + size_t *oldlenp, void *newp, size_t newlen) \ +{ \ + int ret; \ + t oldval; \ + \ + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \ + READONLY(); \ + oldval = (v); \ + READ(oldval, t); \ + \ + ret = 0; \ +label_return: \ + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \ + return (ret); \ +} + +/* + * ctl_mtx is not acquired, under the assumption that no pertinent data will + * mutate during the call. 
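+ *
+ * For illustration, CTL_RO_NL_GEN(opt_abort, opt_abort, bool) further
+ * down expands to an opt_abort_ctl() that rejects writes via READONLY()
+ * and copies the current value of opt_abort out through READ().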
+ */ +#define CTL_RO_NL_CGEN(c, n, v, t) \ +static int \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ + size_t *oldlenp, void *newp, size_t newlen) \ +{ \ + int ret; \ + t oldval; \ + \ + if (!(c)) \ + return (ENOENT); \ + READONLY(); \ + oldval = (v); \ + READ(oldval, t); \ + \ + ret = 0; \ +label_return: \ + return (ret); \ +} + +#define CTL_RO_NL_GEN(n, v, t) \ +static int \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ + size_t *oldlenp, void *newp, size_t newlen) \ +{ \ + int ret; \ + t oldval; \ + \ + READONLY(); \ + oldval = (v); \ + READ(oldval, t); \ + \ + ret = 0; \ +label_return: \ + return (ret); \ +} + +#define CTL_TSD_RO_NL_CGEN(c, n, m, t) \ +static int \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ + size_t *oldlenp, void *newp, size_t newlen) \ +{ \ + int ret; \ + t oldval; \ + \ + if (!(c)) \ + return (ENOENT); \ + READONLY(); \ + oldval = (m(tsd)); \ + READ(oldval, t); \ + \ + ret = 0; \ +label_return: \ + return (ret); \ +} + +#define CTL_RO_CONFIG_GEN(n, t) \ +static int \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ + size_t *oldlenp, void *newp, size_t newlen) \ +{ \ + int ret; \ + t oldval; \ + \ + READONLY(); \ + oldval = n; \ + READ(oldval, t); \ + \ + ret = 0; \ +label_return: \ + return (ret); \ +} + +/******************************************************************************/ + +CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *) + +static int +epoch_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) +{ + int ret; + UNUSED uint64_t newval; + + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); + WRITE(newval, uint64_t); + if (newp != NULL) + ctl_refresh(tsd_tsdn(tsd)); + READ(ctl_epoch, uint64_t); + + ret = 0; +label_return: + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); + return (ret); +} + +/******************************************************************************/ + +CTL_RO_CONFIG_GEN(config_cache_oblivious, bool) +CTL_RO_CONFIG_GEN(config_debug, bool) +CTL_RO_CONFIG_GEN(config_fill, bool) +CTL_RO_CONFIG_GEN(config_lazy_lock, bool) +CTL_RO_CONFIG_GEN(config_malloc_conf, const char *) +CTL_RO_CONFIG_GEN(config_munmap, bool) +CTL_RO_CONFIG_GEN(config_prof, bool) +CTL_RO_CONFIG_GEN(config_prof_libgcc, bool) +CTL_RO_CONFIG_GEN(config_prof_libunwind, bool) +CTL_RO_CONFIG_GEN(config_stats, bool) +CTL_RO_CONFIG_GEN(config_tcache, bool) +CTL_RO_CONFIG_GEN(config_thp, bool) +CTL_RO_CONFIG_GEN(config_tls, bool) +CTL_RO_CONFIG_GEN(config_utrace, bool) +CTL_RO_CONFIG_GEN(config_valgrind, bool) +CTL_RO_CONFIG_GEN(config_xmalloc, bool) + +/******************************************************************************/ + +CTL_RO_NL_GEN(opt_abort, opt_abort, bool) +CTL_RO_NL_GEN(opt_dss, opt_dss, const char *) +CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t) +CTL_RO_NL_GEN(opt_narenas, opt_narenas, unsigned) +CTL_RO_NL_GEN(opt_purge, purge_mode_names[opt_purge], const char *) +CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t) +CTL_RO_NL_GEN(opt_decay_time, opt_decay_time, ssize_t) +CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool) +CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *) +CTL_RO_NL_CGEN(config_fill, opt_quarantine, opt_quarantine, size_t) +CTL_RO_NL_CGEN(config_fill, opt_redzone, opt_redzone, bool) +CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool) +CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool) +CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool) 
+CTL_RO_NL_CGEN(config_tcache, opt_tcache, opt_tcache, bool) +CTL_RO_NL_CGEN(config_tcache, opt_lg_tcache_max, opt_lg_tcache_max, ssize_t) +CTL_RO_NL_CGEN(config_thp, opt_thp, opt_thp, bool) +CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool) +CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *) +CTL_RO_NL_CGEN(config_prof, opt_prof_active, opt_prof_active, bool) +CTL_RO_NL_CGEN(config_prof, opt_prof_thread_active_init, + opt_prof_thread_active_init, bool) +CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t) +CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool) +CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t) +CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool) +CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool) +CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool) + +/******************************************************************************/ + +static int +thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) +{ + int ret; + arena_t *oldarena; + unsigned newind, oldind; + + oldarena = arena_choose(tsd, NULL); + if (oldarena == NULL) + return (EAGAIN); + + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); + newind = oldind = oldarena->ind; + WRITE(newind, unsigned); + READ(oldind, unsigned); + if (newind != oldind) { + arena_t *newarena; + + if (newind >= ctl_stats.narenas) { + /* New arena index is out of range. */ + ret = EFAULT; + goto label_return; + } + + /* Initialize arena if necessary. */ + newarena = arena_get(tsd_tsdn(tsd), newind, true); + if (newarena == NULL) { + ret = EAGAIN; + goto label_return; + } + /* Set new arena/tcache associations. 
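+		 * The thread's binding moves from oldind to newind; if a
+		 * thread-specific tcache exists, it is reassociated so that
+		 * subsequent fills and flushes go through the new arena.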
*/ + arena_migrate(tsd, oldind, newind); + if (config_tcache) { + tcache_t *tcache = tsd_tcache_get(tsd); + if (tcache != NULL) { + tcache_arena_reassociate(tsd_tsdn(tsd), tcache, + oldarena, newarena); + } + } + } + + ret = 0; +label_return: + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); + return (ret); +} + +CTL_TSD_RO_NL_CGEN(config_stats, thread_allocated, tsd_thread_allocated_get, + uint64_t) +CTL_TSD_RO_NL_CGEN(config_stats, thread_allocatedp, tsd_thread_allocatedp_get, + uint64_t *) +CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocated, tsd_thread_deallocated_get, + uint64_t) +CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp, + tsd_thread_deallocatedp_get, uint64_t *) + +static int +thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) +{ + int ret; + bool oldval; + + if (!config_tcache) + return (ENOENT); + + oldval = tcache_enabled_get(); + if (newp != NULL) { + if (newlen != sizeof(bool)) { + ret = EINVAL; + goto label_return; + } + tcache_enabled_set(*(bool *)newp); + } + READ(oldval, bool); + + ret = 0; +label_return: + return (ret); +} + +static int +thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) +{ + int ret; + + if (!config_tcache) + return (ENOENT); + + READONLY(); + WRITEONLY(); + + tcache_flush(); + + ret = 0; +label_return: + return (ret); +} + +static int +thread_prof_name_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) +{ + int ret; + + if (!config_prof) + return (ENOENT); + + READ_XOR_WRITE(); + + if (newp != NULL) { + if (newlen != sizeof(const char *)) { + ret = EINVAL; + goto label_return; + } + + if ((ret = prof_thread_name_set(tsd, *(const char **)newp)) != + 0) + goto label_return; + } else { + const char *oldname = prof_thread_name_get(tsd); + READ(oldname, const char *); + } + + ret = 0; +label_return: + return (ret); +} + +static int +thread_prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) +{ + int ret; + bool oldval; + + if (!config_prof) + return (ENOENT); + + oldval = prof_thread_active_get(tsd); + if (newp != NULL) { + if (newlen != sizeof(bool)) { + ret = EINVAL; + goto label_return; + } + if (prof_thread_active_set(tsd, *(bool *)newp)) { + ret = EAGAIN; + goto label_return; + } + } + READ(oldval, bool); + + ret = 0; +label_return: + return (ret); +} + +/******************************************************************************/ + +static int +tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) +{ + int ret; + unsigned tcache_ind; + + if (!config_tcache) + return (ENOENT); + + READONLY(); + if (tcaches_create(tsd, &tcache_ind)) { + ret = EFAULT; + goto label_return; + } + READ(tcache_ind, unsigned); + + ret = 0; +label_return: + return ret; +} + +static int +tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) +{ + int ret; + unsigned tcache_ind; + + if (!config_tcache) + return (ENOENT); + + WRITEONLY(); + tcache_ind = UINT_MAX; + WRITE(tcache_ind, unsigned); + if (tcache_ind == UINT_MAX) { + ret = EFAULT; + goto label_return; + } + tcaches_flush(tsd, tcache_ind); + + ret = 0; +label_return: + return (ret); +} + +static int +tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void 
*newp, size_t newlen) +{ + int ret; + unsigned tcache_ind; + + if (!config_tcache) + return (ENOENT); + + WRITEONLY(); + tcache_ind = UINT_MAX; + WRITE(tcache_ind, unsigned); + if (tcache_ind == UINT_MAX) { + ret = EFAULT; + goto label_return; + } + tcaches_destroy(tsd, tcache_ind); + + ret = 0; +label_return: + return (ret); +} + +/******************************************************************************/ + +static void +arena_i_purge(tsdn_t *tsdn, unsigned arena_ind, bool all) +{ + + malloc_mutex_lock(tsdn, &ctl_mtx); + { + unsigned narenas = ctl_stats.narenas; + + if (arena_ind == narenas) { + unsigned i; + VARIABLE_ARRAY(arena_t *, tarenas, narenas); + + for (i = 0; i < narenas; i++) + tarenas[i] = arena_get(tsdn, i, false); + + /* + * No further need to hold ctl_mtx, since narenas and + * tarenas contain everything needed below. + */ + malloc_mutex_unlock(tsdn, &ctl_mtx); + + for (i = 0; i < narenas; i++) { + if (tarenas[i] != NULL) + arena_purge(tsdn, tarenas[i], all); + } + } else { + arena_t *tarena; + + assert(arena_ind < narenas); + + tarena = arena_get(tsdn, arena_ind, false); + + /* No further need to hold ctl_mtx. */ + malloc_mutex_unlock(tsdn, &ctl_mtx); + + if (tarena != NULL) + arena_purge(tsdn, tarena, all); + } + } +} + +static int +arena_i_purge_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) +{ + int ret; + + READONLY(); + WRITEONLY(); + arena_i_purge(tsd_tsdn(tsd), (unsigned)mib[1], true); + + ret = 0; +label_return: + return (ret); +} + +static int +arena_i_decay_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) +{ + int ret; + + READONLY(); + WRITEONLY(); + arena_i_purge(tsd_tsdn(tsd), (unsigned)mib[1], false); + + ret = 0; +label_return: + return (ret); +} + +static int +arena_i_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) +{ + int ret; + unsigned arena_ind; + arena_t *arena; + + READONLY(); + WRITEONLY(); + + if ((config_valgrind && unlikely(in_valgrind)) || (config_fill && + unlikely(opt_quarantine))) { + ret = EFAULT; + goto label_return; + } + + arena_ind = (unsigned)mib[1]; + if (config_debug) { + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); + assert(arena_ind < ctl_stats.narenas); + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); + } + assert(arena_ind >= opt_narenas); + + arena = arena_get(tsd_tsdn(tsd), arena_ind, false); + + arena_reset(tsd, arena); + + ret = 0; +label_return: + return (ret); +} + +static int +arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) +{ + int ret; + const char *dss = NULL; + unsigned arena_ind = (unsigned)mib[1]; + dss_prec_t dss_prec_old = dss_prec_limit; + dss_prec_t dss_prec = dss_prec_limit; + + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); + WRITE(dss, const char *); + if (dss != NULL) { + int i; + bool match = false; + + for (i = 0; i < dss_prec_limit; i++) { + if (strcmp(dss_prec_names[i], dss) == 0) { + dss_prec = i; + match = true; + break; + } + } + + if (!match) { + ret = EINVAL; + goto label_return; + } + } + + if (arena_ind < ctl_stats.narenas) { + arena_t *arena = arena_get(tsd_tsdn(tsd), arena_ind, false); + if (arena == NULL || (dss_prec != dss_prec_limit && + arena_dss_prec_set(tsd_tsdn(tsd), arena, dss_prec))) { + ret = EFAULT; + goto label_return; + } + dss_prec_old = arena_dss_prec_get(tsd_tsdn(tsd), arena); + } else { + if (dss_prec != dss_prec_limit && + 
chunk_dss_prec_set(dss_prec)) { + ret = EFAULT; + goto label_return; + } + dss_prec_old = chunk_dss_prec_get(); + } + + dss = dss_prec_names[dss_prec_old]; + READ(dss, const char *); + + ret = 0; +label_return: + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); + return (ret); +} + +static int +arena_i_lg_dirty_mult_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) +{ + int ret; + unsigned arena_ind = (unsigned)mib[1]; + arena_t *arena; + + arena = arena_get(tsd_tsdn(tsd), arena_ind, false); + if (arena == NULL) { + ret = EFAULT; + goto label_return; + } + + if (oldp != NULL && oldlenp != NULL) { + size_t oldval = arena_lg_dirty_mult_get(tsd_tsdn(tsd), arena); + READ(oldval, ssize_t); + } + if (newp != NULL) { + if (newlen != sizeof(ssize_t)) { + ret = EINVAL; + goto label_return; + } + if (arena_lg_dirty_mult_set(tsd_tsdn(tsd), arena, + *(ssize_t *)newp)) { + ret = EFAULT; + goto label_return; + } + } + + ret = 0; +label_return: + return (ret); +} + +static int +arena_i_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) +{ + int ret; + unsigned arena_ind = (unsigned)mib[1]; + arena_t *arena; + + arena = arena_get(tsd_tsdn(tsd), arena_ind, false); + if (arena == NULL) { + ret = EFAULT; + goto label_return; + } + + if (oldp != NULL && oldlenp != NULL) { + size_t oldval = arena_decay_time_get(tsd_tsdn(tsd), arena); + READ(oldval, ssize_t); + } + if (newp != NULL) { + if (newlen != sizeof(ssize_t)) { + ret = EINVAL; + goto label_return; + } + if (arena_decay_time_set(tsd_tsdn(tsd), arena, + *(ssize_t *)newp)) { + ret = EFAULT; + goto label_return; + } + } + + ret = 0; +label_return: + return (ret); +} + +static int +arena_i_chunk_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) +{ + int ret; + unsigned arena_ind = (unsigned)mib[1]; + arena_t *arena; + + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); + if (arena_ind < narenas_total_get() && (arena = + arena_get(tsd_tsdn(tsd), arena_ind, false)) != NULL) { + if (newp != NULL) { + chunk_hooks_t old_chunk_hooks, new_chunk_hooks; + WRITE(new_chunk_hooks, chunk_hooks_t); + old_chunk_hooks = chunk_hooks_set(tsd_tsdn(tsd), arena, + &new_chunk_hooks); + READ(old_chunk_hooks, chunk_hooks_t); + } else { + chunk_hooks_t old_chunk_hooks = + chunk_hooks_get(tsd_tsdn(tsd), arena); + READ(old_chunk_hooks, chunk_hooks_t); + } + } else { + ret = EFAULT; + goto label_return; + } + ret = 0; +label_return: + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); + return (ret); +} + +static const ctl_named_node_t * +arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) +{ + const ctl_named_node_t *ret; + + malloc_mutex_lock(tsdn, &ctl_mtx); + if (i > ctl_stats.narenas) { + ret = NULL; + goto label_return; + } + + ret = super_arena_i_node; +label_return: + malloc_mutex_unlock(tsdn, &ctl_mtx); + return (ret); +} + +/******************************************************************************/ + +static int +arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) +{ + int ret; + unsigned narenas; + + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); + READONLY(); + if (*oldlenp != sizeof(unsigned)) { + ret = EINVAL; + goto label_return; + } + narenas = ctl_stats.narenas; + READ(narenas, unsigned); + + ret = 0; +label_return: + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); + return (ret); +} + +static int 
+arenas_initialized_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) +{ + int ret; + unsigned nread, i; + + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); + READONLY(); + if (*oldlenp != ctl_stats.narenas * sizeof(bool)) { + ret = EINVAL; + nread = (*oldlenp < ctl_stats.narenas * sizeof(bool)) + ? (unsigned)(*oldlenp / sizeof(bool)) : ctl_stats.narenas; + } else { + ret = 0; + nread = ctl_stats.narenas; + } + + for (i = 0; i < nread; i++) + ((bool *)oldp)[i] = ctl_stats.arenas[i].initialized; + +label_return: + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); + return (ret); +} + +static int +arenas_lg_dirty_mult_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) +{ + int ret; + + if (oldp != NULL && oldlenp != NULL) { + size_t oldval = arena_lg_dirty_mult_default_get(); + READ(oldval, ssize_t); + } + if (newp != NULL) { + if (newlen != sizeof(ssize_t)) { + ret = EINVAL; + goto label_return; + } + if (arena_lg_dirty_mult_default_set(*(ssize_t *)newp)) { + ret = EFAULT; + goto label_return; + } + } + + ret = 0; +label_return: + return (ret); +} + +static int +arenas_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) +{ + int ret; + + if (oldp != NULL && oldlenp != NULL) { + size_t oldval = arena_decay_time_default_get(); + READ(oldval, ssize_t); + } + if (newp != NULL) { + if (newlen != sizeof(ssize_t)) { + ret = EINVAL; + goto label_return; + } + if (arena_decay_time_default_set(*(ssize_t *)newp)) { + ret = EFAULT; + goto label_return; + } + } + + ret = 0; +label_return: + return (ret); +} + +CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t) +CTL_RO_NL_GEN(arenas_page, PAGE, size_t) +CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t) +CTL_RO_NL_GEN(arenas_nbins, NBINS, unsigned) +CTL_RO_NL_CGEN(config_tcache, arenas_nhbins, nhbins, unsigned) +CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t) +CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t) +CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t) +static const ctl_named_node_t * +arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) +{ + + if (i > NBINS) + return (NULL); + return (super_arenas_bin_i_node); +} + +CTL_RO_NL_GEN(arenas_nlruns, nlclasses, unsigned) +CTL_RO_NL_GEN(arenas_lrun_i_size, index2size(NBINS+(szind_t)mib[2]), size_t) +static const ctl_named_node_t * +arenas_lrun_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) +{ + + if (i > nlclasses) + return (NULL); + return (super_arenas_lrun_i_node); +} + +CTL_RO_NL_GEN(arenas_nhchunks, nhclasses, unsigned) +CTL_RO_NL_GEN(arenas_hchunk_i_size, index2size(NBINS+nlclasses+(szind_t)mib[2]), + size_t) +static const ctl_named_node_t * +arenas_hchunk_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) +{ + + if (i > nhclasses) + return (NULL); + return (super_arenas_hchunk_i_node); +} + +static int +arenas_extend_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) +{ + int ret; + unsigned narenas; + + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); + READONLY(); + if (ctl_grow(tsd_tsdn(tsd))) { + ret = EAGAIN; + goto label_return; + } + narenas = ctl_stats.narenas - 1; + READ(narenas, unsigned); + + ret = 0; +label_return: + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); + return (ret); +} + 
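+/*
+ * The arenas.* handlers above back the public mallctl() interface
+ * (je_mallctl() in prefixed builds).  A minimal caller-side sketch:
+ *
+ *	#include <jemalloc/jemalloc.h>
+ *
+ *	unsigned narenas, ind;
+ *	size_t sz = sizeof(unsigned);
+ *	mallctl("arenas.narenas", &narenas, &sz, NULL, 0);
+ *	mallctl("arenas.extend", &ind, &sz, NULL, 0);
+ *
+ * The first call reads the arena count; reading arenas.extend creates a
+ * new arena (via ctl_grow() above) and returns its index.
+ */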
+/******************************************************************************/ + +static int +prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) +{ + int ret; + bool oldval; + + if (!config_prof) + return (ENOENT); + + if (newp != NULL) { + if (newlen != sizeof(bool)) { + ret = EINVAL; + goto label_return; + } + oldval = prof_thread_active_init_set(tsd_tsdn(tsd), + *(bool *)newp); + } else + oldval = prof_thread_active_init_get(tsd_tsdn(tsd)); + READ(oldval, bool); + + ret = 0; +label_return: + return (ret); +} + +static int +prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) +{ + int ret; + bool oldval; + + if (!config_prof) + return (ENOENT); + + if (newp != NULL) { + if (newlen != sizeof(bool)) { + ret = EINVAL; + goto label_return; + } + oldval = prof_active_set(tsd_tsdn(tsd), *(bool *)newp); + } else + oldval = prof_active_get(tsd_tsdn(tsd)); + READ(oldval, bool); + + ret = 0; +label_return: + return (ret); +} + +static int +prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) +{ + int ret; + const char *filename = NULL; + + if (!config_prof) + return (ENOENT); + + WRITEONLY(); + WRITE(filename, const char *); + + if (prof_mdump(tsd, filename)) { + ret = EFAULT; + goto label_return; + } + + ret = 0; +label_return: + return (ret); +} + +static int +prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) +{ + int ret; + bool oldval; + + if (!config_prof) + return (ENOENT); + + if (newp != NULL) { + if (newlen != sizeof(bool)) { + ret = EINVAL; + goto label_return; + } + oldval = prof_gdump_set(tsd_tsdn(tsd), *(bool *)newp); + } else + oldval = prof_gdump_get(tsd_tsdn(tsd)); + READ(oldval, bool); + + ret = 0; +label_return: + return (ret); +} + +static int +prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) +{ + int ret; + size_t lg_sample = lg_prof_sample; + + if (!config_prof) + return (ENOENT); + + WRITEONLY(); + WRITE(lg_sample, size_t); + if (lg_sample >= (sizeof(uint64_t) << 3)) + lg_sample = (sizeof(uint64_t) << 3) - 1; + + prof_reset(tsd, lg_sample); + + ret = 0; +label_return: + return (ret); +} + +CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t) +CTL_RO_NL_CGEN(config_prof, lg_prof_sample, lg_prof_sample, size_t) + +/******************************************************************************/ + +CTL_RO_CGEN(config_stats, stats_cactive, &stats_cactive, size_t *) +CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats.allocated, size_t) +CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t) +CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats.metadata, size_t) +CTL_RO_CGEN(config_stats, stats_resident, ctl_stats.resident, size_t) +CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t) +CTL_RO_CGEN(config_stats, stats_retained, ctl_stats.retained, size_t) + +CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *) +CTL_RO_GEN(stats_arenas_i_lg_dirty_mult, ctl_stats.arenas[mib[2]].lg_dirty_mult, + ssize_t) +CTL_RO_GEN(stats_arenas_i_decay_time, ctl_stats.arenas[mib[2]].decay_time, + ssize_t) +CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned) +CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t) 
+CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_mapped, + ctl_stats.arenas[mib[2]].astats.mapped, size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_retained, + ctl_stats.arenas[mib[2]].astats.retained, size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_npurge, + ctl_stats.arenas[mib[2]].astats.npurge, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise, + ctl_stats.arenas[mib[2]].astats.nmadvise, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_purged, + ctl_stats.arenas[mib[2]].astats.purged, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_mapped, + ctl_stats.arenas[mib[2]].astats.metadata_mapped, size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_allocated, + ctl_stats.arenas[mib[2]].astats.metadata_allocated, size_t) + +CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated, + ctl_stats.arenas[mib[2]].allocated_small, size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc, + ctl_stats.arenas[mib[2]].nmalloc_small, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc, + ctl_stats.arenas[mib[2]].ndalloc_small, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests, + ctl_stats.arenas[mib[2]].nrequests_small, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated, + ctl_stats.arenas[mib[2]].astats.allocated_large, size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc, + ctl_stats.arenas[mib[2]].astats.nmalloc_large, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc, + ctl_stats.arenas[mib[2]].astats.ndalloc_large, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests, + ctl_stats.arenas[mib[2]].astats.nrequests_large, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_huge_allocated, + ctl_stats.arenas[mib[2]].astats.allocated_huge, size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_huge_nmalloc, + ctl_stats.arenas[mib[2]].astats.nmalloc_huge, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_huge_ndalloc, + ctl_stats.arenas[mib[2]].astats.ndalloc_huge, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_huge_nrequests, + ctl_stats.arenas[mib[2]].astats.nmalloc_huge, uint64_t) /* Intentional. 
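+   Huge allocations bypass the tcache, so every request is satisfied by
+   a fresh allocation and nmalloc doubles as the request count.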
*/ + +CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc, + ctl_stats.arenas[mib[2]].bstats[mib[4]].nmalloc, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc, + ctl_stats.arenas[mib[2]].bstats[mib[4]].ndalloc, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests, + ctl_stats.arenas[mib[2]].bstats[mib[4]].nrequests, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curregs, + ctl_stats.arenas[mib[2]].bstats[mib[4]].curregs, size_t) +CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nfills, + ctl_stats.arenas[mib[2]].bstats[mib[4]].nfills, uint64_t) +CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nflushes, + ctl_stats.arenas[mib[2]].bstats[mib[4]].nflushes, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nruns, + ctl_stats.arenas[mib[2]].bstats[mib[4]].nruns, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreruns, + ctl_stats.arenas[mib[2]].bstats[mib[4]].reruns, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curruns, + ctl_stats.arenas[mib[2]].bstats[mib[4]].curruns, size_t) + +static const ctl_named_node_t * +stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, + size_t j) +{ + + if (j > NBINS) + return (NULL); + return (super_stats_arenas_i_bins_j_node); +} + +CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nmalloc, + ctl_stats.arenas[mib[2]].lstats[mib[4]].nmalloc, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_ndalloc, + ctl_stats.arenas[mib[2]].lstats[mib[4]].ndalloc, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nrequests, + ctl_stats.arenas[mib[2]].lstats[mib[4]].nrequests, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_curruns, + ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, size_t) + +static const ctl_named_node_t * +stats_arenas_i_lruns_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, + size_t j) +{ + + if (j > nlclasses) + return (NULL); + return (super_stats_arenas_i_lruns_j_node); +} + +CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_nmalloc, + ctl_stats.arenas[mib[2]].hstats[mib[4]].nmalloc, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_ndalloc, + ctl_stats.arenas[mib[2]].hstats[mib[4]].ndalloc, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_nrequests, + ctl_stats.arenas[mib[2]].hstats[mib[4]].nmalloc, /* Intentional. 
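+   Same rationale as huge nrequests above: huge chunks are never
+   cached, so nmalloc doubles as the request count.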
*/ + uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_curhchunks, + ctl_stats.arenas[mib[2]].hstats[mib[4]].curhchunks, size_t) + +static const ctl_named_node_t * +stats_arenas_i_hchunks_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, + size_t j) +{ + + if (j > nhclasses) + return (NULL); + return (super_stats_arenas_i_hchunks_j_node); +} + +static const ctl_named_node_t * +stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) +{ + const ctl_named_node_t * ret; + + malloc_mutex_lock(tsdn, &ctl_mtx); + if (i > ctl_stats.narenas || !ctl_stats.arenas[i].initialized) { + ret = NULL; + goto label_return; + } + + ret = super_stats_arenas_i_node; +label_return: + malloc_mutex_unlock(tsdn, &ctl_mtx); + return (ret); +} diff --git a/sources/jemalloc/src/je_extent.c b/sources/jemalloc/src/je_extent.c new file mode 100755 index 00000000..ff8de2fe --- /dev/null +++ b/sources/jemalloc/src/je_extent.c @@ -0,0 +1,96 @@ +#define JEMALLOC_EXTENT_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +/******************************************************************************/ + +#ifndef JEMALLOC_JET +static +#endif +size_t +extent_size_quantize_floor(size_t size) { + size_t ret; + szind_t ind; + + assert(size > 0); + + ind = size2index(size + 1); + if (ind == 0) { + /* Avoid underflow. */ + return (index2size(0)); + } + ret = index2size(ind - 1); + assert(ret <= size); + return (ret); +} + +size_t +extent_size_quantize_ceil(size_t size) { + size_t ret; + + assert(size > 0); + + ret = extent_size_quantize_floor(size); + if (ret < size) { + /* + * Skip a quantization that may have an adequately large extent, + * because under-sized extents may be mixed in. This only + * happens when an unusual size is requested, i.e. for aligned + * allocation, and is just one of several places where linear + * search would potentially find sufficiently aligned available + * memory somewhere lower. + */ + ret = index2size(size2index(ret + 1)); + } + return ret; +} + +JEMALLOC_INLINE_C int +extent_sz_comp(const extent_node_t *a, const extent_node_t *b) +{ + size_t a_qsize = extent_size_quantize_floor(extent_node_size_get(a)); + size_t b_qsize = extent_size_quantize_floor(extent_node_size_get(b)); + + return ((a_qsize > b_qsize) - (a_qsize < b_qsize)); +} + +JEMALLOC_INLINE_C int +extent_sn_comp(const extent_node_t *a, const extent_node_t *b) +{ + size_t a_sn = extent_node_sn_get(a); + size_t b_sn = extent_node_sn_get(b); + + return ((a_sn > b_sn) - (a_sn < b_sn)); +} + +JEMALLOC_INLINE_C int +extent_ad_comp(const extent_node_t *a, const extent_node_t *b) +{ + uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a); + uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b); + + return ((a_addr > b_addr) - (a_addr < b_addr)); +} + +JEMALLOC_INLINE_C int +extent_szsnad_comp(const extent_node_t *a, const extent_node_t *b) +{ + int ret; + + ret = extent_sz_comp(a, b); + if (ret != 0) + return (ret); + + ret = extent_sn_comp(a, b); + if (ret != 0) + return (ret); + + ret = extent_ad_comp(a, b); + return (ret); +} + +/* Generate red-black tree functions. */ +rb_gen(, extent_tree_szsnad_, extent_tree_t, extent_node_t, szsnad_link, + extent_szsnad_comp) + +/* Generate red-black tree functions. 
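+   This second instantiation keys the tree purely on address order via
+   extent_ad_comp; rb_gen() emits the complete insert/remove/search API
+   for each tree type.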
*/ +rb_gen(, extent_tree_ad_, extent_tree_t, extent_node_t, ad_link, extent_ad_comp) diff --git a/sources/jemalloc/src/je_hash.c b/sources/jemalloc/src/je_hash.c new file mode 100755 index 00000000..cfa4da02 --- /dev/null +++ b/sources/jemalloc/src/je_hash.c @@ -0,0 +1,2 @@ +#define JEMALLOC_HASH_C_ +#include "jemalloc/internal/jemalloc_internal.h" diff --git a/sources/jemalloc/src/je_huge.c b/sources/jemalloc/src/je_huge.c new file mode 100755 index 00000000..0fbaa41a --- /dev/null +++ b/sources/jemalloc/src/je_huge.c @@ -0,0 +1,498 @@ +#define JEMALLOC_HUGE_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +/******************************************************************************/ + +static extent_node_t * +huge_node_get(const void *ptr) +{ + extent_node_t *node; + + node = chunk_lookup(ptr, true); + assert(!extent_node_achunk_get(node)); + + return (node); +} + +static bool +huge_node_set(tsdn_t *tsdn, const void *ptr, extent_node_t *node, bool *gdump) +{ + + assert(extent_node_addr_get(node) == ptr); + assert(!extent_node_achunk_get(node)); + return (chunk_register(ptr, node, gdump)); +} + +static void +huge_node_reset(tsdn_t *tsdn, const void *ptr, extent_node_t *node, bool *gdump) +{ + bool err; + + err = huge_node_set(tsdn, ptr, node, gdump); + assert(!err); +} + +static void +huge_node_unset(const void *ptr, const extent_node_t *node) +{ + + chunk_deregister(ptr, node); +} + +void * +huge_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero) +{ + + assert(usize == s2u(usize)); + + return (huge_palloc(tsdn, arena, usize, chunksize, zero)); +} + +void * +huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, + bool zero) +{ + void *ret; + size_t ausize; + arena_t *iarena; + extent_node_t *node; + size_t sn; + bool is_zeroed, gdump; + + /* Allocate one or more contiguous chunks for this request. */ + + assert(!tsdn_null(tsdn) || arena != NULL); + /* prof_gdump() requirement. */ + witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0); + + ausize = sa2u(usize, alignment); + if (unlikely(ausize == 0 || ausize > HUGE_MAXCLASS)) + return (NULL); + assert(ausize >= chunksize); + + /* Allocate an extent node with which to track the chunk. */ + iarena = (!tsdn_null(tsdn)) ? arena_ichoose(tsdn_tsd(tsdn), NULL) : + a0get(); + node = ipallocztm(tsdn, CACHELINE_CEILING(sizeof(extent_node_t)), + CACHELINE, false, NULL, true, iarena); + if (node == NULL) + return (NULL); + + /* + * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that + * it is possible to make correct junk/zero fill decisions below. + */ + is_zeroed = zero; + if (likely(!tsdn_null(tsdn))) + arena = arena_choose(tsdn_tsd(tsdn), arena); + if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(tsdn, + arena, usize, alignment, &sn, &is_zeroed)) == NULL) { + idalloctm(tsdn, node, NULL, true, true); + return (NULL); + } + + extent_node_init(node, arena, ret, usize, sn, is_zeroed, true); + + if (huge_node_set(tsdn, ret, node, &gdump)) { + arena_chunk_dalloc_huge(tsdn, arena, ret, usize, sn); + idalloctm(tsdn, node, NULL, true, true); + return (NULL); + } + if (config_prof && opt_prof && gdump) + prof_gdump(tsdn); + + /* Insert node into huge. 
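+	 * That is, append it to the arena's list of live huge allocations,
+	 * under huge_mtx; huge_dalloc() later removes it from this list.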
*/ + malloc_mutex_lock(tsdn, &arena->huge_mtx); + ql_elm_new(node, ql_link); + ql_tail_insert(&arena->huge, node, ql_link); + malloc_mutex_unlock(tsdn, &arena->huge_mtx); + + if (zero || (config_fill && unlikely(opt_zero))) { + if (!is_zeroed) + memset(ret, 0, usize); + } else if (config_fill && unlikely(opt_junk_alloc)) + memset(ret, JEMALLOC_ALLOC_JUNK, usize); + + arena_decay_tick(tsdn, arena); + return (ret); +} + +#ifdef JEMALLOC_JET +#undef huge_dalloc_junk +#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl) +#endif +static void +huge_dalloc_junk(void *ptr, size_t usize) +{ + + if (config_fill && have_dss && unlikely(opt_junk_free)) { + /* + * Only bother junk filling if the chunk isn't about to be + * unmapped. + */ + if (!config_munmap || (have_dss && chunk_in_dss(ptr))) + memset(ptr, JEMALLOC_FREE_JUNK, usize); + } +} +#ifdef JEMALLOC_JET +#undef huge_dalloc_junk +#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk) +huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl); +#endif + +static void +huge_ralloc_no_move_similar(tsdn_t *tsdn, void *ptr, size_t oldsize, + size_t usize_min, size_t usize_max, bool zero) +{ + size_t usize, usize_next; + extent_node_t *node; + arena_t *arena; + chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; + bool pre_zeroed, post_zeroed, gdump; + + /* prof_gdump() requirement. */ + witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0); + + /* Increase usize to incorporate extra. */ + for (usize = usize_min; usize < usize_max && (usize_next = s2u(usize+1)) + <= oldsize; usize = usize_next) + ; /* Do nothing. */ + + if (oldsize == usize) + return; + + node = huge_node_get(ptr); + arena = extent_node_arena_get(node); + pre_zeroed = extent_node_zeroed_get(node); + + /* Fill if necessary (shrinking). */ + if (oldsize > usize) { + size_t sdiff = oldsize - usize; + if (config_fill && unlikely(opt_junk_free)) { + memset((void *)((uintptr_t)ptr + usize), + JEMALLOC_FREE_JUNK, sdiff); + post_zeroed = false; + } else { + post_zeroed = !chunk_purge_wrapper(tsdn, arena, + &chunk_hooks, ptr, CHUNK_CEILING(oldsize), usize, + sdiff); + } + } else + post_zeroed = pre_zeroed; + + malloc_mutex_lock(tsdn, &arena->huge_mtx); + /* Update the size of the huge allocation. */ + huge_node_unset(ptr, node); + assert(extent_node_size_get(node) != usize); + extent_node_size_set(node, usize); + huge_node_reset(tsdn, ptr, node, &gdump); + /* Update zeroed. */ + extent_node_zeroed_set(node, post_zeroed); + malloc_mutex_unlock(tsdn, &arena->huge_mtx); + /* gdump without any locks held. */ + if (config_prof && opt_prof && gdump) + prof_gdump(tsdn); + + arena_chunk_ralloc_huge_similar(tsdn, arena, ptr, oldsize, usize); + + /* Fill if necessary (growing). */ + if (oldsize < usize) { + if (zero || (config_fill && unlikely(opt_zero))) { + if (!pre_zeroed) { + memset((void *)((uintptr_t)ptr + oldsize), 0, + usize - oldsize); + } + } else if (config_fill && unlikely(opt_junk_alloc)) { + memset((void *)((uintptr_t)ptr + oldsize), + JEMALLOC_ALLOC_JUNK, usize - oldsize); + } + } +} + +static bool +huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize, + size_t usize) +{ + extent_node_t *node; + arena_t *arena; + chunk_hooks_t chunk_hooks; + size_t cdiff; + bool pre_zeroed, post_zeroed, gdump; + + node = huge_node_get(ptr); + arena = extent_node_arena_get(node); + pre_zeroed = extent_node_zeroed_get(node); + chunk_hooks = chunk_hooks_get(tsdn, arena); + + assert(oldsize > usize); + /* prof_gdump() requirement. 
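+	 * huge_node_reset() below may flag a gdump, and prof_gdump() must
+	 * be reachable with no core locks held; the witness assertion
+	 * checks that precondition here.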
*/ + witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0); + + /* Split excess chunks. */ + cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize); + if (cdiff != 0 && chunk_hooks.split(ptr, CHUNK_CEILING(oldsize), + CHUNK_CEILING(usize), cdiff, true, arena->ind)) + return (true); + + if (oldsize > usize) { + size_t sdiff = oldsize - usize; + if (config_fill && unlikely(opt_junk_free)) { + huge_dalloc_junk((void *)((uintptr_t)ptr + usize), + sdiff); + post_zeroed = false; + } else { + post_zeroed = !chunk_purge_wrapper(tsdn, arena, + &chunk_hooks, CHUNK_ADDR2BASE((uintptr_t)ptr + + usize), CHUNK_CEILING(oldsize), + CHUNK_ADDR2OFFSET((uintptr_t)ptr + usize), sdiff); + } + } else + post_zeroed = pre_zeroed; + + malloc_mutex_lock(tsdn, &arena->huge_mtx); + /* Update the size of the huge allocation. */ + huge_node_unset(ptr, node); + extent_node_size_set(node, usize); + huge_node_reset(tsdn, ptr, node, &gdump); + /* Update zeroed. */ + extent_node_zeroed_set(node, post_zeroed); + malloc_mutex_unlock(tsdn, &arena->huge_mtx); + /* gdump without any locks held. */ + if (config_prof && opt_prof && gdump) + prof_gdump(tsdn); + + /* Zap the excess chunks. */ + arena_chunk_ralloc_huge_shrink(tsdn, arena, ptr, oldsize, usize, + extent_node_sn_get(node)); + + return (false); +} + +static bool +huge_ralloc_no_move_expand(tsdn_t *tsdn, void *ptr, size_t oldsize, + size_t usize, bool zero) { + extent_node_t *node; + arena_t *arena; + bool is_zeroed_subchunk, is_zeroed_chunk, gdump; + + node = huge_node_get(ptr); + arena = extent_node_arena_get(node); + malloc_mutex_lock(tsdn, &arena->huge_mtx); + is_zeroed_subchunk = extent_node_zeroed_get(node); + malloc_mutex_unlock(tsdn, &arena->huge_mtx); + + /* prof_gdump() requirement. */ + witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0); + + /* + * Use is_zeroed_chunk to detect whether the trailing memory is zeroed, + * update extent's zeroed field, and zero as necessary. + */ + is_zeroed_chunk = false; + if (arena_chunk_ralloc_huge_expand(tsdn, arena, ptr, oldsize, usize, + &is_zeroed_chunk)) + return (true); + + malloc_mutex_lock(tsdn, &arena->huge_mtx); + huge_node_unset(ptr, node); + extent_node_size_set(node, usize); + extent_node_zeroed_set(node, extent_node_zeroed_get(node) && + is_zeroed_chunk); + huge_node_reset(tsdn, ptr, node, &gdump); + malloc_mutex_unlock(tsdn, &arena->huge_mtx); + /* gdump without any locks held. */ + if (config_prof && opt_prof && gdump) + prof_gdump(tsdn); + + if (zero || (config_fill && unlikely(opt_zero))) { + if (!is_zeroed_subchunk) { + memset((void *)((uintptr_t)ptr + oldsize), 0, + CHUNK_CEILING(oldsize) - oldsize); + } + if (!is_zeroed_chunk) { + memset((void *)((uintptr_t)ptr + + CHUNK_CEILING(oldsize)), 0, usize - + CHUNK_CEILING(oldsize)); + } + } else if (config_fill && unlikely(opt_junk_alloc)) { + memset((void *)((uintptr_t)ptr + oldsize), JEMALLOC_ALLOC_JUNK, + usize - oldsize); + } + + return (false); +} + +bool +huge_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min, + size_t usize_max, bool zero) +{ + + assert(s2u(oldsize) == oldsize); + /* The following should have been caught by callers. */ + assert(usize_min > 0 && usize_max <= HUGE_MAXCLASS); + + /* Both allocations must be huge to avoid a move. */ + if (oldsize < chunksize || usize_max < chunksize) + return (true); + + if (CHUNK_CEILING(usize_max) > CHUNK_CEILING(oldsize)) { + /* Attempt to expand the allocation in-place. 
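+		 * First with usize_max; if that fails and usize_min would
+		 * still grow the chunk footprint, retry with usize_min.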
*/ + if (!huge_ralloc_no_move_expand(tsdn, ptr, oldsize, usize_max, + zero)) { + arena_decay_tick(tsdn, huge_aalloc(ptr)); + return (false); + } + /* Try again, this time with usize_min. */ + if (usize_min < usize_max && CHUNK_CEILING(usize_min) > + CHUNK_CEILING(oldsize) && huge_ralloc_no_move_expand(tsdn, + ptr, oldsize, usize_min, zero)) { + arena_decay_tick(tsdn, huge_aalloc(ptr)); + return (false); + } + } + + /* + * Avoid moving the allocation if the existing chunk size accommodates + * the new size. + */ + if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize_min) + && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(usize_max)) { + huge_ralloc_no_move_similar(tsdn, ptr, oldsize, usize_min, + usize_max, zero); + arena_decay_tick(tsdn, huge_aalloc(ptr)); + return (false); + } + + /* Attempt to shrink the allocation in-place. */ + if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max)) { + if (!huge_ralloc_no_move_shrink(tsdn, ptr, oldsize, + usize_max)) { + arena_decay_tick(tsdn, huge_aalloc(ptr)); + return (false); + } + } + return (true); +} + +static void * +huge_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize, + size_t alignment, bool zero) +{ + + if (alignment <= chunksize) + return (huge_malloc(tsdn, arena, usize, zero)); + return (huge_palloc(tsdn, arena, usize, alignment, zero)); +} + +void * +huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, + size_t usize, size_t alignment, bool zero, tcache_t *tcache) +{ + void *ret; + size_t copysize; + + /* The following should have been caught by callers. */ + assert(usize > 0 && usize <= HUGE_MAXCLASS); + + /* Try to avoid moving the allocation. */ + if (!huge_ralloc_no_move(tsd_tsdn(tsd), ptr, oldsize, usize, usize, + zero)) + return (ptr); + + /* + * usize and oldsize are different enough that we need to use a + * different size class. In that case, fall back to allocating new + * space and copying. + */ + ret = huge_ralloc_move_helper(tsd_tsdn(tsd), arena, usize, alignment, + zero); + if (ret == NULL) + return (NULL); + + copysize = (usize < oldsize) ? 
usize : oldsize; + memcpy(ret, ptr, copysize); + isqalloc(tsd, ptr, oldsize, tcache, true); + return (ret); +} + +void +huge_dalloc(tsdn_t *tsdn, void *ptr) +{ + extent_node_t *node; + arena_t *arena; + + node = huge_node_get(ptr); + arena = extent_node_arena_get(node); + huge_node_unset(ptr, node); + malloc_mutex_lock(tsdn, &arena->huge_mtx); + ql_remove(&arena->huge, node, ql_link); + malloc_mutex_unlock(tsdn, &arena->huge_mtx); + + huge_dalloc_junk(extent_node_addr_get(node), + extent_node_size_get(node)); + arena_chunk_dalloc_huge(tsdn, extent_node_arena_get(node), + extent_node_addr_get(node), extent_node_size_get(node), + extent_node_sn_get(node)); + idalloctm(tsdn, node, NULL, true, true); + + arena_decay_tick(tsdn, arena); +} + +arena_t * +huge_aalloc(const void *ptr) +{ + + return (extent_node_arena_get(huge_node_get(ptr))); +} + +size_t +huge_salloc(tsdn_t *tsdn, const void *ptr) +{ + size_t size; + extent_node_t *node; + arena_t *arena; + + node = huge_node_get(ptr); + arena = extent_node_arena_get(node); + malloc_mutex_lock(tsdn, &arena->huge_mtx); + size = extent_node_size_get(node); + malloc_mutex_unlock(tsdn, &arena->huge_mtx); + + return (size); +} + +prof_tctx_t * +huge_prof_tctx_get(tsdn_t *tsdn, const void *ptr) +{ + prof_tctx_t *tctx; + extent_node_t *node; + arena_t *arena; + + node = huge_node_get(ptr); + arena = extent_node_arena_get(node); + malloc_mutex_lock(tsdn, &arena->huge_mtx); + tctx = extent_node_prof_tctx_get(node); + malloc_mutex_unlock(tsdn, &arena->huge_mtx); + + return (tctx); +} + +void +huge_prof_tctx_set(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx) +{ + extent_node_t *node; + arena_t *arena; + + node = huge_node_get(ptr); + arena = extent_node_arena_get(node); + malloc_mutex_lock(tsdn, &arena->huge_mtx); + extent_node_prof_tctx_set(node, tctx); + malloc_mutex_unlock(tsdn, &arena->huge_mtx); +} + +void +huge_prof_tctx_reset(tsdn_t *tsdn, const void *ptr) +{ + + huge_prof_tctx_set(tsdn, ptr, (prof_tctx_t *)(uintptr_t)1U); +} diff --git a/sources/jemalloc/src/je_jemalloc.c b/sources/jemalloc/src/je_jemalloc.c new file mode 100755 index 00000000..f73a26cd --- /dev/null +++ b/sources/jemalloc/src/je_jemalloc.c @@ -0,0 +1,2925 @@ +#define JEMALLOC_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +/******************************************************************************/ +/* Data. */ + +/* Runtime configuration options. */ +const char *je_malloc_conf +#ifndef _WIN32 + JEMALLOC_ATTR(weak) +#endif + ; +bool opt_abort = +#ifdef JEMALLOC_DEBUG + true +#else + false +#endif + ; +const char *opt_junk = +#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) + "true" +#else + "false" +#endif + ; +bool opt_junk_alloc = +#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) + true +#else + false +#endif + ; +bool opt_junk_free = +#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) + true +#else + false +#endif + ; + +size_t opt_quarantine = ZU(0); +bool opt_redzone = false; +bool opt_utrace = false; +bool opt_xmalloc = false; +bool opt_zero = false; +unsigned opt_narenas = 0; + +/* Initialized to true if the process is running inside Valgrind. */ +bool in_valgrind; + +unsigned ncpus; + +/* Protects arenas initialization. */ +static malloc_mutex_t arenas_lock; +/* + * Arenas that are used to service external requests. Not all elements of the + * arenas array are necessarily used; arenas are created lazily as needed. + * + * arenas[0..narenas_auto) are used for automatic multiplexing of threads and + * arenas. 
arenas[narenas_auto..narenas_total) are only used if the application + * takes some action to create them and allocate from them. + */ +arena_t **arenas; +static unsigned narenas_total; /* Use narenas_total_*(). */ +static arena_t *a0; /* arenas[0]; read-only after initialization. */ +unsigned narenas_auto; /* Read-only after initialization. */ + +typedef enum { + malloc_init_uninitialized = 3, + malloc_init_a0_initialized = 2, + malloc_init_recursible = 1, + malloc_init_initialized = 0 /* Common case --> jnz. */ +} malloc_init_t; +static malloc_init_t malloc_init_state = malloc_init_uninitialized; + +/* False should be the common case. Set to true to trigger initialization. */ +static bool malloc_slow = true; + +/* When malloc_slow is true, set the corresponding bits for sanity check. */ +enum { + flag_opt_junk_alloc = (1U), + flag_opt_junk_free = (1U << 1), + flag_opt_quarantine = (1U << 2), + flag_opt_zero = (1U << 3), + flag_opt_utrace = (1U << 4), + flag_in_valgrind = (1U << 5), + flag_opt_xmalloc = (1U << 6) +}; +static uint8_t malloc_slow_flags; + +JEMALLOC_ALIGNED(CACHELINE) +const size_t pind2sz_tab[NPSIZES] = { +#define PSZ_yes(lg_grp, ndelta, lg_delta) \ + (((ZU(1)<= 0x0600 +static malloc_mutex_t init_lock = SRWLOCK_INIT; +#else +static malloc_mutex_t init_lock; +static bool init_lock_initialized = false; + +JEMALLOC_ATTR(constructor) +static void WINAPI +_init_init_lock(void) +{ + + /* If another constructor in the same binary is using mallctl to + * e.g. setup chunk hooks, it may end up running before this one, + * and malloc_init_hard will crash trying to lock the uninitialized + * lock. So we force an initialization of the lock in + * malloc_init_hard as well. We don't try to care about atomicity + * of the accessed to the init_lock_initialized boolean, since it + * really only matters early in the process creation, before any + * separate thread normally starts doing anything. */ + if (!init_lock_initialized) + malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT); + init_lock_initialized = true; +} + +#ifdef _MSC_VER +# pragma section(".CRT$XCU", read) +JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used) +static const void (WINAPI *init_init_lock)(void) = _init_init_lock; +#endif +#endif +#else +static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER; +#endif + +typedef struct { + void *p; /* Input pointer (as in realloc(p, s)). */ + size_t s; /* Request size. */ + void *r; /* Result pointer. */ +} malloc_utrace_t; + +#ifdef JEMALLOC_UTRACE +# define UTRACE(a, b, c) do { \ + if (unlikely(opt_utrace)) { \ + int utrace_serrno = errno; \ + malloc_utrace_t ut; \ + ut.p = (a); \ + ut.s = (b); \ + ut.r = (c); \ + utrace(&ut, sizeof(ut)); \ + errno = utrace_serrno; \ + } \ +} while (0) +#else +# define UTRACE(a, b, c) +#endif + +/******************************************************************************/ +/* + * Function prototypes for static functions that are referenced prior to + * definition. + */ + +static bool malloc_init_hard_a0(void); +static bool malloc_init_hard(void); + +/******************************************************************************/ +/* + * Begin miscellaneous support functions. 
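+ * (bootstrap a0 allocation, arena creation/binding, and the TSD
+ * cleanup hooks that follow)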
+ */ + +JEMALLOC_ALWAYS_INLINE_C bool +malloc_initialized(void) +{ + + return (malloc_init_state == malloc_init_initialized); +} + +JEMALLOC_ALWAYS_INLINE_C void +malloc_thread_init(void) +{ + + /* + * TSD initialization can't be safely done as a side effect of + * deallocation, because it is possible for a thread to do nothing but + * deallocate its TLS data via free(), in which case writing to TLS + * would cause write-after-free memory corruption. The quarantine + * facility *only* gets used as a side effect of deallocation, so make + * a best effort attempt at initializing its TSD by hooking all + * allocation events. + */ + if (config_fill && unlikely(opt_quarantine)) + quarantine_alloc_hook(); +} + +JEMALLOC_ALWAYS_INLINE_C bool +malloc_init_a0(void) +{ + + if (unlikely(malloc_init_state == malloc_init_uninitialized)) + return (malloc_init_hard_a0()); + return (false); +} + +JEMALLOC_ALWAYS_INLINE_C bool +malloc_init(void) +{ + + if (unlikely(!malloc_initialized()) && malloc_init_hard()) + return (true); + malloc_thread_init(); + + return (false); +} + +/* + * The a0*() functions are used instead of i{d,}alloc() in situations that + * cannot tolerate TLS variable access. + */ + +static void * +a0ialloc(size_t size, bool zero, bool is_metadata) +{ + + if (unlikely(malloc_init_a0())) + return (NULL); + + return (iallocztm(TSDN_NULL, size, size2index(size), zero, NULL, + is_metadata, arena_get(TSDN_NULL, 0, true), true)); +} + +static void +a0idalloc(void *ptr, bool is_metadata) +{ + + idalloctm(TSDN_NULL, ptr, false, is_metadata, true); +} + +arena_t * +a0get(void) +{ + + return (a0); +} + +void * +a0malloc(size_t size) +{ + + return (a0ialloc(size, false, true)); +} + +void +a0dalloc(void *ptr) +{ + + a0idalloc(ptr, true); +} + +/* + * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-senstive + * situations that cannot tolerate TLS variable access (TLS allocation and very + * early internal data structure initialization). + */ + +void * +bootstrap_malloc(size_t size) +{ + + if (unlikely(size == 0)) + size = 1; + + return (a0ialloc(size, false, false)); +} + +void * +bootstrap_calloc(size_t num, size_t size) +{ + size_t num_size; + + num_size = num * size; + if (unlikely(num_size == 0)) { + assert(num == 0 || size == 0); + num_size = 1; + } + + return (a0ialloc(num_size, true, false)); +} + +void +bootstrap_free(void *ptr) +{ + + if (unlikely(ptr == NULL)) + return; + + a0idalloc(ptr, false); +} + +static void +arena_set(unsigned ind, arena_t *arena) +{ + + atomic_write_p((void **)&arenas[ind], arena); +} + +static void +narenas_total_set(unsigned narenas) +{ + + atomic_write_u(&narenas_total, narenas); +} + +static void +narenas_total_inc(void) +{ + + atomic_add_u(&narenas_total, 1); +} + +unsigned +narenas_total_get(void) +{ + + return (atomic_read_u(&narenas_total)); +} + +/* Create a new arena and insert it into the arenas array at index ind. */ +static arena_t * +arena_init_locked(tsdn_t *tsdn, unsigned ind) +{ + arena_t *arena; + + assert(ind <= narenas_total_get()); + if (ind > MALLOCX_ARENA_MAX) + return (NULL); + if (ind == narenas_total_get()) + narenas_total_inc(); + + /* + * Another thread may have already initialized arenas[ind] if it's an + * auto arena. + */ + arena = arena_get(tsdn, ind, false); + if (arena != NULL) { + assert(ind < narenas_auto); + return (arena); + } + + /* Actually initialize the arena. 
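+	 * arena_new() fully constructs the arena before arena_set()
+	 * publishes the pointer with an atomic write, so readers that skip
+	 * arenas_lock never observe a half-built arena.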
*/ + arena = arena_new(tsdn, ind); + arena_set(ind, arena); + return (arena); +} + +arena_t * +arena_init(tsdn_t *tsdn, unsigned ind) +{ + arena_t *arena; + + malloc_mutex_lock(tsdn, &arenas_lock); + arena = arena_init_locked(tsdn, ind); + malloc_mutex_unlock(tsdn, &arenas_lock); + return (arena); +} + +static void +arena_bind(tsd_t *tsd, unsigned ind, bool internal) +{ + arena_t *arena; + + if (!tsd_nominal(tsd)) + return; + + arena = arena_get(tsd_tsdn(tsd), ind, false); + arena_nthreads_inc(arena, internal); + + if (internal) + tsd_iarena_set(tsd, arena); + else + tsd_arena_set(tsd, arena); +} + +void +arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) +{ + arena_t *oldarena, *newarena; + + oldarena = arena_get(tsd_tsdn(tsd), oldind, false); + newarena = arena_get(tsd_tsdn(tsd), newind, false); + arena_nthreads_dec(oldarena, false); + arena_nthreads_inc(newarena, false); + tsd_arena_set(tsd, newarena); +} + +static void +arena_unbind(tsd_t *tsd, unsigned ind, bool internal) +{ + arena_t *arena; + + arena = arena_get(tsd_tsdn(tsd), ind, false); + arena_nthreads_dec(arena, internal); + if (internal) + tsd_iarena_set(tsd, NULL); + else + tsd_arena_set(tsd, NULL); +} + +arena_tdata_t * +arena_tdata_get_hard(tsd_t *tsd, unsigned ind) +{ + arena_tdata_t *tdata, *arenas_tdata_old; + arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd); + unsigned narenas_tdata_old, i; + unsigned narenas_tdata = tsd_narenas_tdata_get(tsd); + unsigned narenas_actual = narenas_total_get(); + + /* + * Dissociate old tdata array (and set up for deallocation upon return) + * if it's too small. + */ + if (arenas_tdata != NULL && narenas_tdata < narenas_actual) { + arenas_tdata_old = arenas_tdata; + narenas_tdata_old = narenas_tdata; + arenas_tdata = NULL; + narenas_tdata = 0; + tsd_arenas_tdata_set(tsd, arenas_tdata); + tsd_narenas_tdata_set(tsd, narenas_tdata); + } else { + arenas_tdata_old = NULL; + narenas_tdata_old = 0; + } + + /* Allocate tdata array if it's missing. */ + if (arenas_tdata == NULL) { + bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd); + narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1; + + if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) { + *arenas_tdata_bypassp = true; + arenas_tdata = (arena_tdata_t *)a0malloc( + sizeof(arena_tdata_t) * narenas_tdata); + *arenas_tdata_bypassp = false; + } + if (arenas_tdata == NULL) { + tdata = NULL; + goto label_return; + } + assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp); + tsd_arenas_tdata_set(tsd, arenas_tdata); + tsd_narenas_tdata_set(tsd, narenas_tdata); + } + + /* + * Copy to tdata array. It's possible that the actual number of arenas + * has increased since narenas_total_get() was called above, but that + * causes no correctness issues unless two threads concurrently execute + * the arenas.extend mallctl, which we trust mallctl synchronization to + * prevent. + */ + + /* Copy/initialize tickers. */ + for (i = 0; i < narenas_actual; i++) { + if (i < narenas_tdata_old) { + ticker_copy(&arenas_tdata[i].decay_ticker, + &arenas_tdata_old[i].decay_ticker); + } else { + ticker_init(&arenas_tdata[i].decay_ticker, + DECAY_NTICKS_PER_UPDATE); + } + } + if (narenas_tdata > narenas_actual) { + memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t) + * (narenas_tdata - narenas_actual)); + } + + /* Read the refreshed tdata array. */ + tdata = &arenas_tdata[ind]; +label_return: + if (arenas_tdata_old != NULL) + a0dalloc(arenas_tdata_old); + return (tdata); +} + +/* Slow path, called only by arena_choose(). 
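+ * Binds the calling thread (for both application and internal
+ * allocation) to an unloaded or least-loaded arena, initializing the
+ * first unused slot when every extant auto arena already has threads.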
*/ +arena_t * +arena_choose_hard(tsd_t *tsd, bool internal) +{ + arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL); + + if (narenas_auto > 1) { + unsigned i, j, choose[2], first_null; + + /* + * Determine binding for both non-internal and internal + * allocation. + * + * choose[0]: For application allocation. + * choose[1]: For internal metadata allocation. + */ + + for (j = 0; j < 2; j++) + choose[j] = 0; + + first_null = narenas_auto; + malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock); + assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL); + for (i = 1; i < narenas_auto; i++) { + if (arena_get(tsd_tsdn(tsd), i, false) != NULL) { + /* + * Choose the first arena that has the lowest + * number of threads assigned to it. + */ + for (j = 0; j < 2; j++) { + if (arena_nthreads_get(arena_get( + tsd_tsdn(tsd), i, false), !!j) < + arena_nthreads_get(arena_get( + tsd_tsdn(tsd), choose[j], false), + !!j)) + choose[j] = i; + } + } else if (first_null == narenas_auto) { + /* + * Record the index of the first uninitialized + * arena, in case all extant arenas are in use. + * + * NB: It is possible for there to be + * discontinuities in terms of initialized + * versus uninitialized arenas, due to the + * "thread.arena" mallctl. + */ + first_null = i; + } + } + + for (j = 0; j < 2; j++) { + if (arena_nthreads_get(arena_get(tsd_tsdn(tsd), + choose[j], false), !!j) == 0 || first_null == + narenas_auto) { + /* + * Use an unloaded arena, or the least loaded + * arena if all arenas are already initialized. + */ + if (!!j == internal) { + ret = arena_get(tsd_tsdn(tsd), + choose[j], false); + } + } else { + arena_t *arena; + + /* Initialize a new arena. */ + choose[j] = first_null; + arena = arena_init_locked(tsd_tsdn(tsd), + choose[j]); + if (arena == NULL) { + malloc_mutex_unlock(tsd_tsdn(tsd), + &arenas_lock); + return (NULL); + } + if (!!j == internal) + ret = arena; + } + arena_bind(tsd, choose[j], !!j); + } + malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock); + } else { + ret = arena_get(tsd_tsdn(tsd), 0, false); + arena_bind(tsd, 0, false); + arena_bind(tsd, 0, true); + } + + return (ret); +} + +void +thread_allocated_cleanup(tsd_t *tsd) +{ + + /* Do nothing. */ +} + +void +thread_deallocated_cleanup(tsd_t *tsd) +{ + + /* Do nothing. */ +} + +void +iarena_cleanup(tsd_t *tsd) +{ + arena_t *iarena; + + iarena = tsd_iarena_get(tsd); + if (iarena != NULL) + arena_unbind(tsd, iarena->ind, true); +} + +void +arena_cleanup(tsd_t *tsd) +{ + arena_t *arena; + + arena = tsd_arena_get(tsd); + if (arena != NULL) + arena_unbind(tsd, arena->ind, false); +} + +void +arenas_tdata_cleanup(tsd_t *tsd) +{ + arena_tdata_t *arenas_tdata; + + /* Prevent tsd->arenas_tdata from being (re)created. */ + *tsd_arenas_tdata_bypassp_get(tsd) = true; + + arenas_tdata = tsd_arenas_tdata_get(tsd); + if (arenas_tdata != NULL) { + tsd_arenas_tdata_set(tsd, NULL); + a0dalloc(arenas_tdata); + } +} + +void +narenas_tdata_cleanup(tsd_t *tsd) +{ + + /* Do nothing. */ +} + +void +arenas_tdata_bypass_cleanup(tsd_t *tsd) +{ + + /* Do nothing. */ +} + +static void +stats_print_atexit(void) +{ + + if (config_tcache && config_stats) { + tsdn_t *tsdn; + unsigned narenas, i; + + tsdn = tsdn_fetch(); + + /* + * Merge stats from extant threads. This is racy, since + * individual threads do not lock when recording tcache stats + * events. As a consequence, the final stats may be slightly + * out of date by the time they are reported, if other threads + * continue to allocate. 
+ */ + for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { + arena_t *arena = arena_get(tsdn, i, false); + if (arena != NULL) { + tcache_t *tcache; + + /* + * tcache_stats_merge() locks bins, so if any + * code is introduced that acquires both arena + * and bin locks in the opposite order, + * deadlocks may result. + */ + malloc_mutex_lock(tsdn, &arena->lock); + ql_foreach(tcache, &arena->tcache_ql, link) { + tcache_stats_merge(tsdn, tcache, arena); + } + malloc_mutex_unlock(tsdn, &arena->lock); + } + } + } + je_malloc_stats_print(NULL, NULL, NULL); +} + +/* + * End miscellaneous support functions. + */ +/******************************************************************************/ +/* + * Begin initialization functions. + */ + +static char * +jemalloc_secure_getenv(const char *name) +{ +#ifdef JEMALLOC_HAVE_SECURE_GETENV + return secure_getenv(name); +#else +# ifdef JEMALLOC_HAVE_ISSETUGID + if (issetugid() != 0) + return (NULL); +# endif + return (getenv(name)); +#endif +} + +static unsigned +malloc_ncpus(void) +{ + long result; + +#ifdef _WIN32 + SYSTEM_INFO si; + GetSystemInfo(&si); + result = si.dwNumberOfProcessors; +#elif defined(JEMALLOC_GLIBC_MALLOC_HOOK) && defined(CPU_COUNT) + /* + * glibc >= 2.6 has the CPU_COUNT macro. + * + * glibc's sysconf() uses isspace(). glibc allocates for the first time + * *before* setting up the isspace tables. Therefore we need a + * different method to get the number of CPUs. + */ + { + cpu_set_t set; + + pthread_getaffinity_np(pthread_self(), sizeof(set), &set); + result = CPU_COUNT(&set); + } +#else + result = sysconf(_SC_NPROCESSORS_ONLN); +#endif + return ((result == -1) ? 1 : (unsigned)result); +} + +static bool +malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p, + char const **v_p, size_t *vlen_p) +{ + bool accept; + const char *opts = *opts_p; + + *k_p = opts; + + for (accept = false; !accept;) { + switch (*opts) { + case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': + case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': + case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': + case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': + case 'Y': case 'Z': + case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': + case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': + case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': + case 's': case 't': case 'u': case 'v': case 'w': case 'x': + case 'y': case 'z': + case '0': case '1': case '2': case '3': case '4': case '5': + case '6': case '7': case '8': case '9': + case '_': + opts++; + break; + case ':': + opts++; + *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p; + *v_p = opts; + accept = true; + break; + case '\0': + if (opts != *opts_p) { + malloc_write(": Conf string ends " + "with key\n"); + } + return (true); + default: + malloc_write(": Malformed conf string\n"); + return (true); + } + } + + for (accept = false; !accept;) { + switch (*opts) { + case ',': + opts++; + /* + * Look ahead one character here, because the next time + * this function is called, it will assume that end of + * input has been cleanly reached if no input remains, + * but we have optimistically already consumed the + * comma if one exists. 
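+			 * For example, with remaining input "junk:true," the
+			 * trailing comma is consumed here, and the now-empty
+			 * remainder triggers the diagnostic below.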
+ */ + if (*opts == '\0') { + malloc_write(": Conf string ends " + "with comma\n"); + } + *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p; + accept = true; + break; + case '\0': + *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p; + accept = true; + break; + default: + opts++; + break; + } + } + + *opts_p = opts; + return (false); +} + +static void +malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v, + size_t vlen) +{ + + malloc_printf(": %s: %.*s:%.*s\n", msg, (int)klen, k, + (int)vlen, v); +} + +static void +malloc_slow_flag_init(void) +{ + /* + * Combine the runtime options into malloc_slow for fast path. Called + * after processing all the options. + */ + malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0) + | (opt_junk_free ? flag_opt_junk_free : 0) + | (opt_quarantine ? flag_opt_quarantine : 0) + | (opt_zero ? flag_opt_zero : 0) + | (opt_utrace ? flag_opt_utrace : 0) + | (opt_xmalloc ? flag_opt_xmalloc : 0); + + if (config_valgrind) + malloc_slow_flags |= (in_valgrind ? flag_in_valgrind : 0); + + malloc_slow = (malloc_slow_flags != 0); +} + +static void +malloc_conf_init(void) +{ + unsigned i; + char buf[PATH_MAX + 1]; + const char *opts, *k, *v; + size_t klen, vlen; + + /* + * Automatically configure valgrind before processing options. The + * valgrind option remains in jemalloc 3.x for compatibility reasons. + */ + if (config_valgrind) { + in_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false; + if (config_fill && unlikely(in_valgrind)) { + opt_junk = "false"; + opt_junk_alloc = false; + opt_junk_free = false; + assert(!opt_zero); + opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT; + opt_redzone = true; + } + if (config_tcache && unlikely(in_valgrind)) + opt_tcache = false; + } + + for (i = 0; i < 4; i++) { + /* Get runtime configuration. */ + switch (i) { + case 0: + opts = config_malloc_conf; + break; + case 1: + if (je_malloc_conf != NULL) { + /* + * Use options that were compiled into the + * program. + */ + opts = je_malloc_conf; + } else { + /* No configuration specified. */ + buf[0] = '\0'; + opts = buf; + } + break; + case 2: { + ssize_t linklen = 0; +#ifndef _WIN32 + int saved_errno = errno; + const char *linkname = +# ifdef JEMALLOC_PREFIX + "/etc/"JEMALLOC_PREFIX"malloc.conf" +# else + "/etc/malloc.conf" +# endif + ; + + /* + * Try to use the contents of the "/etc/malloc.conf" + * symbolic link's name. + */ + linklen = readlink(linkname, buf, sizeof(buf) - 1); + if (linklen == -1) { + /* No configuration specified. */ + linklen = 0; + /* Restore errno. */ + set_errno(saved_errno); + } +#endif + buf[linklen] = '\0'; + opts = buf; + break; + } case 3: { + const char *envname = +#ifdef JEMALLOC_PREFIX + JEMALLOC_CPREFIX"MALLOC_CONF" +#else + "MALLOC_CONF" +#endif + ; + + if ((opts = jemalloc_secure_getenv(envname)) != NULL) { + /* + * Do nothing; opts is already initialized to + * the value of the MALLOC_CONF environment + * variable. + */ + } else { + /* No configuration specified. 
*/ + buf[0] = '\0'; + opts = buf; + } + break; + } default: + not_reached(); + buf[0] = '\0'; + opts = buf; + } + + while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v, + &vlen)) { +#define CONF_MATCH(n) \ + (sizeof(n)-1 == klen && strncmp(n, k, klen) == 0) +#define CONF_MATCH_VALUE(n) \ + (sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0) +#define CONF_HANDLE_BOOL(o, n, cont) \ + if (CONF_MATCH(n)) { \ + if (CONF_MATCH_VALUE("true")) \ + o = true; \ + else if (CONF_MATCH_VALUE("false")) \ + o = false; \ + else { \ + malloc_conf_error( \ + "Invalid conf value", \ + k, klen, v, vlen); \ + } \ + if (cont) \ + continue; \ + } +#define CONF_MIN_no(um, min) false +#define CONF_MIN_yes(um, min) ((um) < (min)) +#define CONF_MAX_no(um, max) false +#define CONF_MAX_yes(um, max) ((um) > (max)) +#define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip) \ + if (CONF_MATCH(n)) { \ + uintmax_t um; \ + char *end; \ + \ + set_errno(0); \ + um = malloc_strtoumax(v, &end, 0); \ + if (get_errno() != 0 || (uintptr_t)end -\ + (uintptr_t)v != vlen) { \ + malloc_conf_error( \ + "Invalid conf value", \ + k, klen, v, vlen); \ + } else if (clip) { \ + if (CONF_MIN_##check_min(um, \ + (t)(min))) \ + o = (t)(min); \ + else if (CONF_MAX_##check_max( \ + um, (t)(max))) \ + o = (t)(max); \ + else \ + o = (t)um; \ + } else { \ + if (CONF_MIN_##check_min(um, \ + (t)(min)) || \ + CONF_MAX_##check_max(um, \ + (t)(max))) { \ + malloc_conf_error( \ + "Out-of-range " \ + "conf value", \ + k, klen, v, vlen); \ + } else \ + o = (t)um; \ + } \ + continue; \ + } +#define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max, \ + clip) \ + CONF_HANDLE_T_U(unsigned, o, n, min, max, \ + check_min, check_max, clip) +#define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip) \ + CONF_HANDLE_T_U(size_t, o, n, min, max, \ + check_min, check_max, clip) +#define CONF_HANDLE_SSIZE_T(o, n, min, max) \ + if (CONF_MATCH(n)) { \ + long l; \ + char *end; \ + \ + set_errno(0); \ + l = strtol(v, &end, 0); \ + if (get_errno() != 0 || (uintptr_t)end -\ + (uintptr_t)v != vlen) { \ + malloc_conf_error( \ + "Invalid conf value", \ + k, klen, v, vlen); \ + } else if (l < (ssize_t)(min) || l > \ + (ssize_t)(max)) { \ + malloc_conf_error( \ + "Out-of-range conf value", \ + k, klen, v, vlen); \ + } else \ + o = l; \ + continue; \ + } +#define CONF_HANDLE_CHAR_P(o, n, d) \ + if (CONF_MATCH(n)) { \ + size_t cpylen = (vlen <= \ + sizeof(o)-1) ? vlen : \ + sizeof(o)-1; \ + strncpy(o, v, cpylen); \ + o[cpylen] = '\0'; \ + continue; \ + } + + CONF_HANDLE_BOOL(opt_abort, "abort", true) + /* + * Chunks always require at least one header page, as + * many as 2^(LG_SIZE_CLASS_GROUP+1) data pages (plus an + * additional page in the presence of cache-oblivious + * large), and possibly an additional page in the + * presence of redzones. In order to simplify options + * processing, use a conservative bound that + * accommodates all these constraints. + */ + CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE + + LG_SIZE_CLASS_GROUP + 1 + ((config_cache_oblivious + || config_fill) ? 
1 : 0), (sizeof(size_t) << 3) - 1, + yes, yes, true) + if (strncmp("dss", k, klen) == 0) { + int i; + bool match = false; + for (i = 0; i < dss_prec_limit; i++) { + if (strncmp(dss_prec_names[i], v, vlen) + == 0) { + if (chunk_dss_prec_set(i)) { + malloc_conf_error( + "Error setting dss", + k, klen, v, vlen); + } else { + opt_dss = + dss_prec_names[i]; + match = true; + break; + } + } + } + if (!match) { + malloc_conf_error("Invalid conf value", + k, klen, v, vlen); + } + continue; + } + CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1, + UINT_MAX, yes, no, false) + if (strncmp("purge", k, klen) == 0) { + int i; + bool match = false; + for (i = 0; i < purge_mode_limit; i++) { + if (strncmp(purge_mode_names[i], v, + vlen) == 0) { + opt_purge = (purge_mode_t)i; + match = true; + break; + } + } + if (!match) { + malloc_conf_error("Invalid conf value", + k, klen, v, vlen); + } + continue; + } + CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult", + -1, (sizeof(size_t) << 3) - 1) + CONF_HANDLE_SSIZE_T(opt_decay_time, "decay_time", -1, + NSTIME_SEC_MAX); + CONF_HANDLE_BOOL(opt_stats_print, "stats_print", true) + if (config_fill) { + if (CONF_MATCH("junk")) { + if (CONF_MATCH_VALUE("true")) { + if (config_valgrind && + unlikely(in_valgrind)) { + malloc_conf_error( + "Deallocation-time " + "junk filling cannot " + "be enabled while " + "running inside " + "Valgrind", k, klen, v, + vlen); + } else { + opt_junk = "true"; + opt_junk_alloc = true; + opt_junk_free = true; + } + } else if (CONF_MATCH_VALUE("false")) { + opt_junk = "false"; + opt_junk_alloc = opt_junk_free = + false; + } else if (CONF_MATCH_VALUE("alloc")) { + opt_junk = "alloc"; + opt_junk_alloc = true; + opt_junk_free = false; + } else if (CONF_MATCH_VALUE("free")) { + if (config_valgrind && + unlikely(in_valgrind)) { + malloc_conf_error( + "Deallocation-time " + "junk filling cannot " + "be enabled while " + "running inside " + "Valgrind", k, klen, v, + vlen); + } else { + opt_junk = "free"; + opt_junk_alloc = false; + opt_junk_free = true; + } + } else { + malloc_conf_error( + "Invalid conf value", k, + klen, v, vlen); + } + continue; + } + CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine", + 0, SIZE_T_MAX, no, no, false) + CONF_HANDLE_BOOL(opt_redzone, "redzone", true) + CONF_HANDLE_BOOL(opt_zero, "zero", true) + } + if (config_utrace) { + CONF_HANDLE_BOOL(opt_utrace, "utrace", true) + } + if (config_xmalloc) { + CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc", true) + } + if (config_tcache) { + CONF_HANDLE_BOOL(opt_tcache, "tcache", + !config_valgrind || !in_valgrind) + if (CONF_MATCH("tcache")) { + assert(config_valgrind && in_valgrind); + if (opt_tcache) { + opt_tcache = false; + malloc_conf_error( + "tcache cannot be enabled " + "while running inside Valgrind", + k, klen, v, vlen); + } + continue; + } + CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, + "lg_tcache_max", -1, + (sizeof(size_t) << 3) - 1) + } + if (config_thp) { + CONF_HANDLE_BOOL(opt_thp, "thp", true) + } + if (config_prof) { + CONF_HANDLE_BOOL(opt_prof, "prof", true) + CONF_HANDLE_CHAR_P(opt_prof_prefix, + "prof_prefix", "jeprof") + CONF_HANDLE_BOOL(opt_prof_active, "prof_active", + true) + CONF_HANDLE_BOOL(opt_prof_thread_active_init, + "prof_thread_active_init", true) + CONF_HANDLE_SIZE_T(opt_lg_prof_sample, + "lg_prof_sample", 0, (sizeof(uint64_t) << 3) + - 1, no, yes, true) + CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum", + true) + CONF_HANDLE_SSIZE_T(opt_lg_prof_interval, + "lg_prof_interval", -1, + (sizeof(uint64_t) << 3) - 1) + CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump", 
+ true) + CONF_HANDLE_BOOL(opt_prof_final, "prof_final", + true) + CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak", + true) + } + malloc_conf_error("Invalid conf pair", k, klen, v, + vlen); +#undef CONF_MATCH +#undef CONF_MATCH_VALUE +#undef CONF_HANDLE_BOOL +#undef CONF_MIN_no +#undef CONF_MIN_yes +#undef CONF_MAX_no +#undef CONF_MAX_yes +#undef CONF_HANDLE_T_U +#undef CONF_HANDLE_UNSIGNED +#undef CONF_HANDLE_SIZE_T +#undef CONF_HANDLE_SSIZE_T +#undef CONF_HANDLE_CHAR_P + } + } +} + +static bool +malloc_init_hard_needed(void) +{ + + if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state == + malloc_init_recursible)) { + /* + * Another thread initialized the allocator before this one + * acquired init_lock, or this thread is the initializing + * thread, and it is recursively allocating. + */ + return (false); + } +#ifdef JEMALLOC_THREADED_INIT + if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) { + spin_t spinner; + + /* Busy-wait until the initializing thread completes. */ + spin_init(&spinner); + do { + malloc_mutex_unlock(TSDN_NULL, &init_lock); + spin_adaptive(&spinner); + malloc_mutex_lock(TSDN_NULL, &init_lock); + } while (!malloc_initialized()); + return (false); + } +#endif + return (true); +} + +static bool +malloc_init_hard_a0_locked() +{ + + malloc_initializer = INITIALIZER; + + if (config_prof) + prof_boot0(); + malloc_conf_init(); + if (opt_stats_print) { + /* Print statistics at exit. */ + if (atexit(stats_print_atexit) != 0) { + malloc_write(": Error in atexit()\n"); + if (opt_abort) + abort(); + } + } + pages_boot(); + if (base_boot()) + return (true); + if (chunk_boot()) + return (true); + if (ctl_boot()) + return (true); + if (config_prof) + prof_boot1(); + arena_boot(); + if (config_tcache && tcache_boot(TSDN_NULL)) + return (true); + if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS)) + return (true); + /* + * Create enough scaffolding to allow recursive allocation in + * malloc_ncpus(). + */ + narenas_auto = 1; + narenas_total_set(narenas_auto); + arenas = &a0; + memset(arenas, 0, sizeof(arena_t *) * narenas_auto); + /* + * Initialize one arena here. The rest are lazily created in + * arena_choose_hard(). + */ + if (arena_init(TSDN_NULL, 0) == NULL) + return (true); + + malloc_init_state = malloc_init_a0_initialized; + + return (false); +} + +static bool +malloc_init_hard_a0(void) +{ + bool ret; + + malloc_mutex_lock(TSDN_NULL, &init_lock); + ret = malloc_init_hard_a0_locked(); + malloc_mutex_unlock(TSDN_NULL, &init_lock); + return (ret); +} + +/* Initialize data structures which may trigger recursive allocation. */ +static bool +malloc_init_hard_recursible(void) +{ + + malloc_init_state = malloc_init_recursible; + + ncpus = malloc_ncpus(); + +#if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \ + && !defined(JEMALLOC_ZONE) && !defined(_WIN32) && \ + !defined(__native_client__)) + /* LinuxThreads' pthread_atfork() allocates. */ + if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent, + jemalloc_postfork_child) != 0) { + malloc_write(": Error in pthread_atfork()\n"); + if (opt_abort) + abort(); + return (true); + } +#endif + + return (false); +} + +static bool +malloc_init_hard_finish(tsdn_t *tsdn) +{ + + if (malloc_mutex_boot()) + return (true); + + if (opt_narenas == 0) { + /* + * For SMP systems, create more than one arena per CPU by + * default. 
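+		 * The default below is four arenas per CPU (ncpus << 2);
+		 * e.g. ncpus == 8 yields opt_narenas == 32.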
+ */ + if (ncpus > 1) + opt_narenas = ncpus << 2; + else + opt_narenas = 1; + } + narenas_auto = opt_narenas; + /* + * Limit the number of arenas to the indexing range of MALLOCX_ARENA(). + */ + if (narenas_auto > MALLOCX_ARENA_MAX) { + narenas_auto = MALLOCX_ARENA_MAX; + malloc_printf(": Reducing narenas to limit (%d)\n", + narenas_auto); + } + narenas_total_set(narenas_auto); + + /* Allocate and initialize arenas. */ + arenas = (arena_t **)base_alloc(tsdn, sizeof(arena_t *) * + (MALLOCX_ARENA_MAX+1)); + if (arenas == NULL) + return (true); + /* Copy the pointer to the one arena that was already initialized. */ + arena_set(0, a0); + + malloc_init_state = malloc_init_initialized; + malloc_slow_flag_init(); + + return (false); +} + +static bool +malloc_init_hard(void) +{ + tsd_t *tsd; + +#if defined(_WIN32) && _WIN32_WINNT < 0x0600 + _init_init_lock(); +#endif + malloc_mutex_lock(TSDN_NULL, &init_lock); + if (!malloc_init_hard_needed()) { + malloc_mutex_unlock(TSDN_NULL, &init_lock); + return (false); + } + + if (malloc_init_state != malloc_init_a0_initialized && + malloc_init_hard_a0_locked()) { + malloc_mutex_unlock(TSDN_NULL, &init_lock); + return (true); + } + + malloc_mutex_unlock(TSDN_NULL, &init_lock); + /* Recursive allocation relies on functional tsd. */ + tsd = malloc_tsd_boot0(); + if (tsd == NULL) + return (true); + if (malloc_init_hard_recursible()) + return (true); + malloc_mutex_lock(tsd_tsdn(tsd), &init_lock); + + if (config_prof && prof_boot2(tsd)) { + malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock); + return (true); + } + + if (malloc_init_hard_finish(tsd_tsdn(tsd))) { + malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock); + return (true); + } + + malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock); + malloc_tsd_boot1(); + return (false); +} + +/* + * End initialization functions. + */ +/******************************************************************************/ +/* + * Begin malloc(3)-compatible functions. + */ + +static void * +ialloc_prof_sample(tsd_t *tsd, size_t usize, szind_t ind, bool zero, + prof_tctx_t *tctx, bool slow_path) +{ + void *p; + + if (tctx == NULL) + return (NULL); + if (usize <= SMALL_MAXCLASS) { + szind_t ind_large = size2index(LARGE_MINCLASS); + p = ialloc(tsd, LARGE_MINCLASS, ind_large, zero, slow_path); + if (p == NULL) + return (NULL); + arena_prof_promoted(tsd_tsdn(tsd), p, usize); + } else + p = ialloc(tsd, usize, ind, zero, slow_path); + + return (p); +} + +JEMALLOC_ALWAYS_INLINE_C void * +ialloc_prof(tsd_t *tsd, size_t usize, szind_t ind, bool zero, bool slow_path) +{ + void *p; + prof_tctx_t *tctx; + + tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true); + if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) + p = ialloc_prof_sample(tsd, usize, ind, zero, tctx, slow_path); + else + p = ialloc(tsd, usize, ind, zero, slow_path); + if (unlikely(p == NULL)) { + prof_alloc_rollback(tsd, tctx, true); + return (NULL); + } + prof_malloc(tsd_tsdn(tsd), p, usize, tctx); + + return (p); +} + +/* + * ialloc_body() is inlined so that fast and slow paths are generated separately + * with statically known slow_path. + * + * This function guarantees that *tsdn is non-NULL on success. 
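+ *
+ * Callers pass slow_path as a compile-time constant; e.g. je_malloc() calls
+ * ialloc_body(size, false, &tsdn, &usize, false) on its fast path, so the
+ * slow-path-only work compiles away there.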
+ */ +JEMALLOC_ALWAYS_INLINE_C void * +ialloc_body(size_t size, bool zero, tsdn_t **tsdn, size_t *usize, + bool slow_path) +{ + tsd_t *tsd; + szind_t ind; + + if (slow_path && unlikely(malloc_init())) { + *tsdn = NULL; + return (NULL); + } + + tsd = tsd_fetch(); + *tsdn = tsd_tsdn(tsd); + witness_assert_lockless(tsd_tsdn(tsd)); + + ind = size2index(size); + if (unlikely(ind >= NSIZES)) + return (NULL); + + if (config_stats || (config_prof && opt_prof) || (slow_path && + config_valgrind && unlikely(in_valgrind))) { + *usize = index2size(ind); + assert(*usize > 0 && *usize <= HUGE_MAXCLASS); + } + + if (config_prof && opt_prof) + return (ialloc_prof(tsd, *usize, ind, zero, slow_path)); + + return (ialloc(tsd, size, ind, zero, slow_path)); +} + +JEMALLOC_ALWAYS_INLINE_C void +ialloc_post_check(void *ret, tsdn_t *tsdn, size_t usize, const char *func, + bool update_errno, bool slow_path) +{ + + assert(!tsdn_null(tsdn) || ret == NULL); + + if (unlikely(ret == NULL)) { + if (slow_path && config_xmalloc && unlikely(opt_xmalloc)) { + malloc_printf(": Error in %s(): out of " + "memory\n", func); + abort(); + } + if (update_errno) + set_errno(ENOMEM); + } + if (config_stats && likely(ret != NULL)) { + assert(usize == isalloc(tsdn, ret, config_prof)); + *tsd_thread_allocatedp_get(tsdn_tsd(tsdn)) += usize; + } + witness_assert_lockless(tsdn); +} + +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN +void JEMALLOC_NOTHROW * +JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) +je_malloc(size_t size) +{ + void *ret; + tsdn_t *tsdn; + size_t usize JEMALLOC_CC_SILENCE_INIT(0); + + if (size == 0) + size = 1; + + if (likely(!malloc_slow)) { + ret = ialloc_body(size, false, &tsdn, &usize, false); + ialloc_post_check(ret, tsdn, usize, "malloc", true, false); + } else { + ret = ialloc_body(size, false, &tsdn, &usize, true); + ialloc_post_check(ret, tsdn, usize, "malloc", true, true); + UTRACE(0, size, ret); + JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, false); + } + + return (ret); +} + +static void * +imemalign_prof_sample(tsd_t *tsd, size_t alignment, size_t usize, + prof_tctx_t *tctx) +{ + void *p; + + if (tctx == NULL) + return (NULL); + if (usize <= SMALL_MAXCLASS) { + assert(sa2u(LARGE_MINCLASS, alignment) == LARGE_MINCLASS); + p = ipalloc(tsd, LARGE_MINCLASS, alignment, false); + if (p == NULL) + return (NULL); + arena_prof_promoted(tsd_tsdn(tsd), p, usize); + } else + p = ipalloc(tsd, usize, alignment, false); + + return (p); +} + +JEMALLOC_ALWAYS_INLINE_C void * +imemalign_prof(tsd_t *tsd, size_t alignment, size_t usize) +{ + void *p; + prof_tctx_t *tctx; + + tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true); + if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) + p = imemalign_prof_sample(tsd, alignment, usize, tctx); + else + p = ipalloc(tsd, usize, alignment, false); + if (unlikely(p == NULL)) { + prof_alloc_rollback(tsd, tctx, true); + return (NULL); + } + prof_malloc(tsd_tsdn(tsd), p, usize, tctx); + + return (p); +} + +JEMALLOC_ATTR(nonnull(1)) +static int +imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment) +{ + int ret; + tsd_t *tsd; + size_t usize; + void *result; + + assert(min_alignment != 0); + + if (unlikely(malloc_init())) { + tsd = NULL; + result = NULL; + goto label_oom; + } + tsd = tsd_fetch(); + witness_assert_lockless(tsd_tsdn(tsd)); + if (size == 0) + size = 1; + + /* Make sure that alignment is a large enough power of 2. 
*/ + if (unlikely(((alignment - 1) & alignment) != 0 + || (alignment < min_alignment))) { + if (config_xmalloc && unlikely(opt_xmalloc)) { + malloc_write(": Error allocating " + "aligned memory: invalid alignment\n"); + abort(); + } + result = NULL; + ret = EINVAL; + goto label_return; + } + + usize = sa2u(size, alignment); + if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) { + result = NULL; + goto label_oom; + } + + if (config_prof && opt_prof) + result = imemalign_prof(tsd, alignment, usize); + else + result = ipalloc(tsd, usize, alignment, false); + if (unlikely(result == NULL)) + goto label_oom; + assert(((uintptr_t)result & (alignment - 1)) == ZU(0)); + + *memptr = result; + ret = 0; +label_return: + if (config_stats && likely(result != NULL)) { + assert(usize == isalloc(tsd_tsdn(tsd), result, config_prof)); + *tsd_thread_allocatedp_get(tsd) += usize; + } + UTRACE(0, size, result); + JEMALLOC_VALGRIND_MALLOC(result != NULL, tsd_tsdn(tsd), result, usize, + false); + witness_assert_lockless(tsd_tsdn(tsd)); + return (ret); +label_oom: + assert(result == NULL); + if (config_xmalloc && unlikely(opt_xmalloc)) { + malloc_write(": Error allocating aligned memory: " + "out of memory\n"); + abort(); + } + ret = ENOMEM; + witness_assert_lockless(tsd_tsdn(tsd)); + goto label_return; +} + +JEMALLOC_EXPORT int JEMALLOC_NOTHROW +JEMALLOC_ATTR(nonnull(1)) +je_posix_memalign(void **memptr, size_t alignment, size_t size) +{ + int ret; + + ret = imemalign(memptr, alignment, size, sizeof(void *)); + + return (ret); +} + +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN +void JEMALLOC_NOTHROW * +JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2) +je_aligned_alloc(size_t alignment, size_t size) +{ + void *ret; + int err; + + if (unlikely((err = imemalign(&ret, alignment, size, 1)) != 0)) { + ret = NULL; + set_errno(err); + } + + return (ret); +} + +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN +void JEMALLOC_NOTHROW * +JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2) +je_calloc(size_t num, size_t size) +{ + void *ret; + tsdn_t *tsdn; + size_t num_size; + size_t usize JEMALLOC_CC_SILENCE_INIT(0); + + num_size = num * size; + if (unlikely(num_size == 0)) { + if (num == 0 || size == 0) + num_size = 1; + else + num_size = HUGE_MAXCLASS + 1; /* Trigger OOM. */ + /* + * Try to avoid division here. We know that it isn't possible to + * overflow during multiplication if neither operand uses any of the + * most significant half of the bits in a size_t. + */ + } else if (unlikely(((num | size) & (SIZE_T_MAX << (sizeof(size_t) << + 2))) && (num_size / size != num))) + num_size = HUGE_MAXCLASS + 1; /* size_t overflow. 
*/ + + if (likely(!malloc_slow)) { + ret = ialloc_body(num_size, true, &tsdn, &usize, false); + ialloc_post_check(ret, tsdn, usize, "calloc", true, false); + } else { + ret = ialloc_body(num_size, true, &tsdn, &usize, true); + ialloc_post_check(ret, tsdn, usize, "calloc", true, true); + UTRACE(0, num_size, ret); + JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, true); + } + + return (ret); +} + +static void * +irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize, + prof_tctx_t *tctx) +{ + void *p; + + if (tctx == NULL) + return (NULL); + if (usize <= SMALL_MAXCLASS) { + p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false); + if (p == NULL) + return (NULL); + arena_prof_promoted(tsd_tsdn(tsd), p, usize); + } else + p = iralloc(tsd, old_ptr, old_usize, usize, 0, false); + + return (p); +} + +JEMALLOC_ALWAYS_INLINE_C void * +irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize) +{ + void *p; + bool prof_active; + prof_tctx_t *old_tctx, *tctx; + + prof_active = prof_active_get_unlocked(); + old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr); + tctx = prof_alloc_prep(tsd, usize, prof_active, true); + if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) + p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx); + else + p = iralloc(tsd, old_ptr, old_usize, usize, 0, false); + if (unlikely(p == NULL)) { + prof_alloc_rollback(tsd, tctx, true); + return (NULL); + } + prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize, + old_tctx); + + return (p); +} + +JEMALLOC_INLINE_C void +ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) +{ + size_t usize; + UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0); + + witness_assert_lockless(tsd_tsdn(tsd)); + + assert(ptr != NULL); + assert(malloc_initialized() || IS_INITIALIZER); + + if (config_prof && opt_prof) { + usize = isalloc(tsd_tsdn(tsd), ptr, config_prof); + prof_free(tsd, ptr, usize); + } else if (config_stats || config_valgrind) + usize = isalloc(tsd_tsdn(tsd), ptr, config_prof); + if (config_stats) + *tsd_thread_deallocatedp_get(tsd) += usize; + + if (likely(!slow_path)) + iqalloc(tsd, ptr, tcache, false); + else { + if (config_valgrind && unlikely(in_valgrind)) + rzsize = p2rz(tsd_tsdn(tsd), ptr); + iqalloc(tsd, ptr, tcache, true); + JEMALLOC_VALGRIND_FREE(ptr, rzsize); + } +} + +JEMALLOC_INLINE_C void +isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) +{ + UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0); + + witness_assert_lockless(tsd_tsdn(tsd)); + + assert(ptr != NULL); + assert(malloc_initialized() || IS_INITIALIZER); + + if (config_prof && opt_prof) + prof_free(tsd, ptr, usize); + if (config_stats) + *tsd_thread_deallocatedp_get(tsd) += usize; + if (config_valgrind && unlikely(in_valgrind)) + rzsize = p2rz(tsd_tsdn(tsd), ptr); + isqalloc(tsd, ptr, usize, tcache, slow_path); + JEMALLOC_VALGRIND_FREE(ptr, rzsize); +} + +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN +void JEMALLOC_NOTHROW * +JEMALLOC_ALLOC_SIZE(2) +je_realloc(void *ptr, size_t size) +{ + void *ret; + tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL); + size_t usize JEMALLOC_CC_SILENCE_INIT(0); + size_t old_usize = 0; + UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0); + + if (unlikely(size == 0)) { + if (ptr != NULL) { + tsd_t *tsd; + + /* realloc(ptr, 0) is equivalent to free(ptr). 
*/ + UTRACE(ptr, 0, 0); + tsd = tsd_fetch(); + ifree(tsd, ptr, tcache_get(tsd, false), true); + return (NULL); + } + size = 1; + } + + if (likely(ptr != NULL)) { + tsd_t *tsd; + + assert(malloc_initialized() || IS_INITIALIZER); + malloc_thread_init(); + tsd = tsd_fetch(); + + witness_assert_lockless(tsd_tsdn(tsd)); + + old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof); + if (config_valgrind && unlikely(in_valgrind)) { + old_rzsize = config_prof ? p2rz(tsd_tsdn(tsd), ptr) : + u2rz(old_usize); + } + + if (config_prof && opt_prof) { + usize = s2u(size); + ret = unlikely(usize == 0 || usize > HUGE_MAXCLASS) ? + NULL : irealloc_prof(tsd, ptr, old_usize, usize); + } else { + if (config_stats || (config_valgrind && + unlikely(in_valgrind))) + usize = s2u(size); + ret = iralloc(tsd, ptr, old_usize, size, 0, false); + } + tsdn = tsd_tsdn(tsd); + } else { + /* realloc(NULL, size) is equivalent to malloc(size). */ + if (likely(!malloc_slow)) + ret = ialloc_body(size, false, &tsdn, &usize, false); + else + ret = ialloc_body(size, false, &tsdn, &usize, true); + assert(!tsdn_null(tsdn) || ret == NULL); + } + + if (unlikely(ret == NULL)) { + if (config_xmalloc && unlikely(opt_xmalloc)) { + malloc_write(": Error in realloc(): " + "out of memory\n"); + abort(); + } + set_errno(ENOMEM); + } + if (config_stats && likely(ret != NULL)) { + tsd_t *tsd; + + assert(usize == isalloc(tsdn, ret, config_prof)); + tsd = tsdn_tsd(tsdn); + *tsd_thread_allocatedp_get(tsd) += usize; + *tsd_thread_deallocatedp_get(tsd) += old_usize; + } + UTRACE(ptr, size, ret); + JEMALLOC_VALGRIND_REALLOC(maybe, tsdn, ret, usize, maybe, ptr, + old_usize, old_rzsize, maybe, false); + witness_assert_lockless(tsdn); + return (ret); +} + +JEMALLOC_EXPORT void JEMALLOC_NOTHROW +je_free(void *ptr) +{ + + UTRACE(ptr, 0, 0); + if (likely(ptr != NULL)) { + tsd_t *tsd = tsd_fetch(); + witness_assert_lockless(tsd_tsdn(tsd)); + if (likely(!malloc_slow)) + ifree(tsd, ptr, tcache_get(tsd, false), false); + else + ifree(tsd, ptr, tcache_get(tsd, false), true); + witness_assert_lockless(tsd_tsdn(tsd)); + } +} + +/* + * End malloc(3)-compatible functions. + */ +/******************************************************************************/ +/* + * Begin non-standard override functions. + */ + +#ifdef JEMALLOC_OVERRIDE_MEMALIGN +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN +void JEMALLOC_NOTHROW * +JEMALLOC_ATTR(malloc) +je_memalign(size_t alignment, size_t size) +{ + void *ret JEMALLOC_CC_SILENCE_INIT(NULL); + if (unlikely(imemalign(&ret, alignment, size, 1) != 0)) + ret = NULL; + return (ret); +} +#endif + +#ifdef JEMALLOC_OVERRIDE_VALLOC +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN +void JEMALLOC_NOTHROW * +JEMALLOC_ATTR(malloc) +je_valloc(size_t size) +{ + void *ret JEMALLOC_CC_SILENCE_INIT(NULL); + if (unlikely(imemalign(&ret, PAGE, size, 1) != 0)) + ret = NULL; + return (ret); +} +#endif + +/* + * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has + * #define je_malloc malloc + */ +#define malloc_is_malloc 1 +#define is_malloc_(a) malloc_is_ ## a +#define is_malloc(a) is_malloc_(a) + +#if ((is_malloc(je_malloc) == 1) && defined(JEMALLOC_GLIBC_MALLOC_HOOK)) +/* + * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible + * to inconsistently reference libc's malloc(3)-compatible functions + * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541). + * + * These definitions interpose hooks in glibc. 
The functions are actually + * passed an extra argument for the caller return address, which will be + * ignored. + */ +JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free; +JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc; +JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc; +# ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK +JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) = + je_memalign; +# endif + +#ifdef CPU_COUNT +/* + * To enable static linking with glibc, the libc specific malloc interface must + * be implemented also, so none of glibc's malloc.o functions are added to the + * link. + */ +#define ALIAS(je_fn) __attribute__((alias (#je_fn), used)) +/* To force macro expansion of je_ prefix before stringification. */ +#define PREALIAS(je_fn) ALIAS(je_fn) +void *__libc_malloc(size_t size) PREALIAS(je_malloc); +void __libc_free(void* ptr) PREALIAS(je_free); +void *__libc_realloc(void* ptr, size_t size) PREALIAS(je_realloc); +void *__libc_calloc(size_t n, size_t size) PREALIAS(je_calloc); +void *__libc_memalign(size_t align, size_t s) PREALIAS(je_memalign); +void *__libc_valloc(size_t size) PREALIAS(je_valloc); +int __posix_memalign(void** r, size_t a, size_t s) + PREALIAS(je_posix_memalign); +#undef PREALIAS +#undef ALIAS + +#endif + +#endif + +/* + * End non-standard override functions. + */ +/******************************************************************************/ +/* + * Begin non-standard functions. + */ + +JEMALLOC_ALWAYS_INLINE_C bool +imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize, + size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena) +{ + + if ((flags & MALLOCX_LG_ALIGN_MASK) == 0) { + *alignment = 0; + *usize = s2u(size); + } else { + *alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags); + *usize = sa2u(size, *alignment); + } + if (unlikely(*usize == 0 || *usize > HUGE_MAXCLASS)) + return (true); + *zero = MALLOCX_ZERO_GET(flags); + if ((flags & MALLOCX_TCACHE_MASK) != 0) { + if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) + *tcache = NULL; + else + *tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); + } else + *tcache = tcache_get(tsd, true); + if ((flags & MALLOCX_ARENA_MASK) != 0) { + unsigned arena_ind = MALLOCX_ARENA_GET(flags); + *arena = arena_get(tsd_tsdn(tsd), arena_ind, true); + if (unlikely(*arena == NULL)) + return (true); + } else + *arena = NULL; + return (false); +} + +JEMALLOC_ALWAYS_INLINE_C void * +imallocx_flags(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, + tcache_t *tcache, arena_t *arena, bool slow_path) +{ + szind_t ind; + + if (unlikely(alignment != 0)) + return (ipalloct(tsdn, usize, alignment, zero, tcache, arena)); + ind = size2index(usize); + assert(ind < NSIZES); + return (iallocztm(tsdn, usize, ind, zero, tcache, false, arena, + slow_path)); +} + +static void * +imallocx_prof_sample(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, + tcache_t *tcache, arena_t *arena, bool slow_path) +{ + void *p; + + if (usize <= SMALL_MAXCLASS) { + assert(((alignment == 0) ? 
s2u(LARGE_MINCLASS) : + sa2u(LARGE_MINCLASS, alignment)) == LARGE_MINCLASS); + p = imallocx_flags(tsdn, LARGE_MINCLASS, alignment, zero, + tcache, arena, slow_path); + if (p == NULL) + return (NULL); + arena_prof_promoted(tsdn, p, usize); + } else { + p = imallocx_flags(tsdn, usize, alignment, zero, tcache, arena, + slow_path); + } + + return (p); +} + +JEMALLOC_ALWAYS_INLINE_C void * +imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize, bool slow_path) +{ + void *p; + size_t alignment; + bool zero; + tcache_t *tcache; + arena_t *arena; + prof_tctx_t *tctx; + + if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment, + &zero, &tcache, &arena))) + return (NULL); + tctx = prof_alloc_prep(tsd, *usize, prof_active_get_unlocked(), true); + if (likely((uintptr_t)tctx == (uintptr_t)1U)) { + p = imallocx_flags(tsd_tsdn(tsd), *usize, alignment, zero, + tcache, arena, slow_path); + } else if ((uintptr_t)tctx > (uintptr_t)1U) { + p = imallocx_prof_sample(tsd_tsdn(tsd), *usize, alignment, zero, + tcache, arena, slow_path); + } else + p = NULL; + if (unlikely(p == NULL)) { + prof_alloc_rollback(tsd, tctx, true); + return (NULL); + } + prof_malloc(tsd_tsdn(tsd), p, *usize, tctx); + + assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0)); + return (p); +} + +JEMALLOC_ALWAYS_INLINE_C void * +imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize, + bool slow_path) +{ + void *p; + size_t alignment; + bool zero; + tcache_t *tcache; + arena_t *arena; + + if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment, + &zero, &tcache, &arena))) + return (NULL); + p = imallocx_flags(tsd_tsdn(tsd), *usize, alignment, zero, tcache, + arena, slow_path); + assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0)); + return (p); +} + +/* This function guarantees that *tsdn is non-NULL on success. 
*/ +JEMALLOC_ALWAYS_INLINE_C void * +imallocx_body(size_t size, int flags, tsdn_t **tsdn, size_t *usize, + bool slow_path) +{ + tsd_t *tsd; + + if (slow_path && unlikely(malloc_init())) { + *tsdn = NULL; + return (NULL); + } + + tsd = tsd_fetch(); + *tsdn = tsd_tsdn(tsd); + witness_assert_lockless(tsd_tsdn(tsd)); + + if (likely(flags == 0)) { + szind_t ind = size2index(size); + if (unlikely(ind >= NSIZES)) + return (NULL); + if (config_stats || (config_prof && opt_prof) || (slow_path && + config_valgrind && unlikely(in_valgrind))) { + *usize = index2size(ind); + assert(*usize > 0 && *usize <= HUGE_MAXCLASS); + } + + if (config_prof && opt_prof) { + return (ialloc_prof(tsd, *usize, ind, false, + slow_path)); + } + + return (ialloc(tsd, size, ind, false, slow_path)); + } + + if (config_prof && opt_prof) + return (imallocx_prof(tsd, size, flags, usize, slow_path)); + + return (imallocx_no_prof(tsd, size, flags, usize, slow_path)); +} + +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN +void JEMALLOC_NOTHROW * +JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) +je_mallocx(size_t size, int flags) +{ + tsdn_t *tsdn; + void *p; + size_t usize; + + assert(size != 0); + + if (likely(!malloc_slow)) { + p = imallocx_body(size, flags, &tsdn, &usize, false); + ialloc_post_check(p, tsdn, usize, "mallocx", false, false); + } else { + p = imallocx_body(size, flags, &tsdn, &usize, true); + ialloc_post_check(p, tsdn, usize, "mallocx", false, true); + UTRACE(0, size, p); + JEMALLOC_VALGRIND_MALLOC(p != NULL, tsdn, p, usize, + MALLOCX_ZERO_GET(flags)); + } + + return (p); +} + +static void * +irallocx_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, + size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena, + prof_tctx_t *tctx) +{ + void *p; + + if (tctx == NULL) + return (NULL); + if (usize <= SMALL_MAXCLASS) { + p = iralloct(tsd, old_ptr, old_usize, LARGE_MINCLASS, alignment, + zero, tcache, arena); + if (p == NULL) + return (NULL); + arena_prof_promoted(tsd_tsdn(tsd), p, usize); + } else { + p = iralloct(tsd, old_ptr, old_usize, usize, alignment, zero, + tcache, arena); + } + + return (p); +} + +JEMALLOC_ALWAYS_INLINE_C void * +irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size, + size_t alignment, size_t *usize, bool zero, tcache_t *tcache, + arena_t *arena) +{ + void *p; + bool prof_active; + prof_tctx_t *old_tctx, *tctx; + + prof_active = prof_active_get_unlocked(); + old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr); + tctx = prof_alloc_prep(tsd, *usize, prof_active, false); + if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { + p = irallocx_prof_sample(tsd, old_ptr, old_usize, *usize, + alignment, zero, tcache, arena, tctx); + } else { + p = iralloct(tsd, old_ptr, old_usize, size, alignment, zero, + tcache, arena); + } + if (unlikely(p == NULL)) { + prof_alloc_rollback(tsd, tctx, false); + return (NULL); + } + + if (p == old_ptr && alignment != 0) { + /* + * The allocation did not move, so it is possible that the size + * class is smaller than would guarantee the requested + * alignment, and that the alignment constraint was + * serendipitously satisfied. Additionally, old_usize may not + * be the same as the current usize because of in-place large + * reallocation. Therefore, query the actual value of usize. 
+ */ + *usize = isalloc(tsd_tsdn(tsd), p, config_prof); + } + prof_realloc(tsd, p, *usize, tctx, prof_active, false, old_ptr, + old_usize, old_tctx); + + return (p); +} + +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN +void JEMALLOC_NOTHROW * +JEMALLOC_ALLOC_SIZE(2) +je_rallocx(void *ptr, size_t size, int flags) +{ + void *p; + tsd_t *tsd; + size_t usize; + size_t old_usize; + UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0); + size_t alignment = MALLOCX_ALIGN_GET(flags); + bool zero = flags & MALLOCX_ZERO; + arena_t *arena; + tcache_t *tcache; + + assert(ptr != NULL); + assert(size != 0); + assert(malloc_initialized() || IS_INITIALIZER); + malloc_thread_init(); + tsd = tsd_fetch(); + witness_assert_lockless(tsd_tsdn(tsd)); + + if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) { + unsigned arena_ind = MALLOCX_ARENA_GET(flags); + arena = arena_get(tsd_tsdn(tsd), arena_ind, true); + if (unlikely(arena == NULL)) + goto label_oom; + } else + arena = NULL; + + if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { + if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) + tcache = NULL; + else + tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); + } else + tcache = tcache_get(tsd, true); + + old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof); + if (config_valgrind && unlikely(in_valgrind)) + old_rzsize = u2rz(old_usize); + + if (config_prof && opt_prof) { + usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment); + if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) + goto label_oom; + p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize, + zero, tcache, arena); + if (unlikely(p == NULL)) + goto label_oom; + } else { + p = iralloct(tsd, ptr, old_usize, size, alignment, zero, + tcache, arena); + if (unlikely(p == NULL)) + goto label_oom; + if (config_stats || (config_valgrind && unlikely(in_valgrind))) + usize = isalloc(tsd_tsdn(tsd), p, config_prof); + } + assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0)); + + if (config_stats) { + *tsd_thread_allocatedp_get(tsd) += usize; + *tsd_thread_deallocatedp_get(tsd) += old_usize; + } + UTRACE(ptr, size, p); + JEMALLOC_VALGRIND_REALLOC(maybe, tsd_tsdn(tsd), p, usize, no, ptr, + old_usize, old_rzsize, no, zero); + witness_assert_lockless(tsd_tsdn(tsd)); + return (p); +label_oom: + if (config_xmalloc && unlikely(opt_xmalloc)) { + malloc_write(": Error in rallocx(): out of memory\n"); + abort(); + } + UTRACE(ptr, size, 0); + witness_assert_lockless(tsd_tsdn(tsd)); + return (NULL); +} + +JEMALLOC_ALWAYS_INLINE_C size_t +ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size, + size_t extra, size_t alignment, bool zero) +{ + size_t usize; + + if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero)) + return (old_usize); + usize = isalloc(tsdn, ptr, config_prof); + + return (usize); +} + +static size_t +ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size, + size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx) +{ + size_t usize; + + if (tctx == NULL) + return (old_usize); + usize = ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment, + zero); + + return (usize); +} + +JEMALLOC_ALWAYS_INLINE_C size_t +ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size, + size_t extra, size_t alignment, bool zero) +{ + size_t usize_max, usize; + bool prof_active; + prof_tctx_t *old_tctx, *tctx; + + prof_active = prof_active_get_unlocked(); + old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr); + /* + * usize isn't knowable before ixalloc() returns when 
extra is non-zero. + * Therefore, compute its maximum possible value and use that in + * prof_alloc_prep() to decide whether to capture a backtrace. + * prof_realloc() will use the actual usize to decide whether to sample. + */ + if (alignment == 0) { + usize_max = s2u(size+extra); + assert(usize_max > 0 && usize_max <= HUGE_MAXCLASS); + } else { + usize_max = sa2u(size+extra, alignment); + if (unlikely(usize_max == 0 || usize_max > HUGE_MAXCLASS)) { + /* + * usize_max is out of range, and chances are that + * allocation will fail, but use the maximum possible + * value and carry on with prof_alloc_prep(), just in + * case allocation succeeds. + */ + usize_max = HUGE_MAXCLASS; + } + } + tctx = prof_alloc_prep(tsd, usize_max, prof_active, false); + + if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { + usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize, + size, extra, alignment, zero, tctx); + } else { + usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size, + extra, alignment, zero); + } + if (usize == old_usize) { + prof_alloc_rollback(tsd, tctx, false); + return (usize); + } + prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize, + old_tctx); + + return (usize); +} + +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW +je_xallocx(void *ptr, size_t size, size_t extra, int flags) +{ + tsd_t *tsd; + size_t usize, old_usize; + UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0); + size_t alignment = MALLOCX_ALIGN_GET(flags); + bool zero = flags & MALLOCX_ZERO; + + assert(ptr != NULL); + assert(size != 0); + assert(SIZE_T_MAX - size >= extra); + assert(malloc_initialized() || IS_INITIALIZER); + malloc_thread_init(); + tsd = tsd_fetch(); + witness_assert_lockless(tsd_tsdn(tsd)); + + old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof); + + /* + * The API explicitly absolves itself of protecting against (size + + * extra) numerical overflow, but we may need to clamp extra to avoid + * exceeding HUGE_MAXCLASS. + * + * Ordinarily, size limit checking is handled deeper down, but here we + * have to check as part of (size + extra) clamping, since we need the + * clamped value in the above helper functions. 
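+	 * Concretely: a size that already exceeds HUGE_MAXCLASS is reported
+	 * as not resized, and otherwise extra is clamped so that size + extra
+	 * cannot exceed HUGE_MAXCLASS.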
+ */ + if (unlikely(size > HUGE_MAXCLASS)) { + usize = old_usize; + goto label_not_resized; + } + if (unlikely(HUGE_MAXCLASS - size < extra)) + extra = HUGE_MAXCLASS - size; + + if (config_valgrind && unlikely(in_valgrind)) + old_rzsize = u2rz(old_usize); + + if (config_prof && opt_prof) { + usize = ixallocx_prof(tsd, ptr, old_usize, size, extra, + alignment, zero); + } else { + usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size, + extra, alignment, zero); + } + if (unlikely(usize == old_usize)) + goto label_not_resized; + + if (config_stats) { + *tsd_thread_allocatedp_get(tsd) += usize; + *tsd_thread_deallocatedp_get(tsd) += old_usize; + } + JEMALLOC_VALGRIND_REALLOC(no, tsd_tsdn(tsd), ptr, usize, no, ptr, + old_usize, old_rzsize, no, zero); +label_not_resized: + UTRACE(ptr, size, ptr); + witness_assert_lockless(tsd_tsdn(tsd)); + return (usize); +} + +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW +JEMALLOC_ATTR(pure) +je_sallocx(const void *ptr, int flags) +{ + size_t usize; + tsdn_t *tsdn; + + assert(malloc_initialized() || IS_INITIALIZER); + malloc_thread_init(); + + tsdn = tsdn_fetch(); + witness_assert_lockless(tsdn); + + if (config_ivsalloc) + usize = ivsalloc(tsdn, ptr, config_prof); + else + usize = isalloc(tsdn, ptr, config_prof); + + witness_assert_lockless(tsdn); + return (usize); +} + +JEMALLOC_EXPORT void JEMALLOC_NOTHROW +je_dallocx(void *ptr, int flags) +{ + tsd_t *tsd; + tcache_t *tcache; + + assert(ptr != NULL); + assert(malloc_initialized() || IS_INITIALIZER); + + tsd = tsd_fetch(); + witness_assert_lockless(tsd_tsdn(tsd)); + if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { + if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) + tcache = NULL; + else + tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); + } else + tcache = tcache_get(tsd, false); + + UTRACE(ptr, 0, 0); + if (likely(!malloc_slow)) + ifree(tsd, ptr, tcache, false); + else + ifree(tsd, ptr, tcache, true); + witness_assert_lockless(tsd_tsdn(tsd)); +} + +JEMALLOC_ALWAYS_INLINE_C size_t +inallocx(tsdn_t *tsdn, size_t size, int flags) +{ + size_t usize; + + witness_assert_lockless(tsdn); + + if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0)) + usize = s2u(size); + else + usize = sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags)); + witness_assert_lockless(tsdn); + return (usize); +} + +JEMALLOC_EXPORT void JEMALLOC_NOTHROW +je_sdallocx(void *ptr, size_t size, int flags) +{ + tsd_t *tsd; + tcache_t *tcache; + size_t usize; + + assert(ptr != NULL); + assert(malloc_initialized() || IS_INITIALIZER); + tsd = tsd_fetch(); + usize = inallocx(tsd_tsdn(tsd), size, flags); + assert(usize == isalloc(tsd_tsdn(tsd), ptr, config_prof)); + + witness_assert_lockless(tsd_tsdn(tsd)); + if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { + if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) + tcache = NULL; + else + tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); + } else + tcache = tcache_get(tsd, false); + + UTRACE(ptr, 0, 0); + if (likely(!malloc_slow)) + isfree(tsd, ptr, usize, tcache, false); + else + isfree(tsd, ptr, usize, tcache, true); + witness_assert_lockless(tsd_tsdn(tsd)); +} + +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW +JEMALLOC_ATTR(pure) +je_nallocx(size_t size, int flags) +{ + size_t usize; + tsdn_t *tsdn; + + assert(size != 0); + + if (unlikely(malloc_init())) + return (0); + + tsdn = tsdn_fetch(); + witness_assert_lockless(tsdn); + + usize = inallocx(tsdn, size, flags); + if (unlikely(usize > HUGE_MAXCLASS)) + return (0); + + witness_assert_lockless(tsdn); + return (usize); +} + 
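+/*
+ * The mallctl*() functions below implement the control namespace documented
+ * in jemalloc(3).  A minimal caller-side sketch, assuming the je_ prefix is
+ * mapped to the standard public names:
+ *
+ *   uint64_t epoch = 1;
+ *   size_t sz = sizeof(epoch);
+ *   mallctl("epoch", &epoch, &sz, &epoch, sz);   (refresh cached statistics)
+ *
+ *   size_t allocated;
+ *   sz = sizeof(allocated);
+ *   mallctl("stats.allocated", &allocated, &sz, NULL, 0);
+ */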
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW +je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp, + size_t newlen) +{ + int ret; + tsd_t *tsd; + + if (unlikely(malloc_init())) + return (EAGAIN); + + tsd = tsd_fetch(); + witness_assert_lockless(tsd_tsdn(tsd)); + ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen); + witness_assert_lockless(tsd_tsdn(tsd)); + return (ret); +} + +JEMALLOC_EXPORT int JEMALLOC_NOTHROW +je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) +{ + int ret; + tsdn_t *tsdn; + + if (unlikely(malloc_init())) + return (EAGAIN); + + tsdn = tsdn_fetch(); + witness_assert_lockless(tsdn); + ret = ctl_nametomib(tsdn, name, mibp, miblenp); + witness_assert_lockless(tsdn); + return (ret); +} + +JEMALLOC_EXPORT int JEMALLOC_NOTHROW +je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, + void *newp, size_t newlen) +{ + int ret; + tsd_t *tsd; + + if (unlikely(malloc_init())) + return (EAGAIN); + + tsd = tsd_fetch(); + witness_assert_lockless(tsd_tsdn(tsd)); + ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen); + witness_assert_lockless(tsd_tsdn(tsd)); + return (ret); +} + +JEMALLOC_EXPORT void JEMALLOC_NOTHROW +je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque, + const char *opts) +{ + tsdn_t *tsdn; + + tsdn = tsdn_fetch(); + witness_assert_lockless(tsdn); + stats_print(write_cb, cbopaque, opts); + witness_assert_lockless(tsdn); +} + +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW +je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) +{ + size_t ret; + tsdn_t *tsdn; + + assert(malloc_initialized() || IS_INITIALIZER); + malloc_thread_init(); + + tsdn = tsdn_fetch(); + witness_assert_lockless(tsdn); + + if (config_ivsalloc) + ret = ivsalloc(tsdn, ptr, config_prof); + else + ret = (ptr == NULL) ? 0 : isalloc(tsdn, ptr, config_prof); + + witness_assert_lockless(tsdn); + return (ret); +} + +/* + * End non-standard functions. + */ +/******************************************************************************/ +/* + * The following functions are used by threading libraries for protection of + * malloc during fork(). + */ + +/* + * If an application creates a thread before doing any allocation in the main + * thread, then calls fork(2) in the main thread followed by memory allocation + * in the child process, a race can occur that results in deadlock within the + * child: the main thread may have forked while the created thread had + * partially initialized the allocator. Ordinarily jemalloc prevents + * fork/malloc races via the following functions it registers during + * initialization using pthread_atfork(), but of course that does no good if + * the allocator isn't fully initialized at fork time. The following library + * constructor is a partial solution to this problem. It may still be possible + * to trigger the deadlock described above, but doing so would involve forking + * via a library constructor that runs before jemalloc's runs. + */ +#ifndef JEMALLOC_JET +JEMALLOC_ATTR(constructor) +static void +jemalloc_constructor(void) +{ + + malloc_init(); +} +#endif + +#ifndef JEMALLOC_MUTEX_INIT_CB +void +jemalloc_prefork(void) +#else +JEMALLOC_EXPORT void +_malloc_prefork(void) +#endif +{ + tsd_t *tsd; + unsigned i, j, narenas; + arena_t *arena; + +#ifdef JEMALLOC_MUTEX_INIT_CB + if (!malloc_initialized()) + return; +#endif + assert(malloc_initialized()); + + tsd = tsd_fetch(); + + narenas = narenas_total_get(); + + witness_prefork(tsd); + /* Acquire all mutexes in a safe order. 
*/ + ctl_prefork(tsd_tsdn(tsd)); + tcache_prefork(tsd_tsdn(tsd)); + malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock); + prof_prefork0(tsd_tsdn(tsd)); + for (i = 0; i < 3; i++) { + for (j = 0; j < narenas; j++) { + if ((arena = arena_get(tsd_tsdn(tsd), j, false)) != + NULL) { + switch (i) { + case 0: + arena_prefork0(tsd_tsdn(tsd), arena); + break; + case 1: + arena_prefork1(tsd_tsdn(tsd), arena); + break; + case 2: + arena_prefork2(tsd_tsdn(tsd), arena); + break; + default: not_reached(); + } + } + } + } + base_prefork(tsd_tsdn(tsd)); + for (i = 0; i < narenas; i++) { + if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) + arena_prefork3(tsd_tsdn(tsd), arena); + } + prof_prefork1(tsd_tsdn(tsd)); +} + +#ifndef JEMALLOC_MUTEX_INIT_CB +void +jemalloc_postfork_parent(void) +#else +JEMALLOC_EXPORT void +_malloc_postfork(void) +#endif +{ + tsd_t *tsd; + unsigned i, narenas; + +#ifdef JEMALLOC_MUTEX_INIT_CB + if (!malloc_initialized()) + return; +#endif + assert(malloc_initialized()); + + tsd = tsd_fetch(); + + witness_postfork_parent(tsd); + /* Release all mutexes, now that fork() has completed. */ + base_postfork_parent(tsd_tsdn(tsd)); + for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { + arena_t *arena; + + if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) + arena_postfork_parent(tsd_tsdn(tsd), arena); + } + prof_postfork_parent(tsd_tsdn(tsd)); + malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock); + tcache_postfork_parent(tsd_tsdn(tsd)); + ctl_postfork_parent(tsd_tsdn(tsd)); +} + +void +jemalloc_postfork_child(void) +{ + tsd_t *tsd; + unsigned i, narenas; + + assert(malloc_initialized()); + + tsd = tsd_fetch(); + + witness_postfork_child(tsd); + /* Release all mutexes, now that fork() has completed. */ + base_postfork_child(tsd_tsdn(tsd)); + for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { + arena_t *arena; + + if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) + arena_postfork_child(tsd_tsdn(tsd), arena); + } + prof_postfork_child(tsd_tsdn(tsd)); + malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock); + tcache_postfork_child(tsd_tsdn(tsd)); + ctl_postfork_child(tsd_tsdn(tsd)); +} + +/******************************************************************************/ diff --git a/sources/jemalloc/src/je_mb.c b/sources/jemalloc/src/je_mb.c new file mode 100755 index 00000000..dc2c0a25 --- /dev/null +++ b/sources/jemalloc/src/je_mb.c @@ -0,0 +1,2 @@ +#define JEMALLOC_MB_C_ +#include "jemalloc/internal/jemalloc_internal.h" diff --git a/sources/jemalloc/src/je_mutex.c b/sources/jemalloc/src/je_mutex.c new file mode 100755 index 00000000..6333e73d --- /dev/null +++ b/sources/jemalloc/src/je_mutex.c @@ -0,0 +1,158 @@ +#define JEMALLOC_MUTEX_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32) +#include +#endif + +#ifndef _CRT_SPINCOUNT +#define _CRT_SPINCOUNT 4000 +#endif + +/******************************************************************************/ +/* Data. */ + +#ifdef JEMALLOC_LAZY_LOCK +bool isthreaded = false; +#endif +#ifdef JEMALLOC_MUTEX_INIT_CB +static bool postpone_init = true; +static malloc_mutex_t *postponed_mutexes = NULL; +#endif + +#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32) +static void pthread_create_once(void); +#endif + +/******************************************************************************/ +/* + * We intercept pthread_create() calls in order to toggle isthreaded if the + * process goes multi-threaded. 
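+ * The wrapper below resolves the real pthread_create() once via
+ * dlsym(RTLD_NEXT, "pthread_create") (declared in <dlfcn.h>), sets
+ * isthreaded, and then chains to the underlying implementation.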
+ */ + +#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32) +static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *, + void *(*)(void *), void *__restrict); + +static void +pthread_create_once(void) +{ + + pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create"); + if (pthread_create_fptr == NULL) { + malloc_write(": Error in dlsym(RTLD_NEXT, " + "\"pthread_create\")\n"); + abort(); + } + + isthreaded = true; +} + +JEMALLOC_EXPORT int +pthread_create(pthread_t *__restrict thread, + const pthread_attr_t *__restrict attr, void *(*start_routine)(void *), + void *__restrict arg) +{ + static pthread_once_t once_control = PTHREAD_ONCE_INIT; + + pthread_once(&once_control, pthread_create_once); + + return (pthread_create_fptr(thread, attr, start_routine, arg)); +} +#endif + +/******************************************************************************/ + +#ifdef JEMALLOC_MUTEX_INIT_CB +JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex, + void *(calloc_cb)(size_t, size_t)); +#endif + +bool +malloc_mutex_init(malloc_mutex_t *mutex, const char *name, witness_rank_t rank) +{ + +#ifdef _WIN32 +# if _WIN32_WINNT >= 0x0600 + InitializeSRWLock(&mutex->lock); +# else + if (!InitializeCriticalSectionAndSpinCount(&mutex->lock, + _CRT_SPINCOUNT)) + return (true); +# endif +#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) + mutex->lock = OS_UNFAIR_LOCK_INIT; +#elif (defined(JEMALLOC_OSSPIN)) + mutex->lock = 0; +#elif (defined(JEMALLOC_MUTEX_INIT_CB)) + if (postpone_init) { + mutex->postponed_next = postponed_mutexes; + postponed_mutexes = mutex; + } else { + if (_pthread_mutex_init_calloc_cb(&mutex->lock, + bootstrap_calloc) != 0) + return (true); + } +#else + pthread_mutexattr_t attr; + + if (pthread_mutexattr_init(&attr) != 0) + return (true); + pthread_mutexattr_settype(&attr, MALLOC_MUTEX_TYPE); + if (pthread_mutex_init(&mutex->lock, &attr) != 0) { + pthread_mutexattr_destroy(&attr); + return (true); + } + pthread_mutexattr_destroy(&attr); +#endif + if (config_debug) + witness_init(&mutex->witness, name, rank, NULL); + return (false); +} + +void +malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex) +{ + + malloc_mutex_lock(tsdn, mutex); +} + +void +malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex) +{ + + malloc_mutex_unlock(tsdn, mutex); +} + +void +malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex) +{ + +#ifdef JEMALLOC_MUTEX_INIT_CB + malloc_mutex_unlock(tsdn, mutex); +#else + if (malloc_mutex_init(mutex, mutex->witness.name, + mutex->witness.rank)) { + malloc_printf(": Error re-initializing mutex in " + "child\n"); + if (opt_abort) + abort(); + } +#endif +} + +bool +malloc_mutex_boot(void) +{ + +#ifdef JEMALLOC_MUTEX_INIT_CB + postpone_init = false; + while (postponed_mutexes != NULL) { + if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock, + bootstrap_calloc) != 0) + return (true); + postponed_mutexes = postponed_mutexes->postponed_next; + } +#endif + return (false); +} diff --git a/sources/jemalloc/src/je_nstime.c b/sources/jemalloc/src/je_nstime.c new file mode 100755 index 00000000..0948e29f --- /dev/null +++ b/sources/jemalloc/src/je_nstime.c @@ -0,0 +1,194 @@ +#include "jemalloc/internal/jemalloc_internal.h" + +#define BILLION UINT64_C(1000000000) + +void +nstime_init(nstime_t *time, uint64_t ns) +{ + + time->ns = ns; +} + +void +nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec) +{ + + time->ns = sec * BILLION + nsec; +} + +uint64_t +nstime_ns(const nstime_t *time) +{ + + return (time->ns); +} + +uint64_t 
+nstime_sec(const nstime_t *time) +{ + + return (time->ns / BILLION); +} + +uint64_t +nstime_nsec(const nstime_t *time) +{ + + return (time->ns % BILLION); +} + +void +nstime_copy(nstime_t *time, const nstime_t *source) +{ + + *time = *source; +} + +int +nstime_compare(const nstime_t *a, const nstime_t *b) +{ + + return ((a->ns > b->ns) - (a->ns < b->ns)); +} + +void +nstime_add(nstime_t *time, const nstime_t *addend) +{ + + assert(UINT64_MAX - time->ns >= addend->ns); + + time->ns += addend->ns; +} + +void +nstime_subtract(nstime_t *time, const nstime_t *subtrahend) +{ + + assert(nstime_compare(time, subtrahend) >= 0); + + time->ns -= subtrahend->ns; +} + +void +nstime_imultiply(nstime_t *time, uint64_t multiplier) +{ + + assert((((time->ns | multiplier) & (UINT64_MAX << (sizeof(uint64_t) << + 2))) == 0) || ((time->ns * multiplier) / multiplier == time->ns)); + + time->ns *= multiplier; +} + +void +nstime_idivide(nstime_t *time, uint64_t divisor) +{ + + assert(divisor != 0); + + time->ns /= divisor; +} + +uint64_t +nstime_divide(const nstime_t *time, const nstime_t *divisor) +{ + + assert(divisor->ns != 0); + + return (time->ns / divisor->ns); +} + +#ifdef _WIN32 +# define NSTIME_MONOTONIC true +static void +nstime_get(nstime_t *time) +{ + FILETIME ft; + uint64_t ticks_100ns; + + GetSystemTimeAsFileTime(&ft); + ticks_100ns = (((uint64_t)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; + + nstime_init(time, ticks_100ns * 100); +} +#elif JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE +# define NSTIME_MONOTONIC true +static void +nstime_get(nstime_t *time) +{ + struct timespec ts; + + clock_gettime(CLOCK_MONOTONIC_COARSE, &ts); + nstime_init2(time, ts.tv_sec, ts.tv_nsec); +} +#elif JEMALLOC_HAVE_CLOCK_MONOTONIC +# define NSTIME_MONOTONIC true +static void +nstime_get(nstime_t *time) +{ + struct timespec ts; + + clock_gettime(CLOCK_MONOTONIC, &ts); + nstime_init2(time, ts.tv_sec, ts.tv_nsec); +} +#elif JEMALLOC_HAVE_MACH_ABSOLUTE_TIME +# define NSTIME_MONOTONIC true +static void +nstime_get(nstime_t *time) +{ + + nstime_init(time, mach_absolute_time()); +} +#else +# define NSTIME_MONOTONIC false +static void +nstime_get(nstime_t *time) +{ + struct timeval tv; + + gettimeofday(&tv, NULL); + nstime_init2(time, tv.tv_sec, tv.tv_usec * 1000); +} +#endif + +#ifdef JEMALLOC_JET +#undef nstime_monotonic +#define nstime_monotonic JEMALLOC_N(n_nstime_monotonic) +#endif +bool +nstime_monotonic(void) +{ + + return (NSTIME_MONOTONIC); +#undef NSTIME_MONOTONIC +} +#ifdef JEMALLOC_JET +#undef nstime_monotonic +#define nstime_monotonic JEMALLOC_N(nstime_monotonic) +nstime_monotonic_t *nstime_monotonic = JEMALLOC_N(n_nstime_monotonic); +#endif + +#ifdef JEMALLOC_JET +#undef nstime_update +#define nstime_update JEMALLOC_N(n_nstime_update) +#endif +bool +nstime_update(nstime_t *time) +{ + nstime_t old_time; + + nstime_copy(&old_time, time); + nstime_get(time); + + /* Handle non-monotonic clocks. 
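+	 * Illustrative use of the contract implemented below, where a true
+	 * return means "the clock went backwards; keep the previous
+	 * reading":
+	 *
+	 *	nstime_t t;
+	 *	nstime_init(&t, 0);
+	 *	nstime_update(&t);	(seed with the current time)
+	 *	...
+	 *	if (nstime_update(&t)) {
+	 *		(non-monotonic clock; t is left unchanged)
+	 *	}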
 */
+	if (unlikely(nstime_compare(&old_time, time) > 0)) {
+		nstime_copy(time, &old_time);
+		return (true);
+	}
+
+	return (false);
+}
+#ifdef JEMALLOC_JET
+#undef nstime_update
+#define nstime_update JEMALLOC_N(nstime_update)
+nstime_update_t *nstime_update = JEMALLOC_N(n_nstime_update);
+#endif
diff --git a/sources/jemalloc/src/je_pages.c b/sources/jemalloc/src/je_pages.c
new file mode 100755
index 00000000..7698e49b
--- /dev/null
+++ b/sources/jemalloc/src/je_pages.c
@@ -0,0 +1,302 @@
+#define JEMALLOC_PAGES_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
+#include <sys/sysctl.h>
+#endif
+
+/******************************************************************************/
+/* Data. */
+
+#ifndef _WIN32
+#  define PAGES_PROT_COMMIT	(PROT_READ | PROT_WRITE)
+#  define PAGES_PROT_DECOMMIT	(PROT_NONE)
+static int	mmap_flags;
+#endif
+static bool	os_overcommits;
+
+/******************************************************************************/
+
+void *
+pages_map(void *addr, size_t size, bool *commit)
+{
+	void *ret;
+
+	assert(size != 0);
+
+	if (os_overcommits)
+		*commit = true;
+
+#ifdef _WIN32
+	/*
+	 * If VirtualAlloc can't allocate at the given address when one is
+	 * given, it fails and returns NULL.
+	 */
+	ret = VirtualAlloc(addr, size, MEM_RESERVE | (*commit ? MEM_COMMIT :
+	    0), PAGE_READWRITE);
+#else
+	/*
+	 * We don't use MAP_FIXED here, because it can cause the *replacement*
+	 * of existing mappings, and we only want to create new mappings.
+	 */
+	{
+		int prot = *commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;
+
+		ret = mmap(addr, size, prot, mmap_flags, -1, 0);
+	}
+	assert(ret != NULL);
+
+	if (ret == MAP_FAILED)
+		ret = NULL;
+	else if (addr != NULL && ret != addr) {
+		/*
+		 * We succeeded in mapping memory, but not in the right place.
+		 */
+		pages_unmap(ret, size);
+		ret = NULL;
+	}
+#endif
+	assert(ret == NULL || (addr == NULL && ret != addr)
+	    || (addr != NULL && ret == addr));
+	return (ret);
+}
+
+void
+pages_unmap(void *addr, size_t size)
+{
+
+#ifdef _WIN32
+	if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
+#else
+	if (munmap(addr, size) == -1)
+#endif
+	{
+		char buf[BUFERROR_BUF];
+
+		buferror(get_errno(), buf, sizeof(buf));
+		malloc_printf("<jemalloc>: Error in "
+#ifdef _WIN32
+		    "VirtualFree"
+#else
+		    "munmap"
+#endif
+		    "(): %s\n", buf);
+		if (opt_abort)
+			abort();
+	}
+}
+
+void *
+pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size,
+    bool *commit)
+{
+	void *ret = (void *)((uintptr_t)addr + leadsize);
+
+	assert(alloc_size >= leadsize + size);
+#ifdef _WIN32
+	{
+		void *new_addr;
+
+		pages_unmap(addr, alloc_size);
+		new_addr = pages_map(ret, size, commit);
+		if (new_addr == ret)
+			return (ret);
+		if (new_addr)
+			pages_unmap(new_addr, size);
+		return (NULL);
+	}
+#else
+	{
+		size_t trailsize = alloc_size - leadsize - size;
+
+		if (leadsize != 0)
+			pages_unmap(addr, leadsize);
+		if (trailsize != 0)
+			pages_unmap((void *)((uintptr_t)ret + size), trailsize);
+		return (ret);
+	}
+#endif
+}
+
+static bool
+pages_commit_impl(void *addr, size_t size, bool commit)
+{
+
+	if (os_overcommits)
+		return (true);
+
+#ifdef _WIN32
+	return (commit ? (addr != VirtualAlloc(addr, size, MEM_COMMIT,
+	    PAGE_READWRITE)) : (!VirtualFree(addr, size, MEM_DECOMMIT)));
+#else
+	{
+		int prot = commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;
+		void *result = mmap(addr, size, prot, mmap_flags | MAP_FIXED,
+		    -1, 0);
+		if (result == MAP_FAILED)
+			return (true);
+		if (result != addr) {
+			/*
+			 * We succeeded in mapping memory, but not in the right
+			 * place. 
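+			 * (Given MAP_FIXED above, this check is defensive: a
+			 * successful mmap() with MAP_FIXED is specified to
+			 * return addr, so this branch signals unexpected
+			 * kernel behavior rather than a routine failure.)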
+ */ + pages_unmap(result, size); + return (true); + } + return (false); + } +#endif +} + +bool +pages_commit(void *addr, size_t size) +{ + + return (pages_commit_impl(addr, size, true)); +} + +bool +pages_decommit(void *addr, size_t size) +{ + + return (pages_commit_impl(addr, size, false)); +} + +bool +pages_purge(void *addr, size_t size) +{ + bool unzeroed; + +#ifdef _WIN32 + VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE); + unzeroed = true; +#elif (defined(JEMALLOC_PURGE_MADVISE_FREE) || \ + defined(JEMALLOC_PURGE_MADVISE_DONTNEED)) +# if defined(JEMALLOC_PURGE_MADVISE_FREE) +# define JEMALLOC_MADV_PURGE MADV_FREE +# define JEMALLOC_MADV_ZEROS false +# elif defined(JEMALLOC_PURGE_MADVISE_DONTNEED) +# define JEMALLOC_MADV_PURGE MADV_DONTNEED +# define JEMALLOC_MADV_ZEROS true +# else +# error No madvise(2) flag defined for purging unused dirty pages +# endif + int err = madvise(addr, size, JEMALLOC_MADV_PURGE); + unzeroed = (!JEMALLOC_MADV_ZEROS || err != 0); +# undef JEMALLOC_MADV_PURGE +# undef JEMALLOC_MADV_ZEROS +#else + /* Last resort no-op. */ + unzeroed = true; +#endif + return (unzeroed); +} + +bool +pages_huge(void *addr, size_t size) +{ + + assert(PAGE_ADDR2BASE(addr) == addr); + assert(PAGE_CEILING(size) == size); + +#ifdef JEMALLOC_HAVE_MADVISE_HUGE + return (madvise(addr, size, MADV_HUGEPAGE) != 0); +#else + return (false); +#endif +} + +bool +pages_nohuge(void *addr, size_t size) +{ + + assert(PAGE_ADDR2BASE(addr) == addr); + assert(PAGE_CEILING(size) == size); + +#ifdef JEMALLOC_HAVE_MADVISE_HUGE + return (madvise(addr, size, MADV_NOHUGEPAGE) != 0); +#else + return (false); +#endif +} + +#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT +static bool +os_overcommits_sysctl(void) +{ + int vm_overcommit; + size_t sz; + + sz = sizeof(vm_overcommit); + if (sysctlbyname("vm.overcommit", &vm_overcommit, &sz, NULL, 0) != 0) + return (false); /* Error. */ + + return ((vm_overcommit & 0x3) == 0); +} +#endif + +#ifdef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY +/* + * Use syscall(2) rather than {open,read,close}(2) when possible to avoid + * reentry during bootstrapping if another library has interposed system call + * wrappers. + */ +static bool +os_overcommits_proc(void) +{ + int fd; + char buf[1]; + ssize_t nread; + +#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open) + fd = (int)syscall(SYS_open, "/proc/sys/vm/overcommit_memory", O_RDONLY); +#else + fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY); +#endif + if (fd == -1) + return (false); /* Error. */ + +#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_read) + nread = (ssize_t)syscall(SYS_read, fd, &buf, sizeof(buf)); +#else + nread = read(fd, &buf, sizeof(buf)); +#endif + +#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_close) + syscall(SYS_close, fd); +#else + close(fd); +#endif + + if (nread < 1) + return (false); /* Error. */ + /* + * /proc/sys/vm/overcommit_memory meanings: + * 0: Heuristic overcommit. + * 1: Always overcommit. + * 2: Never overcommit. 
+ */ + return (buf[0] == '0' || buf[0] == '1'); +} +#endif + +void +pages_boot(void) +{ + +#ifndef _WIN32 + mmap_flags = MAP_PRIVATE | MAP_ANON; +#endif + +#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT + os_overcommits = os_overcommits_sysctl(); +#elif defined(JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY) + os_overcommits = os_overcommits_proc(); +# ifdef MAP_NORESERVE + if (os_overcommits) + mmap_flags |= MAP_NORESERVE; +# endif +#else + os_overcommits = false; +#endif +} diff --git a/sources/jemalloc/src/je_prng.c b/sources/jemalloc/src/je_prng.c new file mode 100755 index 00000000..76646a2a --- /dev/null +++ b/sources/jemalloc/src/je_prng.c @@ -0,0 +1,2 @@ +#define JEMALLOC_PRNG_C_ +#include "jemalloc/internal/jemalloc_internal.h" diff --git a/sources/jemalloc/src/je_prof.c b/sources/jemalloc/src/je_prof.c new file mode 100755 index 00000000..c89dade1 --- /dev/null +++ b/sources/jemalloc/src/je_prof.c @@ -0,0 +1,2355 @@ +#define JEMALLOC_PROF_C_ +#include "jemalloc/internal/jemalloc_internal.h" +/******************************************************************************/ + +#ifdef JEMALLOC_PROF_LIBUNWIND +#define UNW_LOCAL_ONLY +#include +#endif + +#ifdef JEMALLOC_PROF_LIBGCC +#include +#endif + +/******************************************************************************/ +/* Data. */ + +bool opt_prof = false; +bool opt_prof_active = true; +bool opt_prof_thread_active_init = true; +size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT; +ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT; +bool opt_prof_gdump = false; +bool opt_prof_final = false; +bool opt_prof_leak = false; +bool opt_prof_accum = false; +char opt_prof_prefix[ + /* Minimize memory bloat for non-prof builds. */ +#ifdef JEMALLOC_PROF + PATH_MAX + +#endif + 1]; + +/* + * Initialized as opt_prof_active, and accessed via + * prof_active_[gs]et{_unlocked,}(). + */ +bool prof_active; +static malloc_mutex_t prof_active_mtx; + +/* + * Initialized as opt_prof_thread_active_init, and accessed via + * prof_thread_active_init_[gs]et(). + */ +static bool prof_thread_active_init; +static malloc_mutex_t prof_thread_active_init_mtx; + +/* + * Initialized as opt_prof_gdump, and accessed via + * prof_gdump_[gs]et{_unlocked,}(). + */ +bool prof_gdump_val; +static malloc_mutex_t prof_gdump_mtx; + +uint64_t prof_interval = 0; + +size_t lg_prof_sample; + +/* + * Table of mutexes that are shared among gctx's. These are leaf locks, so + * there is no problem with using them for more than one gctx at the same time. + * The primary motivation for this sharing though is that gctx's are ephemeral, + * and destroying mutexes causes complications for systems that allocate when + * creating/destroying mutexes. + */ +static malloc_mutex_t *gctx_locks; +static unsigned cum_gctxs; /* Atomic counter. */ + +/* + * Table of mutexes that are shared among tdata's. No operations require + * holding multiple tdata locks, so there is no problem with using them for more + * than one tdata at the same time, even though a gctx lock may be acquired + * while holding a tdata lock. + */ +static malloc_mutex_t *tdata_locks; + +/* + * Global hash of (prof_bt_t *)-->(prof_gctx_t *). This is the master data + * structure that knows about all backtraces currently captured. + */ +static ckh_t bt2gctx; +static malloc_mutex_t bt2gctx_mtx; + +/* + * Tree of all extant prof_tdata_t structures, regardless of state, + * {attached,detached,expired}. 
+ */ +static prof_tdata_tree_t tdatas; +static malloc_mutex_t tdatas_mtx; + +static uint64_t next_thr_uid; +static malloc_mutex_t next_thr_uid_mtx; + +static malloc_mutex_t prof_dump_seq_mtx; +static uint64_t prof_dump_seq; +static uint64_t prof_dump_iseq; +static uint64_t prof_dump_mseq; +static uint64_t prof_dump_useq; + +/* + * This buffer is rather large for stack allocation, so use a single buffer for + * all profile dumps. + */ +static malloc_mutex_t prof_dump_mtx; +static char prof_dump_buf[ + /* Minimize memory bloat for non-prof builds. */ +#ifdef JEMALLOC_PROF + PROF_DUMP_BUFSIZE +#else + 1 +#endif +]; +static size_t prof_dump_buf_end; +static int prof_dump_fd; + +/* Do not dump any profiles until bootstrapping is complete. */ +static bool prof_booted = false; + +/******************************************************************************/ +/* + * Function prototypes for static functions that are referenced prior to + * definition. + */ + +static bool prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx); +static void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx); +static bool prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata, + bool even_if_attached); +static void prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, + bool even_if_attached); +static char *prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name); + +/******************************************************************************/ +/* Red-black trees. */ + +JEMALLOC_INLINE_C int +prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b) +{ + uint64_t a_thr_uid = a->thr_uid; + uint64_t b_thr_uid = b->thr_uid; + int ret = (a_thr_uid > b_thr_uid) - (a_thr_uid < b_thr_uid); + if (ret == 0) { + uint64_t a_thr_discrim = a->thr_discrim; + uint64_t b_thr_discrim = b->thr_discrim; + ret = (a_thr_discrim > b_thr_discrim) - (a_thr_discrim < + b_thr_discrim); + if (ret == 0) { + uint64_t a_tctx_uid = a->tctx_uid; + uint64_t b_tctx_uid = b->tctx_uid; + ret = (a_tctx_uid > b_tctx_uid) - (a_tctx_uid < + b_tctx_uid); + } + } + return (ret); +} + +rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t, + tctx_link, prof_tctx_comp) + +JEMALLOC_INLINE_C int +prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b) +{ + unsigned a_len = a->bt.len; + unsigned b_len = b->bt.len; + unsigned comp_len = (a_len < b_len) ? a_len : b_len; + int ret = memcmp(a->bt.vec, b->bt.vec, comp_len * sizeof(void *)); + if (ret == 0) + ret = (a_len > b_len) - (a_len < b_len); + return (ret); +} + +rb_gen(static UNUSED, gctx_tree_, prof_gctx_tree_t, prof_gctx_t, dump_link, + prof_gctx_comp) + +JEMALLOC_INLINE_C int +prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b) +{ + int ret; + uint64_t a_uid = a->thr_uid; + uint64_t b_uid = b->thr_uid; + + ret = ((a_uid > b_uid) - (a_uid < b_uid)); + if (ret == 0) { + uint64_t a_discrim = a->thr_discrim; + uint64_t b_discrim = b->thr_discrim; + + ret = ((a_discrim > b_discrim) - (a_discrim < b_discrim)); + } + return (ret); +} + +rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link, + prof_tdata_comp) + +/******************************************************************************/ + +void +prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated) +{ + prof_tdata_t *tdata; + + cassert(config_prof); + + if (updated) { + /* + * Compute a new sample threshold. This isn't very important in + * practice, because this function is rarely executed, so the + * potential for sample bias is minimal except in contrived + * programs. 
+ */ + tdata = prof_tdata_get(tsd, true); + if (tdata != NULL) + prof_sample_threshold_update(tdata); + } + + if ((uintptr_t)tctx > (uintptr_t)1U) { + malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock); + tctx->prepared = false; + if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) + prof_tctx_destroy(tsd, tctx); + else + malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock); + } +} + +void +prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize, + prof_tctx_t *tctx) +{ + + prof_tctx_set(tsdn, ptr, usize, tctx); + + malloc_mutex_lock(tsdn, tctx->tdata->lock); + tctx->cnts.curobjs++; + tctx->cnts.curbytes += usize; + if (opt_prof_accum) { + tctx->cnts.accumobjs++; + tctx->cnts.accumbytes += usize; + } + tctx->prepared = false; + malloc_mutex_unlock(tsdn, tctx->tdata->lock); +} + +void +prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx) +{ + + malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock); + assert(tctx->cnts.curobjs > 0); + assert(tctx->cnts.curbytes >= usize); + tctx->cnts.curobjs--; + tctx->cnts.curbytes -= usize; + + if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) + prof_tctx_destroy(tsd, tctx); + else + malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock); +} + +void +bt_init(prof_bt_t *bt, void **vec) +{ + + cassert(config_prof); + + bt->vec = vec; + bt->len = 0; +} + +JEMALLOC_INLINE_C void +prof_enter(tsd_t *tsd, prof_tdata_t *tdata) +{ + + cassert(config_prof); + assert(tdata == prof_tdata_get(tsd, false)); + + if (tdata != NULL) { + assert(!tdata->enq); + tdata->enq = true; + } + + malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx); +} + +JEMALLOC_INLINE_C void +prof_leave(tsd_t *tsd, prof_tdata_t *tdata) +{ + + cassert(config_prof); + assert(tdata == prof_tdata_get(tsd, false)); + + malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx); + + if (tdata != NULL) { + bool idump, gdump; + + assert(tdata->enq); + tdata->enq = false; + idump = tdata->enq_idump; + tdata->enq_idump = false; + gdump = tdata->enq_gdump; + tdata->enq_gdump = false; + + if (idump) + prof_idump(tsd_tsdn(tsd)); + if (gdump) + prof_gdump(tsd_tsdn(tsd)); + } +} + +#ifdef JEMALLOC_PROF_LIBUNWIND +void +prof_backtrace(prof_bt_t *bt) +{ + int nframes; + + cassert(config_prof); + assert(bt->len == 0); + assert(bt->vec != NULL); + + nframes = unw_backtrace(bt->vec, PROF_BT_MAX); + if (nframes <= 0) + return; + bt->len = nframes; +} +#elif (defined(JEMALLOC_PROF_LIBGCC)) +static _Unwind_Reason_Code +prof_unwind_init_callback(struct _Unwind_Context *context, void *arg) +{ + + cassert(config_prof); + + return (_URC_NO_REASON); +} + +static _Unwind_Reason_Code +prof_unwind_callback(struct _Unwind_Context *context, void *arg) +{ + prof_unwind_data_t *data = (prof_unwind_data_t *)arg; + void *ip; + + cassert(config_prof); + + ip = (void *)_Unwind_GetIP(context); + if (ip == NULL) + return (_URC_END_OF_STACK); + data->bt->vec[data->bt->len] = ip; + data->bt->len++; + if (data->bt->len == data->max) + return (_URC_END_OF_STACK); + + return (_URC_NO_REASON); +} + +void +prof_backtrace(prof_bt_t *bt) +{ + prof_unwind_data_t data = {bt, PROF_BT_MAX}; + + cassert(config_prof); + + _Unwind_Backtrace(prof_unwind_callback, &data); +} +#elif (defined(JEMALLOC_PROF_GCC)) +void +prof_backtrace(prof_bt_t *bt) +{ +#define BT_FRAME(i) \ + if ((i) < PROF_BT_MAX) { \ + void *p; \ + if (__builtin_frame_address(i) == 0) \ + return; \ + p = __builtin_return_address(i); \ + if (p == NULL) \ + return; \ + bt->vec[(i)] = p; \ + bt->len = (i) + 1; \ + } else \ + return; + + cassert(config_prof); + + BT_FRAME(0) + 
BT_FRAME(1) + BT_FRAME(2) + BT_FRAME(3) + BT_FRAME(4) + BT_FRAME(5) + BT_FRAME(6) + BT_FRAME(7) + BT_FRAME(8) + BT_FRAME(9) + + BT_FRAME(10) + BT_FRAME(11) + BT_FRAME(12) + BT_FRAME(13) + BT_FRAME(14) + BT_FRAME(15) + BT_FRAME(16) + BT_FRAME(17) + BT_FRAME(18) + BT_FRAME(19) + + BT_FRAME(20) + BT_FRAME(21) + BT_FRAME(22) + BT_FRAME(23) + BT_FRAME(24) + BT_FRAME(25) + BT_FRAME(26) + BT_FRAME(27) + BT_FRAME(28) + BT_FRAME(29) + + BT_FRAME(30) + BT_FRAME(31) + BT_FRAME(32) + BT_FRAME(33) + BT_FRAME(34) + BT_FRAME(35) + BT_FRAME(36) + BT_FRAME(37) + BT_FRAME(38) + BT_FRAME(39) + + BT_FRAME(40) + BT_FRAME(41) + BT_FRAME(42) + BT_FRAME(43) + BT_FRAME(44) + BT_FRAME(45) + BT_FRAME(46) + BT_FRAME(47) + BT_FRAME(48) + BT_FRAME(49) + + BT_FRAME(50) + BT_FRAME(51) + BT_FRAME(52) + BT_FRAME(53) + BT_FRAME(54) + BT_FRAME(55) + BT_FRAME(56) + BT_FRAME(57) + BT_FRAME(58) + BT_FRAME(59) + + BT_FRAME(60) + BT_FRAME(61) + BT_FRAME(62) + BT_FRAME(63) + BT_FRAME(64) + BT_FRAME(65) + BT_FRAME(66) + BT_FRAME(67) + BT_FRAME(68) + BT_FRAME(69) + + BT_FRAME(70) + BT_FRAME(71) + BT_FRAME(72) + BT_FRAME(73) + BT_FRAME(74) + BT_FRAME(75) + BT_FRAME(76) + BT_FRAME(77) + BT_FRAME(78) + BT_FRAME(79) + + BT_FRAME(80) + BT_FRAME(81) + BT_FRAME(82) + BT_FRAME(83) + BT_FRAME(84) + BT_FRAME(85) + BT_FRAME(86) + BT_FRAME(87) + BT_FRAME(88) + BT_FRAME(89) + + BT_FRAME(90) + BT_FRAME(91) + BT_FRAME(92) + BT_FRAME(93) + BT_FRAME(94) + BT_FRAME(95) + BT_FRAME(96) + BT_FRAME(97) + BT_FRAME(98) + BT_FRAME(99) + + BT_FRAME(100) + BT_FRAME(101) + BT_FRAME(102) + BT_FRAME(103) + BT_FRAME(104) + BT_FRAME(105) + BT_FRAME(106) + BT_FRAME(107) + BT_FRAME(108) + BT_FRAME(109) + + BT_FRAME(110) + BT_FRAME(111) + BT_FRAME(112) + BT_FRAME(113) + BT_FRAME(114) + BT_FRAME(115) + BT_FRAME(116) + BT_FRAME(117) + BT_FRAME(118) + BT_FRAME(119) + + BT_FRAME(120) + BT_FRAME(121) + BT_FRAME(122) + BT_FRAME(123) + BT_FRAME(124) + BT_FRAME(125) + BT_FRAME(126) + BT_FRAME(127) +#undef BT_FRAME +} +#else +void +prof_backtrace(prof_bt_t *bt) +{ + + cassert(config_prof); + not_reached(); +} +#endif + +static malloc_mutex_t * +prof_gctx_mutex_choose(void) +{ + unsigned ngctxs = atomic_add_u(&cum_gctxs, 1); + + return (&gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS]); +} + +static malloc_mutex_t * +prof_tdata_mutex_choose(uint64_t thr_uid) +{ + + return (&tdata_locks[thr_uid % PROF_NTDATA_LOCKS]); +} + +static prof_gctx_t * +prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt) +{ + /* + * Create a single allocation that has space for vec of length bt->len. + */ + size_t size = offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *)); + prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsdn, size, + size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true), + true); + if (gctx == NULL) + return (NULL); + gctx->lock = prof_gctx_mutex_choose(); + /* + * Set nlimbo to 1, in order to avoid a race condition with + * prof_tctx_destroy()/prof_gctx_try_destroy(). + */ + gctx->nlimbo = 1; + tctx_tree_new(&gctx->tctxs); + /* Duplicate bt. */ + memcpy(gctx->vec, bt->vec, bt->len * sizeof(void *)); + gctx->bt.vec = gctx->vec; + gctx->bt.len = bt->len; + return (gctx); +} + +static void +prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx, + prof_tdata_t *tdata) +{ + + cassert(config_prof); + + /* + * Check that gctx is still unused by any thread cache before destroying + * it. 
prof_lookup() increments gctx->nlimbo in order to avoid a race + * condition with this function, as does prof_tctx_destroy() in order to + * avoid a race between the main body of prof_tctx_destroy() and entry + * into this function. + */ + prof_enter(tsd, tdata_self); + malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); + assert(gctx->nlimbo != 0); + if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) { + /* Remove gctx from bt2gctx. */ + if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL)) + not_reached(); + prof_leave(tsd, tdata_self); + /* Destroy gctx. */ + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); + idalloctm(tsd_tsdn(tsd), gctx, NULL, true, true); + } else { + /* + * Compensate for increment in prof_tctx_destroy() or + * prof_lookup(). + */ + gctx->nlimbo--; + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); + prof_leave(tsd, tdata_self); + } +} + +static bool +prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx) +{ + + malloc_mutex_assert_owner(tsdn, tctx->tdata->lock); + + if (opt_prof_accum) + return (false); + if (tctx->cnts.curobjs != 0) + return (false); + if (tctx->prepared) + return (false); + return (true); +} + +static bool +prof_gctx_should_destroy(prof_gctx_t *gctx) +{ + + if (opt_prof_accum) + return (false); + if (!tctx_tree_empty(&gctx->tctxs)) + return (false); + if (gctx->nlimbo != 0) + return (false); + return (true); +} + +static void +prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) +{ + prof_tdata_t *tdata = tctx->tdata; + prof_gctx_t *gctx = tctx->gctx; + bool destroy_tdata, destroy_tctx, destroy_gctx; + + malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock); + + assert(tctx->cnts.curobjs == 0); + assert(tctx->cnts.curbytes == 0); + assert(!opt_prof_accum); + assert(tctx->cnts.accumobjs == 0); + assert(tctx->cnts.accumbytes == 0); + + ckh_remove(tsd, &tdata->bt2tctx, &gctx->bt, NULL, NULL); + destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, false); + malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); + + malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); + switch (tctx->state) { + case prof_tctx_state_nominal: + tctx_tree_remove(&gctx->tctxs, tctx); + destroy_tctx = true; + if (prof_gctx_should_destroy(gctx)) { + /* + * Increment gctx->nlimbo in order to keep another + * thread from winning the race to destroy gctx while + * this one has gctx->lock dropped. Without this, it + * would be possible for another thread to: + * + * 1) Sample an allocation associated with gctx. + * 2) Deallocate the sampled object. + * 3) Successfully prof_gctx_try_destroy(gctx). + * + * The result would be that gctx no longer exists by the + * time this thread accesses it in + * prof_gctx_try_destroy(). + */ + gctx->nlimbo++; + destroy_gctx = true; + } else + destroy_gctx = false; + break; + case prof_tctx_state_dumping: + /* + * A dumping thread needs tctx to remain valid until dumping + * has finished. Change state such that the dumping thread will + * complete destruction during a late dump iteration phase. 
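+		 * (Lifecycle sketch: tctx's move nominal -> dumping during
+		 * the merge pass and back to nominal when the dump
+		 * completes, or to purgatory here; prof_tctx_finish_iter()
+		 * then reaps purgatory entries at the end of the dump.)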
+ */ + tctx->state = prof_tctx_state_purgatory; + destroy_tctx = false; + destroy_gctx = false; + break; + default: + not_reached(); + destroy_tctx = false; + destroy_gctx = false; + } + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); + if (destroy_gctx) { + prof_gctx_try_destroy(tsd, prof_tdata_get(tsd, false), gctx, + tdata); + } + + malloc_mutex_assert_not_owner(tsd_tsdn(tsd), tctx->tdata->lock); + + if (destroy_tdata) + prof_tdata_destroy(tsd, tdata, false); + + if (destroy_tctx) + idalloctm(tsd_tsdn(tsd), tctx, NULL, true, true); +} + +static bool +prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata, + void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx) +{ + union { + prof_gctx_t *p; + void *v; + } gctx; + union { + prof_bt_t *p; + void *v; + } btkey; + bool new_gctx; + + prof_enter(tsd, tdata); + if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) { + /* bt has never been seen before. Insert it. */ + gctx.p = prof_gctx_create(tsd_tsdn(tsd), bt); + if (gctx.v == NULL) { + prof_leave(tsd, tdata); + return (true); + } + btkey.p = &gctx.p->bt; + if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) { + /* OOM. */ + prof_leave(tsd, tdata); + idalloctm(tsd_tsdn(tsd), gctx.v, NULL, true, true); + return (true); + } + new_gctx = true; + } else { + /* + * Increment nlimbo, in order to avoid a race condition with + * prof_tctx_destroy()/prof_gctx_try_destroy(). + */ + malloc_mutex_lock(tsd_tsdn(tsd), gctx.p->lock); + gctx.p->nlimbo++; + malloc_mutex_unlock(tsd_tsdn(tsd), gctx.p->lock); + new_gctx = false; + } + prof_leave(tsd, tdata); + + *p_btkey = btkey.v; + *p_gctx = gctx.p; + *p_new_gctx = new_gctx; + return (false); +} + +prof_tctx_t * +prof_lookup(tsd_t *tsd, prof_bt_t *bt) +{ + union { + prof_tctx_t *p; + void *v; + } ret; + prof_tdata_t *tdata; + bool not_found; + + cassert(config_prof); + + tdata = prof_tdata_get(tsd, false); + if (tdata == NULL) + return (NULL); + + malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); + not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v); + if (!not_found) /* Note double negative! */ + ret.p->prepared = true; + malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); + if (not_found) { + void *btkey; + prof_gctx_t *gctx; + bool new_gctx, error; + + /* + * This thread's cache lacks bt. Look for it in the global + * cache. + */ + if (prof_lookup_global(tsd, bt, tdata, &btkey, &gctx, + &new_gctx)) + return (NULL); + + /* Link a prof_tctx_t into gctx for this thread. 
*/ + ret.v = iallocztm(tsd_tsdn(tsd), sizeof(prof_tctx_t), + size2index(sizeof(prof_tctx_t)), false, NULL, true, + arena_ichoose(tsd, NULL), true); + if (ret.p == NULL) { + if (new_gctx) + prof_gctx_try_destroy(tsd, tdata, gctx, tdata); + return (NULL); + } + ret.p->tdata = tdata; + ret.p->thr_uid = tdata->thr_uid; + ret.p->thr_discrim = tdata->thr_discrim; + memset(&ret.p->cnts, 0, sizeof(prof_cnt_t)); + ret.p->gctx = gctx; + ret.p->tctx_uid = tdata->tctx_uid_next++; + ret.p->prepared = true; + ret.p->state = prof_tctx_state_initializing; + malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); + error = ckh_insert(tsd, &tdata->bt2tctx, btkey, ret.v); + malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); + if (error) { + if (new_gctx) + prof_gctx_try_destroy(tsd, tdata, gctx, tdata); + idalloctm(tsd_tsdn(tsd), ret.v, NULL, true, true); + return (NULL); + } + malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); + ret.p->state = prof_tctx_state_nominal; + tctx_tree_insert(&gctx->tctxs, ret.p); + gctx->nlimbo--; + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); + } + + return (ret.p); +} + +/* + * The bodies of this function and prof_leakcheck() are compiled out unless heap + * profiling is enabled, so that it is possible to compile jemalloc with + * floating point support completely disabled. Avoiding floating point code is + * important on memory-constrained systems, but it also enables a workaround for + * versions of glibc that don't properly save/restore floating point registers + * during dynamic lazy symbol loading (which internally calls into whatever + * malloc implementation happens to be integrated into the application). Note + * that some compilers (e.g. gcc 4.8) may use floating point registers for fast + * memory moves, so jemalloc must be compiled with such optimizations disabled + * (e.g. + * -mno-sse) in order for the workaround to be complete. + */ +void +prof_sample_threshold_update(prof_tdata_t *tdata) +{ +#ifdef JEMALLOC_PROF + uint64_t r; + double u; + + if (!config_prof) + return; + + if (lg_prof_sample == 0) { + tdata->bytes_until_sample = 0; + return; + } + + /* + * Compute sample interval as a geometrically distributed random + * variable with mean (2^lg_prof_sample). 
+ * + * __ __ + * | log(u) | 1 + * tdata->bytes_until_sample = | -------- |, where p = --------------- + * | log(1-p) | lg_prof_sample + * 2 + * + * For more information on the math, see: + * + * Non-Uniform Random Variate Generation + * Luc Devroye + * Springer-Verlag, New York, 1986 + * pp 500 + * (http://luc.devroye.org/rnbookindex.html) + */ + r = prng_lg_range_u64(&tdata->prng_state, 53); + u = (double)r * (1.0/9007199254740992.0L); + tdata->bytes_until_sample = (uint64_t)(log(u) / + log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample)))) + + (uint64_t)1U; +#endif +} + +#ifdef JEMALLOC_JET +static prof_tdata_t * +prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg) +{ + size_t *tdata_count = (size_t *)arg; + + (*tdata_count)++; + + return (NULL); +} + +size_t +prof_tdata_count(void) +{ + size_t tdata_count = 0; + tsdn_t *tsdn; + + tsdn = tsdn_fetch(); + malloc_mutex_lock(tsdn, &tdatas_mtx); + tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter, + (void *)&tdata_count); + malloc_mutex_unlock(tsdn, &tdatas_mtx); + + return (tdata_count); +} +#endif + +#ifdef JEMALLOC_JET +size_t +prof_bt_count(void) +{ + size_t bt_count; + tsd_t *tsd; + prof_tdata_t *tdata; + + tsd = tsd_fetch(); + tdata = prof_tdata_get(tsd, false); + if (tdata == NULL) + return (0); + + malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx); + bt_count = ckh_count(&bt2gctx); + malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx); + + return (bt_count); +} +#endif + +#ifdef JEMALLOC_JET +#undef prof_dump_open +#define prof_dump_open JEMALLOC_N(prof_dump_open_impl) +#endif +static int +prof_dump_open(bool propagate_err, const char *filename) +{ + int fd; + + fd = creat(filename, 0644); + if (fd == -1 && !propagate_err) { + malloc_printf(": creat(\"%s\"), 0644) failed\n", + filename); + if (opt_abort) + abort(); + } + + return (fd); +} +#ifdef JEMALLOC_JET +#undef prof_dump_open +#define prof_dump_open JEMALLOC_N(prof_dump_open) +prof_dump_open_t *prof_dump_open = JEMALLOC_N(prof_dump_open_impl); +#endif + +static bool +prof_dump_flush(bool propagate_err) +{ + bool ret = false; + ssize_t err; + + cassert(config_prof); + + err = write(prof_dump_fd, prof_dump_buf, prof_dump_buf_end); + if (err == -1) { + if (!propagate_err) { + malloc_write(": write() failed during heap " + "profile flush\n"); + if (opt_abort) + abort(); + } + ret = true; + } + prof_dump_buf_end = 0; + + return (ret); +} + +static bool +prof_dump_close(bool propagate_err) +{ + bool ret; + + assert(prof_dump_fd != -1); + ret = prof_dump_flush(propagate_err); + close(prof_dump_fd); + prof_dump_fd = -1; + + return (ret); +} + +static bool +prof_dump_write(bool propagate_err, const char *s) +{ + size_t i, slen, n; + + cassert(config_prof); + + i = 0; + slen = strlen(s); + while (i < slen) { + /* Flush the buffer if it is full. */ + if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) + if (prof_dump_flush(propagate_err) && propagate_err) + return (true); + + if (prof_dump_buf_end + slen <= PROF_DUMP_BUFSIZE) { + /* Finish writing. */ + n = slen - i; + } else { + /* Write as much of s as will fit. */ + n = PROF_DUMP_BUFSIZE - prof_dump_buf_end; + } + memcpy(&prof_dump_buf[prof_dump_buf_end], &s[i], n); + prof_dump_buf_end += n; + i += n; + } + + return (false); +} + +JEMALLOC_FORMAT_PRINTF(2, 3) +static bool +prof_dump_printf(bool propagate_err, const char *format, ...) 
+{ + bool ret; + va_list ap; + char buf[PROF_PRINTF_BUFSIZE]; + + va_start(ap, format); + malloc_vsnprintf(buf, sizeof(buf), format, ap); + va_end(ap); + ret = prof_dump_write(propagate_err, buf); + + return (ret); +} + +static void +prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata) +{ + + malloc_mutex_assert_owner(tsdn, tctx->tdata->lock); + + malloc_mutex_lock(tsdn, tctx->gctx->lock); + + switch (tctx->state) { + case prof_tctx_state_initializing: + malloc_mutex_unlock(tsdn, tctx->gctx->lock); + return; + case prof_tctx_state_nominal: + tctx->state = prof_tctx_state_dumping; + malloc_mutex_unlock(tsdn, tctx->gctx->lock); + + memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t)); + + tdata->cnt_summed.curobjs += tctx->dump_cnts.curobjs; + tdata->cnt_summed.curbytes += tctx->dump_cnts.curbytes; + if (opt_prof_accum) { + tdata->cnt_summed.accumobjs += + tctx->dump_cnts.accumobjs; + tdata->cnt_summed.accumbytes += + tctx->dump_cnts.accumbytes; + } + break; + case prof_tctx_state_dumping: + case prof_tctx_state_purgatory: + not_reached(); + } +} + +static void +prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx) +{ + + malloc_mutex_assert_owner(tsdn, gctx->lock); + + gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs; + gctx->cnt_summed.curbytes += tctx->dump_cnts.curbytes; + if (opt_prof_accum) { + gctx->cnt_summed.accumobjs += tctx->dump_cnts.accumobjs; + gctx->cnt_summed.accumbytes += tctx->dump_cnts.accumbytes; + } +} + +static prof_tctx_t * +prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) +{ + tsdn_t *tsdn = (tsdn_t *)arg; + + malloc_mutex_assert_owner(tsdn, tctx->gctx->lock); + + switch (tctx->state) { + case prof_tctx_state_nominal: + /* New since dumping started; ignore. */ + break; + case prof_tctx_state_dumping: + case prof_tctx_state_purgatory: + prof_tctx_merge_gctx(tsdn, tctx, tctx->gctx); + break; + default: + not_reached(); + } + + return (NULL); +} + +struct prof_tctx_dump_iter_arg_s { + tsdn_t *tsdn; + bool propagate_err; +}; + +static prof_tctx_t * +prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque) +{ + struct prof_tctx_dump_iter_arg_s *arg = + (struct prof_tctx_dump_iter_arg_s *)opaque; + + malloc_mutex_assert_owner(arg->tsdn, tctx->gctx->lock); + + switch (tctx->state) { + case prof_tctx_state_initializing: + case prof_tctx_state_nominal: + /* Not captured by this dump. */ + break; + case prof_tctx_state_dumping: + case prof_tctx_state_purgatory: + if (prof_dump_printf(arg->propagate_err, + " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": " + "%"FMTu64"]\n", tctx->thr_uid, tctx->dump_cnts.curobjs, + tctx->dump_cnts.curbytes, tctx->dump_cnts.accumobjs, + tctx->dump_cnts.accumbytes)) + return (tctx); + break; + default: + not_reached(); + } + return (NULL); +} + +static prof_tctx_t * +prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) +{ + tsdn_t *tsdn = (tsdn_t *)arg; + prof_tctx_t *ret; + + malloc_mutex_assert_owner(tsdn, tctx->gctx->lock); + + switch (tctx->state) { + case prof_tctx_state_nominal: + /* New since dumping started; ignore. 
*/ + break; + case prof_tctx_state_dumping: + tctx->state = prof_tctx_state_nominal; + break; + case prof_tctx_state_purgatory: + ret = tctx; + goto label_return; + default: + not_reached(); + } + + ret = NULL; +label_return: + return (ret); +} + +static void +prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs) +{ + + cassert(config_prof); + + malloc_mutex_lock(tsdn, gctx->lock); + + /* + * Increment nlimbo so that gctx won't go away before dump. + * Additionally, link gctx into the dump list so that it is included in + * prof_dump()'s second pass. + */ + gctx->nlimbo++; + gctx_tree_insert(gctxs, gctx); + + memset(&gctx->cnt_summed, 0, sizeof(prof_cnt_t)); + + malloc_mutex_unlock(tsdn, gctx->lock); +} + +struct prof_gctx_merge_iter_arg_s { + tsdn_t *tsdn; + size_t leak_ngctx; +}; + +static prof_gctx_t * +prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) +{ + struct prof_gctx_merge_iter_arg_s *arg = + (struct prof_gctx_merge_iter_arg_s *)opaque; + + malloc_mutex_lock(arg->tsdn, gctx->lock); + tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter, + (void *)arg->tsdn); + if (gctx->cnt_summed.curobjs != 0) + arg->leak_ngctx++; + malloc_mutex_unlock(arg->tsdn, gctx->lock); + + return (NULL); +} + +static void +prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) +{ + prof_tdata_t *tdata = prof_tdata_get(tsd, false); + prof_gctx_t *gctx; + + /* + * Standard tree iteration won't work here, because as soon as we + * decrement gctx->nlimbo and unlock gctx, another thread can + * concurrently destroy it, which will corrupt the tree. Therefore, + * tear down the tree one node at a time during iteration. + */ + while ((gctx = gctx_tree_first(gctxs)) != NULL) { + gctx_tree_remove(gctxs, gctx); + malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); + { + prof_tctx_t *next; + + next = NULL; + do { + prof_tctx_t *to_destroy = + tctx_tree_iter(&gctx->tctxs, next, + prof_tctx_finish_iter, + (void *)tsd_tsdn(tsd)); + if (to_destroy != NULL) { + next = tctx_tree_next(&gctx->tctxs, + to_destroy); + tctx_tree_remove(&gctx->tctxs, + to_destroy); + idalloctm(tsd_tsdn(tsd), to_destroy, + NULL, true, true); + } else + next = NULL; + } while (next != NULL); + } + gctx->nlimbo--; + if (prof_gctx_should_destroy(gctx)) { + gctx->nlimbo++; + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); + prof_gctx_try_destroy(tsd, tdata, gctx, tdata); + } else + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); + } +} + +struct prof_tdata_merge_iter_arg_s { + tsdn_t *tsdn; + prof_cnt_t cnt_all; +}; + +static prof_tdata_t * +prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, + void *opaque) +{ + struct prof_tdata_merge_iter_arg_s *arg = + (struct prof_tdata_merge_iter_arg_s *)opaque; + + malloc_mutex_lock(arg->tsdn, tdata->lock); + if (!tdata->expired) { + size_t tabind; + union { + prof_tctx_t *p; + void *v; + } tctx; + + tdata->dumping = true; + memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t)); + for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL, + &tctx.v);) + prof_tctx_merge_tdata(arg->tsdn, tctx.p, tdata); + + arg->cnt_all.curobjs += tdata->cnt_summed.curobjs; + arg->cnt_all.curbytes += tdata->cnt_summed.curbytes; + if (opt_prof_accum) { + arg->cnt_all.accumobjs += tdata->cnt_summed.accumobjs; + arg->cnt_all.accumbytes += tdata->cnt_summed.accumbytes; + } + } else + tdata->dumping = false; + malloc_mutex_unlock(arg->tsdn, tdata->lock); + + return (NULL); +} + +static prof_tdata_t * +prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, 
void *arg) +{ + bool propagate_err = *(bool *)arg; + + if (!tdata->dumping) + return (NULL); + + if (prof_dump_printf(propagate_err, + " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]%s%s\n", + tdata->thr_uid, tdata->cnt_summed.curobjs, + tdata->cnt_summed.curbytes, tdata->cnt_summed.accumobjs, + tdata->cnt_summed.accumbytes, + (tdata->thread_name != NULL) ? " " : "", + (tdata->thread_name != NULL) ? tdata->thread_name : "")) + return (tdata); + return (NULL); +} + +#ifdef JEMALLOC_JET +#undef prof_dump_header +#define prof_dump_header JEMALLOC_N(prof_dump_header_impl) +#endif +static bool +prof_dump_header(tsdn_t *tsdn, bool propagate_err, const prof_cnt_t *cnt_all) +{ + bool ret; + + if (prof_dump_printf(propagate_err, + "heap_v2/%"FMTu64"\n" + " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n", + ((uint64_t)1U << lg_prof_sample), cnt_all->curobjs, + cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes)) + return (true); + + malloc_mutex_lock(tsdn, &tdatas_mtx); + ret = (tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter, + (void *)&propagate_err) != NULL); + malloc_mutex_unlock(tsdn, &tdatas_mtx); + return (ret); +} +#ifdef JEMALLOC_JET +#undef prof_dump_header +#define prof_dump_header JEMALLOC_N(prof_dump_header) +prof_dump_header_t *prof_dump_header = JEMALLOC_N(prof_dump_header_impl); +#endif + +static bool +prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx, + const prof_bt_t *bt, prof_gctx_tree_t *gctxs) +{ + bool ret; + unsigned i; + struct prof_tctx_dump_iter_arg_s prof_tctx_dump_iter_arg; + + cassert(config_prof); + malloc_mutex_assert_owner(tsdn, gctx->lock); + + /* Avoid dumping such gctx's that have no useful data. */ + if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) || + (opt_prof_accum && gctx->cnt_summed.accumobjs == 0)) { + assert(gctx->cnt_summed.curobjs == 0); + assert(gctx->cnt_summed.curbytes == 0); + assert(gctx->cnt_summed.accumobjs == 0); + assert(gctx->cnt_summed.accumbytes == 0); + ret = false; + goto label_return; + } + + if (prof_dump_printf(propagate_err, "@")) { + ret = true; + goto label_return; + } + for (i = 0; i < bt->len; i++) { + if (prof_dump_printf(propagate_err, " %#"FMTxPTR, + (uintptr_t)bt->vec[i])) { + ret = true; + goto label_return; + } + } + + if (prof_dump_printf(propagate_err, + "\n" + " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n", + gctx->cnt_summed.curobjs, gctx->cnt_summed.curbytes, + gctx->cnt_summed.accumobjs, gctx->cnt_summed.accumbytes)) { + ret = true; + goto label_return; + } + + prof_tctx_dump_iter_arg.tsdn = tsdn; + prof_tctx_dump_iter_arg.propagate_err = propagate_err; + if (tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter, + (void *)&prof_tctx_dump_iter_arg) != NULL) { + ret = true; + goto label_return; + } + + ret = false; +label_return: + return (ret); +} + +#ifndef _WIN32 +JEMALLOC_FORMAT_PRINTF(1, 2) +static int +prof_open_maps(const char *format, ...) 
+{ + int mfd; + va_list ap; + char filename[PATH_MAX + 1]; + + va_start(ap, format); + malloc_vsnprintf(filename, sizeof(filename), format, ap); + va_end(ap); + mfd = open(filename, O_RDONLY); + + return (mfd); +} +#endif + +static int +prof_getpid(void) +{ + +#ifdef _WIN32 + return (GetCurrentProcessId()); +#else + return (getpid()); +#endif +} + +static bool +prof_dump_maps(bool propagate_err) +{ + bool ret; + int mfd; + + cassert(config_prof); +#ifdef __FreeBSD__ + mfd = prof_open_maps("/proc/curproc/map"); +#elif defined(_WIN32) + mfd = -1; // Not implemented +#else + { + int pid = prof_getpid(); + + mfd = prof_open_maps("/proc/%d/task/%d/maps", pid, pid); + if (mfd == -1) + mfd = prof_open_maps("/proc/%d/maps", pid); + } +#endif + if (mfd != -1) { + ssize_t nread; + + if (prof_dump_write(propagate_err, "\nMAPPED_LIBRARIES:\n") && + propagate_err) { + ret = true; + goto label_return; + } + nread = 0; + do { + prof_dump_buf_end += nread; + if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) { + /* Make space in prof_dump_buf before read(). */ + if (prof_dump_flush(propagate_err) && + propagate_err) { + ret = true; + goto label_return; + } + } + nread = read(mfd, &prof_dump_buf[prof_dump_buf_end], + PROF_DUMP_BUFSIZE - prof_dump_buf_end); + } while (nread > 0); + } else { + ret = true; + goto label_return; + } + + ret = false; +label_return: + if (mfd != -1) + close(mfd); + return (ret); +} + +/* + * See prof_sample_threshold_update() comment for why the body of this function + * is conditionally compiled. + */ +static void +prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx, + const char *filename) +{ + +#ifdef JEMALLOC_PROF + /* + * Scaling is equivalent AdjustSamples() in jeprof, but the result may + * differ slightly from what jeprof reports, because here we scale the + * summary values, whereas jeprof scales each context individually and + * reports the sums of the scaled values. + */ + if (cnt_all->curbytes != 0) { + double sample_period = (double)((uint64_t)1 << lg_prof_sample); + double ratio = (((double)cnt_all->curbytes) / + (double)cnt_all->curobjs) / sample_period; + double scale_factor = 1.0 / (1.0 - exp(-ratio)); + uint64_t curbytes = (uint64_t)round(((double)cnt_all->curbytes) + * scale_factor); + uint64_t curobjs = (uint64_t)round(((double)cnt_all->curobjs) * + scale_factor); + + malloc_printf(": Leak approximation summary: ~%"FMTu64 + " byte%s, ~%"FMTu64" object%s, >= %zu context%s\n", + curbytes, (curbytes != 1) ? "s" : "", curobjs, (curobjs != + 1) ? "s" : "", leak_ngctx, (leak_ngctx != 1) ? 
"s" : ""); + malloc_printf( + ": Run jeprof on \"%s\" for leak detail\n", + filename); + } +#endif +} + +struct prof_gctx_dump_iter_arg_s { + tsdn_t *tsdn; + bool propagate_err; +}; + +static prof_gctx_t * +prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) +{ + prof_gctx_t *ret; + struct prof_gctx_dump_iter_arg_s *arg = + (struct prof_gctx_dump_iter_arg_s *)opaque; + + malloc_mutex_lock(arg->tsdn, gctx->lock); + + if (prof_dump_gctx(arg->tsdn, arg->propagate_err, gctx, &gctx->bt, + gctxs)) { + ret = gctx; + goto label_return; + } + + ret = NULL; +label_return: + malloc_mutex_unlock(arg->tsdn, gctx->lock); + return (ret); +} + +static bool +prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck) +{ + prof_tdata_t *tdata; + struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg; + size_t tabind; + union { + prof_gctx_t *p; + void *v; + } gctx; + struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg; + struct prof_gctx_dump_iter_arg_s prof_gctx_dump_iter_arg; + prof_gctx_tree_t gctxs; + + cassert(config_prof); + + tdata = prof_tdata_get(tsd, true); + if (tdata == NULL) + return (true); + + malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx); + prof_enter(tsd, tdata); + + /* + * Put gctx's in limbo and clear their counters in preparation for + * summing. + */ + gctx_tree_new(&gctxs); + for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);) + prof_dump_gctx_prep(tsd_tsdn(tsd), gctx.p, &gctxs); + + /* + * Iterate over tdatas, and for the non-expired ones snapshot their tctx + * stats and merge them into the associated gctx's. + */ + prof_tdata_merge_iter_arg.tsdn = tsd_tsdn(tsd); + memset(&prof_tdata_merge_iter_arg.cnt_all, 0, sizeof(prof_cnt_t)); + malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); + tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter, + (void *)&prof_tdata_merge_iter_arg); + malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); + + /* Merge tctx stats into gctx's. */ + prof_gctx_merge_iter_arg.tsdn = tsd_tsdn(tsd); + prof_gctx_merge_iter_arg.leak_ngctx = 0; + gctx_tree_iter(&gctxs, NULL, prof_gctx_merge_iter, + (void *)&prof_gctx_merge_iter_arg); + + prof_leave(tsd, tdata); + + /* Create dump file. */ + if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1) + goto label_open_close_error; + + /* Dump profile header. */ + if (prof_dump_header(tsd_tsdn(tsd), propagate_err, + &prof_tdata_merge_iter_arg.cnt_all)) + goto label_write_error; + + /* Dump per gctx profile stats. */ + prof_gctx_dump_iter_arg.tsdn = tsd_tsdn(tsd); + prof_gctx_dump_iter_arg.propagate_err = propagate_err; + if (gctx_tree_iter(&gctxs, NULL, prof_gctx_dump_iter, + (void *)&prof_gctx_dump_iter_arg) != NULL) + goto label_write_error; + + /* Dump /proc//maps if possible. 
*/ + if (prof_dump_maps(propagate_err)) + goto label_write_error; + + if (prof_dump_close(propagate_err)) + goto label_open_close_error; + + prof_gctx_finish(tsd, &gctxs); + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx); + + if (leakcheck) { + prof_leakcheck(&prof_tdata_merge_iter_arg.cnt_all, + prof_gctx_merge_iter_arg.leak_ngctx, filename); + } + return (false); +label_write_error: + prof_dump_close(propagate_err); +label_open_close_error: + prof_gctx_finish(tsd, &gctxs); + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx); + return (true); +} + +#define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1) +#define VSEQ_INVALID UINT64_C(0xffffffffffffffff) +static void +prof_dump_filename(char *filename, char v, uint64_t vseq) +{ + + cassert(config_prof); + + if (vseq != VSEQ_INVALID) { + /* "...v.heap" */ + malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE, + "%s.%d.%"FMTu64".%c%"FMTu64".heap", + opt_prof_prefix, prof_getpid(), prof_dump_seq, v, vseq); + } else { + /* "....heap" */ + malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE, + "%s.%d.%"FMTu64".%c.heap", + opt_prof_prefix, prof_getpid(), prof_dump_seq, v); + } + prof_dump_seq++; +} + +static void +prof_fdump(void) +{ + tsd_t *tsd; + char filename[DUMP_FILENAME_BUFSIZE]; + + cassert(config_prof); + assert(opt_prof_final); + assert(opt_prof_prefix[0] != '\0'); + + if (!prof_booted) + return; + tsd = tsd_fetch(); + + malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx); + prof_dump_filename(filename, 'f', VSEQ_INVALID); + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx); + prof_dump(tsd, false, filename, opt_prof_leak); +} + +void +prof_idump(tsdn_t *tsdn) +{ + tsd_t *tsd; + prof_tdata_t *tdata; + + cassert(config_prof); + + if (!prof_booted || tsdn_null(tsdn)) + return; + tsd = tsdn_tsd(tsdn); + tdata = prof_tdata_get(tsd, false); + if (tdata == NULL) + return; + if (tdata->enq) { + tdata->enq_idump = true; + return; + } + + if (opt_prof_prefix[0] != '\0') { + char filename[PATH_MAX + 1]; + malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx); + prof_dump_filename(filename, 'i', prof_dump_iseq); + prof_dump_iseq++; + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx); + prof_dump(tsd, false, filename, false); + } +} + +bool +prof_mdump(tsd_t *tsd, const char *filename) +{ + char filename_buf[DUMP_FILENAME_BUFSIZE]; + + cassert(config_prof); + + if (!opt_prof || !prof_booted) + return (true); + + if (filename == NULL) { + /* No filename specified, so automatically generate one. 
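+		 * (Example, assuming the default "jeprof" prefix, pid 1234,
+		 * and the first manual dump: prof_dump_filename() above
+		 * would produce "jeprof.1234.0.m0.heap".)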
*/ + if (opt_prof_prefix[0] == '\0') + return (true); + malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx); + prof_dump_filename(filename_buf, 'm', prof_dump_mseq); + prof_dump_mseq++; + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx); + filename = filename_buf; + } + return (prof_dump(tsd, true, filename, false)); +} + +void +prof_gdump(tsdn_t *tsdn) +{ + tsd_t *tsd; + prof_tdata_t *tdata; + + cassert(config_prof); + + if (!prof_booted || tsdn_null(tsdn)) + return; + tsd = tsdn_tsd(tsdn); + tdata = prof_tdata_get(tsd, false); + if (tdata == NULL) + return; + if (tdata->enq) { + tdata->enq_gdump = true; + return; + } + + if (opt_prof_prefix[0] != '\0') { + char filename[DUMP_FILENAME_BUFSIZE]; + malloc_mutex_lock(tsdn, &prof_dump_seq_mtx); + prof_dump_filename(filename, 'u', prof_dump_useq); + prof_dump_useq++; + malloc_mutex_unlock(tsdn, &prof_dump_seq_mtx); + prof_dump(tsd, false, filename, false); + } +} + +static void +prof_bt_hash(const void *key, size_t r_hash[2]) +{ + prof_bt_t *bt = (prof_bt_t *)key; + + cassert(config_prof); + + hash(bt->vec, bt->len * sizeof(void *), 0x94122f33U, r_hash); +} + +static bool +prof_bt_keycomp(const void *k1, const void *k2) +{ + const prof_bt_t *bt1 = (prof_bt_t *)k1; + const prof_bt_t *bt2 = (prof_bt_t *)k2; + + cassert(config_prof); + + if (bt1->len != bt2->len) + return (false); + return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0); +} + +JEMALLOC_INLINE_C uint64_t +prof_thr_uid_alloc(tsdn_t *tsdn) +{ + uint64_t thr_uid; + + malloc_mutex_lock(tsdn, &next_thr_uid_mtx); + thr_uid = next_thr_uid; + next_thr_uid++; + malloc_mutex_unlock(tsdn, &next_thr_uid_mtx); + + return (thr_uid); +} + +static prof_tdata_t * +prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim, + char *thread_name, bool active) +{ + prof_tdata_t *tdata; + + cassert(config_prof); + + /* Initialize an empty cache for this thread. 
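+	 * The per-thread bt2tctx cache is keyed the same way as the global
+	 * bt2gctx table, so repeated samples of a hot backtrace resolve
+	 * under this thread's tdata lock without touching bt2gctx_mtx (see
+	 * prof_lookup()).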
*/ + tdata = (prof_tdata_t *)iallocztm(tsd_tsdn(tsd), sizeof(prof_tdata_t), + size2index(sizeof(prof_tdata_t)), false, NULL, true, + arena_get(TSDN_NULL, 0, true), true); + if (tdata == NULL) + return (NULL); + + tdata->lock = prof_tdata_mutex_choose(thr_uid); + tdata->thr_uid = thr_uid; + tdata->thr_discrim = thr_discrim; + tdata->thread_name = thread_name; + tdata->attached = true; + tdata->expired = false; + tdata->tctx_uid_next = 0; + + if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS, prof_bt_hash, + prof_bt_keycomp)) { + idalloctm(tsd_tsdn(tsd), tdata, NULL, true, true); + return (NULL); + } + + tdata->prng_state = (uint64_t)(uintptr_t)tdata; + prof_sample_threshold_update(tdata); + + tdata->enq = false; + tdata->enq_idump = false; + tdata->enq_gdump = false; + + tdata->dumping = false; + tdata->active = active; + + malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); + tdata_tree_insert(&tdatas, tdata); + malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); + + return (tdata); +} + +prof_tdata_t * +prof_tdata_init(tsd_t *tsd) +{ + + return (prof_tdata_init_impl(tsd, prof_thr_uid_alloc(tsd_tsdn(tsd)), 0, + NULL, prof_thread_active_init_get(tsd_tsdn(tsd)))); +} + +static bool +prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached) +{ + + if (tdata->attached && !even_if_attached) + return (false); + if (ckh_count(&tdata->bt2tctx) != 0) + return (false); + return (true); +} + +static bool +prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata, + bool even_if_attached) +{ + + malloc_mutex_assert_owner(tsdn, tdata->lock); + + return (prof_tdata_should_destroy_unlocked(tdata, even_if_attached)); +} + +static void +prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata, + bool even_if_attached) +{ + + malloc_mutex_assert_owner(tsd_tsdn(tsd), &tdatas_mtx); + + tdata_tree_remove(&tdatas, tdata); + + assert(prof_tdata_should_destroy_unlocked(tdata, even_if_attached)); + + if (tdata->thread_name != NULL) + idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, true, true); + ckh_delete(tsd, &tdata->bt2tctx); + idalloctm(tsd_tsdn(tsd), tdata, NULL, true, true); +} + +static void +prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached) +{ + + malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); + prof_tdata_destroy_locked(tsd, tdata, even_if_attached); + malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); +} + +static void +prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata) +{ + bool destroy_tdata; + + malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); + if (tdata->attached) { + destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, + true); + /* + * Only detach if !destroy_tdata, because detaching would allow + * another thread to win the race to destroy tdata. + */ + if (!destroy_tdata) + tdata->attached = false; + tsd_prof_tdata_set(tsd, NULL); + } else + destroy_tdata = false; + malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); + if (destroy_tdata) + prof_tdata_destroy(tsd, tdata, true); +} + +prof_tdata_t * +prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata) +{ + uint64_t thr_uid = tdata->thr_uid; + uint64_t thr_discrim = tdata->thr_discrim + 1; + char *thread_name = (tdata->thread_name != NULL) ? 
+ prof_thread_name_alloc(tsd_tsdn(tsd), tdata->thread_name) : NULL; + bool active = tdata->active; + + prof_tdata_detach(tsd, tdata); + return (prof_tdata_init_impl(tsd, thr_uid, thr_discrim, thread_name, + active)); +} + +static bool +prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata) +{ + bool destroy_tdata; + + malloc_mutex_lock(tsdn, tdata->lock); + if (!tdata->expired) { + tdata->expired = true; + destroy_tdata = tdata->attached ? false : + prof_tdata_should_destroy(tsdn, tdata, false); + } else + destroy_tdata = false; + malloc_mutex_unlock(tsdn, tdata->lock); + + return (destroy_tdata); +} + +static prof_tdata_t * +prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg) +{ + tsdn_t *tsdn = (tsdn_t *)arg; + + return (prof_tdata_expire(tsdn, tdata) ? tdata : NULL); +} + +void +prof_reset(tsd_t *tsd, size_t lg_sample) +{ + prof_tdata_t *next; + + assert(lg_sample < (sizeof(uint64_t) << 3)); + + malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); + + lg_prof_sample = lg_sample; + + next = NULL; + do { + prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next, + prof_tdata_reset_iter, (void *)tsd); + if (to_destroy != NULL) { + next = tdata_tree_next(&tdatas, to_destroy); + prof_tdata_destroy_locked(tsd, to_destroy, false); + } else + next = NULL; + } while (next != NULL); + + malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx); +} + +void +prof_tdata_cleanup(tsd_t *tsd) +{ + prof_tdata_t *tdata; + + if (!config_prof) + return; + + tdata = tsd_prof_tdata_get(tsd); + if (tdata != NULL) + prof_tdata_detach(tsd, tdata); +} + +bool +prof_active_get(tsdn_t *tsdn) +{ + bool prof_active_current; + + malloc_mutex_lock(tsdn, &prof_active_mtx); + prof_active_current = prof_active; + malloc_mutex_unlock(tsdn, &prof_active_mtx); + return (prof_active_current); +} + +bool +prof_active_set(tsdn_t *tsdn, bool active) +{ + bool prof_active_old; + + malloc_mutex_lock(tsdn, &prof_active_mtx); + prof_active_old = prof_active; + prof_active = active; + malloc_mutex_unlock(tsdn, &prof_active_mtx); + return (prof_active_old); +} + +const char * +prof_thread_name_get(tsd_t *tsd) +{ + prof_tdata_t *tdata; + + tdata = prof_tdata_get(tsd, true); + if (tdata == NULL) + return (""); + return (tdata->thread_name != NULL ? tdata->thread_name : ""); +} + +static char * +prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name) +{ + char *ret; + size_t size; + + if (thread_name == NULL) + return (NULL); + + size = strlen(thread_name) + 1; + if (size == 1) + return (""); + + ret = iallocztm(tsdn, size, size2index(size), false, NULL, true, + arena_get(TSDN_NULL, 0, true), true); + if (ret == NULL) + return (NULL); + memcpy(ret, thread_name, size); + return (ret); +} + +int +prof_thread_name_set(tsd_t *tsd, const char *thread_name) +{ + prof_tdata_t *tdata; + unsigned i; + char *s; + + tdata = prof_tdata_get(tsd, true); + if (tdata == NULL) + return (EAGAIN); + + /* Validate input. 
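+	 * For example, "worker-1" passes the isgraph()/isblank() check below,
+	 * whereas a name containing '\n' (or any other control character) is
+	 * rejected with EFAULT, as is a NULL pointer.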
*/ + if (thread_name == NULL) + return (EFAULT); + for (i = 0; thread_name[i] != '\0'; i++) { + char c = thread_name[i]; + if (!isgraph(c) && !isblank(c)) + return (EFAULT); + } + + s = prof_thread_name_alloc(tsd_tsdn(tsd), thread_name); + if (s == NULL) + return (EAGAIN); + + if (tdata->thread_name != NULL) { + idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, true, true); + tdata->thread_name = NULL; + } + if (strlen(s) > 0) + tdata->thread_name = s; + return (0); +} + +bool +prof_thread_active_get(tsd_t *tsd) +{ + prof_tdata_t *tdata; + + tdata = prof_tdata_get(tsd, true); + if (tdata == NULL) + return (false); + return (tdata->active); +} + +bool +prof_thread_active_set(tsd_t *tsd, bool active) +{ + prof_tdata_t *tdata; + + tdata = prof_tdata_get(tsd, true); + if (tdata == NULL) + return (true); + tdata->active = active; + return (false); +} + +bool +prof_thread_active_init_get(tsdn_t *tsdn) +{ + bool active_init; + + malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx); + active_init = prof_thread_active_init; + malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx); + return (active_init); +} + +bool +prof_thread_active_init_set(tsdn_t *tsdn, bool active_init) +{ + bool active_init_old; + + malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx); + active_init_old = prof_thread_active_init; + prof_thread_active_init = active_init; + malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx); + return (active_init_old); +} + +bool +prof_gdump_get(tsdn_t *tsdn) +{ + bool prof_gdump_current; + + malloc_mutex_lock(tsdn, &prof_gdump_mtx); + prof_gdump_current = prof_gdump_val; + malloc_mutex_unlock(tsdn, &prof_gdump_mtx); + return (prof_gdump_current); +} + +bool +prof_gdump_set(tsdn_t *tsdn, bool gdump) +{ + bool prof_gdump_old; + + malloc_mutex_lock(tsdn, &prof_gdump_mtx); + prof_gdump_old = prof_gdump_val; + prof_gdump_val = gdump; + malloc_mutex_unlock(tsdn, &prof_gdump_mtx); + return (prof_gdump_old); +} + +void +prof_boot0(void) +{ + + cassert(config_prof); + + memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT, + sizeof(PROF_PREFIX_DEFAULT)); +} + +void +prof_boot1(void) +{ + + cassert(config_prof); + + /* + * opt_prof must be in its final state before any arenas are + * initialized, so this function must be executed early. + */ + + if (opt_prof_leak && !opt_prof) { + /* + * Enable opt_prof, but in such a way that profiles are never + * automatically dumped. 
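+	 * For example, running with MALLOC_CONF=prof_leak:true while leaving
+	 * prof at its default of false takes this branch: gdump is forced off
+	 * and prof_interval stays 0, so no profile is ever written mid-run;
+	 * the data exist only for the exit-time leak report.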
+		 */
+		opt_prof = true;
+		opt_prof_gdump = false;
+	} else if (opt_prof) {
+		if (opt_lg_prof_interval >= 0) {
+			prof_interval = (((uint64_t)1U) <<
+			    opt_lg_prof_interval);
+		}
+	}
+}
+
+bool
+prof_boot2(tsd_t *tsd)
+{
+
+	cassert(config_prof);
+
+	if (opt_prof) {
+		unsigned i;
+
+		lg_prof_sample = opt_lg_prof_sample;
+
+		prof_active = opt_prof_active;
+		if (malloc_mutex_init(&prof_active_mtx, "prof_active",
+		    WITNESS_RANK_PROF_ACTIVE))
+			return (true);
+
+		prof_gdump_val = opt_prof_gdump;
+		if (malloc_mutex_init(&prof_gdump_mtx, "prof_gdump",
+		    WITNESS_RANK_PROF_GDUMP))
+			return (true);
+
+		prof_thread_active_init = opt_prof_thread_active_init;
+		if (malloc_mutex_init(&prof_thread_active_init_mtx,
+		    "prof_thread_active_init",
+		    WITNESS_RANK_PROF_THREAD_ACTIVE_INIT))
+			return (true);
+
+		if (ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash,
+		    prof_bt_keycomp))
+			return (true);
+		if (malloc_mutex_init(&bt2gctx_mtx, "prof_bt2gctx",
+		    WITNESS_RANK_PROF_BT2GCTX))
+			return (true);
+
+		tdata_tree_new(&tdatas);
+		if (malloc_mutex_init(&tdatas_mtx, "prof_tdatas",
+		    WITNESS_RANK_PROF_TDATAS))
+			return (true);
+
+		next_thr_uid = 0;
+		if (malloc_mutex_init(&next_thr_uid_mtx, "prof_next_thr_uid",
+		    WITNESS_RANK_PROF_NEXT_THR_UID))
+			return (true);
+
+		if (malloc_mutex_init(&prof_dump_seq_mtx, "prof_dump_seq",
+		    WITNESS_RANK_PROF_DUMP_SEQ))
+			return (true);
+		if (malloc_mutex_init(&prof_dump_mtx, "prof_dump",
+		    WITNESS_RANK_PROF_DUMP))
+			return (true);
+
+		if (opt_prof_final && opt_prof_prefix[0] != '\0' &&
+		    atexit(prof_fdump) != 0) {
+			malloc_write("<jemalloc>: Error in atexit()\n");
+			if (opt_abort)
+				abort();
+		}
+
+		gctx_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd),
+		    PROF_NCTX_LOCKS * sizeof(malloc_mutex_t));
+		if (gctx_locks == NULL)
+			return (true);
+		for (i = 0; i < PROF_NCTX_LOCKS; i++) {
+			if (malloc_mutex_init(&gctx_locks[i], "prof_gctx",
+			    WITNESS_RANK_PROF_GCTX))
+				return (true);
+		}
+
+		tdata_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd),
+		    PROF_NTDATA_LOCKS * sizeof(malloc_mutex_t));
+		if (tdata_locks == NULL)
+			return (true);
+		for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
+			if (malloc_mutex_init(&tdata_locks[i], "prof_tdata",
+			    WITNESS_RANK_PROF_TDATA))
+				return (true);
+		}
+	}
+
+#ifdef JEMALLOC_PROF_LIBGCC
+	/*
+	 * Cause the backtracing machinery to allocate its internal state
+	 * before enabling profiling.
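+	 * The _Unwind_Backtrace() call below is a single throwaway stack walk
+	 * (prof_unwind_init_callback, defined earlier in this file, does
+	 * nothing) so that libgcc's first-use allocations happen now rather
+	 * than inside a later allocation sample, where reentrant allocation
+	 * would be problematic.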
+ */ + _Unwind_Backtrace(prof_unwind_init_callback, NULL); +#endif + + prof_booted = true; + + return (false); +} + +void +prof_prefork0(tsdn_t *tsdn) +{ + + if (opt_prof) { + unsigned i; + + malloc_mutex_prefork(tsdn, &prof_dump_mtx); + malloc_mutex_prefork(tsdn, &bt2gctx_mtx); + malloc_mutex_prefork(tsdn, &tdatas_mtx); + for (i = 0; i < PROF_NTDATA_LOCKS; i++) + malloc_mutex_prefork(tsdn, &tdata_locks[i]); + for (i = 0; i < PROF_NCTX_LOCKS; i++) + malloc_mutex_prefork(tsdn, &gctx_locks[i]); + } +} + +void +prof_prefork1(tsdn_t *tsdn) +{ + + if (opt_prof) { + malloc_mutex_prefork(tsdn, &prof_active_mtx); + malloc_mutex_prefork(tsdn, &prof_dump_seq_mtx); + malloc_mutex_prefork(tsdn, &prof_gdump_mtx); + malloc_mutex_prefork(tsdn, &next_thr_uid_mtx); + malloc_mutex_prefork(tsdn, &prof_thread_active_init_mtx); + } +} + +void +prof_postfork_parent(tsdn_t *tsdn) +{ + + if (opt_prof) { + unsigned i; + + malloc_mutex_postfork_parent(tsdn, + &prof_thread_active_init_mtx); + malloc_mutex_postfork_parent(tsdn, &next_thr_uid_mtx); + malloc_mutex_postfork_parent(tsdn, &prof_gdump_mtx); + malloc_mutex_postfork_parent(tsdn, &prof_dump_seq_mtx); + malloc_mutex_postfork_parent(tsdn, &prof_active_mtx); + for (i = 0; i < PROF_NCTX_LOCKS; i++) + malloc_mutex_postfork_parent(tsdn, &gctx_locks[i]); + for (i = 0; i < PROF_NTDATA_LOCKS; i++) + malloc_mutex_postfork_parent(tsdn, &tdata_locks[i]); + malloc_mutex_postfork_parent(tsdn, &tdatas_mtx); + malloc_mutex_postfork_parent(tsdn, &bt2gctx_mtx); + malloc_mutex_postfork_parent(tsdn, &prof_dump_mtx); + } +} + +void +prof_postfork_child(tsdn_t *tsdn) +{ + + if (opt_prof) { + unsigned i; + + malloc_mutex_postfork_child(tsdn, &prof_thread_active_init_mtx); + malloc_mutex_postfork_child(tsdn, &next_thr_uid_mtx); + malloc_mutex_postfork_child(tsdn, &prof_gdump_mtx); + malloc_mutex_postfork_child(tsdn, &prof_dump_seq_mtx); + malloc_mutex_postfork_child(tsdn, &prof_active_mtx); + for (i = 0; i < PROF_NCTX_LOCKS; i++) + malloc_mutex_postfork_child(tsdn, &gctx_locks[i]); + for (i = 0; i < PROF_NTDATA_LOCKS; i++) + malloc_mutex_postfork_child(tsdn, &tdata_locks[i]); + malloc_mutex_postfork_child(tsdn, &tdatas_mtx); + malloc_mutex_postfork_child(tsdn, &bt2gctx_mtx); + malloc_mutex_postfork_child(tsdn, &prof_dump_mtx); + } +} + +/******************************************************************************/ diff --git a/sources/jemalloc/src/je_quarantine.c b/sources/jemalloc/src/je_quarantine.c new file mode 100755 index 00000000..18903fb5 --- /dev/null +++ b/sources/jemalloc/src/je_quarantine.c @@ -0,0 +1,183 @@ +#define JEMALLOC_QUARANTINE_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +/* + * Quarantine pointers close to NULL are used to encode state information that + * is used for cleaning up during thread shutdown. + */ +#define QUARANTINE_STATE_REINCARNATED ((quarantine_t *)(uintptr_t)1) +#define QUARANTINE_STATE_PURGATORY ((quarantine_t *)(uintptr_t)2) +#define QUARANTINE_STATE_MAX QUARANTINE_STATE_PURGATORY + +/******************************************************************************/ +/* Function prototypes for non-inline static functions. 
*/ + +static quarantine_t *quarantine_grow(tsd_t *tsd, quarantine_t *quarantine); +static void quarantine_drain_one(tsdn_t *tsdn, quarantine_t *quarantine); +static void quarantine_drain(tsdn_t *tsdn, quarantine_t *quarantine, + size_t upper_bound); + +/******************************************************************************/ + +static quarantine_t * +quarantine_init(tsdn_t *tsdn, size_t lg_maxobjs) +{ + quarantine_t *quarantine; + size_t size; + + size = offsetof(quarantine_t, objs) + ((ZU(1) << lg_maxobjs) * + sizeof(quarantine_obj_t)); + quarantine = (quarantine_t *)iallocztm(tsdn, size, size2index(size), + false, NULL, true, arena_get(TSDN_NULL, 0, true), true); + if (quarantine == NULL) + return (NULL); + quarantine->curbytes = 0; + quarantine->curobjs = 0; + quarantine->first = 0; + quarantine->lg_maxobjs = lg_maxobjs; + + return (quarantine); +} + +void +quarantine_alloc_hook_work(tsd_t *tsd) +{ + quarantine_t *quarantine; + + if (!tsd_nominal(tsd)) + return; + + quarantine = quarantine_init(tsd_tsdn(tsd), LG_MAXOBJS_INIT); + /* + * Check again whether quarantine has been initialized, because + * quarantine_init() may have triggered recursive initialization. + */ + if (tsd_quarantine_get(tsd) == NULL) + tsd_quarantine_set(tsd, quarantine); + else + idalloctm(tsd_tsdn(tsd), quarantine, NULL, true, true); +} + +static quarantine_t * +quarantine_grow(tsd_t *tsd, quarantine_t *quarantine) +{ + quarantine_t *ret; + + ret = quarantine_init(tsd_tsdn(tsd), quarantine->lg_maxobjs + 1); + if (ret == NULL) { + quarantine_drain_one(tsd_tsdn(tsd), quarantine); + return (quarantine); + } + + ret->curbytes = quarantine->curbytes; + ret->curobjs = quarantine->curobjs; + if (quarantine->first + quarantine->curobjs <= (ZU(1) << + quarantine->lg_maxobjs)) { + /* objs ring buffer data are contiguous. */ + memcpy(ret->objs, &quarantine->objs[quarantine->first], + quarantine->curobjs * sizeof(quarantine_obj_t)); + } else { + /* objs ring buffer data wrap around. */ + size_t ncopy_a = (ZU(1) << quarantine->lg_maxobjs) - + quarantine->first; + size_t ncopy_b = quarantine->curobjs - ncopy_a; + + memcpy(ret->objs, &quarantine->objs[quarantine->first], ncopy_a + * sizeof(quarantine_obj_t)); + memcpy(&ret->objs[ncopy_a], quarantine->objs, ncopy_b * + sizeof(quarantine_obj_t)); + } + idalloctm(tsd_tsdn(tsd), quarantine, NULL, true, true); + + tsd_quarantine_set(tsd, ret); + return (ret); +} + +static void +quarantine_drain_one(tsdn_t *tsdn, quarantine_t *quarantine) +{ + quarantine_obj_t *obj = &quarantine->objs[quarantine->first]; + assert(obj->usize == isalloc(tsdn, obj->ptr, config_prof)); + idalloctm(tsdn, obj->ptr, NULL, false, true); + quarantine->curbytes -= obj->usize; + quarantine->curobjs--; + quarantine->first = (quarantine->first + 1) & ((ZU(1) << + quarantine->lg_maxobjs) - 1); +} + +static void +quarantine_drain(tsdn_t *tsdn, quarantine_t *quarantine, size_t upper_bound) +{ + + while (quarantine->curbytes > upper_bound && quarantine->curobjs > 0) + quarantine_drain_one(tsdn, quarantine); +} + +void +quarantine(tsd_t *tsd, void *ptr) +{ + quarantine_t *quarantine; + size_t usize = isalloc(tsd_tsdn(tsd), ptr, config_prof); + + cassert(config_fill); + assert(opt_quarantine); + + if ((quarantine = tsd_quarantine_get(tsd)) == NULL) { + idalloctm(tsd_tsdn(tsd), ptr, NULL, false, true); + return; + } + /* + * Drain one or more objects if the quarantine size limit would be + * exceeded by appending ptr. 
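+	 * Worked example: with opt_quarantine = 4 MiB, curbytes = 4 MiB and
+	 * usize = 64 KiB, upper_bound below is 4 MiB - 64 KiB, and
+	 * quarantine_drain() frees the oldest objects (FIFO) until curbytes
+	 * fits under that bound, leaving room to append ptr.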
+ */ + if (quarantine->curbytes + usize > opt_quarantine) { + size_t upper_bound = (opt_quarantine >= usize) ? opt_quarantine + - usize : 0; + quarantine_drain(tsd_tsdn(tsd), quarantine, upper_bound); + } + /* Grow the quarantine ring buffer if it's full. */ + if (quarantine->curobjs == (ZU(1) << quarantine->lg_maxobjs)) + quarantine = quarantine_grow(tsd, quarantine); + /* quarantine_grow() must free a slot if it fails to grow. */ + assert(quarantine->curobjs < (ZU(1) << quarantine->lg_maxobjs)); + /* Append ptr if its size doesn't exceed the quarantine size. */ + if (quarantine->curbytes + usize <= opt_quarantine) { + size_t offset = (quarantine->first + quarantine->curobjs) & + ((ZU(1) << quarantine->lg_maxobjs) - 1); + quarantine_obj_t *obj = &quarantine->objs[offset]; + obj->ptr = ptr; + obj->usize = usize; + quarantine->curbytes += usize; + quarantine->curobjs++; + if (config_fill && unlikely(opt_junk_free)) { + /* + * Only do redzone validation if Valgrind isn't in + * operation. + */ + if ((!config_valgrind || likely(!in_valgrind)) + && usize <= SMALL_MAXCLASS) + arena_quarantine_junk_small(ptr, usize); + else + memset(ptr, JEMALLOC_FREE_JUNK, usize); + } + } else { + assert(quarantine->curbytes == 0); + idalloctm(tsd_tsdn(tsd), ptr, NULL, false, true); + } +} + +void +quarantine_cleanup(tsd_t *tsd) +{ + quarantine_t *quarantine; + + if (!config_fill) + return; + + quarantine = tsd_quarantine_get(tsd); + if (quarantine != NULL) { + quarantine_drain(tsd_tsdn(tsd), quarantine, 0); + idalloctm(tsd_tsdn(tsd), quarantine, NULL, true, true); + tsd_quarantine_set(tsd, NULL); + } +} diff --git a/sources/jemalloc/src/je_rtree.c b/sources/jemalloc/src/je_rtree.c new file mode 100755 index 00000000..f2e2997d --- /dev/null +++ b/sources/jemalloc/src/je_rtree.c @@ -0,0 +1,132 @@ +#define JEMALLOC_RTREE_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +static unsigned +hmin(unsigned ha, unsigned hb) +{ + + return (ha < hb ? ha : hb); +} + +/* Only the most significant bits of keys passed to rtree_[gs]et() are used. */ +bool +rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc, + rtree_node_dalloc_t *dalloc) +{ + unsigned bits_in_leaf, height, i; + + assert(RTREE_HEIGHT_MAX == ((ZU(1) << (LG_SIZEOF_PTR+3)) / + RTREE_BITS_PER_LEVEL)); + assert(bits > 0 && bits <= (sizeof(uintptr_t) << 3)); + + bits_in_leaf = (bits % RTREE_BITS_PER_LEVEL) == 0 ? RTREE_BITS_PER_LEVEL + : (bits % RTREE_BITS_PER_LEVEL); + if (bits > bits_in_leaf) { + height = 1 + (bits - bits_in_leaf) / RTREE_BITS_PER_LEVEL; + if ((height-1) * RTREE_BITS_PER_LEVEL + bits_in_leaf != bits) + height++; + } else + height = 1; + assert((height-1) * RTREE_BITS_PER_LEVEL + bits_in_leaf == bits); + + rtree->alloc = alloc; + rtree->dalloc = dalloc; + rtree->height = height; + + /* Root level. */ + rtree->levels[0].subtree = NULL; + rtree->levels[0].bits = (height > 1) ? RTREE_BITS_PER_LEVEL : + bits_in_leaf; + rtree->levels[0].cumbits = rtree->levels[0].bits; + /* Interior levels. */ + for (i = 1; i < height-1; i++) { + rtree->levels[i].subtree = NULL; + rtree->levels[i].bits = RTREE_BITS_PER_LEVEL; + rtree->levels[i].cumbits = rtree->levels[i-1].cumbits + + RTREE_BITS_PER_LEVEL; + } + /* Leaf level. */ + if (height > 1) { + rtree->levels[height-1].subtree = NULL; + rtree->levels[height-1].bits = bits_in_leaf; + rtree->levels[height-1].cumbits = bits; + } + + /* Compute lookup table to be used by rtree_start_level(). 
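+	 * For example, on a 64-bit build with RTREE_BITS_PER_LEVEL == 4 (so
+	 * RTREE_HEIGHT_MAX == 16) and height == 8, the loop below yields
+	 * {7,7,7,7,7,7,7,7,7,6,5,4,3,2,1,0}: the more leading all-zero bit
+	 * groups a key has, the deeper the level at which its lookup may
+	 * safely begin.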
*/ + for (i = 0; i < RTREE_HEIGHT_MAX; i++) { + rtree->start_level[i] = hmin(RTREE_HEIGHT_MAX - 1 - i, height - + 1); + } + + return (false); +} + +static void +rtree_delete_subtree(rtree_t *rtree, rtree_node_elm_t *node, unsigned level) +{ + + if (level + 1 < rtree->height) { + size_t nchildren, i; + + nchildren = ZU(1) << rtree->levels[level].bits; + for (i = 0; i < nchildren; i++) { + rtree_node_elm_t *child = node[i].child; + if (child != NULL) + rtree_delete_subtree(rtree, child, level + 1); + } + } + rtree->dalloc(node); +} + +void +rtree_delete(rtree_t *rtree) +{ + unsigned i; + + for (i = 0; i < rtree->height; i++) { + rtree_node_elm_t *subtree = rtree->levels[i].subtree; + if (subtree != NULL) + rtree_delete_subtree(rtree, subtree, i); + } +} + +static rtree_node_elm_t * +rtree_node_init(rtree_t *rtree, unsigned level, rtree_node_elm_t **elmp) +{ + rtree_node_elm_t *node; + + if (atomic_cas_p((void **)elmp, NULL, RTREE_NODE_INITIALIZING)) { + spin_t spinner; + + /* + * Another thread is already in the process of initializing. + * Spin-wait until initialization is complete. + */ + spin_init(&spinner); + do { + spin_adaptive(&spinner); + node = atomic_read_p((void **)elmp); + } while (node == RTREE_NODE_INITIALIZING); + } else { + node = rtree->alloc(ZU(1) << rtree->levels[level].bits); + if (node == NULL) + return (NULL); + atomic_write_p((void **)elmp, node); + } + + return (node); +} + +rtree_node_elm_t * +rtree_subtree_read_hard(rtree_t *rtree, unsigned level) +{ + + return (rtree_node_init(rtree, level, &rtree->levels[level].subtree)); +} + +rtree_node_elm_t * +rtree_child_read_hard(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level) +{ + + return (rtree_node_init(rtree, level+1, &elm->child)); +} diff --git a/sources/jemalloc/src/je_spin.c b/sources/jemalloc/src/je_spin.c new file mode 100755 index 00000000..5242d95a --- /dev/null +++ b/sources/jemalloc/src/je_spin.c @@ -0,0 +1,2 @@ +#define JEMALLOC_SPIN_C_ +#include "jemalloc/internal/jemalloc_internal.h" diff --git a/sources/jemalloc/src/je_stats.c b/sources/jemalloc/src/je_stats.c new file mode 100755 index 00000000..b76afc5a --- /dev/null +++ b/sources/jemalloc/src/je_stats.c @@ -0,0 +1,1173 @@ +#define JEMALLOC_STATS_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +#define CTL_GET(n, v, t) do { \ + size_t sz = sizeof(t); \ + xmallctl(n, (void *)v, &sz, NULL, 0); \ +} while (0) + +#define CTL_M2_GET(n, i, v, t) do { \ + size_t mib[6]; \ + size_t miblen = sizeof(mib) / sizeof(size_t); \ + size_t sz = sizeof(t); \ + xmallctlnametomib(n, mib, &miblen); \ + mib[2] = (i); \ + xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0); \ +} while (0) + +#define CTL_M2_M4_GET(n, i, j, v, t) do { \ + size_t mib[6]; \ + size_t miblen = sizeof(mib) / sizeof(size_t); \ + size_t sz = sizeof(t); \ + xmallctlnametomib(n, mib, &miblen); \ + mib[2] = (i); \ + mib[4] = (j); \ + xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0); \ +} while (0) + +/******************************************************************************/ +/* Data. 
*/ + +bool opt_stats_print = false; + +size_t stats_cactive = 0; + +/******************************************************************************/ + +static void +stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque, + bool json, bool large, bool huge, unsigned i) +{ + size_t page; + bool in_gap, in_gap_prev; + unsigned nbins, j; + + CTL_GET("arenas.page", &page, size_t); + + CTL_GET("arenas.nbins", &nbins, unsigned); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"bins\": [\n"); + } else { + if (config_tcache) { + malloc_cprintf(write_cb, cbopaque, + "bins: size ind allocated nmalloc" + " ndalloc nrequests curregs" + " curruns regs pgs util nfills" + " nflushes newruns reruns\n"); + } else { + malloc_cprintf(write_cb, cbopaque, + "bins: size ind allocated nmalloc" + " ndalloc nrequests curregs" + " curruns regs pgs util newruns" + " reruns\n"); + } + } + for (j = 0, in_gap = false; j < nbins; j++) { + uint64_t nruns; + size_t reg_size, run_size, curregs; + size_t curruns; + uint32_t nregs; + uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes; + uint64_t nreruns; + + CTL_M2_M4_GET("stats.arenas.0.bins.0.nruns", i, j, &nruns, + uint64_t); + in_gap_prev = in_gap; + in_gap = (nruns == 0); + + if (!json && in_gap_prev && !in_gap) { + malloc_cprintf(write_cb, cbopaque, + " ---\n"); + } + + CTL_M2_GET("arenas.bin.0.size", j, ®_size, size_t); + CTL_M2_GET("arenas.bin.0.nregs", j, &nregs, uint32_t); + CTL_M2_GET("arenas.bin.0.run_size", j, &run_size, size_t); + + CTL_M2_M4_GET("stats.arenas.0.bins.0.nmalloc", i, j, &nmalloc, + uint64_t); + CTL_M2_M4_GET("stats.arenas.0.bins.0.ndalloc", i, j, &ndalloc, + uint64_t); + CTL_M2_M4_GET("stats.arenas.0.bins.0.curregs", i, j, &curregs, + size_t); + CTL_M2_M4_GET("stats.arenas.0.bins.0.nrequests", i, j, + &nrequests, uint64_t); + if (config_tcache) { + CTL_M2_M4_GET("stats.arenas.0.bins.0.nfills", i, j, + &nfills, uint64_t); + CTL_M2_M4_GET("stats.arenas.0.bins.0.nflushes", i, j, + &nflushes, uint64_t); + } + CTL_M2_M4_GET("stats.arenas.0.bins.0.nreruns", i, j, &nreruns, + uint64_t); + CTL_M2_M4_GET("stats.arenas.0.bins.0.curruns", i, j, &curruns, + size_t); + + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t{\n" + "\t\t\t\t\t\t\"nmalloc\": %"FMTu64",\n" + "\t\t\t\t\t\t\"ndalloc\": %"FMTu64",\n" + "\t\t\t\t\t\t\"curregs\": %zu,\n" + "\t\t\t\t\t\t\"nrequests\": %"FMTu64",\n", + nmalloc, + ndalloc, + curregs, + nrequests); + if (config_tcache) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\t\"nfills\": %"FMTu64",\n" + "\t\t\t\t\t\t\"nflushes\": %"FMTu64",\n", + nfills, + nflushes); + } + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\t\"nreruns\": %"FMTu64",\n" + "\t\t\t\t\t\t\"curruns\": %zu\n" + "\t\t\t\t\t}%s\n", + nreruns, + curruns, + (j + 1 < nbins) ? "," : ""); + } else if (!in_gap) { + size_t availregs, milli; + char util[6]; /* "x.yyy". */ + + availregs = nregs * curruns; + milli = (availregs != 0) ? (1000 * curregs) / availregs + : 1000; + + if (milli > 1000) { + /* + * Race detected: the counters were read in + * separate mallctl calls and concurrent + * operations happened in between. In this case + * no meaningful utilization can be computed. 
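+				 * For example, if nregs * curruns was read as
+				 * 500 but curregs is then read as 600 after an
+				 * allocation burst, milli computes to 1200,
+				 * and " race" is printed in place of a bogus
+				 * utilization figure.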
+ */ + malloc_snprintf(util, sizeof(util), " race"); + } else if (milli < 10) { + malloc_snprintf(util, sizeof(util), + "0.00%zu", milli); + } else if (milli < 100) { + malloc_snprintf(util, sizeof(util), "0.0%zu", + milli); + } else if (milli < 1000) { + malloc_snprintf(util, sizeof(util), "0.%zu", + milli); + } else { + assert(milli == 1000); + malloc_snprintf(util, sizeof(util), "1"); + } + + if (config_tcache) { + malloc_cprintf(write_cb, cbopaque, + "%20zu %3u %12zu %12"FMTu64 + " %12"FMTu64" %12"FMTu64" %12zu" + " %12zu %4u %3zu %-5s %12"FMTu64 + " %12"FMTu64" %12"FMTu64" %12"FMTu64"\n", + reg_size, j, curregs * reg_size, nmalloc, + ndalloc, nrequests, curregs, curruns, nregs, + run_size / page, util, nfills, nflushes, + nruns, nreruns); + } else { + malloc_cprintf(write_cb, cbopaque, + "%20zu %3u %12zu %12"FMTu64 + " %12"FMTu64" %12"FMTu64" %12zu" + " %12zu %4u %3zu %-5s %12"FMTu64 + " %12"FMTu64"\n", + reg_size, j, curregs * reg_size, nmalloc, + ndalloc, nrequests, curregs, curruns, nregs, + run_size / page, util, nruns, nreruns); + } + } + } + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t]%s\n", (large || huge) ? "," : ""); + } else { + if (in_gap) { + malloc_cprintf(write_cb, cbopaque, + " ---\n"); + } + } +} + +static void +stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque, + bool json, bool huge, unsigned i) +{ + unsigned nbins, nlruns, j; + bool in_gap, in_gap_prev; + + CTL_GET("arenas.nbins", &nbins, unsigned); + CTL_GET("arenas.nlruns", &nlruns, unsigned); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"lruns\": [\n"); + } else { + malloc_cprintf(write_cb, cbopaque, + "large: size ind allocated nmalloc" + " ndalloc nrequests curruns\n"); + } + for (j = 0, in_gap = false; j < nlruns; j++) { + uint64_t nmalloc, ndalloc, nrequests; + size_t run_size, curruns; + + CTL_M2_M4_GET("stats.arenas.0.lruns.0.nmalloc", i, j, &nmalloc, + uint64_t); + CTL_M2_M4_GET("stats.arenas.0.lruns.0.ndalloc", i, j, &ndalloc, + uint64_t); + CTL_M2_M4_GET("stats.arenas.0.lruns.0.nrequests", i, j, + &nrequests, uint64_t); + in_gap_prev = in_gap; + in_gap = (nrequests == 0); + + if (!json && in_gap_prev && !in_gap) { + malloc_cprintf(write_cb, cbopaque, + " ---\n"); + } + + CTL_M2_GET("arenas.lrun.0.size", j, &run_size, size_t); + CTL_M2_M4_GET("stats.arenas.0.lruns.0.curruns", i, j, &curruns, + size_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t{\n" + "\t\t\t\t\t\t\"curruns\": %zu\n" + "\t\t\t\t\t}%s\n", + curruns, + (j + 1 < nlruns) ? "," : ""); + } else if (!in_gap) { + malloc_cprintf(write_cb, cbopaque, + "%20zu %3u %12zu %12"FMTu64" %12"FMTu64 + " %12"FMTu64" %12zu\n", + run_size, nbins + j, curruns * run_size, nmalloc, + ndalloc, nrequests, curruns); + } + } + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t]%s\n", huge ? 
"," : ""); + } else { + if (in_gap) { + malloc_cprintf(write_cb, cbopaque, + " ---\n"); + } + } +} + +static void +stats_arena_hchunks_print(void (*write_cb)(void *, const char *), + void *cbopaque, bool json, unsigned i) +{ + unsigned nbins, nlruns, nhchunks, j; + bool in_gap, in_gap_prev; + + CTL_GET("arenas.nbins", &nbins, unsigned); + CTL_GET("arenas.nlruns", &nlruns, unsigned); + CTL_GET("arenas.nhchunks", &nhchunks, unsigned); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"hchunks\": [\n"); + } else { + malloc_cprintf(write_cb, cbopaque, + "huge: size ind allocated nmalloc" + " ndalloc nrequests curhchunks\n"); + } + for (j = 0, in_gap = false; j < nhchunks; j++) { + uint64_t nmalloc, ndalloc, nrequests; + size_t hchunk_size, curhchunks; + + CTL_M2_M4_GET("stats.arenas.0.hchunks.0.nmalloc", i, j, + &nmalloc, uint64_t); + CTL_M2_M4_GET("stats.arenas.0.hchunks.0.ndalloc", i, j, + &ndalloc, uint64_t); + CTL_M2_M4_GET("stats.arenas.0.hchunks.0.nrequests", i, j, + &nrequests, uint64_t); + in_gap_prev = in_gap; + in_gap = (nrequests == 0); + + if (!json && in_gap_prev && !in_gap) { + malloc_cprintf(write_cb, cbopaque, + " ---\n"); + } + + CTL_M2_GET("arenas.hchunk.0.size", j, &hchunk_size, size_t); + CTL_M2_M4_GET("stats.arenas.0.hchunks.0.curhchunks", i, j, + &curhchunks, size_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t{\n" + "\t\t\t\t\t\t\"curhchunks\": %zu\n" + "\t\t\t\t\t}%s\n", + curhchunks, + (j + 1 < nhchunks) ? "," : ""); + } else if (!in_gap) { + malloc_cprintf(write_cb, cbopaque, + "%20zu %3u %12zu %12"FMTu64" %12"FMTu64 + " %12"FMTu64" %12zu\n", + hchunk_size, nbins + nlruns + j, + curhchunks * hchunk_size, nmalloc, ndalloc, + nrequests, curhchunks); + } + } + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t]\n"); + } else { + if (in_gap) { + malloc_cprintf(write_cb, cbopaque, + " ---\n"); + } + } +} + +static void +stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque, + bool json, unsigned i, bool bins, bool large, bool huge) +{ + unsigned nthreads; + const char *dss; + ssize_t lg_dirty_mult, decay_time; + size_t page, pactive, pdirty, mapped, retained; + size_t metadata_mapped, metadata_allocated; + uint64_t npurge, nmadvise, purged; + size_t small_allocated; + uint64_t small_nmalloc, small_ndalloc, small_nrequests; + size_t large_allocated; + uint64_t large_nmalloc, large_ndalloc, large_nrequests; + size_t huge_allocated; + uint64_t huge_nmalloc, huge_ndalloc, huge_nrequests; + + CTL_GET("arenas.page", &page, size_t); + + CTL_M2_GET("stats.arenas.0.nthreads", i, &nthreads, unsigned); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"nthreads\": %u,\n", nthreads); + } else { + malloc_cprintf(write_cb, cbopaque, + "assigned threads: %u\n", nthreads); + } + + CTL_M2_GET("stats.arenas.0.dss", i, &dss, const char *); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"dss\": \"%s\",\n", dss); + } else { + malloc_cprintf(write_cb, cbopaque, + "dss allocation precedence: %s\n", dss); + } + + CTL_M2_GET("stats.arenas.0.lg_dirty_mult", i, &lg_dirty_mult, ssize_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"lg_dirty_mult\": %zd,\n", lg_dirty_mult); + } else { + if (opt_purge == purge_mode_ratio) { + if (lg_dirty_mult >= 0) { + malloc_cprintf(write_cb, cbopaque, + "min active:dirty page ratio: %u:1\n", + (1U << lg_dirty_mult)); + } else { + malloc_cprintf(write_cb, cbopaque, + "min active:dirty page ratio: N/A\n"); + } + } + } + + 
CTL_M2_GET("stats.arenas.0.decay_time", i, &decay_time, ssize_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"decay_time\": %zd,\n", decay_time); + } else { + if (opt_purge == purge_mode_decay) { + if (decay_time >= 0) { + malloc_cprintf(write_cb, cbopaque, + "decay time: %zd\n", decay_time); + } else { + malloc_cprintf(write_cb, cbopaque, + "decay time: N/A\n"); + } + } + } + + CTL_M2_GET("stats.arenas.0.pactive", i, &pactive, size_t); + CTL_M2_GET("stats.arenas.0.pdirty", i, &pdirty, size_t); + CTL_M2_GET("stats.arenas.0.npurge", i, &npurge, uint64_t); + CTL_M2_GET("stats.arenas.0.nmadvise", i, &nmadvise, uint64_t); + CTL_M2_GET("stats.arenas.0.purged", i, &purged, uint64_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"pactive\": %zu,\n", pactive); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"pdirty\": %zu,\n", pdirty); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"npurge\": %"FMTu64",\n", npurge); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"nmadvise\": %"FMTu64",\n", nmadvise); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"purged\": %"FMTu64",\n", purged); + } else { + malloc_cprintf(write_cb, cbopaque, + "purging: dirty: %zu, sweeps: %"FMTu64", madvises: %"FMTu64 + ", purged: %"FMTu64"\n", pdirty, npurge, nmadvise, purged); + } + + CTL_M2_GET("stats.arenas.0.small.allocated", i, &small_allocated, + size_t); + CTL_M2_GET("stats.arenas.0.small.nmalloc", i, &small_nmalloc, uint64_t); + CTL_M2_GET("stats.arenas.0.small.ndalloc", i, &small_ndalloc, uint64_t); + CTL_M2_GET("stats.arenas.0.small.nrequests", i, &small_nrequests, + uint64_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"small\": {\n"); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"allocated\": %zu,\n", small_allocated); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"nmalloc\": %"FMTu64",\n", small_nmalloc); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"ndalloc\": %"FMTu64",\n", small_ndalloc); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"nrequests\": %"FMTu64"\n", small_nrequests); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t},\n"); + } else { + malloc_cprintf(write_cb, cbopaque, + " allocated nmalloc" + " ndalloc nrequests\n"); + malloc_cprintf(write_cb, cbopaque, + "small: %12zu %12"FMTu64" %12"FMTu64 + " %12"FMTu64"\n", + small_allocated, small_nmalloc, small_ndalloc, + small_nrequests); + } + + CTL_M2_GET("stats.arenas.0.large.allocated", i, &large_allocated, + size_t); + CTL_M2_GET("stats.arenas.0.large.nmalloc", i, &large_nmalloc, uint64_t); + CTL_M2_GET("stats.arenas.0.large.ndalloc", i, &large_ndalloc, uint64_t); + CTL_M2_GET("stats.arenas.0.large.nrequests", i, &large_nrequests, + uint64_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"large\": {\n"); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"allocated\": %zu,\n", large_allocated); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"nmalloc\": %"FMTu64",\n", large_nmalloc); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"ndalloc\": %"FMTu64",\n", large_ndalloc); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"nrequests\": %"FMTu64"\n", large_nrequests); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t},\n"); + } else { + malloc_cprintf(write_cb, cbopaque, + "large: %12zu %12"FMTu64" %12"FMTu64 + " %12"FMTu64"\n", + large_allocated, large_nmalloc, large_ndalloc, + large_nrequests); + } + + CTL_M2_GET("stats.arenas.0.huge.allocated", i, &huge_allocated, size_t); + CTL_M2_GET("stats.arenas.0.huge.nmalloc", i, &huge_nmalloc, 
uint64_t); + CTL_M2_GET("stats.arenas.0.huge.ndalloc", i, &huge_ndalloc, uint64_t); + CTL_M2_GET("stats.arenas.0.huge.nrequests", i, &huge_nrequests, + uint64_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"huge\": {\n"); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"allocated\": %zu,\n", huge_allocated); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"nmalloc\": %"FMTu64",\n", huge_nmalloc); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"ndalloc\": %"FMTu64",\n", huge_ndalloc); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"nrequests\": %"FMTu64"\n", huge_nrequests); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t},\n"); + } else { + malloc_cprintf(write_cb, cbopaque, + "huge: %12zu %12"FMTu64" %12"FMTu64 + " %12"FMTu64"\n", + huge_allocated, huge_nmalloc, huge_ndalloc, huge_nrequests); + malloc_cprintf(write_cb, cbopaque, + "total: %12zu %12"FMTu64" %12"FMTu64 + " %12"FMTu64"\n", + small_allocated + large_allocated + huge_allocated, + small_nmalloc + large_nmalloc + huge_nmalloc, + small_ndalloc + large_ndalloc + huge_ndalloc, + small_nrequests + large_nrequests + huge_nrequests); + } + if (!json) { + malloc_cprintf(write_cb, cbopaque, + "active: %12zu\n", pactive * page); + } + + CTL_M2_GET("stats.arenas.0.mapped", i, &mapped, size_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"mapped\": %zu,\n", mapped); + } else { + malloc_cprintf(write_cb, cbopaque, + "mapped: %12zu\n", mapped); + } + + CTL_M2_GET("stats.arenas.0.retained", i, &retained, size_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"retained\": %zu,\n", retained); + } else { + malloc_cprintf(write_cb, cbopaque, + "retained: %12zu\n", retained); + } + + CTL_M2_GET("stats.arenas.0.metadata.mapped", i, &metadata_mapped, + size_t); + CTL_M2_GET("stats.arenas.0.metadata.allocated", i, &metadata_allocated, + size_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"metadata\": {\n"); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"mapped\": %zu,\n", metadata_mapped); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"allocated\": %zu\n", metadata_allocated); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t}%s\n", (bins || large || huge) ? "," : ""); + } else { + malloc_cprintf(write_cb, cbopaque, + "metadata: mapped: %zu, allocated: %zu\n", + metadata_mapped, metadata_allocated); + } + + if (bins) { + stats_arena_bins_print(write_cb, cbopaque, json, large, huge, + i); + } + if (large) + stats_arena_lruns_print(write_cb, cbopaque, json, huge, i); + if (huge) + stats_arena_hchunks_print(write_cb, cbopaque, json, i); +} + +static void +stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque, + bool json, bool more) +{ + const char *cpv; + bool bv; + unsigned uv; + uint32_t u32v; + uint64_t u64v; + ssize_t ssv; + size_t sv, bsz, usz, ssz, sssz, cpsz; + + bsz = sizeof(bool); + usz = sizeof(unsigned); + ssz = sizeof(size_t); + sssz = sizeof(ssize_t); + cpsz = sizeof(const char *); + + CTL_GET("version", &cpv, const char *); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\"version\": \"%s\",\n", cpv); + } else + malloc_cprintf(write_cb, cbopaque, "Version: %s\n", cpv); + + /* config. */ +#define CONFIG_WRITE_BOOL_JSON(n, c) \ + if (json) { \ + CTL_GET("config."#n, &bv, bool); \ + malloc_cprintf(write_cb, cbopaque, \ + "\t\t\t\""#n"\": %s%s\n", bv ? 
"true" : "false", \ + (c)); \ + } + + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\"config\": {\n"); + } + + CONFIG_WRITE_BOOL_JSON(cache_oblivious, ",") + + CTL_GET("config.debug", &bv, bool); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"debug\": %s,\n", bv ? "true" : "false"); + } else { + malloc_cprintf(write_cb, cbopaque, "Assertions %s\n", + bv ? "enabled" : "disabled"); + } + + CONFIG_WRITE_BOOL_JSON(fill, ",") + CONFIG_WRITE_BOOL_JSON(lazy_lock, ",") + + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"malloc_conf\": \"%s\",\n", + config_malloc_conf); + } else { + malloc_cprintf(write_cb, cbopaque, + "config.malloc_conf: \"%s\"\n", config_malloc_conf); + } + + CONFIG_WRITE_BOOL_JSON(munmap, ",") + CONFIG_WRITE_BOOL_JSON(prof, ",") + CONFIG_WRITE_BOOL_JSON(prof_libgcc, ",") + CONFIG_WRITE_BOOL_JSON(prof_libunwind, ",") + CONFIG_WRITE_BOOL_JSON(stats, ",") + CONFIG_WRITE_BOOL_JSON(tcache, ",") + CONFIG_WRITE_BOOL_JSON(tls, ",") + CONFIG_WRITE_BOOL_JSON(utrace, ",") + CONFIG_WRITE_BOOL_JSON(valgrind, ",") + CONFIG_WRITE_BOOL_JSON(xmalloc, "") + + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t},\n"); + } +#undef CONFIG_WRITE_BOOL_JSON + + /* opt. */ +#define OPT_WRITE_BOOL(n, c) \ + if (je_mallctl("opt."#n, (void *)&bv, &bsz, NULL, 0) == 0) { \ + if (json) { \ + malloc_cprintf(write_cb, cbopaque, \ + "\t\t\t\""#n"\": %s%s\n", bv ? "true" : \ + "false", (c)); \ + } else { \ + malloc_cprintf(write_cb, cbopaque, \ + " opt."#n": %s\n", bv ? "true" : "false"); \ + } \ + } +#define OPT_WRITE_BOOL_MUTABLE(n, m, c) { \ + bool bv2; \ + if (je_mallctl("opt."#n, (void *)&bv, &bsz, NULL, 0) == 0 && \ + je_mallctl(#m, &bv2, (void *)&bsz, NULL, 0) == 0) { \ + if (json) { \ + malloc_cprintf(write_cb, cbopaque, \ + "\t\t\t\""#n"\": %s%s\n", bv ? "true" : \ + "false", (c)); \ + } else { \ + malloc_cprintf(write_cb, cbopaque, \ + " opt."#n": %s ("#m": %s)\n", bv ? "true" \ + : "false", bv2 ? 
"true" : "false"); \ + } \ + } \ +} +#define OPT_WRITE_UNSIGNED(n, c) \ + if (je_mallctl("opt."#n, (void *)&uv, &usz, NULL, 0) == 0) { \ + if (json) { \ + malloc_cprintf(write_cb, cbopaque, \ + "\t\t\t\""#n"\": %u%s\n", uv, (c)); \ + } else { \ + malloc_cprintf(write_cb, cbopaque, \ + " opt."#n": %u\n", uv); \ + } \ + } +#define OPT_WRITE_SIZE_T(n, c) \ + if (je_mallctl("opt."#n, (void *)&sv, &ssz, NULL, 0) == 0) { \ + if (json) { \ + malloc_cprintf(write_cb, cbopaque, \ + "\t\t\t\""#n"\": %zu%s\n", sv, (c)); \ + } else { \ + malloc_cprintf(write_cb, cbopaque, \ + " opt."#n": %zu\n", sv); \ + } \ + } +#define OPT_WRITE_SSIZE_T(n, c) \ + if (je_mallctl("opt."#n, (void *)&ssv, &sssz, NULL, 0) == 0) { \ + if (json) { \ + malloc_cprintf(write_cb, cbopaque, \ + "\t\t\t\""#n"\": %zd%s\n", ssv, (c)); \ + } else { \ + malloc_cprintf(write_cb, cbopaque, \ + " opt."#n": %zd\n", ssv); \ + } \ + } +#define OPT_WRITE_SSIZE_T_MUTABLE(n, m, c) { \ + ssize_t ssv2; \ + if (je_mallctl("opt."#n, (void *)&ssv, &sssz, NULL, 0) == 0 && \ + je_mallctl(#m, (void *)&ssv2, &sssz, NULL, 0) == 0) { \ + if (json) { \ + malloc_cprintf(write_cb, cbopaque, \ + "\t\t\t\""#n"\": %zd%s\n", ssv, (c)); \ + } else { \ + malloc_cprintf(write_cb, cbopaque, \ + " opt."#n": %zd ("#m": %zd)\n", \ + ssv, ssv2); \ + } \ + } \ +} +#define OPT_WRITE_CHAR_P(n, c) \ + if (je_mallctl("opt."#n, (void *)&cpv, &cpsz, NULL, 0) == 0) { \ + if (json) { \ + malloc_cprintf(write_cb, cbopaque, \ + "\t\t\t\""#n"\": \"%s\"%s\n", cpv, (c)); \ + } else { \ + malloc_cprintf(write_cb, cbopaque, \ + " opt."#n": \"%s\"\n", cpv); \ + } \ + } + + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\"opt\": {\n"); + } else { + malloc_cprintf(write_cb, cbopaque, + "Run-time option settings:\n"); + } + OPT_WRITE_BOOL(abort, ",") + OPT_WRITE_SIZE_T(lg_chunk, ",") + OPT_WRITE_CHAR_P(dss, ",") + OPT_WRITE_UNSIGNED(narenas, ",") + OPT_WRITE_CHAR_P(purge, ",") + if (json || opt_purge == purge_mode_ratio) { + OPT_WRITE_SSIZE_T_MUTABLE(lg_dirty_mult, + arenas.lg_dirty_mult, ",") + } + if (json || opt_purge == purge_mode_decay) { + OPT_WRITE_SSIZE_T_MUTABLE(decay_time, arenas.decay_time, ",") + } + OPT_WRITE_CHAR_P(junk, ",") + OPT_WRITE_SIZE_T(quarantine, ",") + OPT_WRITE_BOOL(redzone, ",") + OPT_WRITE_BOOL(zero, ",") + OPT_WRITE_BOOL(utrace, ",") + OPT_WRITE_BOOL(xmalloc, ",") + OPT_WRITE_BOOL(tcache, ",") + OPT_WRITE_SSIZE_T(lg_tcache_max, ",") + OPT_WRITE_BOOL(thp, ",") + OPT_WRITE_BOOL(prof, ",") + OPT_WRITE_CHAR_P(prof_prefix, ",") + OPT_WRITE_BOOL_MUTABLE(prof_active, prof.active, ",") + OPT_WRITE_BOOL_MUTABLE(prof_thread_active_init, prof.thread_active_init, + ",") + OPT_WRITE_SSIZE_T_MUTABLE(lg_prof_sample, prof.lg_sample, ",") + OPT_WRITE_BOOL(prof_accum, ",") + OPT_WRITE_SSIZE_T(lg_prof_interval, ",") + OPT_WRITE_BOOL(prof_gdump, ",") + OPT_WRITE_BOOL(prof_final, ",") + OPT_WRITE_BOOL(prof_leak, ",") + /* + * stats_print is always emitted, so as long as stats_print comes last + * it's safe to unconditionally omit the comma here (rather than having + * to conditionally omit it elsewhere depending on configuration). + */ + OPT_WRITE_BOOL(stats_print, "") + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t},\n"); + } + +#undef OPT_WRITE_BOOL +#undef OPT_WRITE_BOOL_MUTABLE +#undef OPT_WRITE_SIZE_T +#undef OPT_WRITE_SSIZE_T +#undef OPT_WRITE_CHAR_P + + /* arenas. 
*/ + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\"arenas\": {\n"); + } + + CTL_GET("arenas.narenas", &uv, unsigned); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"narenas\": %u,\n", uv); + } else + malloc_cprintf(write_cb, cbopaque, "Arenas: %u\n", uv); + + CTL_GET("arenas.lg_dirty_mult", &ssv, ssize_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"lg_dirty_mult\": %zd,\n", ssv); + } else if (opt_purge == purge_mode_ratio) { + if (ssv >= 0) { + malloc_cprintf(write_cb, cbopaque, + "Min active:dirty page ratio per arena: " + "%u:1\n", (1U << ssv)); + } else { + malloc_cprintf(write_cb, cbopaque, + "Min active:dirty page ratio per arena: " + "N/A\n"); + } + } + CTL_GET("arenas.decay_time", &ssv, ssize_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"decay_time\": %zd,\n", ssv); + } else if (opt_purge == purge_mode_decay) { + malloc_cprintf(write_cb, cbopaque, + "Unused dirty page decay time: %zd%s\n", + ssv, (ssv < 0) ? " (no decay)" : ""); + } + + CTL_GET("arenas.quantum", &sv, size_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"quantum\": %zu,\n", sv); + } else + malloc_cprintf(write_cb, cbopaque, "Quantum size: %zu\n", sv); + + CTL_GET("arenas.page", &sv, size_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"page\": %zu,\n", sv); + } else + malloc_cprintf(write_cb, cbopaque, "Page size: %zu\n", sv); + + if (je_mallctl("arenas.tcache_max", (void *)&sv, &ssz, NULL, 0) == 0) { + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"tcache_max\": %zu,\n", sv); + } else { + malloc_cprintf(write_cb, cbopaque, + "Maximum thread-cached size class: %zu\n", sv); + } + } + + if (json) { + unsigned nbins, nlruns, nhchunks, i; + + CTL_GET("arenas.nbins", &nbins, unsigned); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"nbins\": %u,\n", nbins); + + if (config_tcache) { + CTL_GET("arenas.nhbins", &uv, unsigned); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"nhbins\": %u,\n", uv); + } + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"bin\": [\n"); + for (i = 0; i < nbins; i++) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t{\n"); + + CTL_M2_GET("arenas.bin.0.size", i, &sv, size_t); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"size\": %zu,\n", sv); + + CTL_M2_GET("arenas.bin.0.nregs", i, &u32v, uint32_t); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"nregs\": %"FMTu32",\n", u32v); + + CTL_M2_GET("arenas.bin.0.run_size", i, &sv, size_t); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"run_size\": %zu\n", sv); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t}%s\n", (i + 1 < nbins) ? "," : ""); + } + malloc_cprintf(write_cb, cbopaque, + "\t\t\t],\n"); + + CTL_GET("arenas.nlruns", &nlruns, unsigned); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"nlruns\": %u,\n", nlruns); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"lrun\": [\n"); + for (i = 0; i < nlruns; i++) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t{\n"); + + CTL_M2_GET("arenas.lrun.0.size", i, &sv, size_t); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"size\": %zu\n", sv); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t}%s\n", (i + 1 < nlruns) ? 
"," : ""); + } + malloc_cprintf(write_cb, cbopaque, + "\t\t\t],\n"); + + CTL_GET("arenas.nhchunks", &nhchunks, unsigned); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"nhchunks\": %u,\n", nhchunks); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"hchunk\": [\n"); + for (i = 0; i < nhchunks; i++) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t{\n"); + + CTL_M2_GET("arenas.hchunk.0.size", i, &sv, size_t); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"size\": %zu\n", sv); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t}%s\n", (i + 1 < nhchunks) ? "," : ""); + } + malloc_cprintf(write_cb, cbopaque, + "\t\t\t]\n"); + + malloc_cprintf(write_cb, cbopaque, + "\t\t}%s\n", (config_prof || more) ? "," : ""); + } + + /* prof. */ + if (config_prof && json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\"prof\": {\n"); + + CTL_GET("prof.thread_active_init", &bv, bool); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"thread_active_init\": %s,\n", bv ? "true" : + "false"); + + CTL_GET("prof.active", &bv, bool); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"active\": %s,\n", bv ? "true" : "false"); + + CTL_GET("prof.gdump", &bv, bool); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"gdump\": %s,\n", bv ? "true" : "false"); + + CTL_GET("prof.interval", &u64v, uint64_t); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"interval\": %"FMTu64",\n", u64v); + + CTL_GET("prof.lg_sample", &ssv, ssize_t); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"lg_sample\": %zd\n", ssv); + + malloc_cprintf(write_cb, cbopaque, + "\t\t}%s\n", more ? "," : ""); + } +} + +static void +stats_print_helper(void (*write_cb)(void *, const char *), void *cbopaque, + bool json, bool merged, bool unmerged, bool bins, bool large, bool huge) +{ + size_t *cactive; + size_t allocated, active, metadata, resident, mapped, retained; + + CTL_GET("stats.cactive", &cactive, size_t *); + CTL_GET("stats.allocated", &allocated, size_t); + CTL_GET("stats.active", &active, size_t); + CTL_GET("stats.metadata", &metadata, size_t); + CTL_GET("stats.resident", &resident, size_t); + CTL_GET("stats.mapped", &mapped, size_t); + CTL_GET("stats.retained", &retained, size_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\"stats\": {\n"); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"cactive\": %zu,\n", atomic_read_z(cactive)); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"allocated\": %zu,\n", allocated); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"active\": %zu,\n", active); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"metadata\": %zu,\n", metadata); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"resident\": %zu,\n", resident); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"mapped\": %zu,\n", mapped); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"retained\": %zu\n", retained); + + malloc_cprintf(write_cb, cbopaque, + "\t\t}%s\n", (merged || unmerged) ? 
"," : ""); + } else { + malloc_cprintf(write_cb, cbopaque, + "Allocated: %zu, active: %zu, metadata: %zu," + " resident: %zu, mapped: %zu, retained: %zu\n", + allocated, active, metadata, resident, mapped, retained); + malloc_cprintf(write_cb, cbopaque, + "Current active ceiling: %zu\n", + atomic_read_z(cactive)); + } + + if (merged || unmerged) { + unsigned narenas; + + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\"stats.arenas\": {\n"); + } + + CTL_GET("arenas.narenas", &narenas, unsigned); + { + VARIABLE_ARRAY(bool, initialized, narenas); + size_t isz; + unsigned i, j, ninitialized; + + isz = sizeof(bool) * narenas; + xmallctl("arenas.initialized", (void *)initialized, + &isz, NULL, 0); + for (i = ninitialized = 0; i < narenas; i++) { + if (initialized[i]) + ninitialized++; + } + + /* Merged stats. */ + if (merged && (ninitialized > 1 || !unmerged)) { + /* Print merged arena stats. */ + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"merged\": {\n"); + } else { + malloc_cprintf(write_cb, cbopaque, + "\nMerged arenas stats:\n"); + } + stats_arena_print(write_cb, cbopaque, json, + narenas, bins, large, huge); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t}%s\n", unmerged ? "," : + ""); + } + } + + /* Unmerged stats. */ + if (unmerged) { + for (i = j = 0; i < narenas; i++) { + if (initialized[i]) { + if (json) { + j++; + malloc_cprintf(write_cb, + cbopaque, + "\t\t\t\"%u\": {\n", + i); + } else { + malloc_cprintf(write_cb, + cbopaque, + "\narenas[%u]:\n", + i); + } + stats_arena_print(write_cb, + cbopaque, json, i, bins, + large, huge); + if (json) { + malloc_cprintf(write_cb, + cbopaque, + "\t\t\t}%s\n", (j < + ninitialized) ? "," + : ""); + } + } + } + } + } + + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t}\n"); + } + } +} + +void +stats_print(void (*write_cb)(void *, const char *), void *cbopaque, + const char *opts) +{ + int err; + uint64_t epoch; + size_t u64sz; + bool json = false; + bool general = true; + bool merged = config_stats; + bool unmerged = config_stats; + bool bins = true; + bool large = true; + bool huge = true; + + /* + * Refresh stats, in case mallctl() was called by the application. + * + * Check for OOM here, since refreshing the ctl cache can trigger + * allocation. In practice, none of the subsequent mallctl()-related + * calls in this function will cause OOM if this one succeeds. 
+	 */
+	epoch = 1;
+	u64sz = sizeof(uint64_t);
+	err = je_mallctl("epoch", (void *)&epoch, &u64sz, (void *)&epoch,
+	    sizeof(uint64_t));
+	if (err != 0) {
+		if (err == EAGAIN) {
+			malloc_write("<jemalloc>: Memory allocation failure "
+			    "in mallctl(\"epoch\", ...)\n");
+			return;
+		}
+		malloc_write("<jemalloc>: Failure in mallctl(\"epoch\", "
+		    "...)\n");
+		abort();
+	}
+
+	if (opts != NULL) {
+		unsigned i;
+
+		for (i = 0; opts[i] != '\0'; i++) {
+			switch (opts[i]) {
+			case 'J':
+				json = true;
+				break;
+			case 'g':
+				general = false;
+				break;
+			case 'm':
+				merged = false;
+				break;
+			case 'a':
+				unmerged = false;
+				break;
+			case 'b':
+				bins = false;
+				break;
+			case 'l':
+				large = false;
+				break;
+			case 'h':
+				huge = false;
+				break;
+			default:;
+			}
+		}
+	}
+
+	if (json) {
+		malloc_cprintf(write_cb, cbopaque,
+		    "{\n"
+		    "\t\"jemalloc\": {\n");
+	} else {
+		malloc_cprintf(write_cb, cbopaque,
+		    "___ Begin jemalloc statistics ___\n");
+	}
+
+	if (general) {
+		bool more = (merged || unmerged);
+		stats_general_print(write_cb, cbopaque, json, more);
+	}
+	if (config_stats) {
+		stats_print_helper(write_cb, cbopaque, json, merged, unmerged,
+		    bins, large, huge);
+	}
+	if (json) {
+		malloc_cprintf(write_cb, cbopaque,
+		    "\t}\n"
+		    "}\n");
+	} else {
+		malloc_cprintf(write_cb, cbopaque,
+		    "--- End jemalloc statistics ---\n");
+	}
+}
diff --git a/sources/jemalloc/src/je_tcache.c b/sources/jemalloc/src/je_tcache.c
new file mode 100755
index 00000000..e3b04be6
--- /dev/null
+++ b/sources/jemalloc/src/je_tcache.c
@@ -0,0 +1,619 @@
+#define	JEMALLOC_TCACHE_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+/* Data. */
+
+bool	opt_tcache = true;
+ssize_t	opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;
+
+tcache_bin_info_t	*tcache_bin_info;
+static unsigned		stack_nelms; /* Total stack elms per tcache. */
+
+unsigned	nhbins;
+size_t		tcache_maxclass;
+
+tcaches_t	*tcaches;
+
+/* Index of first element within tcaches that has never been used. */
+static unsigned	tcaches_past;
+
+/* Head of singly linked list tracking available tcaches elements. */
+static tcaches_t	*tcaches_avail;
+
+/* Protects tcaches{,_past,_avail}. */
+static malloc_mutex_t	tcaches_mtx;
+
+/******************************************************************************/
+
+size_t
+tcache_salloc(tsdn_t *tsdn, const void *ptr)
+{
+
+	return (arena_salloc(tsdn, ptr, false));
+}
+
+void
+tcache_event_hard(tsd_t *tsd, tcache_t *tcache)
+{
+	szind_t binind = tcache->next_gc_bin;
+	tcache_bin_t *tbin = &tcache->tbins[binind];
+	tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];
+
+	if (tbin->low_water > 0) {
+		/*
+		 * Flush (ceiling) 3/4 of the objects below the low water mark.
+		 */
+		if (binind < NBINS) {
+			tcache_bin_flush_small(tsd, tcache, tbin, binind,
+			    tbin->ncached - tbin->low_water + (tbin->low_water
+			    >> 2));
+		} else {
+			tcache_bin_flush_large(tsd, tbin, binind, tbin->ncached
+			    - tbin->low_water + (tbin->low_water >> 2), tcache);
+		}
+		/*
+		 * Reduce fill count by 2X.  Limit lg_fill_div such that the
+		 * fill count is always at least 1.
+		 */
+		if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1)) >= 1)
+			tbin->lg_fill_div++;
+	} else if (tbin->low_water < 0) {
+		/*
+		 * Increase fill count by 2X.  Make sure lg_fill_div stays
+		 * greater than 0.
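+		 * For illustration, with ncached_max == 200 the fill request
+		 * is ncached_max >> lg_fill_div: repeated flushes above step
+		 * it down through 50, 25, 12, ..., while repeated low-water
+		 * underflows here step it back up toward the 100-object
+		 * maximum (lg_fill_div == 1).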
+ */ + if (tbin->lg_fill_div > 1) + tbin->lg_fill_div--; + } + tbin->low_water = tbin->ncached; + + tcache->next_gc_bin++; + if (tcache->next_gc_bin == nhbins) + tcache->next_gc_bin = 0; +} + +void * +tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, + tcache_bin_t *tbin, szind_t binind, bool *tcache_success) +{ + void *ret; + + arena_tcache_fill_small(tsdn, arena, tbin, binind, config_prof ? + tcache->prof_accumbytes : 0); + if (config_prof) + tcache->prof_accumbytes = 0; + ret = tcache_alloc_easy(tbin, tcache_success); + + return (ret); +} + +void +tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin, + szind_t binind, unsigned rem) +{ + arena_t *arena; + void *ptr; + unsigned i, nflush, ndeferred; + bool merged_stats = false; + + assert(binind < NBINS); + assert(rem <= tbin->ncached); + + arena = arena_choose(tsd, NULL); + assert(arena != NULL); + for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) { + /* Lock the arena bin associated with the first object. */ + arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE( + *(tbin->avail - 1)); + arena_t *bin_arena = extent_node_arena_get(&chunk->node); + arena_bin_t *bin = &bin_arena->bins[binind]; + + if (config_prof && bin_arena == arena) { + if (arena_prof_accum(tsd_tsdn(tsd), arena, + tcache->prof_accumbytes)) + prof_idump(tsd_tsdn(tsd)); + tcache->prof_accumbytes = 0; + } + + malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); + if (config_stats && bin_arena == arena) { + assert(!merged_stats); + merged_stats = true; + bin->stats.nflushes++; + bin->stats.nrequests += tbin->tstats.nrequests; + tbin->tstats.nrequests = 0; + } + ndeferred = 0; + for (i = 0; i < nflush; i++) { + ptr = *(tbin->avail - 1 - i); + assert(ptr != NULL); + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + if (extent_node_arena_get(&chunk->node) == bin_arena) { + size_t pageind = ((uintptr_t)ptr - + (uintptr_t)chunk) >> LG_PAGE; + arena_chunk_map_bits_t *bitselm = + arena_bitselm_get_mutable(chunk, pageind); + arena_dalloc_bin_junked_locked(tsd_tsdn(tsd), + bin_arena, chunk, ptr, bitselm); + } else { + /* + * This object was allocated via a different + * arena bin than the one that is currently + * locked. Stash the object, so that it can be + * handled in a future pass. + */ + *(tbin->avail - 1 - ndeferred) = ptr; + ndeferred++; + } + } + malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); + arena_decay_ticks(tsd_tsdn(tsd), bin_arena, nflush - ndeferred); + } + if (config_stats && !merged_stats) { + /* + * The flush loop didn't happen to flush to this thread's + * arena, so the stats didn't get merged. Manually do so now. + */ + arena_bin_t *bin = &arena->bins[binind]; + malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); + bin->stats.nflushes++; + bin->stats.nrequests += tbin->tstats.nrequests; + tbin->tstats.nrequests = 0; + malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); + } + + memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem * + sizeof(void *)); + tbin->ncached = rem; + if ((int)tbin->ncached < tbin->low_water) + tbin->low_water = tbin->ncached; +} + +void +tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind, + unsigned rem, tcache_t *tcache) +{ + arena_t *arena; + void *ptr; + unsigned i, nflush, ndeferred; + bool merged_stats = false; + + assert(binind < nhbins); + assert(rem <= tbin->ncached); + + arena = arena_choose(tsd, NULL); + assert(arena != NULL); + for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) { + /* Lock the arena associated with the first object. 
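Objects owned by other arenas are stashed and flushed on a later pass, so each pass holds at most one arena lock.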
*/ + arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE( + *(tbin->avail - 1)); + arena_t *locked_arena = extent_node_arena_get(&chunk->node); + UNUSED bool idump; + + if (config_prof) + idump = false; + malloc_mutex_lock(tsd_tsdn(tsd), &locked_arena->lock); + if ((config_prof || config_stats) && locked_arena == arena) { + if (config_prof) { + idump = arena_prof_accum_locked(arena, + tcache->prof_accumbytes); + tcache->prof_accumbytes = 0; + } + if (config_stats) { + merged_stats = true; + arena->stats.nrequests_large += + tbin->tstats.nrequests; + arena->stats.lstats[binind - NBINS].nrequests += + tbin->tstats.nrequests; + tbin->tstats.nrequests = 0; + } + } + ndeferred = 0; + for (i = 0; i < nflush; i++) { + ptr = *(tbin->avail - 1 - i); + assert(ptr != NULL); + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + if (extent_node_arena_get(&chunk->node) == + locked_arena) { + arena_dalloc_large_junked_locked(tsd_tsdn(tsd), + locked_arena, chunk, ptr); + } else { + /* + * This object was allocated via a different + * arena than the one that is currently locked. + * Stash the object, so that it can be handled + * in a future pass. + */ + *(tbin->avail - 1 - ndeferred) = ptr; + ndeferred++; + } + } + malloc_mutex_unlock(tsd_tsdn(tsd), &locked_arena->lock); + if (config_prof && idump) + prof_idump(tsd_tsdn(tsd)); + arena_decay_ticks(tsd_tsdn(tsd), locked_arena, nflush - + ndeferred); + } + if (config_stats && !merged_stats) { + /* + * The flush loop didn't happen to flush to this thread's + * arena, so the stats didn't get merged. Manually do so now. + */ + malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock); + arena->stats.nrequests_large += tbin->tstats.nrequests; + arena->stats.lstats[binind - NBINS].nrequests += + tbin->tstats.nrequests; + tbin->tstats.nrequests = 0; + malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock); + } + + memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem * + sizeof(void *)); + tbin->ncached = rem; + if ((int)tbin->ncached < tbin->low_water) + tbin->low_water = tbin->ncached; +} + +static void +tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) +{ + + if (config_stats) { + /* Link into list of extant tcaches. */ + malloc_mutex_lock(tsdn, &arena->lock); + ql_elm_new(tcache, link); + ql_tail_insert(&arena->tcache_ql, tcache, link); + malloc_mutex_unlock(tsdn, &arena->lock); + } +} + +static void +tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) +{ + + if (config_stats) { + /* Unlink from list of extant tcaches. */ + malloc_mutex_lock(tsdn, &arena->lock); + if (config_debug) { + bool in_ql = false; + tcache_t *iter; + ql_foreach(iter, &arena->tcache_ql, link) { + if (iter == tcache) { + in_ql = true; + break; + } + } + assert(in_ql); + } + ql_remove(&arena->tcache_ql, tcache, link); + tcache_stats_merge(tsdn, tcache, arena); + malloc_mutex_unlock(tsdn, &arena->lock); + } +} + +void +tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *oldarena, + arena_t *newarena) +{ + + tcache_arena_dissociate(tsdn, tcache, oldarena); + tcache_arena_associate(tsdn, tcache, newarena); +} + +tcache_t * +tcache_get_hard(tsd_t *tsd) +{ + arena_t *arena; + + if (!tcache_enabled_get()) { + if (tsd_nominal(tsd)) + tcache_enabled_set(false); /* Memoize. 
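The flag is cached in tsd so that later tcache_enabled_get() calls can skip this lookup.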
*/ + return (NULL); + } + arena = arena_choose(tsd, NULL); + if (unlikely(arena == NULL)) + return (NULL); + return (tcache_create(tsd_tsdn(tsd), arena)); +} + +tcache_t * +tcache_create(tsdn_t *tsdn, arena_t *arena) +{ + tcache_t *tcache; + size_t size, stack_offset; + unsigned i; + + size = offsetof(tcache_t, tbins) + (sizeof(tcache_bin_t) * nhbins); + /* Naturally align the pointer stacks. */ + size = PTR_CEILING(size); + stack_offset = size; + size += stack_nelms * sizeof(void *); + /* Avoid false cacheline sharing. */ + size = sa2u(size, CACHELINE); + + tcache = ipallocztm(tsdn, size, CACHELINE, true, NULL, true, + arena_get(TSDN_NULL, 0, true)); + if (tcache == NULL) + return (NULL); + + tcache_arena_associate(tsdn, tcache, arena); + + ticker_init(&tcache->gc_ticker, TCACHE_GC_INCR); + + assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0); + for (i = 0; i < nhbins; i++) { + tcache->tbins[i].lg_fill_div = 1; + stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *); + /* + * avail points past the available space. Allocations will + * access the slots toward higher addresses (for the benefit of + * prefetch). + */ + tcache->tbins[i].avail = (void **)((uintptr_t)tcache + + (uintptr_t)stack_offset); + } + + return (tcache); +} + +static void +tcache_destroy(tsd_t *tsd, tcache_t *tcache) +{ + arena_t *arena; + unsigned i; + + arena = arena_choose(tsd, NULL); + tcache_arena_dissociate(tsd_tsdn(tsd), tcache, arena); + + for (i = 0; i < NBINS; i++) { + tcache_bin_t *tbin = &tcache->tbins[i]; + tcache_bin_flush_small(tsd, tcache, tbin, i, 0); + + if (config_stats && tbin->tstats.nrequests != 0) { + arena_bin_t *bin = &arena->bins[i]; + malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); + bin->stats.nrequests += tbin->tstats.nrequests; + malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); + } + } + + for (; i < nhbins; i++) { + tcache_bin_t *tbin = &tcache->tbins[i]; + tcache_bin_flush_large(tsd, tbin, i, 0, tcache); + + if (config_stats && tbin->tstats.nrequests != 0) { + malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock); + arena->stats.nrequests_large += tbin->tstats.nrequests; + arena->stats.lstats[i - NBINS].nrequests += + tbin->tstats.nrequests; + malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock); + } + } + + if (config_prof && tcache->prof_accumbytes > 0 && + arena_prof_accum(tsd_tsdn(tsd), arena, tcache->prof_accumbytes)) + prof_idump(tsd_tsdn(tsd)); + + idalloctm(tsd_tsdn(tsd), tcache, NULL, true, true); +} + +void +tcache_cleanup(tsd_t *tsd) +{ + tcache_t *tcache; + + if (!config_tcache) + return; + + if ((tcache = tsd_tcache_get(tsd)) != NULL) { + tcache_destroy(tsd, tcache); + tsd_tcache_set(tsd, NULL); + } +} + +void +tcache_enabled_cleanup(tsd_t *tsd) +{ + + /* Do nothing. */ +} + +void +tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) +{ + unsigned i; + + cassert(config_stats); + + malloc_mutex_assert_owner(tsdn, &arena->lock); + + /* Merge and reset tcache stats. 
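Caller must hold arena->lock (asserted above); per-bin locks are additionally taken for the small bins below.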
*/ + for (i = 0; i < NBINS; i++) { + arena_bin_t *bin = &arena->bins[i]; + tcache_bin_t *tbin = &tcache->tbins[i]; + malloc_mutex_lock(tsdn, &bin->lock); + bin->stats.nrequests += tbin->tstats.nrequests; + malloc_mutex_unlock(tsdn, &bin->lock); + tbin->tstats.nrequests = 0; + } + + for (; i < nhbins; i++) { + malloc_large_stats_t *lstats = &arena->stats.lstats[i - NBINS]; + tcache_bin_t *tbin = &tcache->tbins[i]; + arena->stats.nrequests_large += tbin->tstats.nrequests; + lstats->nrequests += tbin->tstats.nrequests; + tbin->tstats.nrequests = 0; + } +} + +static bool +tcaches_create_prep(tsd_t *tsd) { + bool err; + + malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx); + + if (tcaches == NULL) { + tcaches = base_alloc(tsd_tsdn(tsd), sizeof(tcache_t *) * + (MALLOCX_TCACHE_MAX+1)); + if (tcaches == NULL) { + err = true; + goto label_return; + } + } + + if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX) { + err = true; + goto label_return; + } + + err = false; +label_return: + malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx); + return err; +} + +bool +tcaches_create(tsd_t *tsd, unsigned *r_ind) { + bool err; + arena_t *arena; + tcache_t *tcache; + tcaches_t *elm; + + if (tcaches_create_prep(tsd)) { + err = true; + goto label_return; + } + + arena = arena_ichoose(tsd, NULL); + if (unlikely(arena == NULL)) { + err = true; + goto label_return; + } + tcache = tcache_create(tsd_tsdn(tsd), arena); + if (tcache == NULL) { + err = true; + goto label_return; + } + + malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx); + if (tcaches_avail != NULL) { + elm = tcaches_avail; + tcaches_avail = tcaches_avail->next; + elm->tcache = tcache; + *r_ind = (unsigned)(elm - tcaches); + } else { + elm = &tcaches[tcaches_past]; + elm->tcache = tcache; + *r_ind = tcaches_past; + tcaches_past++; + } + malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx); + + err = false; +label_return: + malloc_mutex_assert_not_owner(tsd_tsdn(tsd), &tcaches_mtx); + return err; +} + +static void +tcaches_elm_flush(tsd_t *tsd, tcaches_t *elm) { + malloc_mutex_assert_owner(tsd_tsdn(tsd), &tcaches_mtx); + + if (elm->tcache == NULL) { + return; + } + tcache_destroy(tsd, elm->tcache); + elm->tcache = NULL; +} + +void +tcaches_flush(tsd_t *tsd, unsigned ind) { + malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx); + tcaches_elm_flush(tsd, &tcaches[ind]); + malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx); +} + +void +tcaches_destroy(tsd_t *tsd, unsigned ind) { + tcaches_t *elm; + + malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx); + elm = &tcaches[ind]; + tcaches_elm_flush(tsd, elm); + elm->next = tcaches_avail; + tcaches_avail = elm; + malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx); +} + +bool +tcache_boot(tsdn_t *tsdn) { + unsigned i; + + cassert(config_tcache); + + /* + * If necessary, clamp opt_lg_tcache_max, now that large_maxclass is + * known. + */ + if (opt_lg_tcache_max < 0 || (ZU(1) << opt_lg_tcache_max) < SMALL_MAXCLASS) + tcache_maxclass = SMALL_MAXCLASS; + else if ((ZU(1) << opt_lg_tcache_max) > large_maxclass) + tcache_maxclass = large_maxclass; + else + tcache_maxclass = (ZU(1) << opt_lg_tcache_max); + + if (malloc_mutex_init(&tcaches_mtx, "tcaches", WITNESS_RANK_TCACHES)) { + return true; + } + + nhbins = size2index(tcache_maxclass) + 1; + + /* Initialize tcache_bin_info. 
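Each small bin caches up to twice its nregs, clamped to [TCACHE_NSLOTS_SMALL_MIN, TCACHE_NSLOTS_SMALL_MAX]; bins past NBINS get TCACHE_NSLOTS_LARGE slots.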
*/ + tcache_bin_info = (tcache_bin_info_t *)base_alloc(tsdn, nhbins * + sizeof(tcache_bin_info_t)); + if (tcache_bin_info == NULL) + return (true); + stack_nelms = 0; + for (i = 0; i < NBINS; i++) { + if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MIN) { + tcache_bin_info[i].ncached_max = + TCACHE_NSLOTS_SMALL_MIN; + } else if ((arena_bin_info[i].nregs << 1) <= + TCACHE_NSLOTS_SMALL_MAX) { + tcache_bin_info[i].ncached_max = + (arena_bin_info[i].nregs << 1); + } else { + tcache_bin_info[i].ncached_max = + TCACHE_NSLOTS_SMALL_MAX; + } + stack_nelms += tcache_bin_info[i].ncached_max; + } + for (; i < nhbins; i++) { + tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE; + stack_nelms += tcache_bin_info[i].ncached_max; + } + + return (false); +} + +void +tcache_prefork(tsdn_t *tsdn) { + if (!config_prof && opt_tcache) { + malloc_mutex_prefork(tsdn, &tcaches_mtx); + } +} + +void +tcache_postfork_parent(tsdn_t *tsdn) { + if (!config_prof && opt_tcache) { + malloc_mutex_postfork_parent(tsdn, &tcaches_mtx); + } +} + +void +tcache_postfork_child(tsdn_t *tsdn) { + if (!config_prof && opt_tcache) { + malloc_mutex_postfork_child(tsdn, &tcaches_mtx); + } +} diff --git a/sources/jemalloc/src/je_ticker.c b/sources/jemalloc/src/je_ticker.c new file mode 100755 index 00000000..db090240 --- /dev/null +++ b/sources/jemalloc/src/je_ticker.c @@ -0,0 +1,2 @@ +#define JEMALLOC_TICKER_C_ +#include "jemalloc/internal/jemalloc_internal.h" diff --git a/sources/jemalloc/src/je_tsd.c b/sources/jemalloc/src/je_tsd.c new file mode 100755 index 00000000..ec69a51c --- /dev/null +++ b/sources/jemalloc/src/je_tsd.c @@ -0,0 +1,197 @@ +#define JEMALLOC_TSD_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +/******************************************************************************/ +/* Data. */ + +static unsigned ncleanups; +static malloc_tsd_cleanup_t cleanups[MALLOC_TSD_CLEANUPS_MAX]; + +malloc_tsd_data(, , tsd_t, TSD_INITIALIZER) + +/******************************************************************************/ + +void * +malloc_tsd_malloc(size_t size) +{ + + return (a0malloc(CACHELINE_CEILING(size))); +} + +void +malloc_tsd_dalloc(void *wrapper) +{ + + a0dalloc(wrapper); +} + +void +malloc_tsd_no_cleanup(void *arg) +{ + + not_reached(); +} + +#if defined(JEMALLOC_MALLOC_THREAD_CLEANUP) || defined(_WIN32) +#ifndef _WIN32 +JEMALLOC_EXPORT +#endif +void +_malloc_thread_cleanup(void) +{ + bool pending[MALLOC_TSD_CLEANUPS_MAX], again; + unsigned i; + + for (i = 0; i < ncleanups; i++) + pending[i] = true; + + do { + again = false; + for (i = 0; i < ncleanups; i++) { + if (pending[i]) { + pending[i] = cleanups[i](); + if (pending[i]) + again = true; + } + } + } while (again); +} +#endif + +void +malloc_tsd_cleanup_register(bool (*f)(void)) +{ + + assert(ncleanups < MALLOC_TSD_CLEANUPS_MAX); + cleanups[ncleanups] = f; + ncleanups++; +} + +void +tsd_cleanup(void *arg) +{ + tsd_t *tsd = (tsd_t *)arg; + + switch (tsd->state) { + case tsd_state_uninitialized: + /* Do nothing. */ + break; + case tsd_state_nominal: +#define O(n, t) \ + n##_cleanup(tsd); +MALLOC_TSD +#undef O + tsd->state = tsd_state_purgatory; + tsd_set(tsd); + break; + case tsd_state_purgatory: + /* + * The previous time this destructor was called, we set the + * state to tsd_state_purgatory so that other destructors + * wouldn't cause re-creation of the tsd. This time, do + * nothing, and do not request another callback. 
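+ * (POSIX permits a thread-specific-data destructor to be re-invoked up to + * PTHREAD_DESTRUCTOR_ITERATIONS times while it leaves non-NULL data behind, + * which is why a sticky state is needed at all.)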
+ */ + break; + case tsd_state_reincarnated: + /* + * Another destructor deallocated memory after this destructor + * was called. Reset state to tsd_state_purgatory and request + * another callback. + */ + tsd->state = tsd_state_purgatory; + tsd_set(tsd); + break; + default: + not_reached(); + } +} + +tsd_t * +malloc_tsd_boot0(void) +{ + tsd_t *tsd; + + ncleanups = 0; + if (tsd_boot0()) + return (NULL); + tsd = tsd_fetch(); + *tsd_arenas_tdata_bypassp_get(tsd) = true; + return (tsd); +} + +void +malloc_tsd_boot1(void) +{ + + tsd_boot1(); + *tsd_arenas_tdata_bypassp_get(tsd_fetch()) = false; +} + +#ifdef _WIN32 +static BOOL WINAPI +_tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) +{ + + switch (fdwReason) { +#ifdef JEMALLOC_LAZY_LOCK + case DLL_THREAD_ATTACH: + isthreaded = true; + break; +#endif + case DLL_THREAD_DETACH: + _malloc_thread_cleanup(); + break; + default: + break; + } + return (true); +} + +#ifdef _MSC_VER +# ifdef _M_IX86 +# pragma comment(linker, "/INCLUDE:__tls_used") +# pragma comment(linker, "/INCLUDE:_tls_callback") +# else +# pragma comment(linker, "/INCLUDE:_tls_used") +# pragma comment(linker, "/INCLUDE:tls_callback") +# endif +# pragma section(".CRT$XLY",long,read) +#endif +JEMALLOC_SECTION(".CRT$XLY") JEMALLOC_ATTR(used) +BOOL (WINAPI *const tls_callback)(HINSTANCE hinstDLL, + DWORD fdwReason, LPVOID lpvReserved) = _tls_callback; +#endif + +#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ + !defined(_WIN32)) +void * +tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block) +{ + pthread_t self = pthread_self(); + tsd_init_block_t *iter; + + /* Check whether this thread has already inserted into the list. */ + malloc_mutex_lock(TSDN_NULL, &head->lock); + ql_foreach(iter, &head->blocks, link) { + if (iter->thread == self) { + malloc_mutex_unlock(TSDN_NULL, &head->lock); + return (iter->data); + } + } + /* Insert block into list. */ + ql_elm_new(block, link); + block->thread = self; + ql_tail_insert(&head->blocks, block, link); + malloc_mutex_unlock(TSDN_NULL, &head->lock); + return (NULL); +} + +void +tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block) +{ + + malloc_mutex_lock(TSDN_NULL, &head->lock); + ql_remove(&head->blocks, block, link); + malloc_mutex_unlock(TSDN_NULL, &head->lock); +} +#endif diff --git a/sources/jemalloc/src/je_util.c b/sources/jemalloc/src/je_util.c new file mode 100755 index 00000000..dd8c2363 --- /dev/null +++ b/sources/jemalloc/src/je_util.c @@ -0,0 +1,666 @@ +/* + * Define simple versions of assertion macros that won't recurse in case + * of assertion failures in malloc_*printf(). + */ +#define assert(e) do { \ + if (config_debug && !(e)) { \ + malloc_write(": Failed assertion\n"); \ + abort(); \ + } \ +} while (0) + +#define not_reached() do { \ + if (config_debug) { \ + malloc_write(": Unreachable code reached\n"); \ + abort(); \ + } \ + unreachable(); \ +} while (0) + +#define not_implemented() do { \ + if (config_debug) { \ + malloc_write(": Not implemented\n"); \ + abort(); \ + } \ +} while (0) + +#define JEMALLOC_UTIL_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +/******************************************************************************/ +/* Function prototypes for non-inline static functions. 
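The *_BUFSIZE constants size worst-case digit buffers: U2S_BUFSIZE holds a base-2 rendering of uintmax_t plus a terminating NUL, and the d2s/o2s/x2s variants reserve one or two extra bytes for a sign or a "0"/"0x" prefix.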
*/ + +static void wrtmessage(void *cbopaque, const char *s); +#define U2S_BUFSIZE ((1U << (LG_SIZEOF_INTMAX_T + 3)) + 1) +static char *u2s(uintmax_t x, unsigned base, bool uppercase, char *s, + size_t *slen_p); +#define D2S_BUFSIZE (1 + U2S_BUFSIZE) +static char *d2s(intmax_t x, char sign, char *s, size_t *slen_p); +#define O2S_BUFSIZE (1 + U2S_BUFSIZE) +static char *o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p); +#define X2S_BUFSIZE (2 + U2S_BUFSIZE) +static char *x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, + size_t *slen_p); + +/******************************************************************************/ + +/* malloc_message() setup. */ +static void +wrtmessage(void *cbopaque, const char *s) +{ + +#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_write) + /* + * Use syscall(2) rather than write(2) when possible in order to avoid + * the possibility of memory allocation within libc. This is necessary + * on FreeBSD; most operating systems do not have this problem though. + * + * syscall() returns long or int, depending on platform, so capture the + * unused result in the widest plausible type to avoid compiler + * warnings. + */ + UNUSED long result = syscall(SYS_write, STDERR_FILENO, s, strlen(s)); +#else + UNUSED ssize_t result = write(STDERR_FILENO, s, strlen(s)); +#endif +} + +JEMALLOC_EXPORT void (*je_malloc_message)(void *, const char *s); + +/* + * Wrapper around malloc_message() that avoids the need for + * je_malloc_message(...) throughout the code. + */ +void +malloc_write(const char *s) +{ + + if (je_malloc_message != NULL) + je_malloc_message(NULL, s); + else + wrtmessage(NULL, s); +} + +/* + * glibc provides a non-standard strerror_r() when _GNU_SOURCE is defined, so + * provide a wrapper. + */ +int +buferror(int err, char *buf, size_t buflen) +{ + +#ifdef _WIN32 + FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, err, 0, + (LPSTR)buf, (DWORD)buflen, NULL); + return (0); +#elif defined(__GLIBC__) && defined(_GNU_SOURCE) + char *b = strerror_r(err, buf, buflen); + if (b != buf) { + strncpy(buf, b, buflen); + buf[buflen-1] = '\0'; + } + return (0); +#else + return (strerror_r(err, buf, buflen)); +#endif +} + +uintmax_t +malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base) +{ + uintmax_t ret, digit; + unsigned b; + bool neg; + const char *p, *ns; + + p = nptr; + if (base < 0 || base == 1 || base > 36) { + ns = p; + set_errno(EINVAL); + ret = UINTMAX_MAX; + goto label_return; + } + b = base; + + /* Swallow leading whitespace and get sign, if any. */ + neg = false; + while (true) { + switch (*p) { + case '\t': case '\n': case '\v': case '\f': case '\r': case ' ': + p++; + break; + case '-': + neg = true; + /* Fall through. */ + case '+': + p++; + /* Fall through. */ + default: + goto label_prefix; + } + } + + /* Get prefix, if any. */ + label_prefix: + /* + * Note where the first non-whitespace/sign character is so that it is + * possible to tell whether any digits are consumed (e.g., " 0" vs. + * " -x"). 
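+ * For instance, malloc_strtoumax("0x1f", &end, 0) consumes the "0x" prefix + * and returns 31, whereas malloc_strtoumax(" -x", &end, 0) consumes no + * digits, sets EINVAL, and returns UINTMAX_MAX with *end left pointing at + * the original nptr.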
+ */ + ns = p; + if (*p == '0') { + switch (p[1]) { + case '0': case '1': case '2': case '3': case '4': case '5': + case '6': case '7': + if (b == 0) + b = 8; + if (b == 8) + p++; + break; + case 'X': case 'x': + switch (p[2]) { + case '0': case '1': case '2': case '3': case '4': + case '5': case '6': case '7': case '8': case '9': + case 'A': case 'B': case 'C': case 'D': case 'E': + case 'F': + case 'a': case 'b': case 'c': case 'd': case 'e': + case 'f': + if (b == 0) + b = 16; + if (b == 16) + p += 2; + break; + default: + break; + } + break; + default: + p++; + ret = 0; + goto label_return; + } + } + if (b == 0) + b = 10; + + /* Convert. */ + ret = 0; + while ((*p >= '0' && *p <= '9' && (digit = *p - '0') < b) + || (*p >= 'A' && *p <= 'Z' && (digit = 10 + *p - 'A') < b) + || (*p >= 'a' && *p <= 'z' && (digit = 10 + *p - 'a') < b)) { + uintmax_t pret = ret; + ret *= b; + ret += digit; + if (ret < pret) { + /* Overflow. */ + set_errno(ERANGE); + ret = UINTMAX_MAX; + goto label_return; + } + p++; + } + if (neg) + ret = (uintmax_t)(-((intmax_t)ret)); + + if (p == ns) { + /* No conversion performed. */ + set_errno(EINVAL); + ret = UINTMAX_MAX; + goto label_return; + } + +label_return: + if (endptr != NULL) { + if (p == ns) { + /* No characters were converted. */ + *endptr = (char *)nptr; + } else + *endptr = (char *)p; + } + return (ret); +} + +static char * +u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p) +{ + unsigned i; + + i = U2S_BUFSIZE - 1; + s[i] = '\0'; + switch (base) { + case 10: + do { + i--; + s[i] = "0123456789"[x % (uint64_t)10]; + x /= (uint64_t)10; + } while (x > 0); + break; + case 16: { + const char *digits = (uppercase) + ? "0123456789ABCDEF" + : "0123456789abcdef"; + + do { + i--; + s[i] = digits[x & 0xf]; + x >>= 4; + } while (x > 0); + break; + } default: { + const char *digits = (uppercase) + ? "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" + : "0123456789abcdefghijklmnopqrstuvwxyz"; + + assert(base >= 2 && base <= 36); + do { + i--; + s[i] = digits[x % (uint64_t)base]; + x /= (uint64_t)base; + } while (x > 0); + }} + + *slen_p = U2S_BUFSIZE - 1 - i; + return (&s[i]); +} + +static char * +d2s(intmax_t x, char sign, char *s, size_t *slen_p) +{ + bool neg; + + if ((neg = (x < 0))) + x = -x; + s = u2s(x, 10, false, s, slen_p); + if (neg) + sign = '-'; + switch (sign) { + case '-': + if (!neg) + break; + /* Fall through. */ + case ' ': + case '+': + s--; + (*slen_p)++; + *s = sign; + break; + default: not_reached(); + } + return (s); +} + +static char * +o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p) +{ + + s = u2s(x, 8, false, s, slen_p); + if (alt_form && *s != '0') { + s--; + (*slen_p)++; + *s = '0'; + } + return (s); +} + +static char * +x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p) +{ + + s = u2s(x, 16, uppercase, s, slen_p); + if (alt_form) { + s -= 2; + (*slen_p) += 2; + memcpy(s, uppercase ? "0X" : "0x", 2); + } + return (s); +} + +size_t +malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) +{ + size_t i; + const char *f; + +#define APPEND_C(c) do { \ + if (i < size) \ + str[i] = (c); \ + i++; \ +} while (0) +#define APPEND_S(s, slen) do { \ + if (i < size) { \ + size_t cpylen = (slen <= size - i) ? slen : size - i; \ + memcpy(&str[i], s, cpylen); \ + } \ + i += slen; \ +} while (0) +#define APPEND_PADDED_S(s, slen, width, left_justify) do { \ + /* Left padding. */ \ + size_t pad_len = (width == -1) ? 0 : ((slen < (size_t)width) ? 
\ + (size_t)width - slen : 0); \ + if (!left_justify && pad_len != 0) { \ + size_t j; \ + for (j = 0; j < pad_len; j++) \ + APPEND_C(' '); \ + } \ + /* Value. */ \ + APPEND_S(s, slen); \ + /* Right padding. */ \ + if (left_justify && pad_len != 0) { \ + size_t j; \ + for (j = 0; j < pad_len; j++) \ + APPEND_C(' '); \ + } \ +} while (0) +#define GET_ARG_NUMERIC(val, len) do { \ + switch (len) { \ + case '?': \ + val = va_arg(ap, int); \ + break; \ + case '?' | 0x80: \ + val = va_arg(ap, unsigned int); \ + break; \ + case 'l': \ + val = va_arg(ap, long); \ + break; \ + case 'l' | 0x80: \ + val = va_arg(ap, unsigned long); \ + break; \ + case 'q': \ + val = va_arg(ap, long long); \ + break; \ + case 'q' | 0x80: \ + val = va_arg(ap, unsigned long long); \ + break; \ + case 'j': \ + val = va_arg(ap, intmax_t); \ + break; \ + case 'j' | 0x80: \ + val = va_arg(ap, uintmax_t); \ + break; \ + case 't': \ + val = va_arg(ap, ptrdiff_t); \ + break; \ + case 'z': \ + val = va_arg(ap, ssize_t); \ + break; \ + case 'z' | 0x80: \ + val = va_arg(ap, size_t); \ + break; \ + case 'p': /* Synthetic; used for %p. */ \ + val = va_arg(ap, uintptr_t); \ + break; \ + default: \ + not_reached(); \ + val = 0; \ + } \ +} while (0) + + i = 0; + f = format; + while (true) { + switch (*f) { + case '\0': goto label_out; + case '%': { + bool alt_form = false; + bool left_justify = false; + bool plus_space = false; + bool plus_plus = false; + int prec = -1; + int width = -1; + unsigned char len = '?'; + char *s; + size_t slen; + + f++; + /* Flags. */ + while (true) { + switch (*f) { + case '#': + assert(!alt_form); + alt_form = true; + break; + case '-': + assert(!left_justify); + left_justify = true; + break; + case ' ': + assert(!plus_space); + plus_space = true; + break; + case '+': + assert(!plus_plus); + plus_plus = true; + break; + default: goto label_width; + } + f++; + } + /* Width. */ + label_width: + switch (*f) { + case '*': + width = va_arg(ap, int); + f++; + if (width < 0) { + left_justify = true; + width = -width; + } + break; + case '0': case '1': case '2': case '3': case '4': + case '5': case '6': case '7': case '8': case '9': { + uintmax_t uwidth; + set_errno(0); + uwidth = malloc_strtoumax(f, (char **)&f, 10); + assert(uwidth != UINTMAX_MAX || get_errno() != + ERANGE); + width = (int)uwidth; + break; + } default: + break; + } + /* Width/precision separator. */ + if (*f == '.') + f++; + else + goto label_length; + /* Precision. */ + switch (*f) { + case '*': + prec = va_arg(ap, int); + f++; + break; + case '0': case '1': case '2': case '3': case '4': + case '5': case '6': case '7': case '8': case '9': { + uintmax_t uprec; + set_errno(0); + uprec = malloc_strtoumax(f, (char **)&f, 10); + assert(uprec != UINTMAX_MAX || get_errno() != + ERANGE); + prec = (int)uprec; + break; + } + default: break; + } + /* Length. */ + label_length: + switch (*f) { + case 'l': + f++; + if (*f == 'l') { + len = 'q'; + f++; + } else + len = 'l'; + break; + case 'q': case 'j': case 't': case 'z': + len = *f; + f++; + break; + default: break; + } + /* Conversion specifier. */ + switch (*f) { + case '%': + /* %% */ + APPEND_C(*f); + f++; + break; + case 'd': case 'i': { + intmax_t val JEMALLOC_CC_SILENCE_INIT(0); + char buf[D2S_BUFSIZE]; + + GET_ARG_NUMERIC(val, len); + s = d2s(val, (plus_plus ? '+' : (plus_space ? 
+ ' ' : '-')), buf, &slen); + APPEND_PADDED_S(s, slen, width, left_justify); + f++; + break; + } case 'o': { + uintmax_t val JEMALLOC_CC_SILENCE_INIT(0); + char buf[O2S_BUFSIZE]; + + GET_ARG_NUMERIC(val, len | 0x80); + s = o2s(val, alt_form, buf, &slen); + APPEND_PADDED_S(s, slen, width, left_justify); + f++; + break; + } case 'u': { + uintmax_t val JEMALLOC_CC_SILENCE_INIT(0); + char buf[U2S_BUFSIZE]; + + GET_ARG_NUMERIC(val, len | 0x80); + s = u2s(val, 10, false, buf, &slen); + APPEND_PADDED_S(s, slen, width, left_justify); + f++; + break; + } case 'x': case 'X': { + uintmax_t val JEMALLOC_CC_SILENCE_INIT(0); + char buf[X2S_BUFSIZE]; + + GET_ARG_NUMERIC(val, len | 0x80); + s = x2s(val, alt_form, *f == 'X', buf, &slen); + APPEND_PADDED_S(s, slen, width, left_justify); + f++; + break; + } case 'c': { + unsigned char val; + char buf[2]; + + assert(len == '?' || len == 'l'); + assert_not_implemented(len != 'l'); + val = va_arg(ap, int); + buf[0] = val; + buf[1] = '\0'; + APPEND_PADDED_S(buf, 1, width, left_justify); + f++; + break; + } case 's': + assert(len == '?' || len == 'l'); + assert_not_implemented(len != 'l'); + s = va_arg(ap, char *); + slen = (prec < 0) ? strlen(s) : (size_t)prec; + APPEND_PADDED_S(s, slen, width, left_justify); + f++; + break; + case 'p': { + uintmax_t val; + char buf[X2S_BUFSIZE]; + + GET_ARG_NUMERIC(val, 'p'); + s = x2s(val, true, false, buf, &slen); + APPEND_PADDED_S(s, slen, width, left_justify); + f++; + break; + } default: not_reached(); + } + break; + } default: { + APPEND_C(*f); + f++; + break; + }} + } + label_out: + if (i < size) + str[i] = '\0'; + else + str[size - 1] = '\0'; + +#undef APPEND_C +#undef APPEND_S +#undef APPEND_PADDED_S +#undef GET_ARG_NUMERIC + return (i); +} + +JEMALLOC_FORMAT_PRINTF(3, 4) +size_t +malloc_snprintf(char *str, size_t size, const char *format, ...) +{ + size_t ret; + va_list ap; + + va_start(ap, format); + ret = malloc_vsnprintf(str, size, format, ap); + va_end(ap); + + return (ret); +} + +void +malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque, + const char *format, va_list ap) +{ + char buf[MALLOC_PRINTF_BUFSIZE]; + + if (write_cb == NULL) { + /* + * The caller did not provide an alternate write_cb callback + * function, so use the default one. malloc_write() is an + * inline function, so use malloc_message() directly here. + */ + write_cb = (je_malloc_message != NULL) ? je_malloc_message : + wrtmessage; + cbopaque = NULL; + } + + malloc_vsnprintf(buf, sizeof(buf), format, ap); + write_cb(cbopaque, buf); +} + +/* + * Print to a callback function in such a way as to (hopefully) avoid memory + * allocation. + */ +JEMALLOC_FORMAT_PRINTF(3, 4) +void +malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque, + const char *format, ...) +{ + va_list ap; + + va_start(ap, format); + malloc_vcprintf(write_cb, cbopaque, format, ap); + va_end(ap); +} + +/* Print to stderr in such a way as to avoid memory allocation. */ +JEMALLOC_FORMAT_PRINTF(1, 2) +void +malloc_printf(const char *format, ...) +{ + va_list ap; + + va_start(ap, format); + malloc_vcprintf(NULL, NULL, format, ap); + va_end(ap); +} + +/* + * Restore normal assertion macros, in order to make it possible to compile all + * C files as a single concatenation. 
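+ * (assert.h re-establishes the config_debug-aware definitions of assert(), + * not_reached(), and not_implemented() used throughout jemalloc.)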
+ */ +#undef assert +#undef not_reached +#undef not_implemented +#include "jemalloc/internal/assert.h" diff --git a/sources/jemalloc/src/je_valgrind.c b/sources/jemalloc/src/je_valgrind.c new file mode 100755 index 00000000..8e7ef3a2 --- /dev/null +++ b/sources/jemalloc/src/je_valgrind.c @@ -0,0 +1,34 @@ +#include "jemalloc/internal/jemalloc_internal.h" +#ifndef JEMALLOC_VALGRIND +# error "This source file is for Valgrind integration." +#endif + +#include <valgrind/memcheck.h> + +void +valgrind_make_mem_noaccess(void *ptr, size_t usize) +{ + + VALGRIND_MAKE_MEM_NOACCESS(ptr, usize); +} + +void +valgrind_make_mem_undefined(void *ptr, size_t usize) +{ + + VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize); +} + +void +valgrind_make_mem_defined(void *ptr, size_t usize) +{ + + VALGRIND_MAKE_MEM_DEFINED(ptr, usize); +} + +void +valgrind_freelike_block(void *ptr, size_t usize) +{ + + VALGRIND_FREELIKE_BLOCK(ptr, usize); +} diff --git a/sources/jemalloc/src/je_witness.c b/sources/jemalloc/src/je_witness.c new file mode 100755 index 00000000..c3a65f7c --- /dev/null +++ b/sources/jemalloc/src/je_witness.c @@ -0,0 +1,136 @@ +#define JEMALLOC_WITNESS_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +void +witness_init(witness_t *witness, const char *name, witness_rank_t rank, + witness_comp_t *comp) +{ + + witness->name = name; + witness->rank = rank; + witness->comp = comp; +} + +#ifdef JEMALLOC_JET +#undef witness_lock_error +#define witness_lock_error JEMALLOC_N(n_witness_lock_error) +#endif +void +witness_lock_error(const witness_list_t *witnesses, const witness_t *witness) +{ + witness_t *w; + + malloc_printf("<jemalloc>: Lock rank order reversal:"); + ql_foreach(w, witnesses, link) { + malloc_printf(" %s(%u)", w->name, w->rank); + } + malloc_printf(" %s(%u)\n", witness->name, witness->rank); + abort(); +} +#ifdef JEMALLOC_JET +#undef witness_lock_error +#define witness_lock_error JEMALLOC_N(witness_lock_error) +witness_lock_error_t *witness_lock_error = JEMALLOC_N(n_witness_lock_error); +#endif + +#ifdef JEMALLOC_JET +#undef witness_owner_error +#define witness_owner_error JEMALLOC_N(n_witness_owner_error) +#endif +void +witness_owner_error(const witness_t *witness) +{ + + malloc_printf("<jemalloc>: Should own %s(%u)\n", witness->name, + witness->rank); + abort(); +} +#ifdef JEMALLOC_JET +#undef witness_owner_error +#define witness_owner_error JEMALLOC_N(witness_owner_error) +witness_owner_error_t *witness_owner_error = JEMALLOC_N(n_witness_owner_error); +#endif + +#ifdef JEMALLOC_JET +#undef witness_not_owner_error +#define witness_not_owner_error JEMALLOC_N(n_witness_not_owner_error) +#endif +void +witness_not_owner_error(const witness_t *witness) +{ + + malloc_printf("<jemalloc>: Should not own %s(%u)\n", witness->name, + witness->rank); + abort(); +} +#ifdef JEMALLOC_JET +#undef witness_not_owner_error +#define witness_not_owner_error JEMALLOC_N(witness_not_owner_error) +witness_not_owner_error_t *witness_not_owner_error = + JEMALLOC_N(n_witness_not_owner_error); +#endif + +#ifdef JEMALLOC_JET +#undef witness_depth_error +#define witness_depth_error JEMALLOC_N(n_witness_depth_error) +#endif +void +witness_depth_error(const witness_list_t *witnesses, + witness_rank_t rank_inclusive, unsigned depth) { + witness_t *w; + + malloc_printf("<jemalloc>: Should own %u lock%s of rank >= %u:", depth, + (depth != 1) ? 
"s" : "", rank_inclusive); + ql_foreach(w, witnesses, link) { + malloc_printf(" %s(%u)", w->name, w->rank); + } + malloc_printf("\n"); + abort(); +} +#ifdef JEMALLOC_JET +#undef witness_depth_error +#define witness_depth_error JEMALLOC_N(witness_depth_error) +witness_depth_error_t *witness_depth_error = JEMALLOC_N(n_witness_depth_error); +#endif + +void +witnesses_cleanup(tsd_t *tsd) +{ + + witness_assert_lockless(tsd_tsdn(tsd)); + + /* Do nothing. */ +} + +void +witness_fork_cleanup(tsd_t *tsd) +{ + + /* Do nothing. */ +} + +void +witness_prefork(tsd_t *tsd) +{ + + tsd_witness_fork_set(tsd, true); +} + +void +witness_postfork_parent(tsd_t *tsd) +{ + + tsd_witness_fork_set(tsd, false); +} + +void +witness_postfork_child(tsd_t *tsd) +{ +#ifndef JEMALLOC_MUTEX_INIT_CB + witness_list_t *witnesses; + + witnesses = tsd_witnessesp_get(tsd); + ql_new(witnesses); +#endif + tsd_witness_fork_set(tsd, false); +} diff --git a/sources/jemalloc/src/je_zone.c b/sources/jemalloc/src/je_zone.c new file mode 100755 index 00000000..6215133f --- /dev/null +++ b/sources/jemalloc/src/je_zone.c @@ -0,0 +1,478 @@ +#include "jemalloc/internal/jemalloc_internal.h" +#ifndef JEMALLOC_ZONE +# error "This source file is for zones on Darwin (OS X)." +#endif + +/* Definitions of the following structs in malloc/malloc.h might be too old + * for the built binary to run on newer versions of OSX. So use the newest + * possible version of those structs. + */ +typedef struct _malloc_zone_t { + void *reserved1; + void *reserved2; + size_t (*size)(struct _malloc_zone_t *, const void *); + void *(*malloc)(struct _malloc_zone_t *, size_t); + void *(*calloc)(struct _malloc_zone_t *, size_t, size_t); + void *(*valloc)(struct _malloc_zone_t *, size_t); + void (*free)(struct _malloc_zone_t *, void *); + void *(*realloc)(struct _malloc_zone_t *, void *, size_t); + void (*destroy)(struct _malloc_zone_t *); + const char *zone_name; + unsigned (*batch_malloc)(struct _malloc_zone_t *, size_t, void **, unsigned); + void (*batch_free)(struct _malloc_zone_t *, void **, unsigned); + struct malloc_introspection_t *introspect; + unsigned version; + void *(*memalign)(struct _malloc_zone_t *, size_t, size_t); + void (*free_definite_size)(struct _malloc_zone_t *, void *, size_t); + size_t (*pressure_relief)(struct _malloc_zone_t *, size_t); +} malloc_zone_t; + +typedef struct { + vm_address_t address; + vm_size_t size; +} vm_range_t; + +typedef struct malloc_statistics_t { + unsigned blocks_in_use; + size_t size_in_use; + size_t max_size_in_use; + size_t size_allocated; +} malloc_statistics_t; + +typedef kern_return_t memory_reader_t(task_t, vm_address_t, vm_size_t, void **); + +typedef void vm_range_recorder_t(task_t, void *, unsigned type, vm_range_t *, unsigned); + +typedef struct malloc_introspection_t { + kern_return_t (*enumerator)(task_t, void *, unsigned, vm_address_t, memory_reader_t, vm_range_recorder_t); + size_t (*good_size)(malloc_zone_t *, size_t); + boolean_t (*check)(malloc_zone_t *); + void (*print)(malloc_zone_t *, boolean_t); + void (*log)(malloc_zone_t *, void *); + void (*force_lock)(malloc_zone_t *); + void (*force_unlock)(malloc_zone_t *); + void (*statistics)(malloc_zone_t *, malloc_statistics_t *); + boolean_t (*zone_locked)(malloc_zone_t *); + boolean_t (*enable_discharge_checking)(malloc_zone_t *); + boolean_t (*disable_discharge_checking)(malloc_zone_t *); + void (*discharge)(malloc_zone_t *, void *); +#ifdef __BLOCKS__ + void (*enumerate_discharged_pointers)(malloc_zone_t *, void (^)(void *, void *)); +#else + void 
*enumerate_unavailable_without_blocks; +#endif + void (*reinit_lock)(malloc_zone_t *); +} malloc_introspection_t; + +extern kern_return_t malloc_get_all_zones(task_t, memory_reader_t, vm_address_t **, unsigned *); + +extern malloc_zone_t *malloc_default_zone(void); + +extern void malloc_zone_register(malloc_zone_t *zone); + +extern void malloc_zone_unregister(malloc_zone_t *zone); + +/* + * The malloc_default_purgeable_zone() function is only available on >= 10.6. + * We need to check whether it is present at runtime, thus the weak_import. + */ +extern malloc_zone_t *malloc_default_purgeable_zone(void) +JEMALLOC_ATTR(weak_import); + +/******************************************************************************/ +/* Data. */ + +static malloc_zone_t *default_zone, *purgeable_zone; +static malloc_zone_t jemalloc_zone; +static struct malloc_introspection_t jemalloc_zone_introspect; + +/******************************************************************************/ +/* Function prototypes for non-inline static functions. */ + +static size_t zone_size(malloc_zone_t *zone, const void *ptr); +static void *zone_malloc(malloc_zone_t *zone, size_t size); +static void *zone_calloc(malloc_zone_t *zone, size_t num, size_t size); +static void *zone_valloc(malloc_zone_t *zone, size_t size); +static void zone_free(malloc_zone_t *zone, void *ptr); +static void *zone_realloc(malloc_zone_t *zone, void *ptr, size_t size); +static void *zone_memalign(malloc_zone_t *zone, size_t alignment, + size_t size); +static void zone_free_definite_size(malloc_zone_t *zone, void *ptr, + size_t size); +static void zone_destroy(malloc_zone_t *zone); +static unsigned zone_batch_malloc(struct _malloc_zone_t *zone, size_t size, + void **results, unsigned num_requested); +static void zone_batch_free(struct _malloc_zone_t *zone, + void **to_be_freed, unsigned num_to_be_freed); +static size_t zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal); +static size_t zone_good_size(malloc_zone_t *zone, size_t size); +static kern_return_t zone_enumerator(task_t task, void *data, unsigned type_mask, + vm_address_t zone_address, memory_reader_t reader, + vm_range_recorder_t recorder); +static boolean_t zone_check(malloc_zone_t *zone); +static void zone_print(malloc_zone_t *zone, boolean_t verbose); +static void zone_log(malloc_zone_t *zone, void *address); +static void zone_force_lock(malloc_zone_t *zone); +static void zone_force_unlock(malloc_zone_t *zone); +static void zone_statistics(malloc_zone_t *zone, + malloc_statistics_t *stats); +static boolean_t zone_locked(malloc_zone_t *zone); +static void zone_reinit_lock(malloc_zone_t *zone); + +/******************************************************************************/ +/* + * Functions. + */ + +static size_t +zone_size(malloc_zone_t *zone, const void *ptr) +{ + + /* + * There appear to be places within Darwin (such as setenv(3)) that + * cause calls to this function with pointers that *no* zone owns. If + * we knew that all pointers were owned by *some* zone, we could split + * our zone into two parts, and use one as the default allocator and + * the other as the default deallocator/reallocator. Since that will + * not work in practice, we must check all pointers to assure that they + * reside within a mapped chunk before determining size. 
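+ * ivsalloc() returns 0 for pointers jemalloc does not own; zone_free() and + * the other hooks below rely on that sentinel to route foreign pointers back + * to the system allocator.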
+ */ + return (ivsalloc(tsdn_fetch(), ptr, config_prof)); +} + +static void * +zone_malloc(malloc_zone_t *zone, size_t size) +{ + + return (je_malloc(size)); +} + +static void * +zone_calloc(malloc_zone_t *zone, size_t num, size_t size) +{ + + return (je_calloc(num, size)); +} + +static void * +zone_valloc(malloc_zone_t *zone, size_t size) +{ + void *ret = NULL; /* Assignment avoids useless compiler warning. */ + + je_posix_memalign(&ret, PAGE, size); + + return (ret); +} + +static void +zone_free(malloc_zone_t *zone, void *ptr) +{ + + if (ivsalloc(tsdn_fetch(), ptr, config_prof) != 0) { + je_free(ptr); + return; + } + + free(ptr); +} + +static void * +zone_realloc(malloc_zone_t *zone, void *ptr, size_t size) +{ + + if (ivsalloc(tsdn_fetch(), ptr, config_prof) != 0) + return (je_realloc(ptr, size)); + + return (realloc(ptr, size)); +} + +static void * +zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size) +{ + void *ret = NULL; /* Assignment avoids useless compiler warning. */ + + je_posix_memalign(&ret, alignment, size); + + return (ret); +} + +static void +zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size) +{ + size_t alloc_size; + + alloc_size = ivsalloc(tsdn_fetch(), ptr, config_prof); + if (alloc_size != 0) { + assert(alloc_size == size); + je_free(ptr); + return; + } + + free(ptr); +} + +static void +zone_destroy(malloc_zone_t *zone) +{ + + /* This function should never be called. */ + not_reached(); +} + +static unsigned +zone_batch_malloc(struct _malloc_zone_t *zone, size_t size, void **results, + unsigned num_requested) +{ + unsigned i; + + for (i = 0; i < num_requested; i++) { + results[i] = je_malloc(size); + if (!results[i]) + break; + } + + return i; +} + +static void +zone_batch_free(struct _malloc_zone_t *zone, void **to_be_freed, + unsigned num_to_be_freed) +{ + unsigned i; + + for (i = 0; i < num_to_be_freed; i++) { + zone_free(zone, to_be_freed[i]); + to_be_freed[i] = NULL; + } +} + +static size_t +zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal) +{ + return 0; +} + +static size_t +zone_good_size(malloc_zone_t *zone, size_t size) +{ + + if (size == 0) + size = 1; + return (s2u(size)); +} + +static kern_return_t +zone_enumerator(task_t task, void *data, unsigned type_mask, + vm_address_t zone_address, memory_reader_t reader, + vm_range_recorder_t recorder) +{ + return KERN_SUCCESS; +} + +static boolean_t +zone_check(malloc_zone_t *zone) +{ + return true; +} + +static void +zone_print(malloc_zone_t *zone, boolean_t verbose) +{ +} + +static void +zone_log(malloc_zone_t *zone, void *address) +{ +} + +static void +zone_force_lock(malloc_zone_t *zone) +{ + + if (isthreaded) + jemalloc_prefork(); +} + +static void +zone_force_unlock(malloc_zone_t *zone) +{ + + /* + * Call jemalloc_postfork_child() rather than + * jemalloc_postfork_parent(), because this function is executed by both + * parent and child. The parent can tolerate having state + * reinitialized, but the child cannot unlock mutexes that were locked + * by the parent. 
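+ * (Darwin's libmalloc invokes the force_lock/force_unlock introspection + * hooks around fork(2); both are installed in zone_init() below.)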
+ */ + if (isthreaded) + jemalloc_postfork_child(); +} + +static void +zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats) +{ + /* We make no effort to actually fill the values */ + stats->blocks_in_use = 0; + stats->size_in_use = 0; + stats->max_size_in_use = 0; + stats->size_allocated = 0; +} + +static boolean_t +zone_locked(malloc_zone_t *zone) +{ + /* Pretend no lock is being held */ + return false; +} + +static void +zone_reinit_lock(malloc_zone_t *zone) +{ + /* As of OSX 10.12, this function is only used when force_unlock would + * be used if the zone version were < 9. So just use force_unlock. */ + zone_force_unlock(zone); +} + +static void +zone_init(void) +{ + + jemalloc_zone.size = zone_size; + jemalloc_zone.malloc = zone_malloc; + jemalloc_zone.calloc = zone_calloc; + jemalloc_zone.valloc = zone_valloc; + jemalloc_zone.free = zone_free; + jemalloc_zone.realloc = zone_realloc; + jemalloc_zone.destroy = zone_destroy; + jemalloc_zone.zone_name = "jemalloc_zone"; + jemalloc_zone.batch_malloc = zone_batch_malloc; + jemalloc_zone.batch_free = zone_batch_free; + jemalloc_zone.introspect = &jemalloc_zone_introspect; + jemalloc_zone.version = 9; + jemalloc_zone.memalign = zone_memalign; + jemalloc_zone.free_definite_size = zone_free_definite_size; + jemalloc_zone.pressure_relief = zone_pressure_relief; + + jemalloc_zone_introspect.enumerator = zone_enumerator; + jemalloc_zone_introspect.good_size = zone_good_size; + jemalloc_zone_introspect.check = zone_check; + jemalloc_zone_introspect.print = zone_print; + jemalloc_zone_introspect.log = zone_log; + jemalloc_zone_introspect.force_lock = zone_force_lock; + jemalloc_zone_introspect.force_unlock = zone_force_unlock; + jemalloc_zone_introspect.statistics = zone_statistics; + jemalloc_zone_introspect.zone_locked = zone_locked; + jemalloc_zone_introspect.enable_discharge_checking = NULL; + jemalloc_zone_introspect.disable_discharge_checking = NULL; + jemalloc_zone_introspect.discharge = NULL; +#ifdef __BLOCKS__ + jemalloc_zone_introspect.enumerate_discharged_pointers = NULL; +#else + jemalloc_zone_introspect.enumerate_unavailable_without_blocks = NULL; +#endif + jemalloc_zone_introspect.reinit_lock = zone_reinit_lock; +} + +static malloc_zone_t * +zone_default_get(void) +{ + malloc_zone_t **zones = NULL; + unsigned int num_zones = 0; + + /* + * On OSX 10.12, malloc_default_zone returns a special zone that is not + * present in the list of registered zones. That zone uses a "lite zone" + * if one is present (apparently enabled when malloc stack logging is + * enabled), or the first registered zone otherwise. In practice this + * means unless malloc stack logging is enabled, the first registered + * zone is the default. So get the list of zones to get the first one, + * instead of relying on malloc_default_zone. + */ + if (KERN_SUCCESS != malloc_get_all_zones(0, NULL, + (vm_address_t**)&zones, &num_zones)) { + /* + * Reset the value in case the failure happened after it was + * set. + */ + num_zones = 0; + } + + if (num_zones) + return (zones[0]); + + return (malloc_default_zone()); +} + +/* As written, this function can only promote jemalloc_zone. */ +static void +zone_promote(void) +{ + malloc_zone_t *zone; + + do { + /* + * Unregister and reregister the default zone. On OSX >= 10.6, + * unregistering takes the last registered zone and places it + * at the location of the specified zone. Unregistering the + * default zone thus makes the last registered one the default. + * On OSX < 10.6, unregistering shifts all registered zones. 
+ * The first registered zone then becomes the default. + */ + malloc_zone_unregister(default_zone); + malloc_zone_register(default_zone); + + /* + * On OSX 10.6, having the default purgeable zone appear before + * the default zone makes some things crash because it thinks it + * owns the default zone allocated pointers. We thus + * unregister/re-register it in order to ensure it's always + * after the default zone. On OSX < 10.6, there is no purgeable + * zone, so this does nothing. On OSX >= 10.6, unregistering + * replaces the purgeable zone with the last registered zone + * above, i.e. the default zone. Registering it again then puts + * it at the end, obviously after the default zone. + */ + if (purgeable_zone != NULL) { + malloc_zone_unregister(purgeable_zone); + malloc_zone_register(purgeable_zone); + } + + zone = zone_default_get(); + } while (zone != &jemalloc_zone); +} + +JEMALLOC_ATTR(constructor) +void +zone_register(void) +{ + + /* + * If something else replaced the system default zone allocator, don't + * register jemalloc's. + */ + default_zone = zone_default_get(); + if (!default_zone->zone_name || strcmp(default_zone->zone_name, + "DefaultMallocZone") != 0) + return; + + /* + * The default purgeable zone is created lazily by OSX's libc. It uses + * the default zone when it is created for "small" allocations + * (< 15 KiB), but assumes the default zone is a scalable_zone. This + * obviously fails when the default zone is the jemalloc zone, so + * malloc_default_purgeable_zone() is called beforehand so that the + * default purgeable zone is created when the default zone is still + * a scalable_zone. As purgeable zones only exist on >= 10.6, we need + * to check for the existence of malloc_default_purgeable_zone() at + * run time. + */ + purgeable_zone = (malloc_default_purgeable_zone == NULL) ? NULL : + malloc_default_purgeable_zone(); + + /* Register the custom zone. At this point it won't be the default. */ + zone_init(); + malloc_zone_register(&jemalloc_zone); + + /* Promote the custom zone to be default. */ + zone_promote(); +} diff --git a/sources/khronos/EGL/egl.h b/sources/khronos/EGL/egl.h new file mode 100755 index 00000000..1db274c4 --- /dev/null +++ b/sources/khronos/EGL/egl.h @@ -0,0 +1,247 @@ +#ifndef __egl_h_ +#define __egl_h_ 1 + +#ifdef __cplusplus +extern "C" { +#endif + +/* +** Copyright (c) 2013-2017 The Khronos Group Inc. +** +** Permission is hereby granted, free of charge, to any person obtaining a +** copy of this software and/or associated documentation files (the +** "Materials"), to deal in the Materials without restriction, including +** without limitation the rights to use, copy, modify, merge, publish, +** distribute, sublicense, and/or sell copies of the Materials, and to +** permit persons to whom the Materials are furnished to do so, subject to +** the following conditions: +** +** The above copyright notice and this permission notice shall be included +** in all copies or substantial portions of the Materials. +** +** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +** MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS. 
+*/ +/* +** This header is generated from the Khronos OpenGL / OpenGL ES XML +** API Registry. The current version of the Registry, generator scripts +** used to make the header, and the header can be found at +** http://www.khronos.org/registry/egl +** +** Khronos $Git commit SHA1: a732b061e7 $ on $Git commit date: 2017-06-17 23:27:53 +0100 $ +*/ + +#include + +/* Generated on date 20170627 */ + +/* Generated C header for: + * API: egl + * Versions considered: .* + * Versions emitted: .* + * Default extensions included: None + * Additional extensions included: _nomatch_^ + * Extensions removed: _nomatch_^ + */ + +#ifndef EGL_VERSION_1_0 +#define EGL_VERSION_1_0 1 +typedef unsigned int EGLBoolean; +typedef void *EGLDisplay; +#include +#include +typedef void *EGLConfig; +typedef void *EGLSurface; +typedef void *EGLContext; +typedef void (*__eglMustCastToProperFunctionPointerType)(void); +#define EGL_ALPHA_SIZE 0x3021 +#define EGL_BAD_ACCESS 0x3002 +#define EGL_BAD_ALLOC 0x3003 +#define EGL_BAD_ATTRIBUTE 0x3004 +#define EGL_BAD_CONFIG 0x3005 +#define EGL_BAD_CONTEXT 0x3006 +#define EGL_BAD_CURRENT_SURFACE 0x3007 +#define EGL_BAD_DISPLAY 0x3008 +#define EGL_BAD_MATCH 0x3009 +#define EGL_BAD_NATIVE_PIXMAP 0x300A +#define EGL_BAD_NATIVE_WINDOW 0x300B +#define EGL_BAD_PARAMETER 0x300C +#define EGL_BAD_SURFACE 0x300D +#define EGL_BLUE_SIZE 0x3022 +#define EGL_BUFFER_SIZE 0x3020 +#define EGL_CONFIG_CAVEAT 0x3027 +#define EGL_CONFIG_ID 0x3028 +#define EGL_CORE_NATIVE_ENGINE 0x305B +#define EGL_DEPTH_SIZE 0x3025 +#define EGL_DONT_CARE EGL_CAST(EGLint,-1) +#define EGL_DRAW 0x3059 +#define EGL_EXTENSIONS 0x3055 +#define EGL_FALSE 0 +#define EGL_GREEN_SIZE 0x3023 +#define EGL_HEIGHT 0x3056 +#define EGL_LARGEST_PBUFFER 0x3058 +#define EGL_LEVEL 0x3029 +#define EGL_MAX_PBUFFER_HEIGHT 0x302A +#define EGL_MAX_PBUFFER_PIXELS 0x302B +#define EGL_MAX_PBUFFER_WIDTH 0x302C +#define EGL_NATIVE_RENDERABLE 0x302D +#define EGL_NATIVE_VISUAL_ID 0x302E +#define EGL_NATIVE_VISUAL_TYPE 0x302F +#define EGL_NONE 0x3038 +#define EGL_NON_CONFORMANT_CONFIG 0x3051 +#define EGL_NOT_INITIALIZED 0x3001 +#define EGL_NO_CONTEXT EGL_CAST(EGLContext,0) +#define EGL_NO_DISPLAY EGL_CAST(EGLDisplay,0) +#define EGL_NO_SURFACE EGL_CAST(EGLSurface,0) +#define EGL_PBUFFER_BIT 0x0001 +#define EGL_PIXMAP_BIT 0x0002 +#define EGL_READ 0x305A +#define EGL_RED_SIZE 0x3024 +#define EGL_SAMPLES 0x3031 +#define EGL_SAMPLE_BUFFERS 0x3032 +#define EGL_SLOW_CONFIG 0x3050 +#define EGL_STENCIL_SIZE 0x3026 +#define EGL_SUCCESS 0x3000 +#define EGL_SURFACE_TYPE 0x3033 +#define EGL_TRANSPARENT_BLUE_VALUE 0x3035 +#define EGL_TRANSPARENT_GREEN_VALUE 0x3036 +#define EGL_TRANSPARENT_RED_VALUE 0x3037 +#define EGL_TRANSPARENT_RGB 0x3052 +#define EGL_TRANSPARENT_TYPE 0x3034 +#define EGL_TRUE 1 +#define EGL_VENDOR 0x3053 +#define EGL_VERSION 0x3054 +#define EGL_WIDTH 0x3057 +#define EGL_WINDOW_BIT 0x0004 +EGLAPI EGLBoolean EGLAPIENTRY eglChooseConfig (EGLDisplay dpy, const EGLint *attrib_list, EGLConfig *configs, EGLint config_size, EGLint *num_config); +EGLAPI EGLBoolean EGLAPIENTRY eglCopyBuffers (EGLDisplay dpy, EGLSurface surface, EGLNativePixmapType target); +EGLAPI EGLContext EGLAPIENTRY eglCreateContext (EGLDisplay dpy, EGLConfig config, EGLContext share_context, const EGLint *attrib_list); +EGLAPI EGLSurface EGLAPIENTRY eglCreatePbufferSurface (EGLDisplay dpy, EGLConfig config, const EGLint *attrib_list); +EGLAPI EGLSurface EGLAPIENTRY eglCreatePixmapSurface (EGLDisplay dpy, EGLConfig config, EGLNativePixmapType pixmap, const EGLint *attrib_list); +EGLAPI 
EGLSurface EGLAPIENTRY eglCreateWindowSurface (EGLDisplay dpy, EGLConfig config, EGLNativeWindowType win, const EGLint *attrib_list); +EGLAPI EGLBoolean EGLAPIENTRY eglDestroyContext (EGLDisplay dpy, EGLContext ctx); +EGLAPI EGLBoolean EGLAPIENTRY eglDestroySurface (EGLDisplay dpy, EGLSurface surface); +EGLAPI EGLBoolean EGLAPIENTRY eglGetConfigAttrib (EGLDisplay dpy, EGLConfig config, EGLint attribute, EGLint *value); +EGLAPI EGLBoolean EGLAPIENTRY eglGetConfigs (EGLDisplay dpy, EGLConfig *configs, EGLint config_size, EGLint *num_config); +EGLAPI EGLDisplay EGLAPIENTRY eglGetCurrentDisplay (void); +EGLAPI EGLSurface EGLAPIENTRY eglGetCurrentSurface (EGLint readdraw); +EGLAPI EGLDisplay EGLAPIENTRY eglGetDisplay (EGLNativeDisplayType display_id); +EGLAPI EGLint EGLAPIENTRY eglGetError (void); +EGLAPI __eglMustCastToProperFunctionPointerType EGLAPIENTRY eglGetProcAddress (const char *procname); +EGLAPI EGLBoolean EGLAPIENTRY eglInitialize (EGLDisplay dpy, EGLint *major, EGLint *minor); +EGLAPI EGLBoolean EGLAPIENTRY eglMakeCurrent (EGLDisplay dpy, EGLSurface draw, EGLSurface read, EGLContext ctx); +EGLAPI EGLBoolean EGLAPIENTRY eglQueryContext (EGLDisplay dpy, EGLContext ctx, EGLint attribute, EGLint *value); +EGLAPI const char *EGLAPIENTRY eglQueryString (EGLDisplay dpy, EGLint name); +EGLAPI EGLBoolean EGLAPIENTRY eglQuerySurface (EGLDisplay dpy, EGLSurface surface, EGLint attribute, EGLint *value); +EGLAPI EGLBoolean EGLAPIENTRY eglSwapBuffers (EGLDisplay dpy, EGLSurface surface); +EGLAPI EGLBoolean EGLAPIENTRY eglTerminate (EGLDisplay dpy); +EGLAPI EGLBoolean EGLAPIENTRY eglWaitGL (void); +EGLAPI EGLBoolean EGLAPIENTRY eglWaitNative (EGLint engine); +#endif /* EGL_VERSION_1_0 */ + +#ifndef EGL_VERSION_1_1 +#define EGL_VERSION_1_1 1 +#define EGL_BACK_BUFFER 0x3084 +#define EGL_BIND_TO_TEXTURE_RGB 0x3039 +#define EGL_BIND_TO_TEXTURE_RGBA 0x303A +#define EGL_CONTEXT_LOST 0x300E +#define EGL_MIN_SWAP_INTERVAL 0x303B +#define EGL_MAX_SWAP_INTERVAL 0x303C +#define EGL_MIPMAP_TEXTURE 0x3082 +#define EGL_MIPMAP_LEVEL 0x3083 +#define EGL_NO_TEXTURE 0x305C +#define EGL_TEXTURE_2D 0x305F +#define EGL_TEXTURE_FORMAT 0x3080 +#define EGL_TEXTURE_RGB 0x305D +#define EGL_TEXTURE_RGBA 0x305E +#define EGL_TEXTURE_TARGET 0x3081 +EGLAPI EGLBoolean EGLAPIENTRY eglBindTexImage (EGLDisplay dpy, EGLSurface surface, EGLint buffer); +EGLAPI EGLBoolean EGLAPIENTRY eglReleaseTexImage (EGLDisplay dpy, EGLSurface surface, EGLint buffer); +EGLAPI EGLBoolean EGLAPIENTRY eglSurfaceAttrib (EGLDisplay dpy, EGLSurface surface, EGLint attribute, EGLint value); +EGLAPI EGLBoolean EGLAPIENTRY eglSwapInterval (EGLDisplay dpy, EGLint interval); +#endif /* EGL_VERSION_1_1 */ + +#ifndef EGL_VERSION_1_2 +#define EGL_VERSION_1_2 1 +typedef unsigned int EGLenum; +typedef void *EGLClientBuffer; +#define EGL_ALPHA_FORMAT 0x3088 +#define EGL_ALPHA_FORMAT_NONPRE 0x308B +#define EGL_ALPHA_FORMAT_PRE 0x308C +#define EGL_ALPHA_MASK_SIZE 0x303E +#define EGL_BUFFER_PRESERVED 0x3094 +#define EGL_BUFFER_DESTROYED 0x3095 +#define EGL_CLIENT_APIS 0x308D +#define EGL_COLORSPACE 0x3087 +#define EGL_COLORSPACE_sRGB 0x3089 +#define EGL_COLORSPACE_LINEAR 0x308A +#define EGL_COLOR_BUFFER_TYPE 0x303F +#define EGL_CONTEXT_CLIENT_TYPE 0x3097 +#define EGL_DISPLAY_SCALING 10000 +#define EGL_HORIZONTAL_RESOLUTION 0x3090 +#define EGL_LUMINANCE_BUFFER 0x308F +#define EGL_LUMINANCE_SIZE 0x303D +#define EGL_OPENGL_ES_BIT 0x0001 +#define EGL_OPENVG_BIT 0x0002 +#define EGL_OPENGL_ES_API 0x30A0 +#define EGL_OPENVG_API 0x30A1 +#define EGL_OPENVG_IMAGE 0x3096 
+#define EGL_PIXEL_ASPECT_RATIO 0x3092 +#define EGL_RENDERABLE_TYPE 0x3040 +#define EGL_RENDER_BUFFER 0x3086 +#define EGL_RGB_BUFFER 0x308E +#define EGL_SINGLE_BUFFER 0x3085 +#define EGL_SWAP_BEHAVIOR 0x3093 +#define EGL_UNKNOWN EGL_CAST(EGLint,-1) +#define EGL_VERTICAL_RESOLUTION 0x3091 +EGLAPI EGLBoolean EGLAPIENTRY eglBindAPI (EGLenum api); +EGLAPI EGLenum EGLAPIENTRY eglQueryAPI (void); +EGLAPI EGLSurface EGLAPIENTRY eglCreatePbufferFromClientBuffer (EGLDisplay dpy, EGLenum buftype, EGLClientBuffer buffer, EGLConfig config, const EGLint *attrib_list); +EGLAPI EGLBoolean EGLAPIENTRY eglReleaseThread (void); +EGLAPI EGLBoolean EGLAPIENTRY eglWaitClient (void); +#endif /* EGL_VERSION_1_2 */ + +#ifndef EGL_VERSION_1_3 +#define EGL_VERSION_1_3 1 +#define EGL_CONFORMANT 0x3042 +#define EGL_CONTEXT_CLIENT_VERSION 0x3098 +#define EGL_MATCH_NATIVE_PIXMAP 0x3041 +#define EGL_OPENGL_ES2_BIT 0x0004 +#define EGL_VG_ALPHA_FORMAT 0x3088 +#define EGL_VG_ALPHA_FORMAT_NONPRE 0x308B +#define EGL_VG_ALPHA_FORMAT_PRE 0x308C +#define EGL_VG_ALPHA_FORMAT_PRE_BIT 0x0040 +#define EGL_VG_COLORSPACE 0x3087 +#define EGL_VG_COLORSPACE_sRGB 0x3089 +#define EGL_VG_COLORSPACE_LINEAR 0x308A +#define EGL_VG_COLORSPACE_LINEAR_BIT 0x0020 +#endif /* EGL_VERSION_1_3 */ + +#ifndef EGL_VERSION_1_4 +#define EGL_VERSION_1_4 1 +#define EGL_DEFAULT_DISPLAY EGL_CAST(EGLNativeDisplayType,0) +#define EGL_MULTISAMPLE_RESOLVE_BOX_BIT 0x0200 +#define EGL_MULTISAMPLE_RESOLVE 0x3099 +#define EGL_MULTISAMPLE_RESOLVE_DEFAULT 0x309A +#define EGL_MULTISAMPLE_RESOLVE_BOX 0x309B +#define EGL_OPENGL_API 0x30A2 +#define EGL_OPENGL_BIT 0x0008 +#define EGL_SWAP_BEHAVIOR_PRESERVED_BIT 0x0400 +EGLAPI EGLContext EGLAPIENTRY eglGetCurrentContext (void); +#endif /* EGL_VERSION_1_4 */ + +/* This version of Android does not yet support EGL 1.5, but the following + * portion of EGL 1.5 is included in order to support portions of "eglext.h". + */ +typedef intptr_t EGLAttrib; + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/sources/khronos/EGL/eglext.h b/sources/khronos/EGL/eglext.h new file mode 100755 index 00000000..466768ae --- /dev/null +++ b/sources/khronos/EGL/eglext.h @@ -0,0 +1,1296 @@ +#ifndef __eglext_h_ +#define __eglext_h_ 1 + +#ifdef __cplusplus +extern "C" { +#endif + +/* +** Copyright (c) 2013-2017 The Khronos Group Inc. +** +** Permission is hereby granted, free of charge, to any person obtaining a +** copy of this software and/or associated documentation files (the +** "Materials"), to deal in the Materials without restriction, including +** without limitation the rights to use, copy, modify, merge, publish, +** distribute, sublicense, and/or sell copies of the Materials, and to +** permit persons to whom the Materials are furnished to do so, subject to +** the following conditions: +** +** The above copyright notice and this permission notice shall be included +** in all copies or substantial portions of the Materials. +** +** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +** MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS. +*/ +/* +** This header is generated from the Khronos OpenGL / OpenGL ES XML +** API Registry. 
The current version of the Registry, generator scripts +** used to make the header, and the header can be found at +** http://www.khronos.org/registry/egl +** +** Khronos $Git commit SHA1: a732b061e7 $ on $Git commit date: 2017-06-17 23:27:53 +0100 $ +*/ + +#include <EGL/eglplatform.h> + +#define EGL_EGLEXT_VERSION 20170627 + +/* Generated C header for: + * API: egl + * Versions considered: .* + * Versions emitted: _nomatch_^ + * Default extensions included: egl + * Additional extensions included: _nomatch_^ + * Extensions removed: _nomatch_^ + */ + +#ifndef EGL_KHR_cl_event +#define EGL_KHR_cl_event 1 +#define EGL_CL_EVENT_HANDLE_KHR 0x309C +#define EGL_SYNC_CL_EVENT_KHR 0x30FE +#define EGL_SYNC_CL_EVENT_COMPLETE_KHR 0x30FF +#endif /* EGL_KHR_cl_event */ + +#ifndef EGL_KHR_cl_event2 +#define EGL_KHR_cl_event2 1 +typedef void *EGLSyncKHR; +typedef intptr_t EGLAttribKHR; +typedef EGLSyncKHR (EGLAPIENTRYP PFNEGLCREATESYNC64KHRPROC) (EGLDisplay dpy, EGLenum type, const EGLAttribKHR *attrib_list); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLSyncKHR EGLAPIENTRY eglCreateSync64KHR (EGLDisplay dpy, EGLenum type, const EGLAttribKHR *attrib_list); +#endif +#endif /* EGL_KHR_cl_event2 */ + +#ifndef EGL_KHR_client_get_all_proc_addresses +#define EGL_KHR_client_get_all_proc_addresses 1 +#endif /* EGL_KHR_client_get_all_proc_addresses */ + +#ifndef EGL_KHR_config_attribs +#define EGL_KHR_config_attribs 1 +#define EGL_CONFORMANT_KHR 0x3042 +#define EGL_VG_COLORSPACE_LINEAR_BIT_KHR 0x0020 +#define EGL_VG_ALPHA_FORMAT_PRE_BIT_KHR 0x0040 +#endif /* EGL_KHR_config_attribs */ + +#ifndef EGL_KHR_context_flush_control +#define EGL_KHR_context_flush_control 1 +#define EGL_CONTEXT_RELEASE_BEHAVIOR_NONE_KHR 0 +#define EGL_CONTEXT_RELEASE_BEHAVIOR_KHR 0x2097 +#define EGL_CONTEXT_RELEASE_BEHAVIOR_FLUSH_KHR 0x2098 +#endif /* EGL_KHR_context_flush_control */ + +#ifndef EGL_KHR_create_context +#define EGL_KHR_create_context 1 +#define EGL_CONTEXT_MAJOR_VERSION_KHR 0x3098 +#define EGL_CONTEXT_MINOR_VERSION_KHR 0x30FB +#define EGL_CONTEXT_FLAGS_KHR 0x30FC +#define EGL_CONTEXT_OPENGL_PROFILE_MASK_KHR 0x30FD +#define EGL_CONTEXT_OPENGL_RESET_NOTIFICATION_STRATEGY_KHR 0x31BD +#define EGL_NO_RESET_NOTIFICATION_KHR 0x31BE +#define EGL_LOSE_CONTEXT_ON_RESET_KHR 0x31BF +#define EGL_CONTEXT_OPENGL_DEBUG_BIT_KHR 0x00000001 +#define EGL_CONTEXT_OPENGL_FORWARD_COMPATIBLE_BIT_KHR 0x00000002 +#define EGL_CONTEXT_OPENGL_ROBUST_ACCESS_BIT_KHR 0x00000004 +#define EGL_CONTEXT_OPENGL_CORE_PROFILE_BIT_KHR 0x00000001 +#define EGL_CONTEXT_OPENGL_COMPATIBILITY_PROFILE_BIT_KHR 0x00000002 +#define EGL_OPENGL_ES3_BIT_KHR 0x00000040 +#endif /* EGL_KHR_create_context */ + +#ifndef EGL_KHR_create_context_no_error +#define EGL_KHR_create_context_no_error 1 +#define EGL_CONTEXT_OPENGL_NO_ERROR_KHR 0x31B3 +#endif /* EGL_KHR_create_context_no_error */ + +#ifndef EGL_KHR_debug +#define EGL_KHR_debug 1 +typedef void *EGLLabelKHR; +typedef void *EGLObjectKHR; +typedef void (EGLAPIENTRY *EGLDEBUGPROCKHR)(EGLenum error,const char *command,EGLint messageType,EGLLabelKHR threadLabel,EGLLabelKHR objectLabel,const char* message); +#define EGL_OBJECT_THREAD_KHR 0x33B0 +#define EGL_OBJECT_DISPLAY_KHR 0x33B1 +#define EGL_OBJECT_CONTEXT_KHR 0x33B2 +#define EGL_OBJECT_SURFACE_KHR 0x33B3 +#define EGL_OBJECT_IMAGE_KHR 0x33B4 +#define EGL_OBJECT_SYNC_KHR 0x33B5 +#define EGL_OBJECT_STREAM_KHR 0x33B6 +#define EGL_DEBUG_MSG_CRITICAL_KHR 0x33B9 +#define EGL_DEBUG_MSG_ERROR_KHR 0x33BA +#define EGL_DEBUG_MSG_WARN_KHR 0x33BB +#define EGL_DEBUG_MSG_INFO_KHR 0x33BC +#define EGL_DEBUG_CALLBACK_KHR
0x33B8 +typedef EGLint (EGLAPIENTRYP PFNEGLDEBUGMESSAGECONTROLKHRPROC) (EGLDEBUGPROCKHR callback, const EGLAttrib *attrib_list); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLQUERYDEBUGKHRPROC) (EGLint attribute, EGLAttrib *value); +typedef EGLint (EGLAPIENTRYP PFNEGLLABELOBJECTKHRPROC) (EGLDisplay display, EGLenum objectType, EGLObjectKHR object, EGLLabelKHR label); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLint EGLAPIENTRY eglDebugMessageControlKHR (EGLDEBUGPROCKHR callback, const EGLAttrib *attrib_list); +EGLAPI EGLBoolean EGLAPIENTRY eglQueryDebugKHR (EGLint attribute, EGLAttrib *value); +EGLAPI EGLint EGLAPIENTRY eglLabelObjectKHR (EGLDisplay display, EGLenum objectType, EGLObjectKHR object, EGLLabelKHR label); +#endif +#endif /* EGL_KHR_debug */ + +#ifndef EGL_KHR_display_reference +#define EGL_KHR_display_reference 1 +#define EGL_TRACK_REFERENCES_KHR 0x3352 +typedef EGLBoolean (EGLAPIENTRYP PFNEGLQUERYDISPLAYATTRIBKHRPROC) (EGLDisplay dpy, EGLint name, EGLAttrib *value); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLBoolean EGLAPIENTRY eglQueryDisplayAttribKHR (EGLDisplay dpy, EGLint name, EGLAttrib *value); +#endif +#endif /* EGL_KHR_display_reference */ + +#ifndef EGL_KHR_fence_sync +#define EGL_KHR_fence_sync 1 +typedef khronos_utime_nanoseconds_t EGLTimeKHR; +#ifdef KHRONOS_SUPPORT_INT64 +#define EGL_SYNC_PRIOR_COMMANDS_COMPLETE_KHR 0x30F0 +#define EGL_SYNC_CONDITION_KHR 0x30F8 +#define EGL_SYNC_FENCE_KHR 0x30F9 +typedef EGLSyncKHR (EGLAPIENTRYP PFNEGLCREATESYNCKHRPROC) (EGLDisplay dpy, EGLenum type, const EGLint *attrib_list); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLDESTROYSYNCKHRPROC) (EGLDisplay dpy, EGLSyncKHR sync); +typedef EGLint (EGLAPIENTRYP PFNEGLCLIENTWAITSYNCKHRPROC) (EGLDisplay dpy, EGLSyncKHR sync, EGLint flags, EGLTimeKHR timeout); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLGETSYNCATTRIBKHRPROC) (EGLDisplay dpy, EGLSyncKHR sync, EGLint attribute, EGLint *value); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLSyncKHR EGLAPIENTRY eglCreateSyncKHR (EGLDisplay dpy, EGLenum type, const EGLint *attrib_list); +EGLAPI EGLBoolean EGLAPIENTRY eglDestroySyncKHR (EGLDisplay dpy, EGLSyncKHR sync); +EGLAPI EGLint EGLAPIENTRY eglClientWaitSyncKHR (EGLDisplay dpy, EGLSyncKHR sync, EGLint flags, EGLTimeKHR timeout); +EGLAPI EGLBoolean EGLAPIENTRY eglGetSyncAttribKHR (EGLDisplay dpy, EGLSyncKHR sync, EGLint attribute, EGLint *value); +#endif +#endif /* KHRONOS_SUPPORT_INT64 */ +#endif /* EGL_KHR_fence_sync */ + +#ifndef EGL_KHR_get_all_proc_addresses +#define EGL_KHR_get_all_proc_addresses 1 +#endif /* EGL_KHR_get_all_proc_addresses */ + +#ifndef EGL_KHR_gl_colorspace +#define EGL_KHR_gl_colorspace 1 +#define EGL_GL_COLORSPACE_KHR 0x309D +#define EGL_GL_COLORSPACE_SRGB_KHR 0x3089 +#define EGL_GL_COLORSPACE_LINEAR_KHR 0x308A +#endif /* EGL_KHR_gl_colorspace */ + +#ifndef EGL_KHR_gl_renderbuffer_image +#define EGL_KHR_gl_renderbuffer_image 1 +#define EGL_GL_RENDERBUFFER_KHR 0x30B9 +#endif /* EGL_KHR_gl_renderbuffer_image */ + +#ifndef EGL_KHR_gl_texture_2D_image +#define EGL_KHR_gl_texture_2D_image 1 +#define EGL_GL_TEXTURE_2D_KHR 0x30B1 +#define EGL_GL_TEXTURE_LEVEL_KHR 0x30BC +#endif /* EGL_KHR_gl_texture_2D_image */ + +#ifndef EGL_KHR_gl_texture_3D_image +#define EGL_KHR_gl_texture_3D_image 1 +#define EGL_GL_TEXTURE_3D_KHR 0x30B2 +#define EGL_GL_TEXTURE_ZOFFSET_KHR 0x30BD +#endif /* EGL_KHR_gl_texture_3D_image */ + +#ifndef EGL_KHR_gl_texture_cubemap_image +#define EGL_KHR_gl_texture_cubemap_image 1 +#define EGL_GL_TEXTURE_CUBE_MAP_POSITIVE_X_KHR 0x30B3 +#define EGL_GL_TEXTURE_CUBE_MAP_NEGATIVE_X_KHR 
0x30B4 +#define EGL_GL_TEXTURE_CUBE_MAP_POSITIVE_Y_KHR 0x30B5 +#define EGL_GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_KHR 0x30B6 +#define EGL_GL_TEXTURE_CUBE_MAP_POSITIVE_Z_KHR 0x30B7 +#define EGL_GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_KHR 0x30B8 +#endif /* EGL_KHR_gl_texture_cubemap_image */ + +#ifndef EGL_KHR_image +#define EGL_KHR_image 1 +typedef void *EGLImageKHR; +#define EGL_NATIVE_PIXMAP_KHR 0x30B0 +#define EGL_NO_IMAGE_KHR EGL_CAST(EGLImageKHR,0) +typedef EGLImageKHR (EGLAPIENTRYP PFNEGLCREATEIMAGEKHRPROC) (EGLDisplay dpy, EGLContext ctx, EGLenum target, EGLClientBuffer buffer, const EGLint *attrib_list); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLDESTROYIMAGEKHRPROC) (EGLDisplay dpy, EGLImageKHR image); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLImageKHR EGLAPIENTRY eglCreateImageKHR (EGLDisplay dpy, EGLContext ctx, EGLenum target, EGLClientBuffer buffer, const EGLint *attrib_list); +EGLAPI EGLBoolean EGLAPIENTRY eglDestroyImageKHR (EGLDisplay dpy, EGLImageKHR image); +#endif +#endif /* EGL_KHR_image */ + +#ifndef EGL_KHR_image_base +#define EGL_KHR_image_base 1 +#define EGL_IMAGE_PRESERVED_KHR 0x30D2 +#endif /* EGL_KHR_image_base */ + +#ifndef EGL_KHR_image_pixmap +#define EGL_KHR_image_pixmap 1 +#endif /* EGL_KHR_image_pixmap */ + +#ifndef EGL_KHR_lock_surface +#define EGL_KHR_lock_surface 1 +#define EGL_READ_SURFACE_BIT_KHR 0x0001 +#define EGL_WRITE_SURFACE_BIT_KHR 0x0002 +#define EGL_LOCK_SURFACE_BIT_KHR 0x0080 +#define EGL_OPTIMAL_FORMAT_BIT_KHR 0x0100 +#define EGL_MATCH_FORMAT_KHR 0x3043 +#define EGL_FORMAT_RGB_565_EXACT_KHR 0x30C0 +#define EGL_FORMAT_RGB_565_KHR 0x30C1 +#define EGL_FORMAT_RGBA_8888_EXACT_KHR 0x30C2 +#define EGL_FORMAT_RGBA_8888_KHR 0x30C3 +#define EGL_MAP_PRESERVE_PIXELS_KHR 0x30C4 +#define EGL_LOCK_USAGE_HINT_KHR 0x30C5 +#define EGL_BITMAP_POINTER_KHR 0x30C6 +#define EGL_BITMAP_PITCH_KHR 0x30C7 +#define EGL_BITMAP_ORIGIN_KHR 0x30C8 +#define EGL_BITMAP_PIXEL_RED_OFFSET_KHR 0x30C9 +#define EGL_BITMAP_PIXEL_GREEN_OFFSET_KHR 0x30CA +#define EGL_BITMAP_PIXEL_BLUE_OFFSET_KHR 0x30CB +#define EGL_BITMAP_PIXEL_ALPHA_OFFSET_KHR 0x30CC +#define EGL_BITMAP_PIXEL_LUMINANCE_OFFSET_KHR 0x30CD +#define EGL_LOWER_LEFT_KHR 0x30CE +#define EGL_UPPER_LEFT_KHR 0x30CF +typedef EGLBoolean (EGLAPIENTRYP PFNEGLLOCKSURFACEKHRPROC) (EGLDisplay dpy, EGLSurface surface, const EGLint *attrib_list); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLUNLOCKSURFACEKHRPROC) (EGLDisplay dpy, EGLSurface surface); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLBoolean EGLAPIENTRY eglLockSurfaceKHR (EGLDisplay dpy, EGLSurface surface, const EGLint *attrib_list); +EGLAPI EGLBoolean EGLAPIENTRY eglUnlockSurfaceKHR (EGLDisplay dpy, EGLSurface surface); +#endif +#endif /* EGL_KHR_lock_surface */ + +#ifndef EGL_KHR_lock_surface2 +#define EGL_KHR_lock_surface2 1 +#define EGL_BITMAP_PIXEL_SIZE_KHR 0x3110 +#endif /* EGL_KHR_lock_surface2 */ + +#ifndef EGL_KHR_lock_surface3 +#define EGL_KHR_lock_surface3 1 +typedef EGLBoolean (EGLAPIENTRYP PFNEGLQUERYSURFACE64KHRPROC) (EGLDisplay dpy, EGLSurface surface, EGLint attribute, EGLAttribKHR *value); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLBoolean EGLAPIENTRY eglQuerySurface64KHR (EGLDisplay dpy, EGLSurface surface, EGLint attribute, EGLAttribKHR *value); +#endif +#endif /* EGL_KHR_lock_surface3 */ + +#ifndef EGL_KHR_mutable_render_buffer +#define EGL_KHR_mutable_render_buffer 1 +#define EGL_MUTABLE_RENDER_BUFFER_BIT_KHR 0x1000 +#endif /* EGL_KHR_mutable_render_buffer */ + +#ifndef EGL_KHR_no_config_context +#define EGL_KHR_no_config_context 1 +#define EGL_NO_CONFIG_KHR EGL_CAST(EGLConfig,0) 
+#endif /* EGL_KHR_no_config_context */ + +#ifndef EGL_KHR_partial_update +#define EGL_KHR_partial_update 1 +#define EGL_BUFFER_AGE_KHR 0x313D +typedef EGLBoolean (EGLAPIENTRYP PFNEGLSETDAMAGEREGIONKHRPROC) (EGLDisplay dpy, EGLSurface surface, EGLint *rects, EGLint n_rects); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLBoolean EGLAPIENTRY eglSetDamageRegionKHR (EGLDisplay dpy, EGLSurface surface, EGLint *rects, EGLint n_rects); +#endif +#endif /* EGL_KHR_partial_update */ + +#ifndef EGL_KHR_platform_android +#define EGL_KHR_platform_android 1 +#define EGL_PLATFORM_ANDROID_KHR 0x3141 +#endif /* EGL_KHR_platform_android */ + +#ifndef EGL_KHR_platform_gbm +#define EGL_KHR_platform_gbm 1 +#define EGL_PLATFORM_GBM_KHR 0x31D7 +#endif /* EGL_KHR_platform_gbm */ + +#ifndef EGL_KHR_platform_wayland +#define EGL_KHR_platform_wayland 1 +#define EGL_PLATFORM_WAYLAND_KHR 0x31D8 +#endif /* EGL_KHR_platform_wayland */ + +#ifndef EGL_KHR_platform_x11 +#define EGL_KHR_platform_x11 1 +#define EGL_PLATFORM_X11_KHR 0x31D5 +#define EGL_PLATFORM_X11_SCREEN_KHR 0x31D6 +#endif /* EGL_KHR_platform_x11 */ + +#ifndef EGL_KHR_reusable_sync +#define EGL_KHR_reusable_sync 1 +#ifdef KHRONOS_SUPPORT_INT64 +#define EGL_SYNC_STATUS_KHR 0x30F1 +#define EGL_SIGNALED_KHR 0x30F2 +#define EGL_UNSIGNALED_KHR 0x30F3 +#define EGL_TIMEOUT_EXPIRED_KHR 0x30F5 +#define EGL_CONDITION_SATISFIED_KHR 0x30F6 +#define EGL_SYNC_TYPE_KHR 0x30F7 +#define EGL_SYNC_REUSABLE_KHR 0x30FA +#define EGL_SYNC_FLUSH_COMMANDS_BIT_KHR 0x0001 +#define EGL_FOREVER_KHR 0xFFFFFFFFFFFFFFFFull +#define EGL_NO_SYNC_KHR EGL_CAST(EGLSyncKHR,0) +typedef EGLBoolean (EGLAPIENTRYP PFNEGLSIGNALSYNCKHRPROC) (EGLDisplay dpy, EGLSyncKHR sync, EGLenum mode); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLBoolean EGLAPIENTRY eglSignalSyncKHR (EGLDisplay dpy, EGLSyncKHR sync, EGLenum mode); +#endif +#endif /* KHRONOS_SUPPORT_INT64 */ +#endif /* EGL_KHR_reusable_sync */ + +#ifndef EGL_KHR_stream +#define EGL_KHR_stream 1 +typedef void *EGLStreamKHR; +typedef khronos_uint64_t EGLuint64KHR; +#ifdef KHRONOS_SUPPORT_INT64 +#define EGL_NO_STREAM_KHR EGL_CAST(EGLStreamKHR,0) +#define EGL_CONSUMER_LATENCY_USEC_KHR 0x3210 +#define EGL_PRODUCER_FRAME_KHR 0x3212 +#define EGL_CONSUMER_FRAME_KHR 0x3213 +#define EGL_STREAM_STATE_KHR 0x3214 +#define EGL_STREAM_STATE_CREATED_KHR 0x3215 +#define EGL_STREAM_STATE_CONNECTING_KHR 0x3216 +#define EGL_STREAM_STATE_EMPTY_KHR 0x3217 +#define EGL_STREAM_STATE_NEW_FRAME_AVAILABLE_KHR 0x3218 +#define EGL_STREAM_STATE_OLD_FRAME_AVAILABLE_KHR 0x3219 +#define EGL_STREAM_STATE_DISCONNECTED_KHR 0x321A +#define EGL_BAD_STREAM_KHR 0x321B +#define EGL_BAD_STATE_KHR 0x321C +typedef EGLStreamKHR (EGLAPIENTRYP PFNEGLCREATESTREAMKHRPROC) (EGLDisplay dpy, const EGLint *attrib_list); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLDESTROYSTREAMKHRPROC) (EGLDisplay dpy, EGLStreamKHR stream); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLSTREAMATTRIBKHRPROC) (EGLDisplay dpy, EGLStreamKHR stream, EGLenum attribute, EGLint value); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLQUERYSTREAMKHRPROC) (EGLDisplay dpy, EGLStreamKHR stream, EGLenum attribute, EGLint *value); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLQUERYSTREAMU64KHRPROC) (EGLDisplay dpy, EGLStreamKHR stream, EGLenum attribute, EGLuint64KHR *value); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLStreamKHR EGLAPIENTRY eglCreateStreamKHR (EGLDisplay dpy, const EGLint *attrib_list); +EGLAPI EGLBoolean EGLAPIENTRY eglDestroyStreamKHR (EGLDisplay dpy, EGLStreamKHR stream); +EGLAPI EGLBoolean EGLAPIENTRY eglStreamAttribKHR (EGLDisplay dpy, EGLStreamKHR 
stream, EGLenum attribute, EGLint value); +EGLAPI EGLBoolean EGLAPIENTRY eglQueryStreamKHR (EGLDisplay dpy, EGLStreamKHR stream, EGLenum attribute, EGLint *value); +EGLAPI EGLBoolean EGLAPIENTRY eglQueryStreamu64KHR (EGLDisplay dpy, EGLStreamKHR stream, EGLenum attribute, EGLuint64KHR *value); +#endif +#endif /* KHRONOS_SUPPORT_INT64 */ +#endif /* EGL_KHR_stream */ + +#ifndef EGL_KHR_stream_attrib +#define EGL_KHR_stream_attrib 1 +#ifdef KHRONOS_SUPPORT_INT64 +typedef EGLStreamKHR (EGLAPIENTRYP PFNEGLCREATESTREAMATTRIBKHRPROC) (EGLDisplay dpy, const EGLAttrib *attrib_list); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLSETSTREAMATTRIBKHRPROC) (EGLDisplay dpy, EGLStreamKHR stream, EGLenum attribute, EGLAttrib value); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLQUERYSTREAMATTRIBKHRPROC) (EGLDisplay dpy, EGLStreamKHR stream, EGLenum attribute, EGLAttrib *value); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLSTREAMCONSUMERACQUIREATTRIBKHRPROC) (EGLDisplay dpy, EGLStreamKHR stream, const EGLAttrib *attrib_list); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLSTREAMCONSUMERRELEASEATTRIBKHRPROC) (EGLDisplay dpy, EGLStreamKHR stream, const EGLAttrib *attrib_list); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLStreamKHR EGLAPIENTRY eglCreateStreamAttribKHR (EGLDisplay dpy, const EGLAttrib *attrib_list); +EGLAPI EGLBoolean EGLAPIENTRY eglSetStreamAttribKHR (EGLDisplay dpy, EGLStreamKHR stream, EGLenum attribute, EGLAttrib value); +EGLAPI EGLBoolean EGLAPIENTRY eglQueryStreamAttribKHR (EGLDisplay dpy, EGLStreamKHR stream, EGLenum attribute, EGLAttrib *value); +EGLAPI EGLBoolean EGLAPIENTRY eglStreamConsumerAcquireAttribKHR (EGLDisplay dpy, EGLStreamKHR stream, const EGLAttrib *attrib_list); +EGLAPI EGLBoolean EGLAPIENTRY eglStreamConsumerReleaseAttribKHR (EGLDisplay dpy, EGLStreamKHR stream, const EGLAttrib *attrib_list); +#endif +#endif /* KHRONOS_SUPPORT_INT64 */ +#endif /* EGL_KHR_stream_attrib */ + +#ifndef EGL_KHR_stream_consumer_gltexture +#define EGL_KHR_stream_consumer_gltexture 1 +#ifdef EGL_KHR_stream +#define EGL_CONSUMER_ACQUIRE_TIMEOUT_USEC_KHR 0x321E +typedef EGLBoolean (EGLAPIENTRYP PFNEGLSTREAMCONSUMERGLTEXTUREEXTERNALKHRPROC) (EGLDisplay dpy, EGLStreamKHR stream); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLSTREAMCONSUMERACQUIREKHRPROC) (EGLDisplay dpy, EGLStreamKHR stream); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLSTREAMCONSUMERRELEASEKHRPROC) (EGLDisplay dpy, EGLStreamKHR stream); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLBoolean EGLAPIENTRY eglStreamConsumerGLTextureExternalKHR (EGLDisplay dpy, EGLStreamKHR stream); +EGLAPI EGLBoolean EGLAPIENTRY eglStreamConsumerAcquireKHR (EGLDisplay dpy, EGLStreamKHR stream); +EGLAPI EGLBoolean EGLAPIENTRY eglStreamConsumerReleaseKHR (EGLDisplay dpy, EGLStreamKHR stream); +#endif +#endif /* EGL_KHR_stream */ +#endif /* EGL_KHR_stream_consumer_gltexture */ + +#ifndef EGL_KHR_stream_cross_process_fd +#define EGL_KHR_stream_cross_process_fd 1 +typedef int EGLNativeFileDescriptorKHR; +#ifdef EGL_KHR_stream +#define EGL_NO_FILE_DESCRIPTOR_KHR EGL_CAST(EGLNativeFileDescriptorKHR,-1) +typedef EGLNativeFileDescriptorKHR (EGLAPIENTRYP PFNEGLGETSTREAMFILEDESCRIPTORKHRPROC) (EGLDisplay dpy, EGLStreamKHR stream); +typedef EGLStreamKHR (EGLAPIENTRYP PFNEGLCREATESTREAMFROMFILEDESCRIPTORKHRPROC) (EGLDisplay dpy, EGLNativeFileDescriptorKHR file_descriptor); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLNativeFileDescriptorKHR EGLAPIENTRY eglGetStreamFileDescriptorKHR (EGLDisplay dpy, EGLStreamKHR stream); +EGLAPI EGLStreamKHR EGLAPIENTRY eglCreateStreamFromFileDescriptorKHR (EGLDisplay dpy, 
EGLNativeFileDescriptorKHR file_descriptor); +#endif +#endif /* EGL_KHR_stream */ +#endif /* EGL_KHR_stream_cross_process_fd */ + +#ifndef EGL_KHR_stream_fifo +#define EGL_KHR_stream_fifo 1 +#ifdef EGL_KHR_stream +#define EGL_STREAM_FIFO_LENGTH_KHR 0x31FC +#define EGL_STREAM_TIME_NOW_KHR 0x31FD +#define EGL_STREAM_TIME_CONSUMER_KHR 0x31FE +#define EGL_STREAM_TIME_PRODUCER_KHR 0x31FF +typedef EGLBoolean (EGLAPIENTRYP PFNEGLQUERYSTREAMTIMEKHRPROC) (EGLDisplay dpy, EGLStreamKHR stream, EGLenum attribute, EGLTimeKHR *value); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLBoolean EGLAPIENTRY eglQueryStreamTimeKHR (EGLDisplay dpy, EGLStreamKHR stream, EGLenum attribute, EGLTimeKHR *value); +#endif +#endif /* EGL_KHR_stream */ +#endif /* EGL_KHR_stream_fifo */ + +#ifndef EGL_KHR_stream_producer_aldatalocator +#define EGL_KHR_stream_producer_aldatalocator 1 +#ifdef EGL_KHR_stream +#endif /* EGL_KHR_stream */ +#endif /* EGL_KHR_stream_producer_aldatalocator */ + +#ifndef EGL_KHR_stream_producer_eglsurface +#define EGL_KHR_stream_producer_eglsurface 1 +#ifdef EGL_KHR_stream +#define EGL_STREAM_BIT_KHR 0x0800 +typedef EGLSurface (EGLAPIENTRYP PFNEGLCREATESTREAMPRODUCERSURFACEKHRPROC) (EGLDisplay dpy, EGLConfig config, EGLStreamKHR stream, const EGLint *attrib_list); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLSurface EGLAPIENTRY eglCreateStreamProducerSurfaceKHR (EGLDisplay dpy, EGLConfig config, EGLStreamKHR stream, const EGLint *attrib_list); +#endif +#endif /* EGL_KHR_stream */ +#endif /* EGL_KHR_stream_producer_eglsurface */ + +#ifndef EGL_KHR_surfaceless_context +#define EGL_KHR_surfaceless_context 1 +#endif /* EGL_KHR_surfaceless_context */ + +#ifndef EGL_KHR_swap_buffers_with_damage +#define EGL_KHR_swap_buffers_with_damage 1 +typedef EGLBoolean (EGLAPIENTRYP PFNEGLSWAPBUFFERSWITHDAMAGEKHRPROC) (EGLDisplay dpy, EGLSurface surface, EGLint *rects, EGLint n_rects); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLBoolean EGLAPIENTRY eglSwapBuffersWithDamageKHR (EGLDisplay dpy, EGLSurface surface, EGLint *rects, EGLint n_rects); +#endif +#endif /* EGL_KHR_swap_buffers_with_damage */ + +#ifndef EGL_KHR_vg_parent_image +#define EGL_KHR_vg_parent_image 1 +#define EGL_VG_PARENT_IMAGE_KHR 0x30BA +#endif /* EGL_KHR_vg_parent_image */ + +#ifndef EGL_KHR_wait_sync +#define EGL_KHR_wait_sync 1 +typedef EGLint (EGLAPIENTRYP PFNEGLWAITSYNCKHRPROC) (EGLDisplay dpy, EGLSyncKHR sync, EGLint flags); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLint EGLAPIENTRY eglWaitSyncKHR (EGLDisplay dpy, EGLSyncKHR sync, EGLint flags); +#endif +#endif /* EGL_KHR_wait_sync */ + +#ifndef EGL_ANDROID_blob_cache +#define EGL_ANDROID_blob_cache 1 +typedef khronos_ssize_t EGLsizeiANDROID; +typedef void (*EGLSetBlobFuncANDROID) (const void *key, EGLsizeiANDROID keySize, const void *value, EGLsizeiANDROID valueSize); +typedef EGLsizeiANDROID (*EGLGetBlobFuncANDROID) (const void *key, EGLsizeiANDROID keySize, void *value, EGLsizeiANDROID valueSize); +typedef void (EGLAPIENTRYP PFNEGLSETBLOBCACHEFUNCSANDROIDPROC) (EGLDisplay dpy, EGLSetBlobFuncANDROID set, EGLGetBlobFuncANDROID get); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI void EGLAPIENTRY eglSetBlobCacheFuncsANDROID (EGLDisplay dpy, EGLSetBlobFuncANDROID set, EGLGetBlobFuncANDROID get); +#endif +#endif /* EGL_ANDROID_blob_cache */ + +#ifndef EGL_ANDROID_create_native_client_buffer +#define EGL_ANDROID_create_native_client_buffer 1 +#define EGL_NATIVE_BUFFER_USAGE_ANDROID 0x3143 +#define EGL_NATIVE_BUFFER_USAGE_PROTECTED_BIT_ANDROID 0x00000001 +#define EGL_NATIVE_BUFFER_USAGE_RENDERBUFFER_BIT_ANDROID 
0x00000002 +#define EGL_NATIVE_BUFFER_USAGE_TEXTURE_BIT_ANDROID 0x00000004 +typedef EGLClientBuffer (EGLAPIENTRYP PFNEGLCREATENATIVECLIENTBUFFERANDROIDPROC) (const EGLint *attrib_list); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLClientBuffer EGLAPIENTRY eglCreateNativeClientBufferANDROID (const EGLint *attrib_list); +#endif +#endif /* EGL_ANDROID_create_native_client_buffer */ + +#ifndef EGL_ANDROID_framebuffer_target +#define EGL_ANDROID_framebuffer_target 1 +#define EGL_FRAMEBUFFER_TARGET_ANDROID 0x3147 +#endif /* EGL_ANDROID_framebuffer_target */ + +#ifndef EGL_ANDROID_front_buffer_auto_refresh +#define EGL_ANDROID_front_buffer_auto_refresh 1 +#define EGL_FRONT_BUFFER_AUTO_REFRESH_ANDROID 0x314C +#endif /* EGL_ANDROID_front_buffer_auto_refresh */ + +#ifndef EGL_ANDROID_image_crop +#define EGL_ANDROID_image_crop 1 +#define EGL_IMAGE_CROP_LEFT_ANDROID 0x3148 +#define EGL_IMAGE_CROP_TOP_ANDROID 0x3149 +#define EGL_IMAGE_CROP_RIGHT_ANDROID 0x314A +#define EGL_IMAGE_CROP_BOTTOM_ANDROID 0x314B +#endif + +#ifndef EGL_ANDROID_image_native_buffer +#define EGL_ANDROID_image_native_buffer 1 +#define EGL_NATIVE_BUFFER_ANDROID 0x3140 +#endif /* EGL_ANDROID_image_native_buffer */ + +#ifndef EGL_KHR_mutable_render_buffer +#define EGL_KHR_mutable_render_buffer 1 +#define EGL_MUTABLE_RENDER_BUFFER_BIT_KHR 0x1000 +#endif + +#ifndef EGL_ANDROID_native_fence_sync +#define EGL_ANDROID_native_fence_sync 1 +#define EGL_SYNC_NATIVE_FENCE_ANDROID 0x3144 +#define EGL_SYNC_NATIVE_FENCE_FD_ANDROID 0x3145 +#define EGL_SYNC_NATIVE_FENCE_SIGNALED_ANDROID 0x3146 +#define EGL_NO_NATIVE_FENCE_FD_ANDROID -1 +typedef EGLint (EGLAPIENTRYP PFNEGLDUPNATIVEFENCEFDANDROIDPROC) (EGLDisplay dpy, EGLSyncKHR sync); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLint EGLAPIENTRY eglDupNativeFenceFDANDROID (EGLDisplay dpy, EGLSyncKHR sync); +#endif +#endif /* EGL_ANDROID_native_fence_sync */ + +#ifndef EGL_ANDROID_presentation_time +#define EGL_ANDROID_presentation_time 1 +typedef khronos_stime_nanoseconds_t EGLnsecsANDROID; +typedef EGLBoolean (EGLAPIENTRYP PFNEGLPRESENTATIONTIMEANDROIDPROC) (EGLDisplay dpy, EGLSurface surface, EGLnsecsANDROID time); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLBoolean EGLAPIENTRY eglPresentationTimeANDROID (EGLDisplay dpy, EGLSurface surface, EGLnsecsANDROID time); +#endif +#endif /* EGL_ANDROID_presentation_time */ + +#ifndef EGL_ANDROID_get_frame_timestamps +#define EGL_ANDROID_get_frame_timestamps 1 +#define EGL_TIMESTAMPS_ANDROID 0x3430 +#define EGL_COMPOSITE_DEADLINE_ANDROID 0x3431 +#define EGL_COMPOSITE_INTERVAL_ANDROID 0x3432 +#define EGL_COMPOSITE_TO_PRESENT_LATENCY_ANDROID 0x3433 +#define EGL_REQUESTED_PRESENT_TIME_ANDROID 0x3434 +#define EGL_RENDERING_COMPLETE_TIME_ANDROID 0x3435 +#define EGL_COMPOSITION_LATCH_TIME_ANDROID 0x3436 +#define EGL_FIRST_COMPOSITION_START_TIME_ANDROID 0x3437 +#define EGL_LAST_COMPOSITION_START_TIME_ANDROID 0x3438 +#define EGL_FIRST_COMPOSITION_GPU_FINISHED_TIME_ANDROID 0x3439 +#define EGL_DISPLAY_PRESENT_TIME_ANDROID 0x343A +#define EGL_DEQUEUE_READY_TIME_ANDROID 0x343B +#define EGL_READS_DONE_TIME_ANDROID 0x343C +#define EGL_TIMESTAMP_PENDING_ANDROID EGL_CAST(EGLnsecsANDROID, -2) +#define EGL_TIMESTAMP_INVALID_ANDROID EGL_CAST(EGLnsecsANDROID, -1) +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLBoolean eglGetNextFrameIdANDROID(EGLDisplay dpy, EGLSurface surface, EGLuint64KHR *frameId); +EGLAPI EGLBoolean eglGetCompositorTimingANDROID(EGLDisplay dpy, EGLSurface surface, EGLint numTimestamps, const EGLint *names, EGLnsecsANDROID *values); +EGLAPI EGLBoolean 
eglGetCompositorTimingSupportedANDROID(EGLDisplay dpy, EGLSurface surface, EGLint name); +EGLAPI EGLBoolean eglGetFrameTimestampsANDROID(EGLDisplay dpy, EGLSurface surface, EGLuint64KHR frameId, EGLint numTimestamps, const EGLint *timestamps, EGLnsecsANDROID *values); +EGLAPI EGLBoolean eglGetFrameTimestampSupportedANDROID(EGLDisplay dpy, EGLSurface surface, EGLint timestamp); +#else +typedef EGLBoolean (EGLAPIENTRYP PFNEGLGETNEXTFRAMEIDANDROID) (EGLDisplay dpy, EGLSurface surface, EGLuint64KHR *frameId); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLGETCOMPOSITORTIMINGANDROID) (EGLDisplay dpy, EGLSurface surface, EGLint numTimestamps, const EGLint *names, EGLnsecsANDROID *values); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLGETCOMPOSITORTIMINGSUPPORTEDANDROID) (EGLDisplay dpy, EGLSurface surface, EGLint name); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLGETFRAMETIMESTAMPSANDROID) (EGLDisplay dpy, EGLSurface surface, EGLuint64KHR frameId, EGLint numTimestamps, const EGLint *timestamps, EGLnsecsANDROID *values); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLGETFRAMETIMESTAMPSUPPORTEDANDROID) (EGLDisplay dpy, EGLSurface surface, EGLint timestamp); +#endif +#endif + +#ifndef EGL_ANDROID_recordable +#define EGL_ANDROID_recordable 1 +#define EGL_RECORDABLE_ANDROID 0x3142 +#endif /* EGL_ANDROID_recordable */ + +#ifndef EGL_ANGLE_d3d_share_handle_client_buffer +#define EGL_ANGLE_d3d_share_handle_client_buffer 1 +#define EGL_D3D_TEXTURE_2D_SHARE_HANDLE_ANGLE 0x3200 +#endif /* EGL_ANGLE_d3d_share_handle_client_buffer */ + +#ifndef EGL_ANGLE_device_d3d +#define EGL_ANGLE_device_d3d 1 +#define EGL_D3D9_DEVICE_ANGLE 0x33A0 +#define EGL_D3D11_DEVICE_ANGLE 0x33A1 +#endif /* EGL_ANGLE_device_d3d */ + +#ifndef EGL_ANGLE_query_surface_pointer +#define EGL_ANGLE_query_surface_pointer 1 +typedef EGLBoolean (EGLAPIENTRYP PFNEGLQUERYSURFACEPOINTERANGLEPROC) (EGLDisplay dpy, EGLSurface surface, EGLint attribute, void **value); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLBoolean EGLAPIENTRY eglQuerySurfacePointerANGLE (EGLDisplay dpy, EGLSurface surface, EGLint attribute, void **value); +#endif +#endif /* EGL_ANGLE_query_surface_pointer */ + +#ifndef EGL_ANGLE_surface_d3d_texture_2d_share_handle +#define EGL_ANGLE_surface_d3d_texture_2d_share_handle 1 +#endif /* EGL_ANGLE_surface_d3d_texture_2d_share_handle */ + +#ifndef EGL_ANGLE_window_fixed_size +#define EGL_ANGLE_window_fixed_size 1 +#define EGL_FIXED_SIZE_ANGLE 0x3201 +#endif /* EGL_ANGLE_window_fixed_size */ + +#ifndef EGL_ARM_implicit_external_sync +#define EGL_ARM_implicit_external_sync 1 +#define EGL_SYNC_PRIOR_COMMANDS_IMPLICIT_EXTERNAL_ARM 0x328A +#endif /* EGL_ARM_implicit_external_sync */ + +#ifndef EGL_ARM_pixmap_multisample_discard +#define EGL_ARM_pixmap_multisample_discard 1 +#define EGL_DISCARD_SAMPLES_ARM 0x3286 +#endif /* EGL_ARM_pixmap_multisample_discard */ + +#ifndef EGL_EXT_bind_to_front +#define EGL_EXT_bind_to_front 1 +#define EGL_FRONT_BUFFER_EXT 0x3464 +#endif /* EGL_EXT_bind_to_front */ + +#ifndef EGL_EXT_buffer_age +#define EGL_EXT_buffer_age 1 +#define EGL_BUFFER_AGE_EXT 0x313D +#endif /* EGL_EXT_buffer_age */ + +#ifndef EGL_EXT_client_extensions +#define EGL_EXT_client_extensions 1 +#endif /* EGL_EXT_client_extensions */ + +#ifndef EGL_EXT_compositor +#define EGL_EXT_compositor 1 +#define EGL_PRIMARY_COMPOSITOR_CONTEXT_EXT 0x3460 +#define EGL_EXTERNAL_REF_ID_EXT 0x3461 +#define EGL_COMPOSITOR_DROP_NEWEST_FRAME_EXT 0x3462 +#define EGL_COMPOSITOR_KEEP_NEWEST_FRAME_EXT 0x3463 +typedef EGLBoolean (EGLAPIENTRYP PFNEGLCOMPOSITORSETCONTEXTLISTEXTPROC) (const 
EGLint *external_ref_ids, EGLint num_entries); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLCOMPOSITORSETCONTEXTATTRIBUTESEXTPROC) (EGLint external_ref_id, const EGLint *context_attributes, EGLint num_entries); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLCOMPOSITORSETWINDOWLISTEXTPROC) (EGLint external_ref_id, const EGLint *external_win_ids, EGLint num_entries); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLCOMPOSITORSETWINDOWATTRIBUTESEXTPROC) (EGLint external_win_id, const EGLint *window_attributes, EGLint num_entries); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLCOMPOSITORBINDTEXWINDOWEXTPROC) (EGLint external_win_id); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLCOMPOSITORSETSIZEEXTPROC) (EGLint external_win_id, EGLint width, EGLint height); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLCOMPOSITORSWAPPOLICYEXTPROC) (EGLint external_win_id, EGLint policy); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLBoolean EGLAPIENTRY eglCompositorSetContextListEXT (const EGLint *external_ref_ids, EGLint num_entries); +EGLAPI EGLBoolean EGLAPIENTRY eglCompositorSetContextAttributesEXT (EGLint external_ref_id, const EGLint *context_attributes, EGLint num_entries); +EGLAPI EGLBoolean EGLAPIENTRY eglCompositorSetWindowListEXT (EGLint external_ref_id, const EGLint *external_win_ids, EGLint num_entries); +EGLAPI EGLBoolean EGLAPIENTRY eglCompositorSetWindowAttributesEXT (EGLint external_win_id, const EGLint *window_attributes, EGLint num_entries); +EGLAPI EGLBoolean EGLAPIENTRY eglCompositorBindTexWindowEXT (EGLint external_win_id); +EGLAPI EGLBoolean EGLAPIENTRY eglCompositorSetSizeEXT (EGLint external_win_id, EGLint width, EGLint height); +EGLAPI EGLBoolean EGLAPIENTRY eglCompositorSwapPolicyEXT (EGLint external_win_id, EGLint policy); +#endif +#endif /* EGL_EXT_compositor */ + +#ifndef EGL_EXT_create_context_robustness +#define EGL_EXT_create_context_robustness 1 +#define EGL_CONTEXT_OPENGL_ROBUST_ACCESS_EXT 0x30BF +#define EGL_CONTEXT_OPENGL_RESET_NOTIFICATION_STRATEGY_EXT 0x3138 +#define EGL_NO_RESET_NOTIFICATION_EXT 0x31BE +#define EGL_LOSE_CONTEXT_ON_RESET_EXT 0x31BF +#endif /* EGL_EXT_create_context_robustness */ + +#ifndef EGL_EXT_device_base +#define EGL_EXT_device_base 1 +typedef void *EGLDeviceEXT; +#define EGL_NO_DEVICE_EXT EGL_CAST(EGLDeviceEXT,0) +#define EGL_BAD_DEVICE_EXT 0x322B +#define EGL_DEVICE_EXT 0x322C +typedef EGLBoolean (EGLAPIENTRYP PFNEGLQUERYDEVICEATTRIBEXTPROC) (EGLDeviceEXT device, EGLint attribute, EGLAttrib *value); +typedef const char *(EGLAPIENTRYP PFNEGLQUERYDEVICESTRINGEXTPROC) (EGLDeviceEXT device, EGLint name); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLQUERYDEVICESEXTPROC) (EGLint max_devices, EGLDeviceEXT *devices, EGLint *num_devices); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLQUERYDISPLAYATTRIBEXTPROC) (EGLDisplay dpy, EGLint attribute, EGLAttrib *value); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLBoolean EGLAPIENTRY eglQueryDeviceAttribEXT (EGLDeviceEXT device, EGLint attribute, EGLAttrib *value); +EGLAPI const char *EGLAPIENTRY eglQueryDeviceStringEXT (EGLDeviceEXT device, EGLint name); +EGLAPI EGLBoolean EGLAPIENTRY eglQueryDevicesEXT (EGLint max_devices, EGLDeviceEXT *devices, EGLint *num_devices); +EGLAPI EGLBoolean EGLAPIENTRY eglQueryDisplayAttribEXT (EGLDisplay dpy, EGLint attribute, EGLAttrib *value); +#endif +#endif /* EGL_EXT_device_base */ + +#ifndef EGL_EXT_device_drm +#define EGL_EXT_device_drm 1 +#define EGL_DRM_DEVICE_FILE_EXT 0x3233 +#endif /* EGL_EXT_device_drm */ + +#ifndef EGL_EXT_device_enumeration +#define EGL_EXT_device_enumeration 1 +#endif /* EGL_EXT_device_enumeration */ + 
+#ifndef EGL_EXT_device_openwf +#define EGL_EXT_device_openwf 1 +#define EGL_OPENWF_DEVICE_ID_EXT 0x3237 +#endif /* EGL_EXT_device_openwf */ + +#ifndef EGL_EXT_device_query +#define EGL_EXT_device_query 1 +#endif /* EGL_EXT_device_query */ + +#ifndef EGL_EXT_gl_colorspace_bt2020_linear +#define EGL_EXT_gl_colorspace_bt2020_linear 1 +#define EGL_GL_COLORSPACE_BT2020_LINEAR_EXT 0x333F +#endif /* EGL_EXT_gl_colorspace_bt2020_linear */ + +#ifndef EGL_EXT_gl_colorspace_bt2020_pq +#define EGL_EXT_gl_colorspace_bt2020_pq 1 +#define EGL_GL_COLORSPACE_BT2020_PQ_EXT 0x3340 +#endif /* EGL_EXT_gl_colorspace_bt2020_pq */ + +#ifndef EGL_EXT_gl_colorspace_display_p3 +#define EGL_EXT_gl_colorspace_display_p3 1 +#define EGL_GL_COLORSPACE_DISPLAY_P3_EXT 0x3363 +#endif /* EGL_EXT_gl_colorspace_display_p3 */ + +#ifndef EGL_EXT_gl_colorspace_display_p3_linear +#define EGL_EXT_gl_colorspace_display_p3_linear 1 +#define EGL_GL_COLORSPACE_DISPLAY_P3_LINEAR_EXT 0x3362 +#endif /* EGL_EXT_gl_colorspace_display_p3_linear */ + +#ifndef EGL_EXT_gl_colorspace_scrgb +#define EGL_EXT_gl_colorspace_scrgb 1 +#define EGL_GL_COLORSPACE_SCRGB_EXT 0x3351 +#endif /* EGL_EXT_gl_colorspace_scrgb */ + +#ifndef EGL_EXT_gl_colorspace_scrgb_linear +#define EGL_EXT_gl_colorspace_scrgb_linear 1 +#define EGL_GL_COLORSPACE_SCRGB_LINEAR_EXT 0x3350 +#endif /* EGL_EXT_gl_colorspace_scrgb_linear */ + +#ifndef EGL_EXT_image_dma_buf_import +#define EGL_EXT_image_dma_buf_import 1 +#define EGL_LINUX_DMA_BUF_EXT 0x3270 +#define EGL_LINUX_DRM_FOURCC_EXT 0x3271 +#define EGL_DMA_BUF_PLANE0_FD_EXT 0x3272 +#define EGL_DMA_BUF_PLANE0_OFFSET_EXT 0x3273 +#define EGL_DMA_BUF_PLANE0_PITCH_EXT 0x3274 +#define EGL_DMA_BUF_PLANE1_FD_EXT 0x3275 +#define EGL_DMA_BUF_PLANE1_OFFSET_EXT 0x3276 +#define EGL_DMA_BUF_PLANE1_PITCH_EXT 0x3277 +#define EGL_DMA_BUF_PLANE2_FD_EXT 0x3278 +#define EGL_DMA_BUF_PLANE2_OFFSET_EXT 0x3279 +#define EGL_DMA_BUF_PLANE2_PITCH_EXT 0x327A +#define EGL_YUV_COLOR_SPACE_HINT_EXT 0x327B +#define EGL_SAMPLE_RANGE_HINT_EXT 0x327C +#define EGL_YUV_CHROMA_HORIZONTAL_SITING_HINT_EXT 0x327D +#define EGL_YUV_CHROMA_VERTICAL_SITING_HINT_EXT 0x327E +#define EGL_ITU_REC601_EXT 0x327F +#define EGL_ITU_REC709_EXT 0x3280 +#define EGL_ITU_REC2020_EXT 0x3281 +#define EGL_YUV_FULL_RANGE_EXT 0x3282 +#define EGL_YUV_NARROW_RANGE_EXT 0x3283 +#define EGL_YUV_CHROMA_SITING_0_EXT 0x3284 +#define EGL_YUV_CHROMA_SITING_0_5_EXT 0x3285 +#endif /* EGL_EXT_image_dma_buf_import */ + +#ifndef EGL_EXT_image_dma_buf_import_modifiers +#define EGL_EXT_image_dma_buf_import_modifiers 1 +#define EGL_DMA_BUF_PLANE3_FD_EXT 0x3440 +#define EGL_DMA_BUF_PLANE3_OFFSET_EXT 0x3441 +#define EGL_DMA_BUF_PLANE3_PITCH_EXT 0x3442 +#define EGL_DMA_BUF_PLANE0_MODIFIER_LO_EXT 0x3443 +#define EGL_DMA_BUF_PLANE0_MODIFIER_HI_EXT 0x3444 +#define EGL_DMA_BUF_PLANE1_MODIFIER_LO_EXT 0x3445 +#define EGL_DMA_BUF_PLANE1_MODIFIER_HI_EXT 0x3446 +#define EGL_DMA_BUF_PLANE2_MODIFIER_LO_EXT 0x3447 +#define EGL_DMA_BUF_PLANE2_MODIFIER_HI_EXT 0x3448 +#define EGL_DMA_BUF_PLANE3_MODIFIER_LO_EXT 0x3449 +#define EGL_DMA_BUF_PLANE3_MODIFIER_HI_EXT 0x344A +typedef EGLBoolean (EGLAPIENTRYP PFNEGLQUERYDMABUFFORMATSEXTPROC) (EGLDisplay dpy, EGLint max_formats, EGLint *formats, EGLint *num_formats); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLQUERYDMABUFMODIFIERSEXTPROC) (EGLDisplay dpy, EGLint format, EGLint max_modifiers, EGLuint64KHR *modifiers, EGLBoolean *external_only, EGLint *num_modifiers); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLBoolean EGLAPIENTRY eglQueryDmaBufFormatsEXT (EGLDisplay dpy, EGLint max_formats, 
EGLint *formats, EGLint *num_formats); +EGLAPI EGLBoolean EGLAPIENTRY eglQueryDmaBufModifiersEXT (EGLDisplay dpy, EGLint format, EGLint max_modifiers, EGLuint64KHR *modifiers, EGLBoolean *external_only, EGLint *num_modifiers); +#endif +#endif /* EGL_EXT_image_dma_buf_import_modifiers */ + +#ifndef EGL_EXT_image_implicit_sync_control +#define EGL_EXT_image_implicit_sync_control 1 +#define EGL_IMPORT_SYNC_TYPE_EXT 0x3470 +#define EGL_IMPORT_IMPLICIT_SYNC_EXT 0x3471 +#define EGL_IMPORT_EXPLICIT_SYNC_EXT 0x3472 +#endif /* EGL_EXT_image_implicit_sync_control */ + +#ifndef EGL_EXT_multiview_window +#define EGL_EXT_multiview_window 1 +#define EGL_MULTIVIEW_VIEW_COUNT_EXT 0x3134 +#endif /* EGL_EXT_multiview_window */ + +#ifndef EGL_EXT_output_base +#define EGL_EXT_output_base 1 +typedef void *EGLOutputLayerEXT; +typedef void *EGLOutputPortEXT; +#define EGL_NO_OUTPUT_LAYER_EXT EGL_CAST(EGLOutputLayerEXT,0) +#define EGL_NO_OUTPUT_PORT_EXT EGL_CAST(EGLOutputPortEXT,0) +#define EGL_BAD_OUTPUT_LAYER_EXT 0x322D +#define EGL_BAD_OUTPUT_PORT_EXT 0x322E +#define EGL_SWAP_INTERVAL_EXT 0x322F +typedef EGLBoolean (EGLAPIENTRYP PFNEGLGETOUTPUTLAYERSEXTPROC) (EGLDisplay dpy, const EGLAttrib *attrib_list, EGLOutputLayerEXT *layers, EGLint max_layers, EGLint *num_layers); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLGETOUTPUTPORTSEXTPROC) (EGLDisplay dpy, const EGLAttrib *attrib_list, EGLOutputPortEXT *ports, EGLint max_ports, EGLint *num_ports); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLOUTPUTLAYERATTRIBEXTPROC) (EGLDisplay dpy, EGLOutputLayerEXT layer, EGLint attribute, EGLAttrib value); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLQUERYOUTPUTLAYERATTRIBEXTPROC) (EGLDisplay dpy, EGLOutputLayerEXT layer, EGLint attribute, EGLAttrib *value); +typedef const char *(EGLAPIENTRYP PFNEGLQUERYOUTPUTLAYERSTRINGEXTPROC) (EGLDisplay dpy, EGLOutputLayerEXT layer, EGLint name); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLOUTPUTPORTATTRIBEXTPROC) (EGLDisplay dpy, EGLOutputPortEXT port, EGLint attribute, EGLAttrib value); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLQUERYOUTPUTPORTATTRIBEXTPROC) (EGLDisplay dpy, EGLOutputPortEXT port, EGLint attribute, EGLAttrib *value); +typedef const char *(EGLAPIENTRYP PFNEGLQUERYOUTPUTPORTSTRINGEXTPROC) (EGLDisplay dpy, EGLOutputPortEXT port, EGLint name); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLBoolean EGLAPIENTRY eglGetOutputLayersEXT (EGLDisplay dpy, const EGLAttrib *attrib_list, EGLOutputLayerEXT *layers, EGLint max_layers, EGLint *num_layers); +EGLAPI EGLBoolean EGLAPIENTRY eglGetOutputPortsEXT (EGLDisplay dpy, const EGLAttrib *attrib_list, EGLOutputPortEXT *ports, EGLint max_ports, EGLint *num_ports); +EGLAPI EGLBoolean EGLAPIENTRY eglOutputLayerAttribEXT (EGLDisplay dpy, EGLOutputLayerEXT layer, EGLint attribute, EGLAttrib value); +EGLAPI EGLBoolean EGLAPIENTRY eglQueryOutputLayerAttribEXT (EGLDisplay dpy, EGLOutputLayerEXT layer, EGLint attribute, EGLAttrib *value); +EGLAPI const char *EGLAPIENTRY eglQueryOutputLayerStringEXT (EGLDisplay dpy, EGLOutputLayerEXT layer, EGLint name); +EGLAPI EGLBoolean EGLAPIENTRY eglOutputPortAttribEXT (EGLDisplay dpy, EGLOutputPortEXT port, EGLint attribute, EGLAttrib value); +EGLAPI EGLBoolean EGLAPIENTRY eglQueryOutputPortAttribEXT (EGLDisplay dpy, EGLOutputPortEXT port, EGLint attribute, EGLAttrib *value); +EGLAPI const char *EGLAPIENTRY eglQueryOutputPortStringEXT (EGLDisplay dpy, EGLOutputPortEXT port, EGLint name); +#endif +#endif /* EGL_EXT_output_base */ + +#ifndef EGL_EXT_output_drm +#define EGL_EXT_output_drm 1 +#define EGL_DRM_CRTC_EXT 0x3234 +#define 
EGL_DRM_PLANE_EXT 0x3235 +#define EGL_DRM_CONNECTOR_EXT 0x3236 +#endif /* EGL_EXT_output_drm */ + +#ifndef EGL_EXT_output_openwf +#define EGL_EXT_output_openwf 1 +#define EGL_OPENWF_PIPELINE_ID_EXT 0x3238 +#define EGL_OPENWF_PORT_ID_EXT 0x3239 +#endif /* EGL_EXT_output_openwf */ + +#ifndef EGL_EXT_pixel_format_float +#define EGL_EXT_pixel_format_float 1 +#define EGL_COLOR_COMPONENT_TYPE_EXT 0x3339 +#define EGL_COLOR_COMPONENT_TYPE_FIXED_EXT 0x333A +#define EGL_COLOR_COMPONENT_TYPE_FLOAT_EXT 0x333B +#endif /* EGL_EXT_pixel_format_float */ + +#ifndef EGL_EXT_platform_base +#define EGL_EXT_platform_base 1 +typedef EGLDisplay (EGLAPIENTRYP PFNEGLGETPLATFORMDISPLAYEXTPROC) (EGLenum platform, void *native_display, const EGLint *attrib_list); +typedef EGLSurface (EGLAPIENTRYP PFNEGLCREATEPLATFORMWINDOWSURFACEEXTPROC) (EGLDisplay dpy, EGLConfig config, void *native_window, const EGLint *attrib_list); +typedef EGLSurface (EGLAPIENTRYP PFNEGLCREATEPLATFORMPIXMAPSURFACEEXTPROC) (EGLDisplay dpy, EGLConfig config, void *native_pixmap, const EGLint *attrib_list); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLDisplay EGLAPIENTRY eglGetPlatformDisplayEXT (EGLenum platform, void *native_display, const EGLint *attrib_list); +EGLAPI EGLSurface EGLAPIENTRY eglCreatePlatformWindowSurfaceEXT (EGLDisplay dpy, EGLConfig config, void *native_window, const EGLint *attrib_list); +EGLAPI EGLSurface EGLAPIENTRY eglCreatePlatformPixmapSurfaceEXT (EGLDisplay dpy, EGLConfig config, void *native_pixmap, const EGLint *attrib_list); +#endif +#endif /* EGL_EXT_platform_base */ + +#ifndef EGL_EXT_platform_device +#define EGL_EXT_platform_device 1 +#define EGL_PLATFORM_DEVICE_EXT 0x313F +#endif /* EGL_EXT_platform_device */ + +#ifndef EGL_EXT_platform_wayland +#define EGL_EXT_platform_wayland 1 +#define EGL_PLATFORM_WAYLAND_EXT 0x31D8 +#endif /* EGL_EXT_platform_wayland */ + +#ifndef EGL_EXT_platform_x11 +#define EGL_EXT_platform_x11 1 +#define EGL_PLATFORM_X11_EXT 0x31D5 +#define EGL_PLATFORM_X11_SCREEN_EXT 0x31D6 +#endif /* EGL_EXT_platform_x11 */ + +#ifndef EGL_EXT_protected_content +#define EGL_EXT_protected_content 1 +#define EGL_PROTECTED_CONTENT_EXT 0x32C0 +#endif /* EGL_EXT_protected_content */ + +#ifndef EGL_EXT_protected_surface +#define EGL_EXT_protected_surface 1 +#endif /* EGL_EXT_protected_surface */ + +#ifndef EGL_EXT_stream_consumer_egloutput +#define EGL_EXT_stream_consumer_egloutput 1 +typedef EGLBoolean (EGLAPIENTRYP PFNEGLSTREAMCONSUMEROUTPUTEXTPROC) (EGLDisplay dpy, EGLStreamKHR stream, EGLOutputLayerEXT layer); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLBoolean EGLAPIENTRY eglStreamConsumerOutputEXT (EGLDisplay dpy, EGLStreamKHR stream, EGLOutputLayerEXT layer); +#endif +#endif /* EGL_EXT_stream_consumer_egloutput */ + +#ifndef EGL_EXT_surface_CTA861_3_metadata +#define EGL_EXT_surface_CTA861_3_metadata 1 +#define EGL_CTA861_3_MAX_CONTENT_LIGHT_LEVEL_EXT 0x3360 +#define EGL_CTA861_3_MAX_FRAME_AVERAGE_LEVEL_EXT 0x3361 +#endif /* EGL_EXT_surface_CTA861_3_metadata */ + +#ifndef EGL_EXT_surface_SMPTE2086_metadata +#define EGL_EXT_surface_SMPTE2086_metadata 1 +#define EGL_SMPTE2086_DISPLAY_PRIMARY_RX_EXT 0x3341 +#define EGL_SMPTE2086_DISPLAY_PRIMARY_RY_EXT 0x3342 +#define EGL_SMPTE2086_DISPLAY_PRIMARY_GX_EXT 0x3343 +#define EGL_SMPTE2086_DISPLAY_PRIMARY_GY_EXT 0x3344 +#define EGL_SMPTE2086_DISPLAY_PRIMARY_BX_EXT 0x3345 +#define EGL_SMPTE2086_DISPLAY_PRIMARY_BY_EXT 0x3346 +#define EGL_SMPTE2086_WHITE_POINT_X_EXT 0x3347 +#define EGL_SMPTE2086_WHITE_POINT_Y_EXT 0x3348 +#define EGL_SMPTE2086_MAX_LUMINANCE_EXT 0x3349 
+#define EGL_SMPTE2086_MIN_LUMINANCE_EXT 0x334A +#define EGL_METADATA_SCALING_EXT 50000 +#endif /* EGL_EXT_surface_SMPTE2086_metadata */ + +#ifndef EGL_EXT_swap_buffers_with_damage +#define EGL_EXT_swap_buffers_with_damage 1 +typedef EGLBoolean (EGLAPIENTRYP PFNEGLSWAPBUFFERSWITHDAMAGEEXTPROC) (EGLDisplay dpy, EGLSurface surface, EGLint *rects, EGLint n_rects); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLBoolean EGLAPIENTRY eglSwapBuffersWithDamageEXT (EGLDisplay dpy, EGLSurface surface, EGLint *rects, EGLint n_rects); +#endif +#endif /* EGL_EXT_swap_buffers_with_damage */ + +#ifndef EGL_EXT_yuv_surface +#define EGL_EXT_yuv_surface 1 +#define EGL_YUV_ORDER_EXT 0x3301 +#define EGL_YUV_NUMBER_OF_PLANES_EXT 0x3311 +#define EGL_YUV_SUBSAMPLE_EXT 0x3312 +#define EGL_YUV_DEPTH_RANGE_EXT 0x3317 +#define EGL_YUV_CSC_STANDARD_EXT 0x330A +#define EGL_YUV_PLANE_BPP_EXT 0x331A +#define EGL_YUV_BUFFER_EXT 0x3300 +#define EGL_YUV_ORDER_YUV_EXT 0x3302 +#define EGL_YUV_ORDER_YVU_EXT 0x3303 +#define EGL_YUV_ORDER_YUYV_EXT 0x3304 +#define EGL_YUV_ORDER_UYVY_EXT 0x3305 +#define EGL_YUV_ORDER_YVYU_EXT 0x3306 +#define EGL_YUV_ORDER_VYUY_EXT 0x3307 +#define EGL_YUV_ORDER_AYUV_EXT 0x3308 +#define EGL_YUV_SUBSAMPLE_4_2_0_EXT 0x3313 +#define EGL_YUV_SUBSAMPLE_4_2_2_EXT 0x3314 +#define EGL_YUV_SUBSAMPLE_4_4_4_EXT 0x3315 +#define EGL_YUV_DEPTH_RANGE_LIMITED_EXT 0x3318 +#define EGL_YUV_DEPTH_RANGE_FULL_EXT 0x3319 +#define EGL_YUV_CSC_STANDARD_601_EXT 0x330B +#define EGL_YUV_CSC_STANDARD_709_EXT 0x330C +#define EGL_YUV_CSC_STANDARD_2020_EXT 0x330D +#define EGL_YUV_PLANE_BPP_0_EXT 0x331B +#define EGL_YUV_PLANE_BPP_8_EXT 0x331C +#define EGL_YUV_PLANE_BPP_10_EXT 0x331D +#endif /* EGL_EXT_yuv_surface */ + +#ifndef EGL_HI_clientpixmap +#define EGL_HI_clientpixmap 1 +struct EGLClientPixmapHI { + void *pData; + EGLint iWidth; + EGLint iHeight; + EGLint iStride; +}; +#define EGL_CLIENT_PIXMAP_POINTER_HI 0x8F74 +typedef EGLSurface (EGLAPIENTRYP PFNEGLCREATEPIXMAPSURFACEHIPROC) (EGLDisplay dpy, EGLConfig config, struct EGLClientPixmapHI *pixmap); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLSurface EGLAPIENTRY eglCreatePixmapSurfaceHI (EGLDisplay dpy, EGLConfig config, struct EGLClientPixmapHI *pixmap); +#endif +#endif /* EGL_HI_clientpixmap */ + +#ifndef EGL_HI_colorformats +#define EGL_HI_colorformats 1 +#define EGL_COLOR_FORMAT_HI 0x8F70 +#define EGL_COLOR_RGB_HI 0x8F71 +#define EGL_COLOR_RGBA_HI 0x8F72 +#define EGL_COLOR_ARGB_HI 0x8F73 +#endif /* EGL_HI_colorformats */ + +#ifndef EGL_IMG_context_priority +#define EGL_IMG_context_priority 1 +#define EGL_CONTEXT_PRIORITY_LEVEL_IMG 0x3100 +#define EGL_CONTEXT_PRIORITY_HIGH_IMG 0x3101 +#define EGL_CONTEXT_PRIORITY_MEDIUM_IMG 0x3102 +#define EGL_CONTEXT_PRIORITY_LOW_IMG 0x3103 +#endif /* EGL_IMG_context_priority */ + +#ifndef EGL_IMG_image_plane_attribs +#define EGL_IMG_image_plane_attribs 1 +#define EGL_NATIVE_BUFFER_MULTIPLANE_SEPARATE_IMG 0x3105 +#define EGL_NATIVE_BUFFER_PLANE_OFFSET_IMG 0x3106 +#endif /* EGL_IMG_image_plane_attribs */ + +#ifndef EGL_MESA_drm_image +#define EGL_MESA_drm_image 1 +#define EGL_DRM_BUFFER_FORMAT_MESA 0x31D0 +#define EGL_DRM_BUFFER_USE_MESA 0x31D1 +#define EGL_DRM_BUFFER_FORMAT_ARGB32_MESA 0x31D2 +#define EGL_DRM_BUFFER_MESA 0x31D3 +#define EGL_DRM_BUFFER_STRIDE_MESA 0x31D4 +#define EGL_DRM_BUFFER_USE_SCANOUT_MESA 0x00000001 +#define EGL_DRM_BUFFER_USE_SHARE_MESA 0x00000002 +typedef EGLImageKHR (EGLAPIENTRYP PFNEGLCREATEDRMIMAGEMESAPROC) (EGLDisplay dpy, const EGLint *attrib_list); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLEXPORTDRMIMAGEMESAPROC) 
(EGLDisplay dpy, EGLImageKHR image, EGLint *name, EGLint *handle, EGLint *stride); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLImageKHR EGLAPIENTRY eglCreateDRMImageMESA (EGLDisplay dpy, const EGLint *attrib_list); +EGLAPI EGLBoolean EGLAPIENTRY eglExportDRMImageMESA (EGLDisplay dpy, EGLImageKHR image, EGLint *name, EGLint *handle, EGLint *stride); +#endif +#endif /* EGL_MESA_drm_image */ + +#ifndef EGL_ANDROID_get_native_client_buffer +#define EGL_ANDROID_get_native_client_buffer 1 +struct AHardwareBuffer; +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLClientBuffer eglGetNativeClientBufferANDROID (const struct AHardwareBuffer *buffer); +#else +typedef EGLClientBuffer (EGLAPIENTRYP PFNEGLGETNATIVECLIENTBUFFERANDROID) (const struct AHardwareBuffer *buffer); +#endif +#endif + +#ifndef EGL_MESA_image_dma_buf_export +#define EGL_MESA_image_dma_buf_export 1 +typedef EGLBoolean (EGLAPIENTRYP PFNEGLEXPORTDMABUFIMAGEQUERYMESAPROC) (EGLDisplay dpy, EGLImageKHR image, int *fourcc, int *num_planes, EGLuint64KHR *modifiers); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLEXPORTDMABUFIMAGEMESAPROC) (EGLDisplay dpy, EGLImageKHR image, int *fds, EGLint *strides, EGLint *offsets); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLBoolean EGLAPIENTRY eglExportDMABUFImageQueryMESA (EGLDisplay dpy, EGLImageKHR image, int *fourcc, int *num_planes, EGLuint64KHR *modifiers); +EGLAPI EGLBoolean EGLAPIENTRY eglExportDMABUFImageMESA (EGLDisplay dpy, EGLImageKHR image, int *fds, EGLint *strides, EGLint *offsets); +#endif +#endif /* EGL_MESA_image_dma_buf_export */ + +#ifndef EGL_MESA_platform_gbm +#define EGL_MESA_platform_gbm 1 +#define EGL_PLATFORM_GBM_MESA 0x31D7 +#endif /* EGL_MESA_platform_gbm */ + +#ifndef EGL_MESA_platform_surfaceless +#define EGL_MESA_platform_surfaceless 1 +#define EGL_PLATFORM_SURFACELESS_MESA 0x31DD +#endif /* EGL_MESA_platform_surfaceless */ + +#ifndef EGL_NOK_swap_region +#define EGL_NOK_swap_region 1 +typedef EGLBoolean (EGLAPIENTRYP PFNEGLSWAPBUFFERSREGIONNOKPROC) (EGLDisplay dpy, EGLSurface surface, EGLint numRects, const EGLint *rects); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLBoolean EGLAPIENTRY eglSwapBuffersRegionNOK (EGLDisplay dpy, EGLSurface surface, EGLint numRects, const EGLint *rects); +#endif +#endif /* EGL_NOK_swap_region */ + +#ifndef EGL_NOK_swap_region2 +#define EGL_NOK_swap_region2 1 +typedef EGLBoolean (EGLAPIENTRYP PFNEGLSWAPBUFFERSREGION2NOKPROC) (EGLDisplay dpy, EGLSurface surface, EGLint numRects, const EGLint *rects); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLBoolean EGLAPIENTRY eglSwapBuffersRegion2NOK (EGLDisplay dpy, EGLSurface surface, EGLint numRects, const EGLint *rects); +#endif +#endif /* EGL_NOK_swap_region2 */ + +#ifndef EGL_NOK_texture_from_pixmap +#define EGL_NOK_texture_from_pixmap 1 +#define EGL_Y_INVERTED_NOK 0x307F +#endif /* EGL_NOK_texture_from_pixmap */ + +#ifndef EGL_NV_3dvision_surface +#define EGL_NV_3dvision_surface 1 +#define EGL_AUTO_STEREO_NV 0x3136 +#endif /* EGL_NV_3dvision_surface */ + +#ifndef EGL_NV_coverage_sample +#define EGL_NV_coverage_sample 1 +#define EGL_COVERAGE_BUFFERS_NV 0x30E0 +#define EGL_COVERAGE_SAMPLES_NV 0x30E1 +#endif /* EGL_NV_coverage_sample */ + +#ifndef EGL_NV_coverage_sample_resolve +#define EGL_NV_coverage_sample_resolve 1 +#define EGL_COVERAGE_SAMPLE_RESOLVE_NV 0x3131 +#define EGL_COVERAGE_SAMPLE_RESOLVE_DEFAULT_NV 0x3132 +#define EGL_COVERAGE_SAMPLE_RESOLVE_NONE_NV 0x3133 +#endif /* EGL_NV_coverage_sample_resolve */ + +#ifndef EGL_NV_cuda_event +#define EGL_NV_cuda_event 1 +#define EGL_CUDA_EVENT_HANDLE_NV 0x323B +#define 
EGL_SYNC_CUDA_EVENT_NV 0x323C +#define EGL_SYNC_CUDA_EVENT_COMPLETE_NV 0x323D +#endif /* EGL_NV_cuda_event */ + +#ifndef EGL_NV_depth_nonlinear +#define EGL_NV_depth_nonlinear 1 +#define EGL_DEPTH_ENCODING_NV 0x30E2 +#define EGL_DEPTH_ENCODING_NONE_NV 0 +#define EGL_DEPTH_ENCODING_NONLINEAR_NV 0x30E3 +#endif /* EGL_NV_depth_nonlinear */ + +#ifndef EGL_NV_device_cuda +#define EGL_NV_device_cuda 1 +#define EGL_CUDA_DEVICE_NV 0x323A +#endif /* EGL_NV_device_cuda */ + +#ifndef EGL_NV_native_query +#define EGL_NV_native_query 1 +typedef EGLBoolean (EGLAPIENTRYP PFNEGLQUERYNATIVEDISPLAYNVPROC) (EGLDisplay dpy, EGLNativeDisplayType *display_id); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLQUERYNATIVEWINDOWNVPROC) (EGLDisplay dpy, EGLSurface surf, EGLNativeWindowType *window); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLQUERYNATIVEPIXMAPNVPROC) (EGLDisplay dpy, EGLSurface surf, EGLNativePixmapType *pixmap); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLBoolean EGLAPIENTRY eglQueryNativeDisplayNV (EGLDisplay dpy, EGLNativeDisplayType *display_id); +EGLAPI EGLBoolean EGLAPIENTRY eglQueryNativeWindowNV (EGLDisplay dpy, EGLSurface surf, EGLNativeWindowType *window); +EGLAPI EGLBoolean EGLAPIENTRY eglQueryNativePixmapNV (EGLDisplay dpy, EGLSurface surf, EGLNativePixmapType *pixmap); +#endif +#endif /* EGL_NV_native_query */ + +#ifndef EGL_NV_post_convert_rounding +#define EGL_NV_post_convert_rounding 1 +#endif /* EGL_NV_post_convert_rounding */ + +#ifndef EGL_NV_post_sub_buffer +#define EGL_NV_post_sub_buffer 1 +#define EGL_POST_SUB_BUFFER_SUPPORTED_NV 0x30BE +typedef EGLBoolean (EGLAPIENTRYP PFNEGLPOSTSUBBUFFERNVPROC) (EGLDisplay dpy, EGLSurface surface, EGLint x, EGLint y, EGLint width, EGLint height); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLBoolean EGLAPIENTRY eglPostSubBufferNV (EGLDisplay dpy, EGLSurface surface, EGLint x, EGLint y, EGLint width, EGLint height); +#endif +#endif /* EGL_NV_post_sub_buffer */ + +#ifndef EGL_NV_robustness_video_memory_purge +#define EGL_NV_robustness_video_memory_purge 1 +#define EGL_GENERATE_RESET_ON_VIDEO_MEMORY_PURGE_NV 0x334C +#endif /* EGL_NV_robustness_video_memory_purge */ + +#ifndef EGL_NV_stream_consumer_gltexture_yuv +#define EGL_NV_stream_consumer_gltexture_yuv 1 +#define EGL_YUV_PLANE0_TEXTURE_UNIT_NV 0x332C +#define EGL_YUV_PLANE1_TEXTURE_UNIT_NV 0x332D +#define EGL_YUV_PLANE2_TEXTURE_UNIT_NV 0x332E +typedef EGLBoolean (EGLAPIENTRYP PFNEGLSTREAMCONSUMERGLTEXTUREEXTERNALATTRIBSNVPROC) (EGLDisplay dpy, EGLStreamKHR stream, EGLAttrib *attrib_list); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLBoolean EGLAPIENTRY eglStreamConsumerGLTextureExternalAttribsNV (EGLDisplay dpy, EGLStreamKHR stream, EGLAttrib *attrib_list); +#endif +#endif /* EGL_NV_stream_consumer_gltexture_yuv */ + +#ifndef EGL_NV_stream_cross_display +#define EGL_NV_stream_cross_display 1 +#define EGL_STREAM_CROSS_DISPLAY_NV 0x334E +#endif /* EGL_NV_stream_cross_display */ + +#ifndef EGL_NV_stream_cross_object +#define EGL_NV_stream_cross_object 1 +#define EGL_STREAM_CROSS_OBJECT_NV 0x334D +#endif /* EGL_NV_stream_cross_object */ + +#ifndef EGL_NV_stream_cross_partition +#define EGL_NV_stream_cross_partition 1 +#define EGL_STREAM_CROSS_PARTITION_NV 0x323F +#endif /* EGL_NV_stream_cross_partition */ + +#ifndef EGL_NV_stream_cross_process +#define EGL_NV_stream_cross_process 1 +#define EGL_STREAM_CROSS_PROCESS_NV 0x3245 +#endif /* EGL_NV_stream_cross_process */ + +#ifndef EGL_NV_stream_cross_system +#define EGL_NV_stream_cross_system 1 +#define EGL_STREAM_CROSS_SYSTEM_NV 0x334F +#endif /* 
EGL_NV_stream_cross_system */ + +#ifndef EGL_NV_stream_fifo_next +#define EGL_NV_stream_fifo_next 1 +#define EGL_PENDING_FRAME_NV 0x3329 +#define EGL_STREAM_TIME_PENDING_NV 0x332A +#endif /* EGL_NV_stream_fifo_next */ + +#ifndef EGL_NV_stream_fifo_synchronous +#define EGL_NV_stream_fifo_synchronous 1 +#define EGL_STREAM_FIFO_SYNCHRONOUS_NV 0x3336 +#endif /* EGL_NV_stream_fifo_synchronous */ + +#ifndef EGL_NV_stream_frame_limits +#define EGL_NV_stream_frame_limits 1 +#define EGL_PRODUCER_MAX_FRAME_HINT_NV 0x3337 +#define EGL_CONSUMER_MAX_FRAME_HINT_NV 0x3338 +#endif /* EGL_NV_stream_frame_limits */ + +#ifndef EGL_NV_stream_metadata +#define EGL_NV_stream_metadata 1 +#define EGL_MAX_STREAM_METADATA_BLOCKS_NV 0x3250 +#define EGL_MAX_STREAM_METADATA_BLOCK_SIZE_NV 0x3251 +#define EGL_MAX_STREAM_METADATA_TOTAL_SIZE_NV 0x3252 +#define EGL_PRODUCER_METADATA_NV 0x3253 +#define EGL_CONSUMER_METADATA_NV 0x3254 +#define EGL_PENDING_METADATA_NV 0x3328 +#define EGL_METADATA0_SIZE_NV 0x3255 +#define EGL_METADATA1_SIZE_NV 0x3256 +#define EGL_METADATA2_SIZE_NV 0x3257 +#define EGL_METADATA3_SIZE_NV 0x3258 +#define EGL_METADATA0_TYPE_NV 0x3259 +#define EGL_METADATA1_TYPE_NV 0x325A +#define EGL_METADATA2_TYPE_NV 0x325B +#define EGL_METADATA3_TYPE_NV 0x325C +typedef EGLBoolean (EGLAPIENTRYP PFNEGLQUERYDISPLAYATTRIBNVPROC) (EGLDisplay dpy, EGLint attribute, EGLAttrib *value); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLSETSTREAMMETADATANVPROC) (EGLDisplay dpy, EGLStreamKHR stream, EGLint n, EGLint offset, EGLint size, const void *data); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLQUERYSTREAMMETADATANVPROC) (EGLDisplay dpy, EGLStreamKHR stream, EGLenum name, EGLint n, EGLint offset, EGLint size, void *data); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLBoolean EGLAPIENTRY eglQueryDisplayAttribNV (EGLDisplay dpy, EGLint attribute, EGLAttrib *value); +EGLAPI EGLBoolean EGLAPIENTRY eglSetStreamMetadataNV (EGLDisplay dpy, EGLStreamKHR stream, EGLint n, EGLint offset, EGLint size, const void *data); +EGLAPI EGLBoolean EGLAPIENTRY eglQueryStreamMetadataNV (EGLDisplay dpy, EGLStreamKHR stream, EGLenum name, EGLint n, EGLint offset, EGLint size, void *data); +#endif +#endif /* EGL_NV_stream_metadata */ + +#ifndef EGL_NV_stream_remote +#define EGL_NV_stream_remote 1 +#define EGL_STREAM_STATE_INITIALIZING_NV 0x3240 +#define EGL_STREAM_TYPE_NV 0x3241 +#define EGL_STREAM_PROTOCOL_NV 0x3242 +#define EGL_STREAM_ENDPOINT_NV 0x3243 +#define EGL_STREAM_LOCAL_NV 0x3244 +#define EGL_STREAM_PRODUCER_NV 0x3247 +#define EGL_STREAM_CONSUMER_NV 0x3248 +#define EGL_STREAM_PROTOCOL_FD_NV 0x3246 +#endif /* EGL_NV_stream_remote */ + +#ifndef EGL_NV_stream_reset +#define EGL_NV_stream_reset 1 +#define EGL_SUPPORT_RESET_NV 0x3334 +#define EGL_SUPPORT_REUSE_NV 0x3335 +typedef EGLBoolean (EGLAPIENTRYP PFNEGLRESETSTREAMNVPROC) (EGLDisplay dpy, EGLStreamKHR stream); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLBoolean EGLAPIENTRY eglResetStreamNV (EGLDisplay dpy, EGLStreamKHR stream); +#endif +#endif /* EGL_NV_stream_reset */ + +#ifndef EGL_NV_stream_socket +#define EGL_NV_stream_socket 1 +#define EGL_STREAM_PROTOCOL_SOCKET_NV 0x324B +#define EGL_SOCKET_HANDLE_NV 0x324C +#define EGL_SOCKET_TYPE_NV 0x324D +#endif /* EGL_NV_stream_socket */ + +#ifndef EGL_NV_stream_socket_inet +#define EGL_NV_stream_socket_inet 1 +#define EGL_SOCKET_TYPE_INET_NV 0x324F +#endif /* EGL_NV_stream_socket_inet */ + +#ifndef EGL_NV_stream_socket_unix +#define EGL_NV_stream_socket_unix 1 +#define EGL_SOCKET_TYPE_UNIX_NV 0x324E +#endif /* EGL_NV_stream_socket_unix */ + +#ifndef 
EGL_NV_stream_sync +#define EGL_NV_stream_sync 1 +#define EGL_SYNC_NEW_FRAME_NV 0x321F +typedef EGLSyncKHR (EGLAPIENTRYP PFNEGLCREATESTREAMSYNCNVPROC) (EGLDisplay dpy, EGLStreamKHR stream, EGLenum type, const EGLint *attrib_list); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLSyncKHR EGLAPIENTRY eglCreateStreamSyncNV (EGLDisplay dpy, EGLStreamKHR stream, EGLenum type, const EGLint *attrib_list); +#endif +#endif /* EGL_NV_stream_sync */ + +#ifndef EGL_NV_sync +#define EGL_NV_sync 1 +typedef void *EGLSyncNV; +typedef khronos_utime_nanoseconds_t EGLTimeNV; +#ifdef KHRONOS_SUPPORT_INT64 +#define EGL_SYNC_PRIOR_COMMANDS_COMPLETE_NV 0x30E6 +#define EGL_SYNC_STATUS_NV 0x30E7 +#define EGL_SIGNALED_NV 0x30E8 +#define EGL_UNSIGNALED_NV 0x30E9 +#define EGL_SYNC_FLUSH_COMMANDS_BIT_NV 0x0001 +#define EGL_FOREVER_NV 0xFFFFFFFFFFFFFFFFull +#define EGL_ALREADY_SIGNALED_NV 0x30EA +#define EGL_TIMEOUT_EXPIRED_NV 0x30EB +#define EGL_CONDITION_SATISFIED_NV 0x30EC +#define EGL_SYNC_TYPE_NV 0x30ED +#define EGL_SYNC_CONDITION_NV 0x30EE +#define EGL_SYNC_FENCE_NV 0x30EF +#define EGL_NO_SYNC_NV EGL_CAST(EGLSyncNV,0) +typedef EGLSyncNV (EGLAPIENTRYP PFNEGLCREATEFENCESYNCNVPROC) (EGLDisplay dpy, EGLenum condition, const EGLint *attrib_list); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLDESTROYSYNCNVPROC) (EGLSyncNV sync); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLFENCENVPROC) (EGLSyncNV sync); +typedef EGLint (EGLAPIENTRYP PFNEGLCLIENTWAITSYNCNVPROC) (EGLSyncNV sync, EGLint flags, EGLTimeNV timeout); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLSIGNALSYNCNVPROC) (EGLSyncNV sync, EGLenum mode); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLGETSYNCATTRIBNVPROC) (EGLSyncNV sync, EGLint attribute, EGLint *value); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLSyncNV EGLAPIENTRY eglCreateFenceSyncNV (EGLDisplay dpy, EGLenum condition, const EGLint *attrib_list); +EGLAPI EGLBoolean EGLAPIENTRY eglDestroySyncNV (EGLSyncNV sync); +EGLAPI EGLBoolean EGLAPIENTRY eglFenceNV (EGLSyncNV sync); +EGLAPI EGLint EGLAPIENTRY eglClientWaitSyncNV (EGLSyncNV sync, EGLint flags, EGLTimeNV timeout); +EGLAPI EGLBoolean EGLAPIENTRY eglSignalSyncNV (EGLSyncNV sync, EGLenum mode); +EGLAPI EGLBoolean EGLAPIENTRY eglGetSyncAttribNV (EGLSyncNV sync, EGLint attribute, EGLint *value); +#endif +#endif /* KHRONOS_SUPPORT_INT64 */ +#endif /* EGL_NV_sync */ + +#ifndef EGL_NV_system_time +#define EGL_NV_system_time 1 +typedef khronos_utime_nanoseconds_t EGLuint64NV; +#ifdef KHRONOS_SUPPORT_INT64 +typedef EGLuint64NV (EGLAPIENTRYP PFNEGLGETSYSTEMTIMEFREQUENCYNVPROC) (void); +typedef EGLuint64NV (EGLAPIENTRYP PFNEGLGETSYSTEMTIMENVPROC) (void); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLuint64NV EGLAPIENTRY eglGetSystemTimeFrequencyNV (void); +EGLAPI EGLuint64NV EGLAPIENTRY eglGetSystemTimeNV (void); +#endif +#endif /* KHRONOS_SUPPORT_INT64 */ +#endif /* EGL_NV_system_time */ + +#ifndef EGL_TIZEN_image_native_buffer +#define EGL_TIZEN_image_native_buffer 1 +#define EGL_NATIVE_BUFFER_TIZEN 0x32A0 +#endif /* EGL_TIZEN_image_native_buffer */ + +#ifndef EGL_TIZEN_image_native_surface +#define EGL_TIZEN_image_native_surface 1 +#define EGL_NATIVE_SURFACE_TIZEN 0x32A1 +#endif /* EGL_TIZEN_image_native_surface */ + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/sources/khronos/EGL/eglplatform.h b/sources/khronos/EGL/eglplatform.h new file mode 100755 index 00000000..5cc8914e --- /dev/null +++ b/sources/khronos/EGL/eglplatform.h @@ -0,0 +1,132 @@ +#ifndef __eglplatform_h_ +#define __eglplatform_h_ + +/* +** Copyright (c) 2007-2016 The Khronos Group Inc. 
+**
+** Permission is hereby granted, free of charge, to any person obtaining a
+** copy of this software and/or associated documentation files (the
+** "Materials"), to deal in the Materials without restriction, including
+** without limitation the rights to use, copy, modify, merge, publish,
+** distribute, sublicense, and/or sell copies of the Materials, and to
+** permit persons to whom the Materials are furnished to do so, subject to
+** the following conditions:
+**
+** The above copyright notice and this permission notice shall be included
+** in all copies or substantial portions of the Materials.
+**
+** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+** MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+*/
+
+/* Platform-specific types and definitions for egl.h
+ * $Revision: 30994 $ on $Date: 2015-04-30 13:36:48 -0700 (Thu, 30 Apr 2015) $
+ *
+ * Adopters may modify khrplatform.h and this file to suit their platform.
+ * You are encouraged to submit all modifications to the Khronos group so that
+ * they can be included in future versions of this file. Please submit changes
+ * by sending them to the public Khronos Bugzilla (http://khronos.org/bugzilla)
+ * by filing a bug against product "EGL" component "Registry".
+ */
+
+#include <KHR/khrplatform.h>
+
+/* Macros used in EGL function prototype declarations.
+ *
+ * EGL functions should be prototyped as:
+ *
+ * EGLAPI return-type EGLAPIENTRY eglFunction(arguments);
+ * typedef return-type (EGLAPIENTRYP PFNEGLFUNCTIONPROC) (arguments);
+ *
+ * KHRONOS_APICALL and KHRONOS_APIENTRY are defined in KHR/khrplatform.h
+ */
+
+#ifndef EGLAPI
+#define EGLAPI KHRONOS_APICALL
+#endif
+
+#ifndef EGLAPIENTRY
+#define EGLAPIENTRY KHRONOS_APIENTRY
+#endif
+#define EGLAPIENTRYP EGLAPIENTRY*
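+
+/* [Editor's sketch, not vendored content: applying the convention above to a
+ * hypothetical extension entry point named eglExample yields a direct
+ * prototype plus a pointer typedef for run-time loading:
+ *
+ *     EGLAPI EGLBoolean EGLAPIENTRY eglExample (EGLDisplay dpy);
+ *     typedef EGLBoolean (EGLAPIENTRYP PFNEGLEXAMPLEPROC) (EGLDisplay dpy);
+ *
+ * KHR/khrplatform.h supplies the platform linkage and calling-convention
+ * attributes, e.g. KHRONOS_APIENTRY expands to __stdcall on 32-bit Windows.]
+ */
+
+/* The types NativeDisplayType, NativeWindowType, and NativePixmapType
+ * are aliases of window-system-dependent types, such as X Display * or
+ * Windows Device Context. They must be defined in platform-specific
+ * code below. The EGL-prefixed versions of Native*Type are the same
+ * types, renamed in EGL 1.3 so all types in the API start with "EGL".
+ *
+ * Khronos STRONGLY RECOMMENDS that you use the default definitions
+ * provided below, since these changes affect both binary and source
+ * portability of applications using EGL running on different EGL
+ * implementations.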
+ */
+
+#if defined(_WIN32) || defined(__VC32__) && !defined(__CYGWIN__) && !defined(__SCITECH_SNAP__) /* Win32 and WinCE */
+#ifndef WIN32_LEAN_AND_MEAN
+#define WIN32_LEAN_AND_MEAN 1
+#endif
+#include <windows.h>
+
+typedef HDC EGLNativeDisplayType;
+typedef HBITMAP EGLNativePixmapType;
+typedef HWND EGLNativeWindowType;
+
+#elif defined(__APPLE__) || defined(__WINSCW__) || defined(__SYMBIAN32__) /* Symbian */
+
+typedef int EGLNativeDisplayType;
+typedef void *EGLNativeWindowType;
+typedef void *EGLNativePixmapType;
+
+#elif defined(__ANDROID__) || defined(ANDROID)
+
+struct ANativeWindow;
+struct egl_native_pixmap_t;
+
+typedef struct ANativeWindow* EGLNativeWindowType;
+typedef struct egl_native_pixmap_t* EGLNativePixmapType;
+typedef void* EGLNativeDisplayType;
+
+#elif defined(__unix__)
+
+/* X11 (tentative) */
+#include <X11/Xlib.h>
+#include <X11/Xutil.h>
+
+typedef Display *EGLNativeDisplayType;
+typedef Pixmap EGLNativePixmapType;
+typedef Window EGLNativeWindowType;
+
+#else
+#error "Platform not recognized"
+#endif
+
+/* EGL 1.2 types, renamed for consistency in EGL 1.3 */
+typedef EGLNativeDisplayType NativeDisplayType;
+typedef EGLNativePixmapType NativePixmapType;
+typedef EGLNativeWindowType NativeWindowType;
+
+
+/* Define EGLint. This must be a signed integral type large enough to contain
+ * all legal attribute names and values passed into and out of EGL, whether
+ * their type is boolean, bitmask, enumerant (symbolic constant), integer,
+ * handle, or other. While in general a 32-bit integer will suffice, if
+ * handles are 64 bit types, then EGLint should be defined as a signed 64-bit
+ * integer type.
+ */
+typedef khronos_int32_t EGLint;
+
+
+/* C++ / C typecast macros for special EGL handle values */
+#if defined(__cplusplus)
+#define EGL_CAST(type, value) (static_cast<type>(value))
+#else
+#define EGL_CAST(type, value) ((type) (value))
+#endif
+
+#endif /* __eglplatform_h_ */
diff --git a/sources/khronos/GLES/egl.h b/sources/khronos/GLES/egl.h
new file mode 100755
index 00000000..86f644c9
--- /dev/null
+++ b/sources/khronos/GLES/egl.h
@@ -0,0 +1,29 @@
+/*
+** Copyright (c) 2008-2017 The Khronos Group Inc.
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+** http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+/*
+ * Skeleton egl.h to provide compatibility for early GLES 1.0
+ * applications. Several early implementations included gl.h
+ * in egl.h leading applications to include only egl.h
+ */
+
+#ifndef __legacy_egl_h_
+#define __legacy_egl_h_
+
+#include <EGL/egl.h>
+#include <GLES/gl.h>
+
+#endif /* __legacy_egl_h_ */
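
[Editor's sketch, not part of the vendored headers: the PFNEGL...PROC
typedefs in eglext.h above exist so applications can resolve extension entry
points at run time; the direct prototypes are only emitted when the
application defines EGL_EGLEXT_PROTOTYPES. A minimal loading pattern, using
the EGL_NV_post_sub_buffer typedef and the core eglGetProcAddress; the
helper name is illustrative only:]

    #include <EGL/egl.h>
    #include <EGL/eglext.h>

    static PFNEGLPOSTSUBBUFFERNVPROC pfnPostSubBufferNV;

    void loadPostSubBufferNV(void)
    {
        /* eglGetProcAddress returns NULL when the implementation does not
         * export the symbol; callers should also confirm the extension via
         * eglQueryString(dpy, EGL_EXTENSIONS) before use. */
        pfnPostSubBufferNV = (PFNEGLPOSTSUBBUFFERNVPROC)
            eglGetProcAddress("eglPostSubBufferNV");
    }

diff --git a/sources/khronos/GLES/gl.h b/sources/khronos/GLES/gl.h
new file mode 100755
index 00000000..95d815b0
--- /dev/null
+++ b/sources/khronos/GLES/gl.h
@@ -0,0 +1,591 @@
+#ifndef __gl_h_
+#define __gl_h_ 1
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+** Copyright (c) 2013-2017 The Khronos Group Inc.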
+**
+** Permission is hereby granted, free of charge, to any person obtaining a
+** copy of this software and/or associated documentation files (the
+** "Materials"), to deal in the Materials without restriction, including
+** without limitation the rights to use, copy, modify, merge, publish,
+** distribute, sublicense, and/or sell copies of the Materials, and to
+** permit persons to whom the Materials are furnished to do so, subject to
+** the following conditions:
+**
+** The above copyright notice and this permission notice shall be included
+** in all copies or substantial portions of the Materials.
+**
+** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+** MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+*/
+/*
+** This header is generated from the Khronos OpenGL / OpenGL ES XML
+** API Registry. The current version of the Registry, generator scripts
+** used to make the header, and the header can be found at
+** https://github.com/KhronosGroup/OpenGL-Registry
+*/
+
+#include <GLES/glplatform.h>
+
+/* Generated on date 20170613 */
+
+/* Generated C header for:
+ * API: gles1
+ * Profile: common
+ * Versions considered: .*
+ * Versions emitted: .*
+ * Default extensions included: None
+ * Additional extensions included: ^(GL_OES_read_format|GL_OES_compressed_paletted_texture|GL_OES_point_size_array|GL_OES_point_sprite)$
+ * Extensions removed: _nomatch_^
+ */
+
+#ifndef GL_VERSION_ES_CM_1_0
+#define GL_VERSION_ES_CM_1_0 1
+typedef void GLvoid;
+typedef char GLchar;
+typedef unsigned int GLenum;
+#include <KHR/khrplatform.h>
+typedef khronos_float_t GLfloat;
+typedef khronos_int32_t GLfixed;
+typedef unsigned int GLuint;
+typedef khronos_ssize_t GLsizeiptr;
+typedef khronos_intptr_t GLintptr;
+typedef unsigned int GLbitfield;
+typedef int GLint;
+typedef unsigned short GLushort;
+typedef short GLshort;
+typedef khronos_int8_t GLbyte;
+typedef khronos_uint8_t GLubyte;
+typedef unsigned char GLboolean;
+typedef int GLsizei;
+typedef khronos_float_t GLclampf;
+typedef khronos_int32_t GLclampx;
+#define GL_VERSION_ES_CL_1_0 1
+#define GL_VERSION_ES_CM_1_1 1
+#define GL_VERSION_ES_CL_1_1 1
+#define GL_DEPTH_BUFFER_BIT 0x00000100
+#define GL_STENCIL_BUFFER_BIT 0x00000400
+#define GL_COLOR_BUFFER_BIT 0x00004000
+#define GL_FALSE 0
+#define GL_TRUE 1
+#define GL_POINTS 0x0000
+#define GL_LINES 0x0001
+#define GL_LINE_LOOP 0x0002
+#define GL_LINE_STRIP 0x0003
+#define GL_TRIANGLES 0x0004
+#define GL_TRIANGLE_STRIP 0x0005
+#define GL_TRIANGLE_FAN 0x0006
+#define GL_NEVER 0x0200
+#define GL_LESS 0x0201
+#define GL_EQUAL 0x0202
+#define GL_LEQUAL 0x0203
+#define GL_GREATER 0x0204
+#define GL_NOTEQUAL 0x0205
+#define GL_GEQUAL 0x0206
+#define GL_ALWAYS 0x0207
+#define GL_ZERO 0
+#define GL_ONE 1
+#define GL_SRC_COLOR 0x0300
+#define GL_ONE_MINUS_SRC_COLOR 0x0301
+#define GL_SRC_ALPHA 0x0302
+#define GL_ONE_MINUS_SRC_ALPHA 0x0303
+#define GL_DST_ALPHA 0x0304
+#define GL_ONE_MINUS_DST_ALPHA 0x0305
+#define GL_DST_COLOR 0x0306
+#define GL_ONE_MINUS_DST_COLOR 0x0307
+#define GL_SRC_ALPHA_SATURATE 0x0308
+#define GL_CLIP_PLANE0 0x3000
+#define GL_CLIP_PLANE1 0x3001
+#define GL_CLIP_PLANE2 0x3002
+#define GL_CLIP_PLANE3 0x3003
+#define GL_CLIP_PLANE4 0x3004
+#define GL_CLIP_PLANE5 0x3005 +#define GL_FRONT 0x0404 +#define GL_BACK 0x0405 +#define GL_FRONT_AND_BACK 0x0408 +#define GL_FOG 0x0B60 +#define GL_LIGHTING 0x0B50 +#define GL_TEXTURE_2D 0x0DE1 +#define GL_CULL_FACE 0x0B44 +#define GL_ALPHA_TEST 0x0BC0 +#define GL_BLEND 0x0BE2 +#define GL_COLOR_LOGIC_OP 0x0BF2 +#define GL_DITHER 0x0BD0 +#define GL_STENCIL_TEST 0x0B90 +#define GL_DEPTH_TEST 0x0B71 +#define GL_POINT_SMOOTH 0x0B10 +#define GL_LINE_SMOOTH 0x0B20 +#define GL_SCISSOR_TEST 0x0C11 +#define GL_COLOR_MATERIAL 0x0B57 +#define GL_NORMALIZE 0x0BA1 +#define GL_RESCALE_NORMAL 0x803A +#define GL_VERTEX_ARRAY 0x8074 +#define GL_NORMAL_ARRAY 0x8075 +#define GL_COLOR_ARRAY 0x8076 +#define GL_TEXTURE_COORD_ARRAY 0x8078 +#define GL_MULTISAMPLE 0x809D +#define GL_SAMPLE_ALPHA_TO_COVERAGE 0x809E +#define GL_SAMPLE_ALPHA_TO_ONE 0x809F +#define GL_SAMPLE_COVERAGE 0x80A0 +#define GL_NO_ERROR 0 +#define GL_INVALID_ENUM 0x0500 +#define GL_INVALID_VALUE 0x0501 +#define GL_INVALID_OPERATION 0x0502 +#define GL_STACK_OVERFLOW 0x0503 +#define GL_STACK_UNDERFLOW 0x0504 +#define GL_OUT_OF_MEMORY 0x0505 +#define GL_EXP 0x0800 +#define GL_EXP2 0x0801 +#define GL_FOG_DENSITY 0x0B62 +#define GL_FOG_START 0x0B63 +#define GL_FOG_END 0x0B64 +#define GL_FOG_MODE 0x0B65 +#define GL_FOG_COLOR 0x0B66 +#define GL_CW 0x0900 +#define GL_CCW 0x0901 +#define GL_CURRENT_COLOR 0x0B00 +#define GL_CURRENT_NORMAL 0x0B02 +#define GL_CURRENT_TEXTURE_COORDS 0x0B03 +#define GL_POINT_SIZE 0x0B11 +#define GL_POINT_SIZE_MIN 0x8126 +#define GL_POINT_SIZE_MAX 0x8127 +#define GL_POINT_FADE_THRESHOLD_SIZE 0x8128 +#define GL_POINT_DISTANCE_ATTENUATION 0x8129 +#define GL_SMOOTH_POINT_SIZE_RANGE 0x0B12 +#define GL_LINE_WIDTH 0x0B21 +#define GL_SMOOTH_LINE_WIDTH_RANGE 0x0B22 +#define GL_ALIASED_POINT_SIZE_RANGE 0x846D +#define GL_ALIASED_LINE_WIDTH_RANGE 0x846E +#define GL_CULL_FACE_MODE 0x0B45 +#define GL_FRONT_FACE 0x0B46 +#define GL_SHADE_MODEL 0x0B54 +#define GL_DEPTH_RANGE 0x0B70 +#define GL_DEPTH_WRITEMASK 0x0B72 +#define GL_DEPTH_CLEAR_VALUE 0x0B73 +#define GL_DEPTH_FUNC 0x0B74 +#define GL_STENCIL_CLEAR_VALUE 0x0B91 +#define GL_STENCIL_FUNC 0x0B92 +#define GL_STENCIL_VALUE_MASK 0x0B93 +#define GL_STENCIL_FAIL 0x0B94 +#define GL_STENCIL_PASS_DEPTH_FAIL 0x0B95 +#define GL_STENCIL_PASS_DEPTH_PASS 0x0B96 +#define GL_STENCIL_REF 0x0B97 +#define GL_STENCIL_WRITEMASK 0x0B98 +#define GL_MATRIX_MODE 0x0BA0 +#define GL_VIEWPORT 0x0BA2 +#define GL_MODELVIEW_STACK_DEPTH 0x0BA3 +#define GL_PROJECTION_STACK_DEPTH 0x0BA4 +#define GL_TEXTURE_STACK_DEPTH 0x0BA5 +#define GL_MODELVIEW_MATRIX 0x0BA6 +#define GL_PROJECTION_MATRIX 0x0BA7 +#define GL_TEXTURE_MATRIX 0x0BA8 +#define GL_ALPHA_TEST_FUNC 0x0BC1 +#define GL_ALPHA_TEST_REF 0x0BC2 +#define GL_BLEND_DST 0x0BE0 +#define GL_BLEND_SRC 0x0BE1 +#define GL_LOGIC_OP_MODE 0x0BF0 +#define GL_SCISSOR_BOX 0x0C10 +#define GL_COLOR_CLEAR_VALUE 0x0C22 +#define GL_COLOR_WRITEMASK 0x0C23 +#define GL_MAX_LIGHTS 0x0D31 +#define GL_MAX_CLIP_PLANES 0x0D32 +#define GL_MAX_TEXTURE_SIZE 0x0D33 +#define GL_MAX_MODELVIEW_STACK_DEPTH 0x0D36 +#define GL_MAX_PROJECTION_STACK_DEPTH 0x0D38 +#define GL_MAX_TEXTURE_STACK_DEPTH 0x0D39 +#define GL_MAX_VIEWPORT_DIMS 0x0D3A +#define GL_MAX_TEXTURE_UNITS 0x84E2 +#define GL_SUBPIXEL_BITS 0x0D50 +#define GL_RED_BITS 0x0D52 +#define GL_GREEN_BITS 0x0D53 +#define GL_BLUE_BITS 0x0D54 +#define GL_ALPHA_BITS 0x0D55 +#define GL_DEPTH_BITS 0x0D56 +#define GL_STENCIL_BITS 0x0D57 +#define GL_POLYGON_OFFSET_UNITS 0x2A00 +#define GL_POLYGON_OFFSET_FILL 0x8037 +#define GL_POLYGON_OFFSET_FACTOR 
0x8038 +#define GL_TEXTURE_BINDING_2D 0x8069 +#define GL_VERTEX_ARRAY_SIZE 0x807A +#define GL_VERTEX_ARRAY_TYPE 0x807B +#define GL_VERTEX_ARRAY_STRIDE 0x807C +#define GL_NORMAL_ARRAY_TYPE 0x807E +#define GL_NORMAL_ARRAY_STRIDE 0x807F +#define GL_COLOR_ARRAY_SIZE 0x8081 +#define GL_COLOR_ARRAY_TYPE 0x8082 +#define GL_COLOR_ARRAY_STRIDE 0x8083 +#define GL_TEXTURE_COORD_ARRAY_SIZE 0x8088 +#define GL_TEXTURE_COORD_ARRAY_TYPE 0x8089 +#define GL_TEXTURE_COORD_ARRAY_STRIDE 0x808A +#define GL_VERTEX_ARRAY_POINTER 0x808E +#define GL_NORMAL_ARRAY_POINTER 0x808F +#define GL_COLOR_ARRAY_POINTER 0x8090 +#define GL_TEXTURE_COORD_ARRAY_POINTER 0x8092 +#define GL_SAMPLE_BUFFERS 0x80A8 +#define GL_SAMPLES 0x80A9 +#define GL_SAMPLE_COVERAGE_VALUE 0x80AA +#define GL_SAMPLE_COVERAGE_INVERT 0x80AB +#define GL_NUM_COMPRESSED_TEXTURE_FORMATS 0x86A2 +#define GL_COMPRESSED_TEXTURE_FORMATS 0x86A3 +#define GL_DONT_CARE 0x1100 +#define GL_FASTEST 0x1101 +#define GL_NICEST 0x1102 +#define GL_PERSPECTIVE_CORRECTION_HINT 0x0C50 +#define GL_POINT_SMOOTH_HINT 0x0C51 +#define GL_LINE_SMOOTH_HINT 0x0C52 +#define GL_FOG_HINT 0x0C54 +#define GL_GENERATE_MIPMAP_HINT 0x8192 +#define GL_LIGHT_MODEL_AMBIENT 0x0B53 +#define GL_LIGHT_MODEL_TWO_SIDE 0x0B52 +#define GL_AMBIENT 0x1200 +#define GL_DIFFUSE 0x1201 +#define GL_SPECULAR 0x1202 +#define GL_POSITION 0x1203 +#define GL_SPOT_DIRECTION 0x1204 +#define GL_SPOT_EXPONENT 0x1205 +#define GL_SPOT_CUTOFF 0x1206 +#define GL_CONSTANT_ATTENUATION 0x1207 +#define GL_LINEAR_ATTENUATION 0x1208 +#define GL_QUADRATIC_ATTENUATION 0x1209 +#define GL_BYTE 0x1400 +#define GL_UNSIGNED_BYTE 0x1401 +#define GL_SHORT 0x1402 +#define GL_UNSIGNED_SHORT 0x1403 +#define GL_FLOAT 0x1406 +#define GL_FIXED 0x140C +#define GL_CLEAR 0x1500 +#define GL_AND 0x1501 +#define GL_AND_REVERSE 0x1502 +#define GL_COPY 0x1503 +#define GL_AND_INVERTED 0x1504 +#define GL_NOOP 0x1505 +#define GL_XOR 0x1506 +#define GL_OR 0x1507 +#define GL_NOR 0x1508 +#define GL_EQUIV 0x1509 +#define GL_INVERT 0x150A +#define GL_OR_REVERSE 0x150B +#define GL_COPY_INVERTED 0x150C +#define GL_OR_INVERTED 0x150D +#define GL_NAND 0x150E +#define GL_SET 0x150F +#define GL_EMISSION 0x1600 +#define GL_SHININESS 0x1601 +#define GL_AMBIENT_AND_DIFFUSE 0x1602 +#define GL_MODELVIEW 0x1700 +#define GL_PROJECTION 0x1701 +#define GL_TEXTURE 0x1702 +#define GL_ALPHA 0x1906 +#define GL_RGB 0x1907 +#define GL_RGBA 0x1908 +#define GL_LUMINANCE 0x1909 +#define GL_LUMINANCE_ALPHA 0x190A +#define GL_UNPACK_ALIGNMENT 0x0CF5 +#define GL_PACK_ALIGNMENT 0x0D05 +#define GL_UNSIGNED_SHORT_4_4_4_4 0x8033 +#define GL_UNSIGNED_SHORT_5_5_5_1 0x8034 +#define GL_UNSIGNED_SHORT_5_6_5 0x8363 +#define GL_FLAT 0x1D00 +#define GL_SMOOTH 0x1D01 +#define GL_KEEP 0x1E00 +#define GL_REPLACE 0x1E01 +#define GL_INCR 0x1E02 +#define GL_DECR 0x1E03 +#define GL_VENDOR 0x1F00 +#define GL_RENDERER 0x1F01 +#define GL_VERSION 0x1F02 +#define GL_EXTENSIONS 0x1F03 +#define GL_MODULATE 0x2100 +#define GL_DECAL 0x2101 +#define GL_ADD 0x0104 +#define GL_TEXTURE_ENV_MODE 0x2200 +#define GL_TEXTURE_ENV_COLOR 0x2201 +#define GL_TEXTURE_ENV 0x2300 +#define GL_NEAREST 0x2600 +#define GL_LINEAR 0x2601 +#define GL_NEAREST_MIPMAP_NEAREST 0x2700 +#define GL_LINEAR_MIPMAP_NEAREST 0x2701 +#define GL_NEAREST_MIPMAP_LINEAR 0x2702 +#define GL_LINEAR_MIPMAP_LINEAR 0x2703 +#define GL_TEXTURE_MAG_FILTER 0x2800 +#define GL_TEXTURE_MIN_FILTER 0x2801 +#define GL_TEXTURE_WRAP_S 0x2802 +#define GL_TEXTURE_WRAP_T 0x2803 +#define GL_GENERATE_MIPMAP 0x8191 +#define GL_TEXTURE0 0x84C0 +#define GL_TEXTURE1 0x84C1 
+#define GL_TEXTURE2 0x84C2 +#define GL_TEXTURE3 0x84C3 +#define GL_TEXTURE4 0x84C4 +#define GL_TEXTURE5 0x84C5 +#define GL_TEXTURE6 0x84C6 +#define GL_TEXTURE7 0x84C7 +#define GL_TEXTURE8 0x84C8 +#define GL_TEXTURE9 0x84C9 +#define GL_TEXTURE10 0x84CA +#define GL_TEXTURE11 0x84CB +#define GL_TEXTURE12 0x84CC +#define GL_TEXTURE13 0x84CD +#define GL_TEXTURE14 0x84CE +#define GL_TEXTURE15 0x84CF +#define GL_TEXTURE16 0x84D0 +#define GL_TEXTURE17 0x84D1 +#define GL_TEXTURE18 0x84D2 +#define GL_TEXTURE19 0x84D3 +#define GL_TEXTURE20 0x84D4 +#define GL_TEXTURE21 0x84D5 +#define GL_TEXTURE22 0x84D6 +#define GL_TEXTURE23 0x84D7 +#define GL_TEXTURE24 0x84D8 +#define GL_TEXTURE25 0x84D9 +#define GL_TEXTURE26 0x84DA +#define GL_TEXTURE27 0x84DB +#define GL_TEXTURE28 0x84DC +#define GL_TEXTURE29 0x84DD +#define GL_TEXTURE30 0x84DE +#define GL_TEXTURE31 0x84DF +#define GL_ACTIVE_TEXTURE 0x84E0 +#define GL_CLIENT_ACTIVE_TEXTURE 0x84E1 +#define GL_REPEAT 0x2901 +#define GL_CLAMP_TO_EDGE 0x812F +#define GL_LIGHT0 0x4000 +#define GL_LIGHT1 0x4001 +#define GL_LIGHT2 0x4002 +#define GL_LIGHT3 0x4003 +#define GL_LIGHT4 0x4004 +#define GL_LIGHT5 0x4005 +#define GL_LIGHT6 0x4006 +#define GL_LIGHT7 0x4007 +#define GL_ARRAY_BUFFER 0x8892 +#define GL_ELEMENT_ARRAY_BUFFER 0x8893 +#define GL_ARRAY_BUFFER_BINDING 0x8894 +#define GL_ELEMENT_ARRAY_BUFFER_BINDING 0x8895 +#define GL_VERTEX_ARRAY_BUFFER_BINDING 0x8896 +#define GL_NORMAL_ARRAY_BUFFER_BINDING 0x8897 +#define GL_COLOR_ARRAY_BUFFER_BINDING 0x8898 +#define GL_TEXTURE_COORD_ARRAY_BUFFER_BINDING 0x889A +#define GL_STATIC_DRAW 0x88E4 +#define GL_DYNAMIC_DRAW 0x88E8 +#define GL_BUFFER_SIZE 0x8764 +#define GL_BUFFER_USAGE 0x8765 +#define GL_SUBTRACT 0x84E7 +#define GL_COMBINE 0x8570 +#define GL_COMBINE_RGB 0x8571 +#define GL_COMBINE_ALPHA 0x8572 +#define GL_RGB_SCALE 0x8573 +#define GL_ADD_SIGNED 0x8574 +#define GL_INTERPOLATE 0x8575 +#define GL_CONSTANT 0x8576 +#define GL_PRIMARY_COLOR 0x8577 +#define GL_PREVIOUS 0x8578 +#define GL_OPERAND0_RGB 0x8590 +#define GL_OPERAND1_RGB 0x8591 +#define GL_OPERAND2_RGB 0x8592 +#define GL_OPERAND0_ALPHA 0x8598 +#define GL_OPERAND1_ALPHA 0x8599 +#define GL_OPERAND2_ALPHA 0x859A +#define GL_ALPHA_SCALE 0x0D1C +#define GL_SRC0_RGB 0x8580 +#define GL_SRC1_RGB 0x8581 +#define GL_SRC2_RGB 0x8582 +#define GL_SRC0_ALPHA 0x8588 +#define GL_SRC1_ALPHA 0x8589 +#define GL_SRC2_ALPHA 0x858A +#define GL_DOT3_RGB 0x86AE +#define GL_DOT3_RGBA 0x86AF +GL_API void GL_APIENTRY glAlphaFunc (GLenum func, GLfloat ref); +GL_API void GL_APIENTRY glClearColor (GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha); +GL_API void GL_APIENTRY glClearDepthf (GLfloat d); +GL_API void GL_APIENTRY glClipPlanef (GLenum p, const GLfloat *eqn); +GL_API void GL_APIENTRY glColor4f (GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha); +GL_API void GL_APIENTRY glDepthRangef (GLfloat n, GLfloat f); +GL_API void GL_APIENTRY glFogf (GLenum pname, GLfloat param); +GL_API void GL_APIENTRY glFogfv (GLenum pname, const GLfloat *params); +GL_API void GL_APIENTRY glFrustumf (GLfloat l, GLfloat r, GLfloat b, GLfloat t, GLfloat n, GLfloat f); +GL_API void GL_APIENTRY glGetClipPlanef (GLenum plane, GLfloat *equation); +GL_API void GL_APIENTRY glGetFloatv (GLenum pname, GLfloat *data); +GL_API void GL_APIENTRY glGetLightfv (GLenum light, GLenum pname, GLfloat *params); +GL_API void GL_APIENTRY glGetMaterialfv (GLenum face, GLenum pname, GLfloat *params); +GL_API void GL_APIENTRY glGetTexEnvfv (GLenum target, GLenum pname, GLfloat *params); +GL_API void GL_APIENTRY 
glGetTexParameterfv (GLenum target, GLenum pname, GLfloat *params); +GL_API void GL_APIENTRY glLightModelf (GLenum pname, GLfloat param); +GL_API void GL_APIENTRY glLightModelfv (GLenum pname, const GLfloat *params); +GL_API void GL_APIENTRY glLightf (GLenum light, GLenum pname, GLfloat param); +GL_API void GL_APIENTRY glLightfv (GLenum light, GLenum pname, const GLfloat *params); +GL_API void GL_APIENTRY glLineWidth (GLfloat width); +GL_API void GL_APIENTRY glLoadMatrixf (const GLfloat *m); +GL_API void GL_APIENTRY glMaterialf (GLenum face, GLenum pname, GLfloat param); +GL_API void GL_APIENTRY glMaterialfv (GLenum face, GLenum pname, const GLfloat *params); +GL_API void GL_APIENTRY glMultMatrixf (const GLfloat *m); +GL_API void GL_APIENTRY glMultiTexCoord4f (GLenum target, GLfloat s, GLfloat t, GLfloat r, GLfloat q); +GL_API void GL_APIENTRY glNormal3f (GLfloat nx, GLfloat ny, GLfloat nz); +GL_API void GL_APIENTRY glOrthof (GLfloat l, GLfloat r, GLfloat b, GLfloat t, GLfloat n, GLfloat f); +GL_API void GL_APIENTRY glPointParameterf (GLenum pname, GLfloat param); +GL_API void GL_APIENTRY glPointParameterfv (GLenum pname, const GLfloat *params); +GL_API void GL_APIENTRY glPointSize (GLfloat size); +GL_API void GL_APIENTRY glPolygonOffset (GLfloat factor, GLfloat units); +GL_API void GL_APIENTRY glRotatef (GLfloat angle, GLfloat x, GLfloat y, GLfloat z); +GL_API void GL_APIENTRY glScalef (GLfloat x, GLfloat y, GLfloat z); +GL_API void GL_APIENTRY glTexEnvf (GLenum target, GLenum pname, GLfloat param); +GL_API void GL_APIENTRY glTexEnvfv (GLenum target, GLenum pname, const GLfloat *params); +GL_API void GL_APIENTRY glTexParameterf (GLenum target, GLenum pname, GLfloat param); +GL_API void GL_APIENTRY glTexParameterfv (GLenum target, GLenum pname, const GLfloat *params); +GL_API void GL_APIENTRY glTranslatef (GLfloat x, GLfloat y, GLfloat z); +GL_API void GL_APIENTRY glActiveTexture (GLenum texture); +GL_API void GL_APIENTRY glAlphaFuncx (GLenum func, GLfixed ref); +GL_API void GL_APIENTRY glBindBuffer (GLenum target, GLuint buffer); +GL_API void GL_APIENTRY glBindTexture (GLenum target, GLuint texture); +GL_API void GL_APIENTRY glBlendFunc (GLenum sfactor, GLenum dfactor); +GL_API void GL_APIENTRY glBufferData (GLenum target, GLsizeiptr size, const void *data, GLenum usage); +GL_API void GL_APIENTRY glBufferSubData (GLenum target, GLintptr offset, GLsizeiptr size, const void *data); +GL_API void GL_APIENTRY glClear (GLbitfield mask); +GL_API void GL_APIENTRY glClearColorx (GLfixed red, GLfixed green, GLfixed blue, GLfixed alpha); +GL_API void GL_APIENTRY glClearDepthx (GLfixed depth); +GL_API void GL_APIENTRY glClearStencil (GLint s); +GL_API void GL_APIENTRY glClientActiveTexture (GLenum texture); +GL_API void GL_APIENTRY glClipPlanex (GLenum plane, const GLfixed *equation); +GL_API void GL_APIENTRY glColor4ub (GLubyte red, GLubyte green, GLubyte blue, GLubyte alpha); +GL_API void GL_APIENTRY glColor4x (GLfixed red, GLfixed green, GLfixed blue, GLfixed alpha); +GL_API void GL_APIENTRY glColorMask (GLboolean red, GLboolean green, GLboolean blue, GLboolean alpha); +GL_API void GL_APIENTRY glColorPointer (GLint size, GLenum type, GLsizei stride, const void *pointer); +GL_API void GL_APIENTRY glCompressedTexImage2D (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLsizei imageSize, const void *data); +GL_API void GL_APIENTRY glCompressedTexSubImage2D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum 
format, GLsizei imageSize, const void *data); +GL_API void GL_APIENTRY glCopyTexImage2D (GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLsizei height, GLint border); +GL_API void GL_APIENTRY glCopyTexSubImage2D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height); +GL_API void GL_APIENTRY glCullFace (GLenum mode); +GL_API void GL_APIENTRY glDeleteBuffers (GLsizei n, const GLuint *buffers); +GL_API void GL_APIENTRY glDeleteTextures (GLsizei n, const GLuint *textures); +GL_API void GL_APIENTRY glDepthFunc (GLenum func); +GL_API void GL_APIENTRY glDepthMask (GLboolean flag); +GL_API void GL_APIENTRY glDepthRangex (GLfixed n, GLfixed f); +GL_API void GL_APIENTRY glDisable (GLenum cap); +GL_API void GL_APIENTRY glDisableClientState (GLenum array); +GL_API void GL_APIENTRY glDrawArrays (GLenum mode, GLint first, GLsizei count); +GL_API void GL_APIENTRY glDrawElements (GLenum mode, GLsizei count, GLenum type, const void *indices); +GL_API void GL_APIENTRY glEnable (GLenum cap); +GL_API void GL_APIENTRY glEnableClientState (GLenum array); +GL_API void GL_APIENTRY glFinish (void); +GL_API void GL_APIENTRY glFlush (void); +GL_API void GL_APIENTRY glFogx (GLenum pname, GLfixed param); +GL_API void GL_APIENTRY glFogxv (GLenum pname, const GLfixed *param); +GL_API void GL_APIENTRY glFrontFace (GLenum mode); +GL_API void GL_APIENTRY glFrustumx (GLfixed l, GLfixed r, GLfixed b, GLfixed t, GLfixed n, GLfixed f); +GL_API void GL_APIENTRY glGetBooleanv (GLenum pname, GLboolean *data); +GL_API void GL_APIENTRY glGetBufferParameteriv (GLenum target, GLenum pname, GLint *params); +GL_API void GL_APIENTRY glGetClipPlanex (GLenum plane, GLfixed *equation); +GL_API void GL_APIENTRY glGenBuffers (GLsizei n, GLuint *buffers); +GL_API void GL_APIENTRY glGenTextures (GLsizei n, GLuint *textures); +GL_API GLenum GL_APIENTRY glGetError (void); +GL_API void GL_APIENTRY glGetFixedv (GLenum pname, GLfixed *params); +GL_API void GL_APIENTRY glGetIntegerv (GLenum pname, GLint *data); +GL_API void GL_APIENTRY glGetLightxv (GLenum light, GLenum pname, GLfixed *params); +GL_API void GL_APIENTRY glGetMaterialxv (GLenum face, GLenum pname, GLfixed *params); +GL_API void GL_APIENTRY glGetPointerv (GLenum pname, void **params); +GL_API const GLubyte *GL_APIENTRY glGetString (GLenum name); +GL_API void GL_APIENTRY glGetTexEnviv (GLenum target, GLenum pname, GLint *params); +GL_API void GL_APIENTRY glGetTexEnvxv (GLenum target, GLenum pname, GLfixed *params); +GL_API void GL_APIENTRY glGetTexParameteriv (GLenum target, GLenum pname, GLint *params); +GL_API void GL_APIENTRY glGetTexParameterxv (GLenum target, GLenum pname, GLfixed *params); +GL_API void GL_APIENTRY glHint (GLenum target, GLenum mode); +GL_API GLboolean GL_APIENTRY glIsBuffer (GLuint buffer); +GL_API GLboolean GL_APIENTRY glIsEnabled (GLenum cap); +GL_API GLboolean GL_APIENTRY glIsTexture (GLuint texture); +GL_API void GL_APIENTRY glLightModelx (GLenum pname, GLfixed param); +GL_API void GL_APIENTRY glLightModelxv (GLenum pname, const GLfixed *param); +GL_API void GL_APIENTRY glLightx (GLenum light, GLenum pname, GLfixed param); +GL_API void GL_APIENTRY glLightxv (GLenum light, GLenum pname, const GLfixed *params); +GL_API void GL_APIENTRY glLineWidthx (GLfixed width); +GL_API void GL_APIENTRY glLoadIdentity (void); +GL_API void GL_APIENTRY glLoadMatrixx (const GLfixed *m); +GL_API void GL_APIENTRY glLogicOp (GLenum opcode); +GL_API void GL_APIENTRY glMaterialx (GLenum face, 
GLenum pname, GLfixed param); +GL_API void GL_APIENTRY glMaterialxv (GLenum face, GLenum pname, const GLfixed *param); +GL_API void GL_APIENTRY glMatrixMode (GLenum mode); +GL_API void GL_APIENTRY glMultMatrixx (const GLfixed *m); +GL_API void GL_APIENTRY glMultiTexCoord4x (GLenum texture, GLfixed s, GLfixed t, GLfixed r, GLfixed q); +GL_API void GL_APIENTRY glNormal3x (GLfixed nx, GLfixed ny, GLfixed nz); +GL_API void GL_APIENTRY glNormalPointer (GLenum type, GLsizei stride, const void *pointer); +GL_API void GL_APIENTRY glOrthox (GLfixed l, GLfixed r, GLfixed b, GLfixed t, GLfixed n, GLfixed f); +GL_API void GL_APIENTRY glPixelStorei (GLenum pname, GLint param); +GL_API void GL_APIENTRY glPointParameterx (GLenum pname, GLfixed param); +GL_API void GL_APIENTRY glPointParameterxv (GLenum pname, const GLfixed *params); +GL_API void GL_APIENTRY glPointSizex (GLfixed size); +GL_API void GL_APIENTRY glPolygonOffsetx (GLfixed factor, GLfixed units); +GL_API void GL_APIENTRY glPopMatrix (void); +GL_API void GL_APIENTRY glPushMatrix (void); +GL_API void GL_APIENTRY glReadPixels (GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, void *pixels); +GL_API void GL_APIENTRY glRotatex (GLfixed angle, GLfixed x, GLfixed y, GLfixed z); +GL_API void GL_APIENTRY glSampleCoverage (GLfloat value, GLboolean invert); +GL_API void GL_APIENTRY glSampleCoveragex (GLclampx value, GLboolean invert); +GL_API void GL_APIENTRY glScalex (GLfixed x, GLfixed y, GLfixed z); +GL_API void GL_APIENTRY glScissor (GLint x, GLint y, GLsizei width, GLsizei height); +GL_API void GL_APIENTRY glShadeModel (GLenum mode); +GL_API void GL_APIENTRY glStencilFunc (GLenum func, GLint ref, GLuint mask); +GL_API void GL_APIENTRY glStencilMask (GLuint mask); +GL_API void GL_APIENTRY glStencilOp (GLenum fail, GLenum zfail, GLenum zpass); +GL_API void GL_APIENTRY glTexCoordPointer (GLint size, GLenum type, GLsizei stride, const void *pointer); +GL_API void GL_APIENTRY glTexEnvi (GLenum target, GLenum pname, GLint param); +GL_API void GL_APIENTRY glTexEnvx (GLenum target, GLenum pname, GLfixed param); +GL_API void GL_APIENTRY glTexEnviv (GLenum target, GLenum pname, const GLint *params); +GL_API void GL_APIENTRY glTexEnvxv (GLenum target, GLenum pname, const GLfixed *params); +GL_API void GL_APIENTRY glTexImage2D (GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, const void *pixels); +GL_API void GL_APIENTRY glTexParameteri (GLenum target, GLenum pname, GLint param); +GL_API void GL_APIENTRY glTexParameterx (GLenum target, GLenum pname, GLfixed param); +GL_API void GL_APIENTRY glTexParameteriv (GLenum target, GLenum pname, const GLint *params); +GL_API void GL_APIENTRY glTexParameterxv (GLenum target, GLenum pname, const GLfixed *params); +GL_API void GL_APIENTRY glTexSubImage2D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, const void *pixels); +GL_API void GL_APIENTRY glTranslatex (GLfixed x, GLfixed y, GLfixed z); +GL_API void GL_APIENTRY glVertexPointer (GLint size, GLenum type, GLsizei stride, const void *pointer); +GL_API void GL_APIENTRY glViewport (GLint x, GLint y, GLsizei width, GLsizei height); +#endif /* GL_VERSION_ES_CM_1_0 */ + +#ifndef GL_OES_compressed_paletted_texture +#define GL_OES_compressed_paletted_texture 1 +#define GL_PALETTE4_RGB8_OES 0x8B90 +#define GL_PALETTE4_RGBA8_OES 0x8B91 +#define GL_PALETTE4_R5_G6_B5_OES 0x8B92 +#define GL_PALETTE4_RGBA4_OES 
0x8B93 +#define GL_PALETTE4_RGB5_A1_OES 0x8B94 +#define GL_PALETTE8_RGB8_OES 0x8B95 +#define GL_PALETTE8_RGBA8_OES 0x8B96 +#define GL_PALETTE8_R5_G6_B5_OES 0x8B97 +#define GL_PALETTE8_RGBA4_OES 0x8B98 +#define GL_PALETTE8_RGB5_A1_OES 0x8B99 +#endif /* GL_OES_compressed_paletted_texture */ + +#ifndef GL_OES_point_size_array +#define GL_OES_point_size_array 1 +#define GL_POINT_SIZE_ARRAY_OES 0x8B9C +#define GL_POINT_SIZE_ARRAY_TYPE_OES 0x898A +#define GL_POINT_SIZE_ARRAY_STRIDE_OES 0x898B +#define GL_POINT_SIZE_ARRAY_POINTER_OES 0x898C +#define GL_POINT_SIZE_ARRAY_BUFFER_BINDING_OES 0x8B9F +GL_API void GL_APIENTRY glPointSizePointerOES (GLenum type, GLsizei stride, const void *pointer); +#endif /* GL_OES_point_size_array */ + +#ifndef GL_OES_point_sprite +#define GL_OES_point_sprite 1 +#define GL_POINT_SPRITE_OES 0x8861 +#define GL_COORD_REPLACE_OES 0x8862 +#endif /* GL_OES_point_sprite */ + +#ifndef GL_OES_read_format +#define GL_OES_read_format 1 +#define GL_IMPLEMENTATION_COLOR_READ_TYPE_OES 0x8B9A +#define GL_IMPLEMENTATION_COLOR_READ_FORMAT_OES 0x8B9B +#endif /* GL_OES_read_format */ + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/sources/khronos/GLES/glext.h b/sources/khronos/GLES/glext.h new file mode 100755 index 00000000..1a150e34 --- /dev/null +++ b/sources/khronos/GLES/glext.h @@ -0,0 +1,948 @@ +#ifndef __glext_h_ +#define __glext_h_ 1 + +#ifdef __cplusplus +extern "C" { +#endif + +/* +** Copyright (c) 2013-2017 The Khronos Group Inc. +** +** Permission is hereby granted, free of charge, to any person obtaining a +** copy of this software and/or associated documentation files (the +** "Materials"), to deal in the Materials without restriction, including +** without limitation the rights to use, copy, modify, merge, publish, +** distribute, sublicense, and/or sell copies of the Materials, and to +** permit persons to whom the Materials are furnished to do so, subject to +** the following conditions: +** +** The above copyright notice and this permission notice shall be included +** in all copies or substantial portions of the Materials. +** +** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +** MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS. +*/ +/* +** This header is generated from the Khronos OpenGL / OpenGL ES XML +** API Registry. 
The current version of the Registry, generator scripts +** used to make the header, and the header can be found at +** https://github.com/KhronosGroup/OpenGL-Registry +*/ + +#ifndef GL_APIENTRYP +#define GL_APIENTRYP GL_APIENTRY* +#endif + +/* Generated on date 20170613 */ + +/* Generated C header for: + * API: gles1 + * Profile: common + * Versions considered: .* + * Versions emitted: _nomatch_^ + * Default extensions included: gles1 + * Additional extensions included: _nomatch_^ + * Extensions removed: ^(GL_OES_read_format|GL_OES_compressed_paletted_texture|GL_OES_point_size_array|GL_OES_point_sprite)$ + */ + +#ifndef GL_OES_EGL_image +#define GL_OES_EGL_image 1 +typedef void *GLeglImageOES; +typedef void (GL_APIENTRYP PFNGLEGLIMAGETARGETTEXTURE2DOESPROC) (GLenum target, GLeglImageOES image); +typedef void (GL_APIENTRYP PFNGLEGLIMAGETARGETRENDERBUFFERSTORAGEOESPROC) (GLenum target, GLeglImageOES image); +#ifdef GL_GLEXT_PROTOTYPES +GL_API void GL_APIENTRY glEGLImageTargetTexture2DOES (GLenum target, GLeglImageOES image); +GL_API void GL_APIENTRY glEGLImageTargetRenderbufferStorageOES (GLenum target, GLeglImageOES image); +#endif +#endif /* GL_OES_EGL_image */ + +#ifndef GL_OES_EGL_image_external +#define GL_OES_EGL_image_external 1 +#define GL_TEXTURE_EXTERNAL_OES 0x8D65 +#define GL_TEXTURE_BINDING_EXTERNAL_OES 0x8D67 +#define GL_REQUIRED_TEXTURE_IMAGE_UNITS_OES 0x8D68 +#endif /* GL_OES_EGL_image_external */ + +#ifndef GL_OES_blend_equation_separate +#define GL_OES_blend_equation_separate 1 +#define GL_BLEND_EQUATION_RGB_OES 0x8009 +#define GL_BLEND_EQUATION_ALPHA_OES 0x883D +typedef void (GL_APIENTRYP PFNGLBLENDEQUATIONSEPARATEOESPROC) (GLenum modeRGB, GLenum modeAlpha); +#ifdef GL_GLEXT_PROTOTYPES +GL_API void GL_APIENTRY glBlendEquationSeparateOES (GLenum modeRGB, GLenum modeAlpha); +#endif +#endif /* GL_OES_blend_equation_separate */ + +#ifndef GL_OES_blend_func_separate +#define GL_OES_blend_func_separate 1 +#define GL_BLEND_DST_RGB_OES 0x80C8 +#define GL_BLEND_SRC_RGB_OES 0x80C9 +#define GL_BLEND_DST_ALPHA_OES 0x80CA +#define GL_BLEND_SRC_ALPHA_OES 0x80CB +typedef void (GL_APIENTRYP PFNGLBLENDFUNCSEPARATEOESPROC) (GLenum srcRGB, GLenum dstRGB, GLenum srcAlpha, GLenum dstAlpha); +#ifdef GL_GLEXT_PROTOTYPES +GL_API void GL_APIENTRY glBlendFuncSeparateOES (GLenum srcRGB, GLenum dstRGB, GLenum srcAlpha, GLenum dstAlpha); +#endif +#endif /* GL_OES_blend_func_separate */ + +#ifndef GL_OES_blend_subtract +#define GL_OES_blend_subtract 1 +#define GL_BLEND_EQUATION_OES 0x8009 +#define GL_FUNC_ADD_OES 0x8006 +#define GL_FUNC_SUBTRACT_OES 0x800A +#define GL_FUNC_REVERSE_SUBTRACT_OES 0x800B +typedef void (GL_APIENTRYP PFNGLBLENDEQUATIONOESPROC) (GLenum mode); +#ifdef GL_GLEXT_PROTOTYPES +GL_API void GL_APIENTRY glBlendEquationOES (GLenum mode); +#endif +#endif /* GL_OES_blend_subtract */ + +#ifndef GL_OES_byte_coordinates +#define GL_OES_byte_coordinates 1 +#endif /* GL_OES_byte_coordinates */ + +#ifndef GL_OES_compressed_ETC1_RGB8_sub_texture +#define GL_OES_compressed_ETC1_RGB8_sub_texture 1 +#endif /* GL_OES_compressed_ETC1_RGB8_sub_texture */ + +#ifndef GL_OES_compressed_ETC1_RGB8_texture +#define GL_OES_compressed_ETC1_RGB8_texture 1 +#define GL_ETC1_RGB8_OES 0x8D64 +#endif /* GL_OES_compressed_ETC1_RGB8_texture */ + +#ifndef GL_OES_depth24 +#define GL_OES_depth24 1 +#define GL_DEPTH_COMPONENT24_OES 0x81A6 +#endif /* GL_OES_depth24 */ + +#ifndef GL_OES_depth32 +#define GL_OES_depth32 1 +#define GL_DEPTH_COMPONENT32_OES 0x81A7 +#endif /* GL_OES_depth32 */ + +#ifndef GL_OES_draw_texture 
+#define GL_OES_draw_texture 1 +#define GL_TEXTURE_CROP_RECT_OES 0x8B9D +typedef void (GL_APIENTRYP PFNGLDRAWTEXSOESPROC) (GLshort x, GLshort y, GLshort z, GLshort width, GLshort height); +typedef void (GL_APIENTRYP PFNGLDRAWTEXIOESPROC) (GLint x, GLint y, GLint z, GLint width, GLint height); +typedef void (GL_APIENTRYP PFNGLDRAWTEXXOESPROC) (GLfixed x, GLfixed y, GLfixed z, GLfixed width, GLfixed height); +typedef void (GL_APIENTRYP PFNGLDRAWTEXSVOESPROC) (const GLshort *coords); +typedef void (GL_APIENTRYP PFNGLDRAWTEXIVOESPROC) (const GLint *coords); +typedef void (GL_APIENTRYP PFNGLDRAWTEXXVOESPROC) (const GLfixed *coords); +typedef void (GL_APIENTRYP PFNGLDRAWTEXFOESPROC) (GLfloat x, GLfloat y, GLfloat z, GLfloat width, GLfloat height); +typedef void (GL_APIENTRYP PFNGLDRAWTEXFVOESPROC) (const GLfloat *coords); +#ifdef GL_GLEXT_PROTOTYPES +GL_API void GL_APIENTRY glDrawTexsOES (GLshort x, GLshort y, GLshort z, GLshort width, GLshort height); +GL_API void GL_APIENTRY glDrawTexiOES (GLint x, GLint y, GLint z, GLint width, GLint height); +GL_API void GL_APIENTRY glDrawTexxOES (GLfixed x, GLfixed y, GLfixed z, GLfixed width, GLfixed height); +GL_API void GL_APIENTRY glDrawTexsvOES (const GLshort *coords); +GL_API void GL_APIENTRY glDrawTexivOES (const GLint *coords); +GL_API void GL_APIENTRY glDrawTexxvOES (const GLfixed *coords); +GL_API void GL_APIENTRY glDrawTexfOES (GLfloat x, GLfloat y, GLfloat z, GLfloat width, GLfloat height); +GL_API void GL_APIENTRY glDrawTexfvOES (const GLfloat *coords); +#endif +#endif /* GL_OES_draw_texture */ + +#ifndef GL_OES_element_index_uint +#define GL_OES_element_index_uint 1 +#define GL_UNSIGNED_INT 0x1405 +#endif /* GL_OES_element_index_uint */ + +#ifndef GL_OES_extended_matrix_palette +#define GL_OES_extended_matrix_palette 1 +#endif /* GL_OES_extended_matrix_palette */ + +#ifndef GL_OES_fbo_render_mipmap +#define GL_OES_fbo_render_mipmap 1 +#endif /* GL_OES_fbo_render_mipmap */ + +#ifndef GL_OES_fixed_point +#define GL_OES_fixed_point 1 +#define GL_FIXED_OES 0x140C +typedef void (GL_APIENTRYP PFNGLALPHAFUNCXOESPROC) (GLenum func, GLfixed ref); +typedef void (GL_APIENTRYP PFNGLCLEARCOLORXOESPROC) (GLfixed red, GLfixed green, GLfixed blue, GLfixed alpha); +typedef void (GL_APIENTRYP PFNGLCLEARDEPTHXOESPROC) (GLfixed depth); +typedef void (GL_APIENTRYP PFNGLCLIPPLANEXOESPROC) (GLenum plane, const GLfixed *equation); +typedef void (GL_APIENTRYP PFNGLCOLOR4XOESPROC) (GLfixed red, GLfixed green, GLfixed blue, GLfixed alpha); +typedef void (GL_APIENTRYP PFNGLDEPTHRANGEXOESPROC) (GLfixed n, GLfixed f); +typedef void (GL_APIENTRYP PFNGLFOGXOESPROC) (GLenum pname, GLfixed param); +typedef void (GL_APIENTRYP PFNGLFOGXVOESPROC) (GLenum pname, const GLfixed *param); +typedef void (GL_APIENTRYP PFNGLFRUSTUMXOESPROC) (GLfixed l, GLfixed r, GLfixed b, GLfixed t, GLfixed n, GLfixed f); +typedef void (GL_APIENTRYP PFNGLGETCLIPPLANEXOESPROC) (GLenum plane, GLfixed *equation); +typedef void (GL_APIENTRYP PFNGLGETFIXEDVOESPROC) (GLenum pname, GLfixed *params); +typedef void (GL_APIENTRYP PFNGLGETTEXENVXVOESPROC) (GLenum target, GLenum pname, GLfixed *params); +typedef void (GL_APIENTRYP PFNGLGETTEXPARAMETERXVOESPROC) (GLenum target, GLenum pname, GLfixed *params); +typedef void (GL_APIENTRYP PFNGLLIGHTMODELXOESPROC) (GLenum pname, GLfixed param); +typedef void (GL_APIENTRYP PFNGLLIGHTMODELXVOESPROC) (GLenum pname, const GLfixed *param); +typedef void (GL_APIENTRYP PFNGLLIGHTXOESPROC) (GLenum light, GLenum pname, GLfixed param); +typedef void (GL_APIENTRYP 
PFNGLLIGHTXVOESPROC) (GLenum light, GLenum pname, const GLfixed *params); +typedef void (GL_APIENTRYP PFNGLLINEWIDTHXOESPROC) (GLfixed width); +typedef void (GL_APIENTRYP PFNGLLOADMATRIXXOESPROC) (const GLfixed *m); +typedef void (GL_APIENTRYP PFNGLMATERIALXOESPROC) (GLenum face, GLenum pname, GLfixed param); +typedef void (GL_APIENTRYP PFNGLMATERIALXVOESPROC) (GLenum face, GLenum pname, const GLfixed *param); +typedef void (GL_APIENTRYP PFNGLMULTMATRIXXOESPROC) (const GLfixed *m); +typedef void (GL_APIENTRYP PFNGLMULTITEXCOORD4XOESPROC) (GLenum texture, GLfixed s, GLfixed t, GLfixed r, GLfixed q); +typedef void (GL_APIENTRYP PFNGLNORMAL3XOESPROC) (GLfixed nx, GLfixed ny, GLfixed nz); +typedef void (GL_APIENTRYP PFNGLORTHOXOESPROC) (GLfixed l, GLfixed r, GLfixed b, GLfixed t, GLfixed n, GLfixed f); +typedef void (GL_APIENTRYP PFNGLPOINTPARAMETERXVOESPROC) (GLenum pname, const GLfixed *params); +typedef void (GL_APIENTRYP PFNGLPOINTSIZEXOESPROC) (GLfixed size); +typedef void (GL_APIENTRYP PFNGLPOLYGONOFFSETXOESPROC) (GLfixed factor, GLfixed units); +typedef void (GL_APIENTRYP PFNGLROTATEXOESPROC) (GLfixed angle, GLfixed x, GLfixed y, GLfixed z); +typedef void (GL_APIENTRYP PFNGLSCALEXOESPROC) (GLfixed x, GLfixed y, GLfixed z); +typedef void (GL_APIENTRYP PFNGLTEXENVXOESPROC) (GLenum target, GLenum pname, GLfixed param); +typedef void (GL_APIENTRYP PFNGLTEXENVXVOESPROC) (GLenum target, GLenum pname, const GLfixed *params); +typedef void (GL_APIENTRYP PFNGLTEXPARAMETERXOESPROC) (GLenum target, GLenum pname, GLfixed param); +typedef void (GL_APIENTRYP PFNGLTEXPARAMETERXVOESPROC) (GLenum target, GLenum pname, const GLfixed *params); +typedef void (GL_APIENTRYP PFNGLTRANSLATEXOESPROC) (GLfixed x, GLfixed y, GLfixed z); +typedef void (GL_APIENTRYP PFNGLGETLIGHTXVOESPROC) (GLenum light, GLenum pname, GLfixed *params); +typedef void (GL_APIENTRYP PFNGLGETMATERIALXVOESPROC) (GLenum face, GLenum pname, GLfixed *params); +typedef void (GL_APIENTRYP PFNGLPOINTPARAMETERXOESPROC) (GLenum pname, GLfixed param); +typedef void (GL_APIENTRYP PFNGLSAMPLECOVERAGEXOESPROC) (GLclampx value, GLboolean invert); +typedef void (GL_APIENTRYP PFNGLGETTEXGENXVOESPROC) (GLenum coord, GLenum pname, GLfixed *params); +typedef void (GL_APIENTRYP PFNGLTEXGENXOESPROC) (GLenum coord, GLenum pname, GLfixed param); +typedef void (GL_APIENTRYP PFNGLTEXGENXVOESPROC) (GLenum coord, GLenum pname, const GLfixed *params); +#ifdef GL_GLEXT_PROTOTYPES +GL_API void GL_APIENTRY glAlphaFuncxOES (GLenum func, GLfixed ref); +GL_API void GL_APIENTRY glClearColorxOES (GLfixed red, GLfixed green, GLfixed blue, GLfixed alpha); +GL_API void GL_APIENTRY glClearDepthxOES (GLfixed depth); +GL_API void GL_APIENTRY glClipPlanexOES (GLenum plane, const GLfixed *equation); +GL_API void GL_APIENTRY glColor4xOES (GLfixed red, GLfixed green, GLfixed blue, GLfixed alpha); +GL_API void GL_APIENTRY glDepthRangexOES (GLfixed n, GLfixed f); +GL_API void GL_APIENTRY glFogxOES (GLenum pname, GLfixed param); +GL_API void GL_APIENTRY glFogxvOES (GLenum pname, const GLfixed *param); +GL_API void GL_APIENTRY glFrustumxOES (GLfixed l, GLfixed r, GLfixed b, GLfixed t, GLfixed n, GLfixed f); +GL_API void GL_APIENTRY glGetClipPlanexOES (GLenum plane, GLfixed *equation); +GL_API void GL_APIENTRY glGetFixedvOES (GLenum pname, GLfixed *params); +GL_API void GL_APIENTRY glGetTexEnvxvOES (GLenum target, GLenum pname, GLfixed *params); +GL_API void GL_APIENTRY glGetTexParameterxvOES (GLenum target, GLenum pname, GLfixed *params); +GL_API void GL_APIENTRY glLightModelxOES 
(GLenum pname, GLfixed param); +GL_API void GL_APIENTRY glLightModelxvOES (GLenum pname, const GLfixed *param); +GL_API void GL_APIENTRY glLightxOES (GLenum light, GLenum pname, GLfixed param); +GL_API void GL_APIENTRY glLightxvOES (GLenum light, GLenum pname, const GLfixed *params); +GL_API void GL_APIENTRY glLineWidthxOES (GLfixed width); +GL_API void GL_APIENTRY glLoadMatrixxOES (const GLfixed *m); +GL_API void GL_APIENTRY glMaterialxOES (GLenum face, GLenum pname, GLfixed param); +GL_API void GL_APIENTRY glMaterialxvOES (GLenum face, GLenum pname, const GLfixed *param); +GL_API void GL_APIENTRY glMultMatrixxOES (const GLfixed *m); +GL_API void GL_APIENTRY glMultiTexCoord4xOES (GLenum texture, GLfixed s, GLfixed t, GLfixed r, GLfixed q); +GL_API void GL_APIENTRY glNormal3xOES (GLfixed nx, GLfixed ny, GLfixed nz); +GL_API void GL_APIENTRY glOrthoxOES (GLfixed l, GLfixed r, GLfixed b, GLfixed t, GLfixed n, GLfixed f); +GL_API void GL_APIENTRY glPointParameterxvOES (GLenum pname, const GLfixed *params); +GL_API void GL_APIENTRY glPointSizexOES (GLfixed size); +GL_API void GL_APIENTRY glPolygonOffsetxOES (GLfixed factor, GLfixed units); +GL_API void GL_APIENTRY glRotatexOES (GLfixed angle, GLfixed x, GLfixed y, GLfixed z); +GL_API void GL_APIENTRY glScalexOES (GLfixed x, GLfixed y, GLfixed z); +GL_API void GL_APIENTRY glTexEnvxOES (GLenum target, GLenum pname, GLfixed param); +GL_API void GL_APIENTRY glTexEnvxvOES (GLenum target, GLenum pname, const GLfixed *params); +GL_API void GL_APIENTRY glTexParameterxOES (GLenum target, GLenum pname, GLfixed param); +GL_API void GL_APIENTRY glTexParameterxvOES (GLenum target, GLenum pname, const GLfixed *params); +GL_API void GL_APIENTRY glTranslatexOES (GLfixed x, GLfixed y, GLfixed z); +GL_API void GL_APIENTRY glGetLightxvOES (GLenum light, GLenum pname, GLfixed *params); +GL_API void GL_APIENTRY glGetMaterialxvOES (GLenum face, GLenum pname, GLfixed *params); +GL_API void GL_APIENTRY glPointParameterxOES (GLenum pname, GLfixed param); +GL_API void GL_APIENTRY glSampleCoveragexOES (GLclampx value, GLboolean invert); +GL_API void GL_APIENTRY glGetTexGenxvOES (GLenum coord, GLenum pname, GLfixed *params); +GL_API void GL_APIENTRY glTexGenxOES (GLenum coord, GLenum pname, GLfixed param); +GL_API void GL_APIENTRY glTexGenxvOES (GLenum coord, GLenum pname, const GLfixed *params); +#endif +#endif /* GL_OES_fixed_point */ + +#ifndef GL_OES_framebuffer_object +#define GL_OES_framebuffer_object 1 +#define GL_NONE_OES 0 +#define GL_FRAMEBUFFER_OES 0x8D40 +#define GL_RENDERBUFFER_OES 0x8D41 +#define GL_RGBA4_OES 0x8056 +#define GL_RGB5_A1_OES 0x8057 +#define GL_RGB565_OES 0x8D62 +#define GL_DEPTH_COMPONENT16_OES 0x81A5 +#define GL_RENDERBUFFER_WIDTH_OES 0x8D42 +#define GL_RENDERBUFFER_HEIGHT_OES 0x8D43 +#define GL_RENDERBUFFER_INTERNAL_FORMAT_OES 0x8D44 +#define GL_RENDERBUFFER_RED_SIZE_OES 0x8D50 +#define GL_RENDERBUFFER_GREEN_SIZE_OES 0x8D51 +#define GL_RENDERBUFFER_BLUE_SIZE_OES 0x8D52 +#define GL_RENDERBUFFER_ALPHA_SIZE_OES 0x8D53 +#define GL_RENDERBUFFER_DEPTH_SIZE_OES 0x8D54 +#define GL_RENDERBUFFER_STENCIL_SIZE_OES 0x8D55 +#define GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE_OES 0x8CD0 +#define GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME_OES 0x8CD1 +#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL_OES 0x8CD2 +#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE_OES 0x8CD3 +#define GL_COLOR_ATTACHMENT0_OES 0x8CE0 +#define GL_DEPTH_ATTACHMENT_OES 0x8D00 +#define GL_STENCIL_ATTACHMENT_OES 0x8D20 +#define GL_FRAMEBUFFER_COMPLETE_OES 0x8CD5 +#define 
GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT_OES 0x8CD6 +#define GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT_OES 0x8CD7 +#define GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS_OES 0x8CD9 +#define GL_FRAMEBUFFER_INCOMPLETE_FORMATS_OES 0x8CDA +#define GL_FRAMEBUFFER_UNSUPPORTED_OES 0x8CDD +#define GL_FRAMEBUFFER_BINDING_OES 0x8CA6 +#define GL_RENDERBUFFER_BINDING_OES 0x8CA7 +#define GL_MAX_RENDERBUFFER_SIZE_OES 0x84E8 +#define GL_INVALID_FRAMEBUFFER_OPERATION_OES 0x0506 +typedef GLboolean (GL_APIENTRYP PFNGLISRENDERBUFFEROESPROC) (GLuint renderbuffer); +typedef void (GL_APIENTRYP PFNGLBINDRENDERBUFFEROESPROC) (GLenum target, GLuint renderbuffer); +typedef void (GL_APIENTRYP PFNGLDELETERENDERBUFFERSOESPROC) (GLsizei n, const GLuint *renderbuffers); +typedef void (GL_APIENTRYP PFNGLGENRENDERBUFFERSOESPROC) (GLsizei n, GLuint *renderbuffers); +typedef void (GL_APIENTRYP PFNGLRENDERBUFFERSTORAGEOESPROC) (GLenum target, GLenum internalformat, GLsizei width, GLsizei height); +typedef void (GL_APIENTRYP PFNGLGETRENDERBUFFERPARAMETERIVOESPROC) (GLenum target, GLenum pname, GLint *params); +typedef GLboolean (GL_APIENTRYP PFNGLISFRAMEBUFFEROESPROC) (GLuint framebuffer); +typedef void (GL_APIENTRYP PFNGLBINDFRAMEBUFFEROESPROC) (GLenum target, GLuint framebuffer); +typedef void (GL_APIENTRYP PFNGLDELETEFRAMEBUFFERSOESPROC) (GLsizei n, const GLuint *framebuffers); +typedef void (GL_APIENTRYP PFNGLGENFRAMEBUFFERSOESPROC) (GLsizei n, GLuint *framebuffers); +typedef GLenum (GL_APIENTRYP PFNGLCHECKFRAMEBUFFERSTATUSOESPROC) (GLenum target); +typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERRENDERBUFFEROESPROC) (GLenum target, GLenum attachment, GLenum renderbuffertarget, GLuint renderbuffer); +typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERTEXTURE2DOESPROC) (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level); +typedef void (GL_APIENTRYP PFNGLGETFRAMEBUFFERATTACHMENTPARAMETERIVOESPROC) (GLenum target, GLenum attachment, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGENERATEMIPMAPOESPROC) (GLenum target); +#ifdef GL_GLEXT_PROTOTYPES +GL_API GLboolean GL_APIENTRY glIsRenderbufferOES (GLuint renderbuffer); +GL_API void GL_APIENTRY glBindRenderbufferOES (GLenum target, GLuint renderbuffer); +GL_API void GL_APIENTRY glDeleteRenderbuffersOES (GLsizei n, const GLuint *renderbuffers); +GL_API void GL_APIENTRY glGenRenderbuffersOES (GLsizei n, GLuint *renderbuffers); +GL_API void GL_APIENTRY glRenderbufferStorageOES (GLenum target, GLenum internalformat, GLsizei width, GLsizei height); +GL_API void GL_APIENTRY glGetRenderbufferParameterivOES (GLenum target, GLenum pname, GLint *params); +GL_API GLboolean GL_APIENTRY glIsFramebufferOES (GLuint framebuffer); +GL_API void GL_APIENTRY glBindFramebufferOES (GLenum target, GLuint framebuffer); +GL_API void GL_APIENTRY glDeleteFramebuffersOES (GLsizei n, const GLuint *framebuffers); +GL_API void GL_APIENTRY glGenFramebuffersOES (GLsizei n, GLuint *framebuffers); +GL_API GLenum GL_APIENTRY glCheckFramebufferStatusOES (GLenum target); +GL_API void GL_APIENTRY glFramebufferRenderbufferOES (GLenum target, GLenum attachment, GLenum renderbuffertarget, GLuint renderbuffer); +GL_API void GL_APIENTRY glFramebufferTexture2DOES (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level); +GL_API void GL_APIENTRY glGetFramebufferAttachmentParameterivOES (GLenum target, GLenum attachment, GLenum pname, GLint *params); +GL_API void GL_APIENTRY glGenerateMipmapOES (GLenum target); +#endif +#endif /* GL_OES_framebuffer_object */ + +#ifndef 
GL_OES_mapbuffer +#define GL_OES_mapbuffer 1 +#define GL_WRITE_ONLY_OES 0x88B9 +#define GL_BUFFER_ACCESS_OES 0x88BB +#define GL_BUFFER_MAPPED_OES 0x88BC +#define GL_BUFFER_MAP_POINTER_OES 0x88BD +typedef void *(GL_APIENTRYP PFNGLMAPBUFFEROESPROC) (GLenum target, GLenum access); +typedef GLboolean (GL_APIENTRYP PFNGLUNMAPBUFFEROESPROC) (GLenum target); +typedef void (GL_APIENTRYP PFNGLGETBUFFERPOINTERVOESPROC) (GLenum target, GLenum pname, void **params); +#ifdef GL_GLEXT_PROTOTYPES +GL_API void *GL_APIENTRY glMapBufferOES (GLenum target, GLenum access); +GL_API GLboolean GL_APIENTRY glUnmapBufferOES (GLenum target); +GL_API void GL_APIENTRY glGetBufferPointervOES (GLenum target, GLenum pname, void **params); +#endif +#endif /* GL_OES_mapbuffer */ + +#ifndef GL_OES_matrix_get +#define GL_OES_matrix_get 1 +#define GL_MODELVIEW_MATRIX_FLOAT_AS_INT_BITS_OES 0x898D +#define GL_PROJECTION_MATRIX_FLOAT_AS_INT_BITS_OES 0x898E +#define GL_TEXTURE_MATRIX_FLOAT_AS_INT_BITS_OES 0x898F +#endif /* GL_OES_matrix_get */ + +#ifndef GL_OES_matrix_palette +#define GL_OES_matrix_palette 1 +#define GL_MAX_VERTEX_UNITS_OES 0x86A4 +#define GL_MAX_PALETTE_MATRICES_OES 0x8842 +#define GL_MATRIX_PALETTE_OES 0x8840 +#define GL_MATRIX_INDEX_ARRAY_OES 0x8844 +#define GL_WEIGHT_ARRAY_OES 0x86AD +#define GL_CURRENT_PALETTE_MATRIX_OES 0x8843 +#define GL_MATRIX_INDEX_ARRAY_SIZE_OES 0x8846 +#define GL_MATRIX_INDEX_ARRAY_TYPE_OES 0x8847 +#define GL_MATRIX_INDEX_ARRAY_STRIDE_OES 0x8848 +#define GL_MATRIX_INDEX_ARRAY_POINTER_OES 0x8849 +#define GL_MATRIX_INDEX_ARRAY_BUFFER_BINDING_OES 0x8B9E +#define GL_WEIGHT_ARRAY_SIZE_OES 0x86AB +#define GL_WEIGHT_ARRAY_TYPE_OES 0x86A9 +#define GL_WEIGHT_ARRAY_STRIDE_OES 0x86AA +#define GL_WEIGHT_ARRAY_POINTER_OES 0x86AC +#define GL_WEIGHT_ARRAY_BUFFER_BINDING_OES 0x889E +typedef void (GL_APIENTRYP PFNGLCURRENTPALETTEMATRIXOESPROC) (GLuint matrixpaletteindex); +typedef void (GL_APIENTRYP PFNGLLOADPALETTEFROMMODELVIEWMATRIXOESPROC) (void); +typedef void (GL_APIENTRYP PFNGLMATRIXINDEXPOINTEROESPROC) (GLint size, GLenum type, GLsizei stride, const void *pointer); +typedef void (GL_APIENTRYP PFNGLWEIGHTPOINTEROESPROC) (GLint size, GLenum type, GLsizei stride, const void *pointer); +#ifdef GL_GLEXT_PROTOTYPES +GL_API void GL_APIENTRY glCurrentPaletteMatrixOES (GLuint matrixpaletteindex); +GL_API void GL_APIENTRY glLoadPaletteFromModelViewMatrixOES (void); +GL_API void GL_APIENTRY glMatrixIndexPointerOES (GLint size, GLenum type, GLsizei stride, const void *pointer); +GL_API void GL_APIENTRY glWeightPointerOES (GLint size, GLenum type, GLsizei stride, const void *pointer); +#endif +#endif /* GL_OES_matrix_palette */ + +#ifndef GL_OES_packed_depth_stencil +#define GL_OES_packed_depth_stencil 1 +#define GL_DEPTH_STENCIL_OES 0x84F9 +#define GL_UNSIGNED_INT_24_8_OES 0x84FA +#define GL_DEPTH24_STENCIL8_OES 0x88F0 +#endif /* GL_OES_packed_depth_stencil */ + +#ifndef GL_OES_query_matrix +#define GL_OES_query_matrix 1 +typedef GLbitfield (GL_APIENTRYP PFNGLQUERYMATRIXXOESPROC) (GLfixed *mantissa, GLint *exponent); +#ifdef GL_GLEXT_PROTOTYPES +GL_API GLbitfield GL_APIENTRY glQueryMatrixxOES (GLfixed *mantissa, GLint *exponent); +#endif +#endif /* GL_OES_query_matrix */ + +#ifndef GL_OES_required_internalformat +#define GL_OES_required_internalformat 1 +#define GL_ALPHA8_OES 0x803C +#define GL_LUMINANCE4_ALPHA4_OES 0x8043 +#define GL_LUMINANCE8_ALPHA8_OES 0x8045 +#define GL_LUMINANCE8_OES 0x8040 +#define GL_RGB8_OES 0x8051 +#define GL_RGBA8_OES 0x8058 +#define GL_RGB10_EXT 0x8052 +#define GL_RGB10_A2_EXT 
0x8059 +#endif /* GL_OES_required_internalformat */ + +#ifndef GL_OES_rgb8_rgba8 +#define GL_OES_rgb8_rgba8 1 +#endif /* GL_OES_rgb8_rgba8 */ + +#ifndef GL_OES_single_precision +#define GL_OES_single_precision 1 +typedef void (GL_APIENTRYP PFNGLCLEARDEPTHFOESPROC) (GLclampf depth); +typedef void (GL_APIENTRYP PFNGLCLIPPLANEFOESPROC) (GLenum plane, const GLfloat *equation); +typedef void (GL_APIENTRYP PFNGLDEPTHRANGEFOESPROC) (GLclampf n, GLclampf f); +typedef void (GL_APIENTRYP PFNGLFRUSTUMFOESPROC) (GLfloat l, GLfloat r, GLfloat b, GLfloat t, GLfloat n, GLfloat f); +typedef void (GL_APIENTRYP PFNGLGETCLIPPLANEFOESPROC) (GLenum plane, GLfloat *equation); +typedef void (GL_APIENTRYP PFNGLORTHOFOESPROC) (GLfloat l, GLfloat r, GLfloat b, GLfloat t, GLfloat n, GLfloat f); +#ifdef GL_GLEXT_PROTOTYPES +GL_API void GL_APIENTRY glClearDepthfOES (GLclampf depth); +GL_API void GL_APIENTRY glClipPlanefOES (GLenum plane, const GLfloat *equation); +GL_API void GL_APIENTRY glDepthRangefOES (GLclampf n, GLclampf f); +GL_API void GL_APIENTRY glFrustumfOES (GLfloat l, GLfloat r, GLfloat b, GLfloat t, GLfloat n, GLfloat f); +GL_API void GL_APIENTRY glGetClipPlanefOES (GLenum plane, GLfloat *equation); +GL_API void GL_APIENTRY glOrthofOES (GLfloat l, GLfloat r, GLfloat b, GLfloat t, GLfloat n, GLfloat f); +#endif +#endif /* GL_OES_single_precision */ + +#ifndef GL_OES_stencil1 +#define GL_OES_stencil1 1 +#define GL_STENCIL_INDEX1_OES 0x8D46 +#endif /* GL_OES_stencil1 */ + +#ifndef GL_OES_stencil4 +#define GL_OES_stencil4 1 +#define GL_STENCIL_INDEX4_OES 0x8D47 +#endif /* GL_OES_stencil4 */ + +#ifndef GL_OES_stencil8 +#define GL_OES_stencil8 1 +#define GL_STENCIL_INDEX8_OES 0x8D48 +#endif /* GL_OES_stencil8 */ + +#ifndef GL_OES_stencil_wrap +#define GL_OES_stencil_wrap 1 +#define GL_INCR_WRAP_OES 0x8507 +#define GL_DECR_WRAP_OES 0x8508 +#endif /* GL_OES_stencil_wrap */ + +#ifndef GL_OES_texture_cube_map +#define GL_OES_texture_cube_map 1 +#define GL_NORMAL_MAP_OES 0x8511 +#define GL_REFLECTION_MAP_OES 0x8512 +#define GL_TEXTURE_CUBE_MAP_OES 0x8513 +#define GL_TEXTURE_BINDING_CUBE_MAP_OES 0x8514 +#define GL_TEXTURE_CUBE_MAP_POSITIVE_X_OES 0x8515 +#define GL_TEXTURE_CUBE_MAP_NEGATIVE_X_OES 0x8516 +#define GL_TEXTURE_CUBE_MAP_POSITIVE_Y_OES 0x8517 +#define GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_OES 0x8518 +#define GL_TEXTURE_CUBE_MAP_POSITIVE_Z_OES 0x8519 +#define GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_OES 0x851A +#define GL_MAX_CUBE_MAP_TEXTURE_SIZE_OES 0x851C +#define GL_TEXTURE_GEN_MODE_OES 0x2500 +#define GL_TEXTURE_GEN_STR_OES 0x8D60 +typedef void (GL_APIENTRYP PFNGLTEXGENFOESPROC) (GLenum coord, GLenum pname, GLfloat param); +typedef void (GL_APIENTRYP PFNGLTEXGENFVOESPROC) (GLenum coord, GLenum pname, const GLfloat *params); +typedef void (GL_APIENTRYP PFNGLTEXGENIOESPROC) (GLenum coord, GLenum pname, GLint param); +typedef void (GL_APIENTRYP PFNGLTEXGENIVOESPROC) (GLenum coord, GLenum pname, const GLint *params); +typedef void (GL_APIENTRYP PFNGLGETTEXGENFVOESPROC) (GLenum coord, GLenum pname, GLfloat *params); +typedef void (GL_APIENTRYP PFNGLGETTEXGENIVOESPROC) (GLenum coord, GLenum pname, GLint *params); +#ifdef GL_GLEXT_PROTOTYPES +GL_API void GL_APIENTRY glTexGenfOES (GLenum coord, GLenum pname, GLfloat param); +GL_API void GL_APIENTRY glTexGenfvOES (GLenum coord, GLenum pname, const GLfloat *params); +GL_API void GL_APIENTRY glTexGeniOES (GLenum coord, GLenum pname, GLint param); +GL_API void GL_APIENTRY glTexGenivOES (GLenum coord, GLenum pname, const GLint *params); +GL_API void GL_APIENTRY glGetTexGenfvOES 
(GLenum coord, GLenum pname, GLfloat *params); +GL_API void GL_APIENTRY glGetTexGenivOES (GLenum coord, GLenum pname, GLint *params); +#endif +#endif /* GL_OES_texture_cube_map */ + +#ifndef GL_OES_texture_env_crossbar +#define GL_OES_texture_env_crossbar 1 +#endif /* GL_OES_texture_env_crossbar */ + +#ifndef GL_OES_texture_mirrored_repeat +#define GL_OES_texture_mirrored_repeat 1 +#define GL_MIRRORED_REPEAT_OES 0x8370 +#endif /* GL_OES_texture_mirrored_repeat */ + +#ifndef GL_OES_vertex_array_object +#define GL_OES_vertex_array_object 1 +#define GL_VERTEX_ARRAY_BINDING_OES 0x85B5 +typedef void (GL_APIENTRYP PFNGLBINDVERTEXARRAYOESPROC) (GLuint array); +typedef void (GL_APIENTRYP PFNGLDELETEVERTEXARRAYSOESPROC) (GLsizei n, const GLuint *arrays); +typedef void (GL_APIENTRYP PFNGLGENVERTEXARRAYSOESPROC) (GLsizei n, GLuint *arrays); +typedef GLboolean (GL_APIENTRYP PFNGLISVERTEXARRAYOESPROC) (GLuint array); +#ifdef GL_GLEXT_PROTOTYPES +GL_API void GL_APIENTRY glBindVertexArrayOES (GLuint array); +GL_API void GL_APIENTRY glDeleteVertexArraysOES (GLsizei n, const GLuint *arrays); +GL_API void GL_APIENTRY glGenVertexArraysOES (GLsizei n, GLuint *arrays); +GL_API GLboolean GL_APIENTRY glIsVertexArrayOES (GLuint array); +#endif +#endif /* GL_OES_vertex_array_object */ + +#ifndef GL_AMD_compressed_3DC_texture +#define GL_AMD_compressed_3DC_texture 1 +#define GL_3DC_X_AMD 0x87F9 +#define GL_3DC_XY_AMD 0x87FA +#endif /* GL_AMD_compressed_3DC_texture */ + +#ifndef GL_AMD_compressed_ATC_texture +#define GL_AMD_compressed_ATC_texture 1 +#define GL_ATC_RGB_AMD 0x8C92 +#define GL_ATC_RGBA_EXPLICIT_ALPHA_AMD 0x8C93 +#define GL_ATC_RGBA_INTERPOLATED_ALPHA_AMD 0x87EE +#endif /* GL_AMD_compressed_ATC_texture */ + +#ifndef GL_APPLE_copy_texture_levels +#define GL_APPLE_copy_texture_levels 1 +typedef void (GL_APIENTRYP PFNGLCOPYTEXTURELEVELSAPPLEPROC) (GLuint destinationTexture, GLuint sourceTexture, GLint sourceBaseLevel, GLsizei sourceLevelCount); +#ifdef GL_GLEXT_PROTOTYPES +GL_API void GL_APIENTRY glCopyTextureLevelsAPPLE (GLuint destinationTexture, GLuint sourceTexture, GLint sourceBaseLevel, GLsizei sourceLevelCount); +#endif +#endif /* GL_APPLE_copy_texture_levels */ + +#ifndef GL_APPLE_framebuffer_multisample +#define GL_APPLE_framebuffer_multisample 1 +#define GL_RENDERBUFFER_SAMPLES_APPLE 0x8CAB +#define GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE_APPLE 0x8D56 +#define GL_MAX_SAMPLES_APPLE 0x8D57 +#define GL_READ_FRAMEBUFFER_APPLE 0x8CA8 +#define GL_DRAW_FRAMEBUFFER_APPLE 0x8CA9 +#define GL_DRAW_FRAMEBUFFER_BINDING_APPLE 0x8CA6 +#define GL_READ_FRAMEBUFFER_BINDING_APPLE 0x8CAA +typedef void (GL_APIENTRYP PFNGLRENDERBUFFERSTORAGEMULTISAMPLEAPPLEPROC) (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height); +typedef void (GL_APIENTRYP PFNGLRESOLVEMULTISAMPLEFRAMEBUFFERAPPLEPROC) (void); +#ifdef GL_GLEXT_PROTOTYPES +GL_API void GL_APIENTRY glRenderbufferStorageMultisampleAPPLE (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height); +GL_API void GL_APIENTRY glResolveMultisampleFramebufferAPPLE (void); +#endif +#endif /* GL_APPLE_framebuffer_multisample */ + +#ifndef GL_APPLE_sync +#define GL_APPLE_sync 1 +typedef struct __GLsync *GLsync; +typedef khronos_uint64_t GLuint64; +typedef khronos_int64_t GLint64; +#define GL_SYNC_OBJECT_APPLE 0x8A53 +#define GL_MAX_SERVER_WAIT_TIMEOUT_APPLE 0x9111 +#define GL_OBJECT_TYPE_APPLE 0x9112 +#define GL_SYNC_CONDITION_APPLE 0x9113 +#define GL_SYNC_STATUS_APPLE 0x9114 +#define GL_SYNC_FLAGS_APPLE 0x9115 +#define 
GL_SYNC_FENCE_APPLE 0x9116 +#define GL_SYNC_GPU_COMMANDS_COMPLETE_APPLE 0x9117 +#define GL_UNSIGNALED_APPLE 0x9118 +#define GL_SIGNALED_APPLE 0x9119 +#define GL_ALREADY_SIGNALED_APPLE 0x911A +#define GL_TIMEOUT_EXPIRED_APPLE 0x911B +#define GL_CONDITION_SATISFIED_APPLE 0x911C +#define GL_WAIT_FAILED_APPLE 0x911D +#define GL_SYNC_FLUSH_COMMANDS_BIT_APPLE 0x00000001 +#define GL_TIMEOUT_IGNORED_APPLE 0xFFFFFFFFFFFFFFFFull +typedef GLsync (GL_APIENTRYP PFNGLFENCESYNCAPPLEPROC) (GLenum condition, GLbitfield flags); +typedef GLboolean (GL_APIENTRYP PFNGLISSYNCAPPLEPROC) (GLsync sync); +typedef void (GL_APIENTRYP PFNGLDELETESYNCAPPLEPROC) (GLsync sync); +typedef GLenum (GL_APIENTRYP PFNGLCLIENTWAITSYNCAPPLEPROC) (GLsync sync, GLbitfield flags, GLuint64 timeout); +typedef void (GL_APIENTRYP PFNGLWAITSYNCAPPLEPROC) (GLsync sync, GLbitfield flags, GLuint64 timeout); +typedef void (GL_APIENTRYP PFNGLGETINTEGER64VAPPLEPROC) (GLenum pname, GLint64 *params); +typedef void (GL_APIENTRYP PFNGLGETSYNCIVAPPLEPROC) (GLsync sync, GLenum pname, GLsizei bufSize, GLsizei *length, GLint *values); +#ifdef GL_GLEXT_PROTOTYPES +GL_API GLsync GL_APIENTRY glFenceSyncAPPLE (GLenum condition, GLbitfield flags); +GL_API GLboolean GL_APIENTRY glIsSyncAPPLE (GLsync sync); +GL_API void GL_APIENTRY glDeleteSyncAPPLE (GLsync sync); +GL_API GLenum GL_APIENTRY glClientWaitSyncAPPLE (GLsync sync, GLbitfield flags, GLuint64 timeout); +GL_API void GL_APIENTRY glWaitSyncAPPLE (GLsync sync, GLbitfield flags, GLuint64 timeout); +GL_API void GL_APIENTRY glGetInteger64vAPPLE (GLenum pname, GLint64 *params); +GL_API void GL_APIENTRY glGetSyncivAPPLE (GLsync sync, GLenum pname, GLsizei bufSize, GLsizei *length, GLint *values); +#endif +#endif /* GL_APPLE_sync */ + +#ifndef GL_APPLE_texture_2D_limited_npot +#define GL_APPLE_texture_2D_limited_npot 1 +#endif /* GL_APPLE_texture_2D_limited_npot */ + +#ifndef GL_APPLE_texture_format_BGRA8888 +#define GL_APPLE_texture_format_BGRA8888 1 +#define GL_BGRA_EXT 0x80E1 +#define GL_BGRA8_EXT 0x93A1 +#endif /* GL_APPLE_texture_format_BGRA8888 */ + +#ifndef GL_APPLE_texture_max_level +#define GL_APPLE_texture_max_level 1 +#define GL_TEXTURE_MAX_LEVEL_APPLE 0x813D +#endif /* GL_APPLE_texture_max_level */ + +#ifndef GL_ARM_rgba8 +#define GL_ARM_rgba8 1 +#endif /* GL_ARM_rgba8 */ + +#ifndef GL_EXT_blend_minmax +#define GL_EXT_blend_minmax 1 +#define GL_MIN_EXT 0x8007 +#define GL_MAX_EXT 0x8008 +#endif /* GL_EXT_blend_minmax */ + +#ifndef GL_EXT_discard_framebuffer +#define GL_EXT_discard_framebuffer 1 +#define GL_COLOR_EXT 0x1800 +#define GL_DEPTH_EXT 0x1801 +#define GL_STENCIL_EXT 0x1802 +typedef void (GL_APIENTRYP PFNGLDISCARDFRAMEBUFFEREXTPROC) (GLenum target, GLsizei numAttachments, const GLenum *attachments); +#ifdef GL_GLEXT_PROTOTYPES +GL_API void GL_APIENTRY glDiscardFramebufferEXT (GLenum target, GLsizei numAttachments, const GLenum *attachments); +#endif +#endif /* GL_EXT_discard_framebuffer */ + +#ifndef GL_EXT_map_buffer_range +#define GL_EXT_map_buffer_range 1 +#define GL_MAP_READ_BIT_EXT 0x0001 +#define GL_MAP_WRITE_BIT_EXT 0x0002 +#define GL_MAP_INVALIDATE_RANGE_BIT_EXT 0x0004 +#define GL_MAP_INVALIDATE_BUFFER_BIT_EXT 0x0008 +#define GL_MAP_FLUSH_EXPLICIT_BIT_EXT 0x0010 +#define GL_MAP_UNSYNCHRONIZED_BIT_EXT 0x0020 +typedef void *(GL_APIENTRYP PFNGLMAPBUFFERRANGEEXTPROC) (GLenum target, GLintptr offset, GLsizeiptr length, GLbitfield access); +typedef void (GL_APIENTRYP PFNGLFLUSHMAPPEDBUFFERRANGEEXTPROC) (GLenum target, GLintptr offset, GLsizeiptr length); +#ifdef GL_GLEXT_PROTOTYPES 
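+/* Editorial note, not part of the generated Khronos header: these
+ * GL_EXT_map_buffer_range entry points are normally resolved at runtime
+ * rather than linked directly. A minimal sketch, assuming an EGL platform
+ * and that "GL_EXT_map_buffer_range" has already been found in the
+ * GL_EXTENSIONS string:
+ *
+ *   PFNGLMAPBUFFERRANGEEXTPROC mapBufferRangeEXT =
+ *       (PFNGLMAPBUFFERRANGEEXTPROC) eglGetProcAddress("glMapBufferRangeEXT");
+ *   void *dst = mapBufferRangeEXT(GL_ARRAY_BUFFER, 0, len,
+ *       GL_MAP_WRITE_BIT_EXT | GL_MAP_INVALIDATE_RANGE_BIT_EXT);
+ *   ...write len bytes to dst, then unmap with glUnmapBufferOES
+ *   (from GL_OES_mapbuffer above)...
+ */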
+GL_API void *GL_APIENTRY glMapBufferRangeEXT (GLenum target, GLintptr offset, GLsizeiptr length, GLbitfield access); +GL_API void GL_APIENTRY glFlushMappedBufferRangeEXT (GLenum target, GLintptr offset, GLsizeiptr length); +#endif +#endif /* GL_EXT_map_buffer_range */ + +#ifndef GL_EXT_multi_draw_arrays +#define GL_EXT_multi_draw_arrays 1 +typedef void (GL_APIENTRYP PFNGLMULTIDRAWARRAYSEXTPROC) (GLenum mode, const GLint *first, const GLsizei *count, GLsizei primcount); +typedef void (GL_APIENTRYP PFNGLMULTIDRAWELEMENTSEXTPROC) (GLenum mode, const GLsizei *count, GLenum type, const void *const*indices, GLsizei primcount); +#ifdef GL_GLEXT_PROTOTYPES +GL_API void GL_APIENTRY glMultiDrawArraysEXT (GLenum mode, const GLint *first, const GLsizei *count, GLsizei primcount); +GL_API void GL_APIENTRY glMultiDrawElementsEXT (GLenum mode, const GLsizei *count, GLenum type, const void *const*indices, GLsizei primcount); +#endif +#endif /* GL_EXT_multi_draw_arrays */ + +#ifndef GL_EXT_multisampled_render_to_texture +#define GL_EXT_multisampled_render_to_texture 1 +#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_SAMPLES_EXT 0x8D6C +#define GL_RENDERBUFFER_SAMPLES_EXT 0x8CAB +#define GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE_EXT 0x8D56 +#define GL_MAX_SAMPLES_EXT 0x8D57 +typedef void (GL_APIENTRYP PFNGLRENDERBUFFERSTORAGEMULTISAMPLEEXTPROC) (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height); +typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERTEXTURE2DMULTISAMPLEEXTPROC) (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLsizei samples); +#ifdef GL_GLEXT_PROTOTYPES +GL_API void GL_APIENTRY glRenderbufferStorageMultisampleEXT (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height); +GL_API void GL_APIENTRY glFramebufferTexture2DMultisampleEXT (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLsizei samples); +#endif +#endif /* GL_EXT_multisampled_render_to_texture */ + +#ifndef GL_EXT_read_format_bgra +#define GL_EXT_read_format_bgra 1 +#define GL_UNSIGNED_SHORT_4_4_4_4_REV_EXT 0x8365 +#define GL_UNSIGNED_SHORT_1_5_5_5_REV_EXT 0x8366 +#endif /* GL_EXT_read_format_bgra */ + +#ifndef GL_EXT_robustness +#define GL_EXT_robustness 1 +#define GL_GUILTY_CONTEXT_RESET_EXT 0x8253 +#define GL_INNOCENT_CONTEXT_RESET_EXT 0x8254 +#define GL_UNKNOWN_CONTEXT_RESET_EXT 0x8255 +#define GL_CONTEXT_ROBUST_ACCESS_EXT 0x90F3 +#define GL_RESET_NOTIFICATION_STRATEGY_EXT 0x8256 +#define GL_LOSE_CONTEXT_ON_RESET_EXT 0x8252 +#define GL_NO_RESET_NOTIFICATION_EXT 0x8261 +typedef GLenum (GL_APIENTRYP PFNGLGETGRAPHICSRESETSTATUSEXTPROC) (void); +typedef void (GL_APIENTRYP PFNGLREADNPIXELSEXTPROC) (GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, GLsizei bufSize, void *data); +typedef void (GL_APIENTRYP PFNGLGETNUNIFORMFVEXTPROC) (GLuint program, GLint location, GLsizei bufSize, GLfloat *params); +typedef void (GL_APIENTRYP PFNGLGETNUNIFORMIVEXTPROC) (GLuint program, GLint location, GLsizei bufSize, GLint *params); +#ifdef GL_GLEXT_PROTOTYPES +GL_API GLenum GL_APIENTRY glGetGraphicsResetStatusEXT (void); +GL_API void GL_APIENTRY glReadnPixelsEXT (GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, GLsizei bufSize, void *data); +GL_API void GL_APIENTRY glGetnUniformfvEXT (GLuint program, GLint location, GLsizei bufSize, GLfloat *params); +GL_API void GL_APIENTRY glGetnUniformivEXT (GLuint program, GLint location, GLsizei bufSize, GLint *params); +#endif +#endif /* 
GL_EXT_robustness */ + +#ifndef GL_EXT_sRGB +#define GL_EXT_sRGB 1 +#define GL_SRGB_EXT 0x8C40 +#define GL_SRGB_ALPHA_EXT 0x8C42 +#define GL_SRGB8_ALPHA8_EXT 0x8C43 +#define GL_FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING_EXT 0x8210 +#endif /* GL_EXT_sRGB */ + +#ifndef GL_EXT_texture_compression_dxt1 +#define GL_EXT_texture_compression_dxt1 1 +#define GL_COMPRESSED_RGB_S3TC_DXT1_EXT 0x83F0 +#define GL_COMPRESSED_RGBA_S3TC_DXT1_EXT 0x83F1 +#endif /* GL_EXT_texture_compression_dxt1 */ + +#ifndef GL_EXT_texture_filter_anisotropic +#define GL_EXT_texture_filter_anisotropic 1 +#define GL_TEXTURE_MAX_ANISOTROPY_EXT 0x84FE +#define GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT 0x84FF +#endif /* GL_EXT_texture_filter_anisotropic */ + +#ifndef GL_EXT_texture_format_BGRA8888 +#define GL_EXT_texture_format_BGRA8888 1 +#endif /* GL_EXT_texture_format_BGRA8888 */ + +#ifndef GL_EXT_texture_lod_bias +#define GL_EXT_texture_lod_bias 1 +#define GL_MAX_TEXTURE_LOD_BIAS_EXT 0x84FD +#define GL_TEXTURE_FILTER_CONTROL_EXT 0x8500 +#define GL_TEXTURE_LOD_BIAS_EXT 0x8501 +#endif /* GL_EXT_texture_lod_bias */ + +#ifndef GL_EXT_texture_storage +#define GL_EXT_texture_storage 1 +#define GL_TEXTURE_IMMUTABLE_FORMAT_EXT 0x912F +#define GL_ALPHA8_EXT 0x803C +#define GL_LUMINANCE8_EXT 0x8040 +#define GL_LUMINANCE8_ALPHA8_EXT 0x8045 +#define GL_RGBA32F_EXT 0x8814 +#define GL_RGB32F_EXT 0x8815 +#define GL_ALPHA32F_EXT 0x8816 +#define GL_LUMINANCE32F_EXT 0x8818 +#define GL_LUMINANCE_ALPHA32F_EXT 0x8819 +#define GL_RGBA16F_EXT 0x881A +#define GL_RGB16F_EXT 0x881B +#define GL_ALPHA16F_EXT 0x881C +#define GL_LUMINANCE16F_EXT 0x881E +#define GL_LUMINANCE_ALPHA16F_EXT 0x881F +#define GL_R8_EXT 0x8229 +#define GL_RG8_EXT 0x822B +#define GL_R32F_EXT 0x822E +#define GL_RG32F_EXT 0x8230 +#define GL_R16F_EXT 0x822D +#define GL_RG16F_EXT 0x822F +typedef void (GL_APIENTRYP PFNGLTEXSTORAGE1DEXTPROC) (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width); +typedef void (GL_APIENTRYP PFNGLTEXSTORAGE2DEXTPROC) (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height); +typedef void (GL_APIENTRYP PFNGLTEXSTORAGE3DEXTPROC) (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth); +typedef void (GL_APIENTRYP PFNGLTEXTURESTORAGE1DEXTPROC) (GLuint texture, GLenum target, GLsizei levels, GLenum internalformat, GLsizei width); +typedef void (GL_APIENTRYP PFNGLTEXTURESTORAGE2DEXTPROC) (GLuint texture, GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height); +typedef void (GL_APIENTRYP PFNGLTEXTURESTORAGE3DEXTPROC) (GLuint texture, GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth); +#ifdef GL_GLEXT_PROTOTYPES +GL_API void GL_APIENTRY glTexStorage1DEXT (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width); +GL_API void GL_APIENTRY glTexStorage2DEXT (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height); +GL_API void GL_APIENTRY glTexStorage3DEXT (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth); +GL_API void GL_APIENTRY glTextureStorage1DEXT (GLuint texture, GLenum target, GLsizei levels, GLenum internalformat, GLsizei width); +GL_API void GL_APIENTRY glTextureStorage2DEXT (GLuint texture, GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height); +GL_API void GL_APIENTRY glTextureStorage3DEXT (GLuint texture, GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, 
GLsizei height, GLsizei depth); +#endif +#endif /* GL_EXT_texture_storage */ + +#ifndef GL_IMG_multisampled_render_to_texture +#define GL_IMG_multisampled_render_to_texture 1 +#define GL_RENDERBUFFER_SAMPLES_IMG 0x9133 +#define GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE_IMG 0x9134 +#define GL_MAX_SAMPLES_IMG 0x9135 +#define GL_TEXTURE_SAMPLES_IMG 0x9136 +typedef void (GL_APIENTRYP PFNGLRENDERBUFFERSTORAGEMULTISAMPLEIMGPROC) (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height); +typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERTEXTURE2DMULTISAMPLEIMGPROC) (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLsizei samples); +#ifdef GL_GLEXT_PROTOTYPES +GL_API void GL_APIENTRY glRenderbufferStorageMultisampleIMG (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height); +GL_API void GL_APIENTRY glFramebufferTexture2DMultisampleIMG (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLsizei samples); +#endif +#endif /* GL_IMG_multisampled_render_to_texture */ + +#ifndef GL_IMG_read_format +#define GL_IMG_read_format 1 +#define GL_BGRA_IMG 0x80E1 +#define GL_UNSIGNED_SHORT_4_4_4_4_REV_IMG 0x8365 +#endif /* GL_IMG_read_format */ + +#ifndef GL_IMG_texture_compression_pvrtc +#define GL_IMG_texture_compression_pvrtc 1 +#define GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG 0x8C00 +#define GL_COMPRESSED_RGB_PVRTC_2BPPV1_IMG 0x8C01 +#define GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG 0x8C02 +#define GL_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG 0x8C03 +#endif /* GL_IMG_texture_compression_pvrtc */ + +#ifndef GL_IMG_texture_env_enhanced_fixed_function +#define GL_IMG_texture_env_enhanced_fixed_function 1 +#define GL_MODULATE_COLOR_IMG 0x8C04 +#define GL_RECIP_ADD_SIGNED_ALPHA_IMG 0x8C05 +#define GL_TEXTURE_ALPHA_MODULATE_IMG 0x8C06 +#define GL_FACTOR_ALPHA_MODULATE_IMG 0x8C07 +#define GL_FRAGMENT_ALPHA_MODULATE_IMG 0x8C08 +#define GL_ADD_BLEND_IMG 0x8C09 +#define GL_DOT3_RGBA_IMG 0x86AF +#endif /* GL_IMG_texture_env_enhanced_fixed_function */ + +#ifndef GL_IMG_user_clip_plane +#define GL_IMG_user_clip_plane 1 +#define GL_CLIP_PLANE0_IMG 0x3000 +#define GL_CLIP_PLANE1_IMG 0x3001 +#define GL_CLIP_PLANE2_IMG 0x3002 +#define GL_CLIP_PLANE3_IMG 0x3003 +#define GL_CLIP_PLANE4_IMG 0x3004 +#define GL_CLIP_PLANE5_IMG 0x3005 +#define GL_MAX_CLIP_PLANES_IMG 0x0D32 +typedef void (GL_APIENTRYP PFNGLCLIPPLANEFIMGPROC) (GLenum p, const GLfloat *eqn); +typedef void (GL_APIENTRYP PFNGLCLIPPLANEXIMGPROC) (GLenum p, const GLfixed *eqn); +#ifdef GL_GLEXT_PROTOTYPES +GL_API void GL_APIENTRY glClipPlanefIMG (GLenum p, const GLfloat *eqn); +GL_API void GL_APIENTRY glClipPlanexIMG (GLenum p, const GLfixed *eqn); +#endif +#endif /* GL_IMG_user_clip_plane */ + +#ifndef GL_NV_fence +#define GL_NV_fence 1 +#define GL_ALL_COMPLETED_NV 0x84F2 +#define GL_FENCE_STATUS_NV 0x84F3 +#define GL_FENCE_CONDITION_NV 0x84F4 +typedef void (GL_APIENTRYP PFNGLDELETEFENCESNVPROC) (GLsizei n, const GLuint *fences); +typedef void (GL_APIENTRYP PFNGLGENFENCESNVPROC) (GLsizei n, GLuint *fences); +typedef GLboolean (GL_APIENTRYP PFNGLISFENCENVPROC) (GLuint fence); +typedef GLboolean (GL_APIENTRYP PFNGLTESTFENCENVPROC) (GLuint fence); +typedef void (GL_APIENTRYP PFNGLGETFENCEIVNVPROC) (GLuint fence, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLFINISHFENCENVPROC) (GLuint fence); +typedef void (GL_APIENTRYP PFNGLSETFENCENVPROC) (GLuint fence, GLenum condition); +#ifdef GL_GLEXT_PROTOTYPES +GL_API void GL_APIENTRY glDeleteFencesNV (GLsizei n, const 
GLuint *fences); +GL_API void GL_APIENTRY glGenFencesNV (GLsizei n, GLuint *fences); +GL_API GLboolean GL_APIENTRY glIsFenceNV (GLuint fence); +GL_API GLboolean GL_APIENTRY glTestFenceNV (GLuint fence); +GL_API void GL_APIENTRY glGetFenceivNV (GLuint fence, GLenum pname, GLint *params); +GL_API void GL_APIENTRY glFinishFenceNV (GLuint fence); +GL_API void GL_APIENTRY glSetFenceNV (GLuint fence, GLenum condition); +#endif +#endif /* GL_NV_fence */ + +#ifndef GL_QCOM_driver_control +#define GL_QCOM_driver_control 1 +typedef char GLchar; +typedef void (GL_APIENTRYP PFNGLGETDRIVERCONTROLSQCOMPROC) (GLint *num, GLsizei size, GLuint *driverControls); +typedef void (GL_APIENTRYP PFNGLGETDRIVERCONTROLSTRINGQCOMPROC) (GLuint driverControl, GLsizei bufSize, GLsizei *length, GLchar *driverControlString); +typedef void (GL_APIENTRYP PFNGLENABLEDRIVERCONTROLQCOMPROC) (GLuint driverControl); +typedef void (GL_APIENTRYP PFNGLDISABLEDRIVERCONTROLQCOMPROC) (GLuint driverControl); +#ifdef GL_GLEXT_PROTOTYPES +GL_API void GL_APIENTRY glGetDriverControlsQCOM (GLint *num, GLsizei size, GLuint *driverControls); +GL_API void GL_APIENTRY glGetDriverControlStringQCOM (GLuint driverControl, GLsizei bufSize, GLsizei *length, GLchar *driverControlString); +GL_API void GL_APIENTRY glEnableDriverControlQCOM (GLuint driverControl); +GL_API void GL_APIENTRY glDisableDriverControlQCOM (GLuint driverControl); +#endif +#endif /* GL_QCOM_driver_control */ + +#ifndef GL_QCOM_extended_get +#define GL_QCOM_extended_get 1 +#define GL_TEXTURE_WIDTH_QCOM 0x8BD2 +#define GL_TEXTURE_HEIGHT_QCOM 0x8BD3 +#define GL_TEXTURE_DEPTH_QCOM 0x8BD4 +#define GL_TEXTURE_INTERNAL_FORMAT_QCOM 0x8BD5 +#define GL_TEXTURE_FORMAT_QCOM 0x8BD6 +#define GL_TEXTURE_TYPE_QCOM 0x8BD7 +#define GL_TEXTURE_IMAGE_VALID_QCOM 0x8BD8 +#define GL_TEXTURE_NUM_LEVELS_QCOM 0x8BD9 +#define GL_TEXTURE_TARGET_QCOM 0x8BDA +#define GL_TEXTURE_OBJECT_VALID_QCOM 0x8BDB +#define GL_STATE_RESTORE 0x8BDC +typedef void (GL_APIENTRYP PFNGLEXTGETTEXTURESQCOMPROC) (GLuint *textures, GLint maxTextures, GLint *numTextures); +typedef void (GL_APIENTRYP PFNGLEXTGETBUFFERSQCOMPROC) (GLuint *buffers, GLint maxBuffers, GLint *numBuffers); +typedef void (GL_APIENTRYP PFNGLEXTGETRENDERBUFFERSQCOMPROC) (GLuint *renderbuffers, GLint maxRenderbuffers, GLint *numRenderbuffers); +typedef void (GL_APIENTRYP PFNGLEXTGETFRAMEBUFFERSQCOMPROC) (GLuint *framebuffers, GLint maxFramebuffers, GLint *numFramebuffers); +typedef void (GL_APIENTRYP PFNGLEXTGETTEXLEVELPARAMETERIVQCOMPROC) (GLuint texture, GLenum face, GLint level, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLEXTTEXOBJECTSTATEOVERRIDEIQCOMPROC) (GLenum target, GLenum pname, GLint param); +typedef void (GL_APIENTRYP PFNGLEXTGETTEXSUBIMAGEQCOMPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, void *texels); +typedef void (GL_APIENTRYP PFNGLEXTGETBUFFERPOINTERVQCOMPROC) (GLenum target, void **params); +#ifdef GL_GLEXT_PROTOTYPES +GL_API void GL_APIENTRY glExtGetTexturesQCOM (GLuint *textures, GLint maxTextures, GLint *numTextures); +GL_API void GL_APIENTRY glExtGetBuffersQCOM (GLuint *buffers, GLint maxBuffers, GLint *numBuffers); +GL_API void GL_APIENTRY glExtGetRenderbuffersQCOM (GLuint *renderbuffers, GLint maxRenderbuffers, GLint *numRenderbuffers); +GL_API void GL_APIENTRY glExtGetFramebuffersQCOM (GLuint *framebuffers, GLint maxFramebuffers, GLint *numFramebuffers); +GL_API void GL_APIENTRY 
glExtGetTexLevelParameterivQCOM (GLuint texture, GLenum face, GLint level, GLenum pname, GLint *params); +GL_API void GL_APIENTRY glExtTexObjectStateOverrideiQCOM (GLenum target, GLenum pname, GLint param); +GL_API void GL_APIENTRY glExtGetTexSubImageQCOM (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, void *texels); +GL_API void GL_APIENTRY glExtGetBufferPointervQCOM (GLenum target, void **params); +#endif +#endif /* GL_QCOM_extended_get */ + +#ifndef GL_QCOM_extended_get2 +#define GL_QCOM_extended_get2 1 +typedef void (GL_APIENTRYP PFNGLEXTGETSHADERSQCOMPROC) (GLuint *shaders, GLint maxShaders, GLint *numShaders); +typedef void (GL_APIENTRYP PFNGLEXTGETPROGRAMSQCOMPROC) (GLuint *programs, GLint maxPrograms, GLint *numPrograms); +typedef GLboolean (GL_APIENTRYP PFNGLEXTISPROGRAMBINARYQCOMPROC) (GLuint program); +typedef void (GL_APIENTRYP PFNGLEXTGETPROGRAMBINARYSOURCEQCOMPROC) (GLuint program, GLenum shadertype, GLchar *source, GLint *length); +#ifdef GL_GLEXT_PROTOTYPES +GL_API void GL_APIENTRY glExtGetShadersQCOM (GLuint *shaders, GLint maxShaders, GLint *numShaders); +GL_API void GL_APIENTRY glExtGetProgramsQCOM (GLuint *programs, GLint maxPrograms, GLint *numPrograms); +GL_API GLboolean GL_APIENTRY glExtIsProgramBinaryQCOM (GLuint program); +GL_API void GL_APIENTRY glExtGetProgramBinarySourceQCOM (GLuint program, GLenum shadertype, GLchar *source, GLint *length); +#endif +#endif /* GL_QCOM_extended_get2 */ + +#ifndef GL_QCOM_perfmon_global_mode +#define GL_QCOM_perfmon_global_mode 1 +#define GL_PERFMON_GLOBAL_MODE_QCOM 0x8FA0 +#endif /* GL_QCOM_perfmon_global_mode */ + +#ifndef GL_QCOM_tiled_rendering +#define GL_QCOM_tiled_rendering 1 +#define GL_COLOR_BUFFER_BIT0_QCOM 0x00000001 +#define GL_COLOR_BUFFER_BIT1_QCOM 0x00000002 +#define GL_COLOR_BUFFER_BIT2_QCOM 0x00000004 +#define GL_COLOR_BUFFER_BIT3_QCOM 0x00000008 +#define GL_COLOR_BUFFER_BIT4_QCOM 0x00000010 +#define GL_COLOR_BUFFER_BIT5_QCOM 0x00000020 +#define GL_COLOR_BUFFER_BIT6_QCOM 0x00000040 +#define GL_COLOR_BUFFER_BIT7_QCOM 0x00000080 +#define GL_DEPTH_BUFFER_BIT0_QCOM 0x00000100 +#define GL_DEPTH_BUFFER_BIT1_QCOM 0x00000200 +#define GL_DEPTH_BUFFER_BIT2_QCOM 0x00000400 +#define GL_DEPTH_BUFFER_BIT3_QCOM 0x00000800 +#define GL_DEPTH_BUFFER_BIT4_QCOM 0x00001000 +#define GL_DEPTH_BUFFER_BIT5_QCOM 0x00002000 +#define GL_DEPTH_BUFFER_BIT6_QCOM 0x00004000 +#define GL_DEPTH_BUFFER_BIT7_QCOM 0x00008000 +#define GL_STENCIL_BUFFER_BIT0_QCOM 0x00010000 +#define GL_STENCIL_BUFFER_BIT1_QCOM 0x00020000 +#define GL_STENCIL_BUFFER_BIT2_QCOM 0x00040000 +#define GL_STENCIL_BUFFER_BIT3_QCOM 0x00080000 +#define GL_STENCIL_BUFFER_BIT4_QCOM 0x00100000 +#define GL_STENCIL_BUFFER_BIT5_QCOM 0x00200000 +#define GL_STENCIL_BUFFER_BIT6_QCOM 0x00400000 +#define GL_STENCIL_BUFFER_BIT7_QCOM 0x00800000 +#define GL_MULTISAMPLE_BUFFER_BIT0_QCOM 0x01000000 +#define GL_MULTISAMPLE_BUFFER_BIT1_QCOM 0x02000000 +#define GL_MULTISAMPLE_BUFFER_BIT2_QCOM 0x04000000 +#define GL_MULTISAMPLE_BUFFER_BIT3_QCOM 0x08000000 +#define GL_MULTISAMPLE_BUFFER_BIT4_QCOM 0x10000000 +#define GL_MULTISAMPLE_BUFFER_BIT5_QCOM 0x20000000 +#define GL_MULTISAMPLE_BUFFER_BIT6_QCOM 0x40000000 +#define GL_MULTISAMPLE_BUFFER_BIT7_QCOM 0x80000000 +typedef void (GL_APIENTRYP PFNGLSTARTTILINGQCOMPROC) (GLuint x, GLuint y, GLuint width, GLuint height, GLbitfield preserveMask); +typedef void (GL_APIENTRYP PFNGLENDTILINGQCOMPROC) (GLbitfield preserveMask); +#ifdef GL_GLEXT_PROTOTYPES +GL_API 
void GL_APIENTRY glStartTilingQCOM (GLuint x, GLuint y, GLuint width, GLuint height, GLbitfield preserveMask); +GL_API void GL_APIENTRY glEndTilingQCOM (GLbitfield preserveMask); +#endif +#endif /* GL_QCOM_tiled_rendering */ + +#ifndef GL_QCOM_writeonly_rendering +#define GL_QCOM_writeonly_rendering 1 +#define GL_WRITEONLY_RENDERING_QCOM 0x8823 +#endif /* GL_QCOM_writeonly_rendering */ + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/sources/khronos/GLES/glplatform.h b/sources/khronos/GLES/glplatform.h new file mode 100755 index 00000000..ea967a81 --- /dev/null +++ b/sources/khronos/GLES/glplatform.h @@ -0,0 +1,38 @@ +#ifndef __glplatform_h_ +#define __glplatform_h_ + +/* +** Copyright (c) 2017 The Khronos Group Inc. +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +/* Platform-specific types and definitions for OpenGL ES 1.X gl.h + * + * Adopters may modify khrplatform.h and this file to suit their platform. + * Please contribute modifications back to Khronos as pull requests on the + * public github repository: + * https://github.com/KhronosGroup/OpenGL-Registry + */ + +#include <KHR/khrplatform.h> + +#ifndef GL_API +#define GL_API KHRONOS_APICALL +#endif + +#ifndef GL_APIENTRY +#define GL_APIENTRY KHRONOS_APIENTRY +#endif +
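+/* Editorial note, not part of the upstream header: because GL_API and
+ * GL_APIENTRY are only defined when not already set, a port can override
+ * either macro by defining it before this header is included. For example,
+ * a hypothetical GCC/Clang build that exports the ES 1.x entry points
+ * itself might use:
+ *
+ *   #define GL_API __attribute__((visibility("default")))
+ *   #include <GLES/gl.h>
+ */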
+#endif /* __glplatform_h_ */ diff --git a/sources/khronos/GLES2/gl2.h b/sources/khronos/GLES2/gl2.h new file mode 100755 index 00000000..50ac6112 --- /dev/null +++ b/sources/khronos/GLES2/gl2.h @@ -0,0 +1,675 @@ +#ifndef __gl2_h_ +#define __gl2_h_ 1 + +#ifdef __cplusplus +extern "C" { +#endif + +/* +** Copyright (c) 2013-2017 The Khronos Group Inc. +** +** Permission is hereby granted, free of charge, to any person obtaining a +** copy of this software and/or associated documentation files (the +** "Materials"), to deal in the Materials without restriction, including +** without limitation the rights to use, copy, modify, merge, publish, +** distribute, sublicense, and/or sell copies of the Materials, and to +** permit persons to whom the Materials are furnished to do so, subject to +** the following conditions: +** +** The above copyright notice and this permission notice shall be included +** in all copies or substantial portions of the Materials. +** +** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +** MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS. +*/ +/* +** This header is generated from the Khronos OpenGL / OpenGL ES XML +** API Registry. The current version of the Registry, generator scripts +** used to make the header, and the header can be found at +** https://github.com/KhronosGroup/OpenGL-Registry +*/ + +#include <GLES2/gl2platform.h> + +#ifndef GL_APIENTRYP +#define GL_APIENTRYP GL_APIENTRY* +#endif + +#ifndef GL_GLES_PROTOTYPES +#define GL_GLES_PROTOTYPES 1 +#endif + +/* Generated on date 20170613 */ + +/* Generated C header for: + * API: gles2 + * Profile: common + * Versions considered: 2\.[0-9] + * Versions emitted: .* + * Default extensions included: None + * Additional extensions included: _nomatch_^ + * Extensions removed: _nomatch_^ + */ + +#ifndef GL_ES_VERSION_2_0 +#define GL_ES_VERSION_2_0 1 +#include <KHR/khrplatform.h> +typedef khronos_int8_t GLbyte; +typedef khronos_float_t GLclampf; +typedef khronos_int32_t GLfixed; +typedef short GLshort; +typedef unsigned short GLushort; +typedef void GLvoid; +typedef struct __GLsync *GLsync; +typedef khronos_int64_t GLint64; +typedef khronos_uint64_t GLuint64; +typedef unsigned int GLenum; +typedef unsigned int GLuint; +typedef char GLchar; +typedef khronos_float_t GLfloat; +typedef khronos_ssize_t GLsizeiptr; +typedef khronos_intptr_t GLintptr; +typedef unsigned int GLbitfield; +typedef int GLint; +typedef unsigned char GLboolean; +typedef int GLsizei; +typedef khronos_uint8_t GLubyte; +#define GL_DEPTH_BUFFER_BIT 0x00000100 +#define GL_STENCIL_BUFFER_BIT 0x00000400 +#define GL_COLOR_BUFFER_BIT 0x00004000 +#define GL_FALSE 0 +#define GL_TRUE 1 +#define GL_POINTS 0x0000 +#define GL_LINES 0x0001 +#define GL_LINE_LOOP 0x0002 +#define GL_LINE_STRIP 0x0003 +#define GL_TRIANGLES 0x0004 +#define GL_TRIANGLE_STRIP 0x0005 +#define GL_TRIANGLE_FAN 0x0006 +#define GL_ZERO 0 +#define GL_ONE 1 +#define GL_SRC_COLOR 0x0300 +#define GL_ONE_MINUS_SRC_COLOR 0x0301 +#define GL_SRC_ALPHA 0x0302 +#define GL_ONE_MINUS_SRC_ALPHA 0x0303 +#define GL_DST_ALPHA 0x0304 +#define GL_ONE_MINUS_DST_ALPHA 0x0305 +#define GL_DST_COLOR 0x0306 +#define GL_ONE_MINUS_DST_COLOR 0x0307 +#define GL_SRC_ALPHA_SATURATE 0x0308 +#define GL_FUNC_ADD 0x8006 +#define GL_BLEND_EQUATION 0x8009 +#define GL_BLEND_EQUATION_RGB 0x8009 +#define GL_BLEND_EQUATION_ALPHA 0x883D +#define GL_FUNC_SUBTRACT 0x800A +#define GL_FUNC_REVERSE_SUBTRACT 0x800B +#define GL_BLEND_DST_RGB 0x80C8 +#define GL_BLEND_SRC_RGB 0x80C9 +#define GL_BLEND_DST_ALPHA 0x80CA +#define GL_BLEND_SRC_ALPHA 0x80CB +#define GL_CONSTANT_COLOR 0x8001 +#define GL_ONE_MINUS_CONSTANT_COLOR 0x8002 +#define GL_CONSTANT_ALPHA 0x8003 +#define GL_ONE_MINUS_CONSTANT_ALPHA 0x8004 +#define GL_BLEND_COLOR 0x8005 +#define GL_ARRAY_BUFFER 0x8892 +#define GL_ELEMENT_ARRAY_BUFFER 0x8893 +#define GL_ARRAY_BUFFER_BINDING 0x8894 +#define GL_ELEMENT_ARRAY_BUFFER_BINDING 0x8895 +#define GL_STREAM_DRAW 0x88E0 +#define GL_STATIC_DRAW 0x88E4 +#define GL_DYNAMIC_DRAW 0x88E8 +#define GL_BUFFER_SIZE 0x8764 +#define GL_BUFFER_USAGE 0x8765 +#define GL_CURRENT_VERTEX_ATTRIB 0x8626 +#define GL_FRONT 0x0404 +#define GL_BACK 0x0405 +#define GL_FRONT_AND_BACK 0x0408 +#define GL_TEXTURE_2D 0x0DE1 +#define GL_CULL_FACE 0x0B44 +#define GL_BLEND 0x0BE2 +#define GL_DITHER 0x0BD0 +#define GL_STENCIL_TEST 0x0B90 +#define GL_DEPTH_TEST 0x0B71 +#define GL_SCISSOR_TEST 0x0C11 +#define GL_POLYGON_OFFSET_FILL 0x8037 +#define GL_SAMPLE_ALPHA_TO_COVERAGE 0x809E +#define GL_SAMPLE_COVERAGE 0x80A0 +#define GL_NO_ERROR 0 +#define GL_INVALID_ENUM 0x0500 +#define GL_INVALID_VALUE 0x0501 +#define GL_INVALID_OPERATION 0x0502 +#define GL_OUT_OF_MEMORY 0x0505 +#define GL_CW 0x0900 +#define GL_CCW 0x0901 +#define GL_LINE_WIDTH 0x0B21 +#define 
GL_ALIASED_POINT_SIZE_RANGE 0x846D +#define GL_ALIASED_LINE_WIDTH_RANGE 0x846E +#define GL_CULL_FACE_MODE 0x0B45 +#define GL_FRONT_FACE 0x0B46 +#define GL_DEPTH_RANGE 0x0B70 +#define GL_DEPTH_WRITEMASK 0x0B72 +#define GL_DEPTH_CLEAR_VALUE 0x0B73 +#define GL_DEPTH_FUNC 0x0B74 +#define GL_STENCIL_CLEAR_VALUE 0x0B91 +#define GL_STENCIL_FUNC 0x0B92 +#define GL_STENCIL_FAIL 0x0B94 +#define GL_STENCIL_PASS_DEPTH_FAIL 0x0B95 +#define GL_STENCIL_PASS_DEPTH_PASS 0x0B96 +#define GL_STENCIL_REF 0x0B97 +#define GL_STENCIL_VALUE_MASK 0x0B93 +#define GL_STENCIL_WRITEMASK 0x0B98 +#define GL_STENCIL_BACK_FUNC 0x8800 +#define GL_STENCIL_BACK_FAIL 0x8801 +#define GL_STENCIL_BACK_PASS_DEPTH_FAIL 0x8802 +#define GL_STENCIL_BACK_PASS_DEPTH_PASS 0x8803 +#define GL_STENCIL_BACK_REF 0x8CA3 +#define GL_STENCIL_BACK_VALUE_MASK 0x8CA4 +#define GL_STENCIL_BACK_WRITEMASK 0x8CA5 +#define GL_VIEWPORT 0x0BA2 +#define GL_SCISSOR_BOX 0x0C10 +#define GL_COLOR_CLEAR_VALUE 0x0C22 +#define GL_COLOR_WRITEMASK 0x0C23 +#define GL_UNPACK_ALIGNMENT 0x0CF5 +#define GL_PACK_ALIGNMENT 0x0D05 +#define GL_MAX_TEXTURE_SIZE 0x0D33 +#define GL_MAX_VIEWPORT_DIMS 0x0D3A +#define GL_SUBPIXEL_BITS 0x0D50 +#define GL_RED_BITS 0x0D52 +#define GL_GREEN_BITS 0x0D53 +#define GL_BLUE_BITS 0x0D54 +#define GL_ALPHA_BITS 0x0D55 +#define GL_DEPTH_BITS 0x0D56 +#define GL_STENCIL_BITS 0x0D57 +#define GL_POLYGON_OFFSET_UNITS 0x2A00 +#define GL_POLYGON_OFFSET_FACTOR 0x8038 +#define GL_TEXTURE_BINDING_2D 0x8069 +#define GL_SAMPLE_BUFFERS 0x80A8 +#define GL_SAMPLES 0x80A9 +#define GL_SAMPLE_COVERAGE_VALUE 0x80AA +#define GL_SAMPLE_COVERAGE_INVERT 0x80AB +#define GL_NUM_COMPRESSED_TEXTURE_FORMATS 0x86A2 +#define GL_COMPRESSED_TEXTURE_FORMATS 0x86A3 +#define GL_DONT_CARE 0x1100 +#define GL_FASTEST 0x1101 +#define GL_NICEST 0x1102 +#define GL_GENERATE_MIPMAP_HINT 0x8192 +#define GL_BYTE 0x1400 +#define GL_UNSIGNED_BYTE 0x1401 +#define GL_SHORT 0x1402 +#define GL_UNSIGNED_SHORT 0x1403 +#define GL_INT 0x1404 +#define GL_UNSIGNED_INT 0x1405 +#define GL_FLOAT 0x1406 +#define GL_FIXED 0x140C +#define GL_DEPTH_COMPONENT 0x1902 +#define GL_ALPHA 0x1906 +#define GL_RGB 0x1907 +#define GL_RGBA 0x1908 +#define GL_LUMINANCE 0x1909 +#define GL_LUMINANCE_ALPHA 0x190A +#define GL_UNSIGNED_SHORT_4_4_4_4 0x8033 +#define GL_UNSIGNED_SHORT_5_5_5_1 0x8034 +#define GL_UNSIGNED_SHORT_5_6_5 0x8363 +#define GL_FRAGMENT_SHADER 0x8B30 +#define GL_VERTEX_SHADER 0x8B31 +#define GL_MAX_VERTEX_ATTRIBS 0x8869 +#define GL_MAX_VERTEX_UNIFORM_VECTORS 0x8DFB +#define GL_MAX_VARYING_VECTORS 0x8DFC +#define GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS 0x8B4D +#define GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS 0x8B4C +#define GL_MAX_TEXTURE_IMAGE_UNITS 0x8872 +#define GL_MAX_FRAGMENT_UNIFORM_VECTORS 0x8DFD +#define GL_SHADER_TYPE 0x8B4F +#define GL_DELETE_STATUS 0x8B80 +#define GL_LINK_STATUS 0x8B82 +#define GL_VALIDATE_STATUS 0x8B83 +#define GL_ATTACHED_SHADERS 0x8B85 +#define GL_ACTIVE_UNIFORMS 0x8B86 +#define GL_ACTIVE_UNIFORM_MAX_LENGTH 0x8B87 +#define GL_ACTIVE_ATTRIBUTES 0x8B89 +#define GL_ACTIVE_ATTRIBUTE_MAX_LENGTH 0x8B8A +#define GL_SHADING_LANGUAGE_VERSION 0x8B8C +#define GL_CURRENT_PROGRAM 0x8B8D +#define GL_NEVER 0x0200 +#define GL_LESS 0x0201 +#define GL_EQUAL 0x0202 +#define GL_LEQUAL 0x0203 +#define GL_GREATER 0x0204 +#define GL_NOTEQUAL 0x0205 +#define GL_GEQUAL 0x0206 +#define GL_ALWAYS 0x0207 +#define GL_KEEP 0x1E00 +#define GL_REPLACE 0x1E01 +#define GL_INCR 0x1E02 +#define GL_DECR 0x1E03 +#define GL_INVERT 0x150A +#define GL_INCR_WRAP 0x8507 +#define GL_DECR_WRAP 0x8508 +#define GL_VENDOR 0x1F00 
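+/* Editorial note, not part of the generated header: GL_VENDOR above and
+ * GL_RENDERER, GL_VERSION and GL_EXTENSIONS below are the name tokens
+ * accepted by glGetString(). On ES 2.0, extension support is typically
+ * probed by searching the extension string; a sketch (a robust check
+ * should match whole, space-delimited names, not bare substrings):
+ *
+ *   const char *ext = (const char *) glGetString(GL_EXTENSIONS);
+ *   if (ext && strstr(ext, "GL_OES_vertex_array_object")) { ... }
+ */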
+#define GL_RENDERER 0x1F01 +#define GL_VERSION 0x1F02 +#define GL_EXTENSIONS 0x1F03 +#define GL_NEAREST 0x2600 +#define GL_LINEAR 0x2601 +#define GL_NEAREST_MIPMAP_NEAREST 0x2700 +#define GL_LINEAR_MIPMAP_NEAREST 0x2701 +#define GL_NEAREST_MIPMAP_LINEAR 0x2702 +#define GL_LINEAR_MIPMAP_LINEAR 0x2703 +#define GL_TEXTURE_MAG_FILTER 0x2800 +#define GL_TEXTURE_MIN_FILTER 0x2801 +#define GL_TEXTURE_WRAP_S 0x2802 +#define GL_TEXTURE_WRAP_T 0x2803 +#define GL_TEXTURE 0x1702 +#define GL_TEXTURE_CUBE_MAP 0x8513 +#define GL_TEXTURE_BINDING_CUBE_MAP 0x8514 +#define GL_TEXTURE_CUBE_MAP_POSITIVE_X 0x8515 +#define GL_TEXTURE_CUBE_MAP_NEGATIVE_X 0x8516 +#define GL_TEXTURE_CUBE_MAP_POSITIVE_Y 0x8517 +#define GL_TEXTURE_CUBE_MAP_NEGATIVE_Y 0x8518 +#define GL_TEXTURE_CUBE_MAP_POSITIVE_Z 0x8519 +#define GL_TEXTURE_CUBE_MAP_NEGATIVE_Z 0x851A +#define GL_MAX_CUBE_MAP_TEXTURE_SIZE 0x851C +#define GL_TEXTURE0 0x84C0 +#define GL_TEXTURE1 0x84C1 +#define GL_TEXTURE2 0x84C2 +#define GL_TEXTURE3 0x84C3 +#define GL_TEXTURE4 0x84C4 +#define GL_TEXTURE5 0x84C5 +#define GL_TEXTURE6 0x84C6 +#define GL_TEXTURE7 0x84C7 +#define GL_TEXTURE8 0x84C8 +#define GL_TEXTURE9 0x84C9 +#define GL_TEXTURE10 0x84CA +#define GL_TEXTURE11 0x84CB +#define GL_TEXTURE12 0x84CC +#define GL_TEXTURE13 0x84CD +#define GL_TEXTURE14 0x84CE +#define GL_TEXTURE15 0x84CF +#define GL_TEXTURE16 0x84D0 +#define GL_TEXTURE17 0x84D1 +#define GL_TEXTURE18 0x84D2 +#define GL_TEXTURE19 0x84D3 +#define GL_TEXTURE20 0x84D4 +#define GL_TEXTURE21 0x84D5 +#define GL_TEXTURE22 0x84D6 +#define GL_TEXTURE23 0x84D7 +#define GL_TEXTURE24 0x84D8 +#define GL_TEXTURE25 0x84D9 +#define GL_TEXTURE26 0x84DA +#define GL_TEXTURE27 0x84DB +#define GL_TEXTURE28 0x84DC +#define GL_TEXTURE29 0x84DD +#define GL_TEXTURE30 0x84DE +#define GL_TEXTURE31 0x84DF +#define GL_ACTIVE_TEXTURE 0x84E0 +#define GL_REPEAT 0x2901 +#define GL_CLAMP_TO_EDGE 0x812F +#define GL_MIRRORED_REPEAT 0x8370 +#define GL_FLOAT_VEC2 0x8B50 +#define GL_FLOAT_VEC3 0x8B51 +#define GL_FLOAT_VEC4 0x8B52 +#define GL_INT_VEC2 0x8B53 +#define GL_INT_VEC3 0x8B54 +#define GL_INT_VEC4 0x8B55 +#define GL_BOOL 0x8B56 +#define GL_BOOL_VEC2 0x8B57 +#define GL_BOOL_VEC3 0x8B58 +#define GL_BOOL_VEC4 0x8B59 +#define GL_FLOAT_MAT2 0x8B5A +#define GL_FLOAT_MAT3 0x8B5B +#define GL_FLOAT_MAT4 0x8B5C +#define GL_SAMPLER_2D 0x8B5E +#define GL_SAMPLER_CUBE 0x8B60 +#define GL_VERTEX_ATTRIB_ARRAY_ENABLED 0x8622 +#define GL_VERTEX_ATTRIB_ARRAY_SIZE 0x8623 +#define GL_VERTEX_ATTRIB_ARRAY_STRIDE 0x8624 +#define GL_VERTEX_ATTRIB_ARRAY_TYPE 0x8625 +#define GL_VERTEX_ATTRIB_ARRAY_NORMALIZED 0x886A +#define GL_VERTEX_ATTRIB_ARRAY_POINTER 0x8645 +#define GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING 0x889F +#define GL_IMPLEMENTATION_COLOR_READ_TYPE 0x8B9A +#define GL_IMPLEMENTATION_COLOR_READ_FORMAT 0x8B9B +#define GL_COMPILE_STATUS 0x8B81 +#define GL_INFO_LOG_LENGTH 0x8B84 +#define GL_SHADER_SOURCE_LENGTH 0x8B88 +#define GL_SHADER_COMPILER 0x8DFA +#define GL_SHADER_BINARY_FORMATS 0x8DF8 +#define GL_NUM_SHADER_BINARY_FORMATS 0x8DF9 +#define GL_LOW_FLOAT 0x8DF0 +#define GL_MEDIUM_FLOAT 0x8DF1 +#define GL_HIGH_FLOAT 0x8DF2 +#define GL_LOW_INT 0x8DF3 +#define GL_MEDIUM_INT 0x8DF4 +#define GL_HIGH_INT 0x8DF5 +#define GL_FRAMEBUFFER 0x8D40 +#define GL_RENDERBUFFER 0x8D41 +#define GL_RGBA4 0x8056 +#define GL_RGB5_A1 0x8057 +#define GL_RGB565 0x8D62 +#define GL_DEPTH_COMPONENT16 0x81A5 +#define GL_STENCIL_INDEX8 0x8D48 +#define GL_RENDERBUFFER_WIDTH 0x8D42 +#define GL_RENDERBUFFER_HEIGHT 0x8D43 +#define GL_RENDERBUFFER_INTERNAL_FORMAT 0x8D44 +#define 
GL_RENDERBUFFER_RED_SIZE 0x8D50 +#define GL_RENDERBUFFER_GREEN_SIZE 0x8D51 +#define GL_RENDERBUFFER_BLUE_SIZE 0x8D52 +#define GL_RENDERBUFFER_ALPHA_SIZE 0x8D53 +#define GL_RENDERBUFFER_DEPTH_SIZE 0x8D54 +#define GL_RENDERBUFFER_STENCIL_SIZE 0x8D55 +#define GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE 0x8CD0 +#define GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME 0x8CD1 +#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL 0x8CD2 +#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE 0x8CD3 +#define GL_COLOR_ATTACHMENT0 0x8CE0 +#define GL_DEPTH_ATTACHMENT 0x8D00 +#define GL_STENCIL_ATTACHMENT 0x8D20 +#define GL_NONE 0 +#define GL_FRAMEBUFFER_COMPLETE 0x8CD5 +#define GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT 0x8CD6 +#define GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT 0x8CD7 +#define GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS 0x8CD9 +#define GL_FRAMEBUFFER_UNSUPPORTED 0x8CDD +#define GL_FRAMEBUFFER_BINDING 0x8CA6 +#define GL_RENDERBUFFER_BINDING 0x8CA7 +#define GL_MAX_RENDERBUFFER_SIZE 0x84E8 +#define GL_INVALID_FRAMEBUFFER_OPERATION 0x0506 +typedef void (GL_APIENTRYP PFNGLACTIVETEXTUREPROC) (GLenum texture); +typedef void (GL_APIENTRYP PFNGLATTACHSHADERPROC) (GLuint program, GLuint shader); +typedef void (GL_APIENTRYP PFNGLBINDATTRIBLOCATIONPROC) (GLuint program, GLuint index, const GLchar *name); +typedef void (GL_APIENTRYP PFNGLBINDBUFFERPROC) (GLenum target, GLuint buffer); +typedef void (GL_APIENTRYP PFNGLBINDFRAMEBUFFERPROC) (GLenum target, GLuint framebuffer); +typedef void (GL_APIENTRYP PFNGLBINDRENDERBUFFERPROC) (GLenum target, GLuint renderbuffer); +typedef void (GL_APIENTRYP PFNGLBINDTEXTUREPROC) (GLenum target, GLuint texture); +typedef void (GL_APIENTRYP PFNGLBLENDCOLORPROC) (GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha); +typedef void (GL_APIENTRYP PFNGLBLENDEQUATIONPROC) (GLenum mode); +typedef void (GL_APIENTRYP PFNGLBLENDEQUATIONSEPARATEPROC) (GLenum modeRGB, GLenum modeAlpha); +typedef void (GL_APIENTRYP PFNGLBLENDFUNCPROC) (GLenum sfactor, GLenum dfactor); +typedef void (GL_APIENTRYP PFNGLBLENDFUNCSEPARATEPROC) (GLenum sfactorRGB, GLenum dfactorRGB, GLenum sfactorAlpha, GLenum dfactorAlpha); +typedef void (GL_APIENTRYP PFNGLBUFFERDATAPROC) (GLenum target, GLsizeiptr size, const void *data, GLenum usage); +typedef void (GL_APIENTRYP PFNGLBUFFERSUBDATAPROC) (GLenum target, GLintptr offset, GLsizeiptr size, const void *data); +typedef GLenum (GL_APIENTRYP PFNGLCHECKFRAMEBUFFERSTATUSPROC) (GLenum target); +typedef void (GL_APIENTRYP PFNGLCLEARPROC) (GLbitfield mask); +typedef void (GL_APIENTRYP PFNGLCLEARCOLORPROC) (GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha); +typedef void (GL_APIENTRYP PFNGLCLEARDEPTHFPROC) (GLfloat d); +typedef void (GL_APIENTRYP PFNGLCLEARSTENCILPROC) (GLint s); +typedef void (GL_APIENTRYP PFNGLCOLORMASKPROC) (GLboolean red, GLboolean green, GLboolean blue, GLboolean alpha); +typedef void (GL_APIENTRYP PFNGLCOMPILESHADERPROC) (GLuint shader); +typedef void (GL_APIENTRYP PFNGLCOMPRESSEDTEXIMAGE2DPROC) (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLsizei imageSize, const void *data); +typedef void (GL_APIENTRYP PFNGLCOMPRESSEDTEXSUBIMAGE2DPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, const void *data); +typedef void (GL_APIENTRYP PFNGLCOPYTEXIMAGE2DPROC) (GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLsizei height, GLint border); +typedef void (GL_APIENTRYP PFNGLCOPYTEXSUBIMAGE2DPROC) (GLenum 
target, GLint level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height); +typedef GLuint (GL_APIENTRYP PFNGLCREATEPROGRAMPROC) (void); +typedef GLuint (GL_APIENTRYP PFNGLCREATESHADERPROC) (GLenum type); +typedef void (GL_APIENTRYP PFNGLCULLFACEPROC) (GLenum mode); +typedef void (GL_APIENTRYP PFNGLDELETEBUFFERSPROC) (GLsizei n, const GLuint *buffers); +typedef void (GL_APIENTRYP PFNGLDELETEFRAMEBUFFERSPROC) (GLsizei n, const GLuint *framebuffers); +typedef void (GL_APIENTRYP PFNGLDELETEPROGRAMPROC) (GLuint program); +typedef void (GL_APIENTRYP PFNGLDELETERENDERBUFFERSPROC) (GLsizei n, const GLuint *renderbuffers); +typedef void (GL_APIENTRYP PFNGLDELETESHADERPROC) (GLuint shader); +typedef void (GL_APIENTRYP PFNGLDELETETEXTURESPROC) (GLsizei n, const GLuint *textures); +typedef void (GL_APIENTRYP PFNGLDEPTHFUNCPROC) (GLenum func); +typedef void (GL_APIENTRYP PFNGLDEPTHMASKPROC) (GLboolean flag); +typedef void (GL_APIENTRYP PFNGLDEPTHRANGEFPROC) (GLfloat n, GLfloat f); +typedef void (GL_APIENTRYP PFNGLDETACHSHADERPROC) (GLuint program, GLuint shader); +typedef void (GL_APIENTRYP PFNGLDISABLEPROC) (GLenum cap); +typedef void (GL_APIENTRYP PFNGLDISABLEVERTEXATTRIBARRAYPROC) (GLuint index); +typedef void (GL_APIENTRYP PFNGLDRAWARRAYSPROC) (GLenum mode, GLint first, GLsizei count); +typedef void (GL_APIENTRYP PFNGLDRAWELEMENTSPROC) (GLenum mode, GLsizei count, GLenum type, const void *indices); +typedef void (GL_APIENTRYP PFNGLENABLEPROC) (GLenum cap); +typedef void (GL_APIENTRYP PFNGLENABLEVERTEXATTRIBARRAYPROC) (GLuint index); +typedef void (GL_APIENTRYP PFNGLFINISHPROC) (void); +typedef void (GL_APIENTRYP PFNGLFLUSHPROC) (void); +typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERRENDERBUFFERPROC) (GLenum target, GLenum attachment, GLenum renderbuffertarget, GLuint renderbuffer); +typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERTEXTURE2DPROC) (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level); +typedef void (GL_APIENTRYP PFNGLFRONTFACEPROC) (GLenum mode); +typedef void (GL_APIENTRYP PFNGLGENBUFFERSPROC) (GLsizei n, GLuint *buffers); +typedef void (GL_APIENTRYP PFNGLGENERATEMIPMAPPROC) (GLenum target); +typedef void (GL_APIENTRYP PFNGLGENFRAMEBUFFERSPROC) (GLsizei n, GLuint *framebuffers); +typedef void (GL_APIENTRYP PFNGLGENRENDERBUFFERSPROC) (GLsizei n, GLuint *renderbuffers); +typedef void (GL_APIENTRYP PFNGLGENTEXTURESPROC) (GLsizei n, GLuint *textures); +typedef void (GL_APIENTRYP PFNGLGETACTIVEATTRIBPROC) (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLint *size, GLenum *type, GLchar *name); +typedef void (GL_APIENTRYP PFNGLGETACTIVEUNIFORMPROC) (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLint *size, GLenum *type, GLchar *name); +typedef void (GL_APIENTRYP PFNGLGETATTACHEDSHADERSPROC) (GLuint program, GLsizei maxCount, GLsizei *count, GLuint *shaders); +typedef GLint (GL_APIENTRYP PFNGLGETATTRIBLOCATIONPROC) (GLuint program, const GLchar *name); +typedef void (GL_APIENTRYP PFNGLGETBOOLEANVPROC) (GLenum pname, GLboolean *data); +typedef void (GL_APIENTRYP PFNGLGETBUFFERPARAMETERIVPROC) (GLenum target, GLenum pname, GLint *params); +typedef GLenum (GL_APIENTRYP PFNGLGETERRORPROC) (void); +typedef void (GL_APIENTRYP PFNGLGETFLOATVPROC) (GLenum pname, GLfloat *data); +typedef void (GL_APIENTRYP PFNGLGETFRAMEBUFFERATTACHMENTPARAMETERIVPROC) (GLenum target, GLenum attachment, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETINTEGERVPROC) (GLenum pname, GLint *data); +typedef void 
(GL_APIENTRYP PFNGLGETPROGRAMIVPROC) (GLuint program, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETPROGRAMINFOLOGPROC) (GLuint program, GLsizei bufSize, GLsizei *length, GLchar *infoLog); +typedef void (GL_APIENTRYP PFNGLGETRENDERBUFFERPARAMETERIVPROC) (GLenum target, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETSHADERIVPROC) (GLuint shader, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETSHADERINFOLOGPROC) (GLuint shader, GLsizei bufSize, GLsizei *length, GLchar *infoLog); +typedef void (GL_APIENTRYP PFNGLGETSHADERPRECISIONFORMATPROC) (GLenum shadertype, GLenum precisiontype, GLint *range, GLint *precision); +typedef void (GL_APIENTRYP PFNGLGETSHADERSOURCEPROC) (GLuint shader, GLsizei bufSize, GLsizei *length, GLchar *source); +typedef const GLubyte *(GL_APIENTRYP PFNGLGETSTRINGPROC) (GLenum name); +typedef void (GL_APIENTRYP PFNGLGETTEXPARAMETERFVPROC) (GLenum target, GLenum pname, GLfloat *params); +typedef void (GL_APIENTRYP PFNGLGETTEXPARAMETERIVPROC) (GLenum target, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETUNIFORMFVPROC) (GLuint program, GLint location, GLfloat *params); +typedef void (GL_APIENTRYP PFNGLGETUNIFORMIVPROC) (GLuint program, GLint location, GLint *params); +typedef GLint (GL_APIENTRYP PFNGLGETUNIFORMLOCATIONPROC) (GLuint program, const GLchar *name); +typedef void (GL_APIENTRYP PFNGLGETVERTEXATTRIBFVPROC) (GLuint index, GLenum pname, GLfloat *params); +typedef void (GL_APIENTRYP PFNGLGETVERTEXATTRIBIVPROC) (GLuint index, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETVERTEXATTRIBPOINTERVPROC) (GLuint index, GLenum pname, void **pointer); +typedef void (GL_APIENTRYP PFNGLHINTPROC) (GLenum target, GLenum mode); +typedef GLboolean (GL_APIENTRYP PFNGLISBUFFERPROC) (GLuint buffer); +typedef GLboolean (GL_APIENTRYP PFNGLISENABLEDPROC) (GLenum cap); +typedef GLboolean (GL_APIENTRYP PFNGLISFRAMEBUFFERPROC) (GLuint framebuffer); +typedef GLboolean (GL_APIENTRYP PFNGLISPROGRAMPROC) (GLuint program); +typedef GLboolean (GL_APIENTRYP PFNGLISRENDERBUFFERPROC) (GLuint renderbuffer); +typedef GLboolean (GL_APIENTRYP PFNGLISSHADERPROC) (GLuint shader); +typedef GLboolean (GL_APIENTRYP PFNGLISTEXTUREPROC) (GLuint texture); +typedef void (GL_APIENTRYP PFNGLLINEWIDTHPROC) (GLfloat width); +typedef void (GL_APIENTRYP PFNGLLINKPROGRAMPROC) (GLuint program); +typedef void (GL_APIENTRYP PFNGLPIXELSTOREIPROC) (GLenum pname, GLint param); +typedef void (GL_APIENTRYP PFNGLPOLYGONOFFSETPROC) (GLfloat factor, GLfloat units); +typedef void (GL_APIENTRYP PFNGLREADPIXELSPROC) (GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, void *pixels); +typedef void (GL_APIENTRYP PFNGLRELEASESHADERCOMPILERPROC) (void); +typedef void (GL_APIENTRYP PFNGLRENDERBUFFERSTORAGEPROC) (GLenum target, GLenum internalformat, GLsizei width, GLsizei height); +typedef void (GL_APIENTRYP PFNGLSAMPLECOVERAGEPROC) (GLfloat value, GLboolean invert); +typedef void (GL_APIENTRYP PFNGLSCISSORPROC) (GLint x, GLint y, GLsizei width, GLsizei height); +typedef void (GL_APIENTRYP PFNGLSHADERBINARYPROC) (GLsizei count, const GLuint *shaders, GLenum binaryformat, const void *binary, GLsizei length); +typedef void (GL_APIENTRYP PFNGLSHADERSOURCEPROC) (GLuint shader, GLsizei count, const GLchar *const*string, const GLint *length); +typedef void (GL_APIENTRYP PFNGLSTENCILFUNCPROC) (GLenum func, GLint ref, GLuint mask); +typedef void (GL_APIENTRYP PFNGLSTENCILFUNCSEPARATEPROC) (GLenum face, GLenum func, GLint ref, 
GLuint mask); +typedef void (GL_APIENTRYP PFNGLSTENCILMASKPROC) (GLuint mask); +typedef void (GL_APIENTRYP PFNGLSTENCILMASKSEPARATEPROC) (GLenum face, GLuint mask); +typedef void (GL_APIENTRYP PFNGLSTENCILOPPROC) (GLenum fail, GLenum zfail, GLenum zpass); +typedef void (GL_APIENTRYP PFNGLSTENCILOPSEPARATEPROC) (GLenum face, GLenum sfail, GLenum dpfail, GLenum dppass); +typedef void (GL_APIENTRYP PFNGLTEXIMAGE2DPROC) (GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, const void *pixels); +typedef void (GL_APIENTRYP PFNGLTEXPARAMETERFPROC) (GLenum target, GLenum pname, GLfloat param); +typedef void (GL_APIENTRYP PFNGLTEXPARAMETERFVPROC) (GLenum target, GLenum pname, const GLfloat *params); +typedef void (GL_APIENTRYP PFNGLTEXPARAMETERIPROC) (GLenum target, GLenum pname, GLint param); +typedef void (GL_APIENTRYP PFNGLTEXPARAMETERIVPROC) (GLenum target, GLenum pname, const GLint *params); +typedef void (GL_APIENTRYP PFNGLTEXSUBIMAGE2DPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, const void *pixels); +typedef void (GL_APIENTRYP PFNGLUNIFORM1FPROC) (GLint location, GLfloat v0); +typedef void (GL_APIENTRYP PFNGLUNIFORM1FVPROC) (GLint location, GLsizei count, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM1IPROC) (GLint location, GLint v0); +typedef void (GL_APIENTRYP PFNGLUNIFORM1IVPROC) (GLint location, GLsizei count, const GLint *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM2FPROC) (GLint location, GLfloat v0, GLfloat v1); +typedef void (GL_APIENTRYP PFNGLUNIFORM2FVPROC) (GLint location, GLsizei count, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM2IPROC) (GLint location, GLint v0, GLint v1); +typedef void (GL_APIENTRYP PFNGLUNIFORM2IVPROC) (GLint location, GLsizei count, const GLint *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM3FPROC) (GLint location, GLfloat v0, GLfloat v1, GLfloat v2); +typedef void (GL_APIENTRYP PFNGLUNIFORM3FVPROC) (GLint location, GLsizei count, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM3IPROC) (GLint location, GLint v0, GLint v1, GLint v2); +typedef void (GL_APIENTRYP PFNGLUNIFORM3IVPROC) (GLint location, GLsizei count, const GLint *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM4FPROC) (GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3); +typedef void (GL_APIENTRYP PFNGLUNIFORM4FVPROC) (GLint location, GLsizei count, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM4IPROC) (GLint location, GLint v0, GLint v1, GLint v2, GLint v3); +typedef void (GL_APIENTRYP PFNGLUNIFORM4IVPROC) (GLint location, GLsizei count, const GLint *value); +typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX2FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX3FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX4FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUSEPROGRAMPROC) (GLuint program); +typedef void (GL_APIENTRYP PFNGLVALIDATEPROGRAMPROC) (GLuint program); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB1FPROC) (GLuint index, GLfloat x); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB1FVPROC) (GLuint index, const GLfloat *v); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB2FPROC) (GLuint index, GLfloat x, GLfloat y); +typedef void (GL_APIENTRYP 
PFNGLVERTEXATTRIB2FVPROC) (GLuint index, const GLfloat *v); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB3FPROC) (GLuint index, GLfloat x, GLfloat y, GLfloat z); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB3FVPROC) (GLuint index, const GLfloat *v); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB4FPROC) (GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB4FVPROC) (GLuint index, const GLfloat *v); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBPOINTERPROC) (GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, const void *pointer); +typedef void (GL_APIENTRYP PFNGLVIEWPORTPROC) (GLint x, GLint y, GLsizei width, GLsizei height); +#if GL_GLES_PROTOTYPES +GL_APICALL void GL_APIENTRY glActiveTexture (GLenum texture); +GL_APICALL void GL_APIENTRY glAttachShader (GLuint program, GLuint shader); +GL_APICALL void GL_APIENTRY glBindAttribLocation (GLuint program, GLuint index, const GLchar *name); +GL_APICALL void GL_APIENTRY glBindBuffer (GLenum target, GLuint buffer); +GL_APICALL void GL_APIENTRY glBindFramebuffer (GLenum target, GLuint framebuffer); +GL_APICALL void GL_APIENTRY glBindRenderbuffer (GLenum target, GLuint renderbuffer); +GL_APICALL void GL_APIENTRY glBindTexture (GLenum target, GLuint texture); +GL_APICALL void GL_APIENTRY glBlendColor (GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha); +GL_APICALL void GL_APIENTRY glBlendEquation (GLenum mode); +GL_APICALL void GL_APIENTRY glBlendEquationSeparate (GLenum modeRGB, GLenum modeAlpha); +GL_APICALL void GL_APIENTRY glBlendFunc (GLenum sfactor, GLenum dfactor); +GL_APICALL void GL_APIENTRY glBlendFuncSeparate (GLenum sfactorRGB, GLenum dfactorRGB, GLenum sfactorAlpha, GLenum dfactorAlpha); +GL_APICALL void GL_APIENTRY glBufferData (GLenum target, GLsizeiptr size, const void *data, GLenum usage); +GL_APICALL void GL_APIENTRY glBufferSubData (GLenum target, GLintptr offset, GLsizeiptr size, const void *data); +GL_APICALL GLenum GL_APIENTRY glCheckFramebufferStatus (GLenum target); +GL_APICALL void GL_APIENTRY glClear (GLbitfield mask); +GL_APICALL void GL_APIENTRY glClearColor (GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha); +GL_APICALL void GL_APIENTRY glClearDepthf (GLfloat d); +GL_APICALL void GL_APIENTRY glClearStencil (GLint s); +GL_APICALL void GL_APIENTRY glColorMask (GLboolean red, GLboolean green, GLboolean blue, GLboolean alpha); +GL_APICALL void GL_APIENTRY glCompileShader (GLuint shader); +GL_APICALL void GL_APIENTRY glCompressedTexImage2D (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLsizei imageSize, const void *data); +GL_APICALL void GL_APIENTRY glCompressedTexSubImage2D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, const void *data); +GL_APICALL void GL_APIENTRY glCopyTexImage2D (GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLsizei height, GLint border); +GL_APICALL void GL_APIENTRY glCopyTexSubImage2D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height); +GL_APICALL GLuint GL_APIENTRY glCreateProgram (void); +GL_APICALL GLuint GL_APIENTRY glCreateShader (GLenum type); +GL_APICALL void GL_APIENTRY glCullFace (GLenum mode); +GL_APICALL void GL_APIENTRY glDeleteBuffers (GLsizei n, const GLuint *buffers); +GL_APICALL void GL_APIENTRY glDeleteFramebuffers (GLsizei n, const GLuint *framebuffers); +GL_APICALL void 
GL_APIENTRY glDeleteProgram (GLuint program); +GL_APICALL void GL_APIENTRY glDeleteRenderbuffers (GLsizei n, const GLuint *renderbuffers); +GL_APICALL void GL_APIENTRY glDeleteShader (GLuint shader); +GL_APICALL void GL_APIENTRY glDeleteTextures (GLsizei n, const GLuint *textures); +GL_APICALL void GL_APIENTRY glDepthFunc (GLenum func); +GL_APICALL void GL_APIENTRY glDepthMask (GLboolean flag); +GL_APICALL void GL_APIENTRY glDepthRangef (GLfloat n, GLfloat f); +GL_APICALL void GL_APIENTRY glDetachShader (GLuint program, GLuint shader); +GL_APICALL void GL_APIENTRY glDisable (GLenum cap); +GL_APICALL void GL_APIENTRY glDisableVertexAttribArray (GLuint index); +GL_APICALL void GL_APIENTRY glDrawArrays (GLenum mode, GLint first, GLsizei count); +GL_APICALL void GL_APIENTRY glDrawElements (GLenum mode, GLsizei count, GLenum type, const void *indices); +GL_APICALL void GL_APIENTRY glEnable (GLenum cap); +GL_APICALL void GL_APIENTRY glEnableVertexAttribArray (GLuint index); +GL_APICALL void GL_APIENTRY glFinish (void); +GL_APICALL void GL_APIENTRY glFlush (void); +GL_APICALL void GL_APIENTRY glFramebufferRenderbuffer (GLenum target, GLenum attachment, GLenum renderbuffertarget, GLuint renderbuffer); +GL_APICALL void GL_APIENTRY glFramebufferTexture2D (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level); +GL_APICALL void GL_APIENTRY glFrontFace (GLenum mode); +GL_APICALL void GL_APIENTRY glGenBuffers (GLsizei n, GLuint *buffers); +GL_APICALL void GL_APIENTRY glGenerateMipmap (GLenum target); +GL_APICALL void GL_APIENTRY glGenFramebuffers (GLsizei n, GLuint *framebuffers); +GL_APICALL void GL_APIENTRY glGenRenderbuffers (GLsizei n, GLuint *renderbuffers); +GL_APICALL void GL_APIENTRY glGenTextures (GLsizei n, GLuint *textures); +GL_APICALL void GL_APIENTRY glGetActiveAttrib (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLint *size, GLenum *type, GLchar *name); +GL_APICALL void GL_APIENTRY glGetActiveUniform (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLint *size, GLenum *type, GLchar *name); +GL_APICALL void GL_APIENTRY glGetAttachedShaders (GLuint program, GLsizei maxCount, GLsizei *count, GLuint *shaders); +GL_APICALL GLint GL_APIENTRY glGetAttribLocation (GLuint program, const GLchar *name); +GL_APICALL void GL_APIENTRY glGetBooleanv (GLenum pname, GLboolean *data); +GL_APICALL void GL_APIENTRY glGetBufferParameteriv (GLenum target, GLenum pname, GLint *params); +GL_APICALL GLenum GL_APIENTRY glGetError (void); +GL_APICALL void GL_APIENTRY glGetFloatv (GLenum pname, GLfloat *data); +GL_APICALL void GL_APIENTRY glGetFramebufferAttachmentParameteriv (GLenum target, GLenum attachment, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetIntegerv (GLenum pname, GLint *data); +GL_APICALL void GL_APIENTRY glGetProgramiv (GLuint program, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetProgramInfoLog (GLuint program, GLsizei bufSize, GLsizei *length, GLchar *infoLog); +GL_APICALL void GL_APIENTRY glGetRenderbufferParameteriv (GLenum target, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetShaderiv (GLuint shader, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetShaderInfoLog (GLuint shader, GLsizei bufSize, GLsizei *length, GLchar *infoLog); +GL_APICALL void GL_APIENTRY glGetShaderPrecisionFormat (GLenum shadertype, GLenum precisiontype, GLint *range, GLint *precision); +GL_APICALL void GL_APIENTRY glGetShaderSource (GLuint shader, GLsizei bufSize, GLsizei *length, GLchar 
*source); +GL_APICALL const GLubyte *GL_APIENTRY glGetString (GLenum name); +GL_APICALL void GL_APIENTRY glGetTexParameterfv (GLenum target, GLenum pname, GLfloat *params); +GL_APICALL void GL_APIENTRY glGetTexParameteriv (GLenum target, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetUniformfv (GLuint program, GLint location, GLfloat *params); +GL_APICALL void GL_APIENTRY glGetUniformiv (GLuint program, GLint location, GLint *params); +GL_APICALL GLint GL_APIENTRY glGetUniformLocation (GLuint program, const GLchar *name); +GL_APICALL void GL_APIENTRY glGetVertexAttribfv (GLuint index, GLenum pname, GLfloat *params); +GL_APICALL void GL_APIENTRY glGetVertexAttribiv (GLuint index, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetVertexAttribPointerv (GLuint index, GLenum pname, void **pointer); +GL_APICALL void GL_APIENTRY glHint (GLenum target, GLenum mode); +GL_APICALL GLboolean GL_APIENTRY glIsBuffer (GLuint buffer); +GL_APICALL GLboolean GL_APIENTRY glIsEnabled (GLenum cap); +GL_APICALL GLboolean GL_APIENTRY glIsFramebuffer (GLuint framebuffer); +GL_APICALL GLboolean GL_APIENTRY glIsProgram (GLuint program); +GL_APICALL GLboolean GL_APIENTRY glIsRenderbuffer (GLuint renderbuffer); +GL_APICALL GLboolean GL_APIENTRY glIsShader (GLuint shader); +GL_APICALL GLboolean GL_APIENTRY glIsTexture (GLuint texture); +GL_APICALL void GL_APIENTRY glLineWidth (GLfloat width); +GL_APICALL void GL_APIENTRY glLinkProgram (GLuint program); +GL_APICALL void GL_APIENTRY glPixelStorei (GLenum pname, GLint param); +GL_APICALL void GL_APIENTRY glPolygonOffset (GLfloat factor, GLfloat units); +GL_APICALL void GL_APIENTRY glReadPixels (GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, void *pixels); +GL_APICALL void GL_APIENTRY glReleaseShaderCompiler (void); +GL_APICALL void GL_APIENTRY glRenderbufferStorage (GLenum target, GLenum internalformat, GLsizei width, GLsizei height); +GL_APICALL void GL_APIENTRY glSampleCoverage (GLfloat value, GLboolean invert); +GL_APICALL void GL_APIENTRY glScissor (GLint x, GLint y, GLsizei width, GLsizei height); +GL_APICALL void GL_APIENTRY glShaderBinary (GLsizei count, const GLuint *shaders, GLenum binaryformat, const void *binary, GLsizei length); +GL_APICALL void GL_APIENTRY glShaderSource (GLuint shader, GLsizei count, const GLchar *const*string, const GLint *length); +GL_APICALL void GL_APIENTRY glStencilFunc (GLenum func, GLint ref, GLuint mask); +GL_APICALL void GL_APIENTRY glStencilFuncSeparate (GLenum face, GLenum func, GLint ref, GLuint mask); +GL_APICALL void GL_APIENTRY glStencilMask (GLuint mask); +GL_APICALL void GL_APIENTRY glStencilMaskSeparate (GLenum face, GLuint mask); +GL_APICALL void GL_APIENTRY glStencilOp (GLenum fail, GLenum zfail, GLenum zpass); +GL_APICALL void GL_APIENTRY glStencilOpSeparate (GLenum face, GLenum sfail, GLenum dpfail, GLenum dppass); +GL_APICALL void GL_APIENTRY glTexImage2D (GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, const void *pixels); +GL_APICALL void GL_APIENTRY glTexParameterf (GLenum target, GLenum pname, GLfloat param); +GL_APICALL void GL_APIENTRY glTexParameterfv (GLenum target, GLenum pname, const GLfloat *params); +GL_APICALL void GL_APIENTRY glTexParameteri (GLenum target, GLenum pname, GLint param); +GL_APICALL void GL_APIENTRY glTexParameteriv (GLenum target, GLenum pname, const GLint *params); +GL_APICALL void GL_APIENTRY glTexSubImage2D (GLenum target, GLint level, GLint xoffset, 
GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, const void *pixels); +GL_APICALL void GL_APIENTRY glUniform1f (GLint location, GLfloat v0); +GL_APICALL void GL_APIENTRY glUniform1fv (GLint location, GLsizei count, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniform1i (GLint location, GLint v0); +GL_APICALL void GL_APIENTRY glUniform1iv (GLint location, GLsizei count, const GLint *value); +GL_APICALL void GL_APIENTRY glUniform2f (GLint location, GLfloat v0, GLfloat v1); +GL_APICALL void GL_APIENTRY glUniform2fv (GLint location, GLsizei count, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniform2i (GLint location, GLint v0, GLint v1); +GL_APICALL void GL_APIENTRY glUniform2iv (GLint location, GLsizei count, const GLint *value); +GL_APICALL void GL_APIENTRY glUniform3f (GLint location, GLfloat v0, GLfloat v1, GLfloat v2); +GL_APICALL void GL_APIENTRY glUniform3fv (GLint location, GLsizei count, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniform3i (GLint location, GLint v0, GLint v1, GLint v2); +GL_APICALL void GL_APIENTRY glUniform3iv (GLint location, GLsizei count, const GLint *value); +GL_APICALL void GL_APIENTRY glUniform4f (GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3); +GL_APICALL void GL_APIENTRY glUniform4fv (GLint location, GLsizei count, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniform4i (GLint location, GLint v0, GLint v1, GLint v2, GLint v3); +GL_APICALL void GL_APIENTRY glUniform4iv (GLint location, GLsizei count, const GLint *value); +GL_APICALL void GL_APIENTRY glUniformMatrix2fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniformMatrix3fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniformMatrix4fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUseProgram (GLuint program); +GL_APICALL void GL_APIENTRY glValidateProgram (GLuint program); +GL_APICALL void GL_APIENTRY glVertexAttrib1f (GLuint index, GLfloat x); +GL_APICALL void GL_APIENTRY glVertexAttrib1fv (GLuint index, const GLfloat *v); +GL_APICALL void GL_APIENTRY glVertexAttrib2f (GLuint index, GLfloat x, GLfloat y); +GL_APICALL void GL_APIENTRY glVertexAttrib2fv (GLuint index, const GLfloat *v); +GL_APICALL void GL_APIENTRY glVertexAttrib3f (GLuint index, GLfloat x, GLfloat y, GLfloat z); +GL_APICALL void GL_APIENTRY glVertexAttrib3fv (GLuint index, const GLfloat *v); +GL_APICALL void GL_APIENTRY glVertexAttrib4f (GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w); +GL_APICALL void GL_APIENTRY glVertexAttrib4fv (GLuint index, const GLfloat *v); +GL_APICALL void GL_APIENTRY glVertexAttribPointer (GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, const void *pointer); +GL_APICALL void GL_APIENTRY glViewport (GLint x, GLint y, GLsizei width, GLsizei height); +#endif +#endif /* GL_ES_VERSION_2_0 */ + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/sources/khronos/GLES2/gl2ext.h b/sources/khronos/GLES2/gl2ext.h new file mode 100755 index 00000000..b5eb7236 --- /dev/null +++ b/sources/khronos/GLES2/gl2ext.h @@ -0,0 +1,3445 @@ +#ifndef __gl2ext_h_ +#define __gl2ext_h_ 1 + +#ifdef __cplusplus +extern "C" { +#endif + +/* +** Copyright (c) 2013-2017 The Khronos Group Inc.
+** +** Permission is hereby granted, free of charge, to any person obtaining a +** copy of this software and/or associated documentation files (the +** "Materials"), to deal in the Materials without restriction, including +** without limitation the rights to use, copy, modify, merge, publish, +** distribute, sublicense, and/or sell copies of the Materials, and to +** permit persons to whom the Materials are furnished to do so, subject to +** the following conditions: +** +** The above copyright notice and this permission notice shall be included +** in all copies or substantial portions of the Materials. +** +** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +** MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS. +*/ +/* +** This header is generated from the Khronos OpenGL / OpenGL ES XML +** API Registry. The current version of the Registry, generator scripts +** used to make the header, and the header can be found at +** https://github.com/KhronosGroup/OpenGL-Registry +*/ + +#ifndef GL_APIENTRYP +#define GL_APIENTRYP GL_APIENTRY* +#endif + +/* Generated on date 20170613 */ + +/* Generated C header for: + * API: gles2 + * Profile: common + * Versions considered: 2\.[0-9] + * Versions emitted: _nomatch_^ + * Default extensions included: gles2 + * Additional extensions included: _nomatch_^ + * Extensions removed: _nomatch_^ + */ + +#ifndef GL_KHR_blend_equation_advanced +#define GL_KHR_blend_equation_advanced 1 +#define GL_MULTIPLY_KHR 0x9294 +#define GL_SCREEN_KHR 0x9295 +#define GL_OVERLAY_KHR 0x9296 +#define GL_DARKEN_KHR 0x9297 +#define GL_LIGHTEN_KHR 0x9298 +#define GL_COLORDODGE_KHR 0x9299 +#define GL_COLORBURN_KHR 0x929A +#define GL_HARDLIGHT_KHR 0x929B +#define GL_SOFTLIGHT_KHR 0x929C +#define GL_DIFFERENCE_KHR 0x929E +#define GL_EXCLUSION_KHR 0x92A0 +#define GL_HSL_HUE_KHR 0x92AD +#define GL_HSL_SATURATION_KHR 0x92AE +#define GL_HSL_COLOR_KHR 0x92AF +#define GL_HSL_LUMINOSITY_KHR 0x92B0 +typedef void (GL_APIENTRYP PFNGLBLENDBARRIERKHRPROC) (void); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glBlendBarrierKHR (void); +#endif +#endif /* GL_KHR_blend_equation_advanced */ + +#ifndef GL_KHR_blend_equation_advanced_coherent +#define GL_KHR_blend_equation_advanced_coherent 1 +#define GL_BLEND_ADVANCED_COHERENT_KHR 0x9285 +#endif /* GL_KHR_blend_equation_advanced_coherent */ + +#ifndef GL_KHR_context_flush_control +#define GL_KHR_context_flush_control 1 +#define GL_CONTEXT_RELEASE_BEHAVIOR_KHR 0x82FB +#define GL_CONTEXT_RELEASE_BEHAVIOR_FLUSH_KHR 0x82FC +#endif /* GL_KHR_context_flush_control */ + +#ifndef GL_KHR_debug +#define GL_KHR_debug 1 +typedef void (GL_APIENTRY *GLDEBUGPROCKHR)(GLenum source,GLenum type,GLuint id,GLenum severity,GLsizei length,const GLchar *message,const void *userParam); +#define GL_SAMPLER 0x82E6 +#define GL_DEBUG_OUTPUT_SYNCHRONOUS_KHR 0x8242 +#define GL_DEBUG_NEXT_LOGGED_MESSAGE_LENGTH_KHR 0x8243 +#define GL_DEBUG_CALLBACK_FUNCTION_KHR 0x8244 +#define GL_DEBUG_CALLBACK_USER_PARAM_KHR 0x8245 +#define GL_DEBUG_SOURCE_API_KHR 0x8246 +#define GL_DEBUG_SOURCE_WINDOW_SYSTEM_KHR 0x8247 +#define GL_DEBUG_SOURCE_SHADER_COMPILER_KHR 0x8248 +#define 
GL_DEBUG_SOURCE_THIRD_PARTY_KHR 0x8249 +#define GL_DEBUG_SOURCE_APPLICATION_KHR 0x824A +#define GL_DEBUG_SOURCE_OTHER_KHR 0x824B +#define GL_DEBUG_TYPE_ERROR_KHR 0x824C +#define GL_DEBUG_TYPE_DEPRECATED_BEHAVIOR_KHR 0x824D +#define GL_DEBUG_TYPE_UNDEFINED_BEHAVIOR_KHR 0x824E +#define GL_DEBUG_TYPE_PORTABILITY_KHR 0x824F +#define GL_DEBUG_TYPE_PERFORMANCE_KHR 0x8250 +#define GL_DEBUG_TYPE_OTHER_KHR 0x8251 +#define GL_DEBUG_TYPE_MARKER_KHR 0x8268 +#define GL_DEBUG_TYPE_PUSH_GROUP_KHR 0x8269 +#define GL_DEBUG_TYPE_POP_GROUP_KHR 0x826A +#define GL_DEBUG_SEVERITY_NOTIFICATION_KHR 0x826B +#define GL_MAX_DEBUG_GROUP_STACK_DEPTH_KHR 0x826C +#define GL_DEBUG_GROUP_STACK_DEPTH_KHR 0x826D +#define GL_BUFFER_KHR 0x82E0 +#define GL_SHADER_KHR 0x82E1 +#define GL_PROGRAM_KHR 0x82E2 +#define GL_VERTEX_ARRAY_KHR 0x8074 +#define GL_QUERY_KHR 0x82E3 +#define GL_PROGRAM_PIPELINE_KHR 0x82E4 +#define GL_SAMPLER_KHR 0x82E6 +#define GL_MAX_LABEL_LENGTH_KHR 0x82E8 +#define GL_MAX_DEBUG_MESSAGE_LENGTH_KHR 0x9143 +#define GL_MAX_DEBUG_LOGGED_MESSAGES_KHR 0x9144 +#define GL_DEBUG_LOGGED_MESSAGES_KHR 0x9145 +#define GL_DEBUG_SEVERITY_HIGH_KHR 0x9146 +#define GL_DEBUG_SEVERITY_MEDIUM_KHR 0x9147 +#define GL_DEBUG_SEVERITY_LOW_KHR 0x9148 +#define GL_DEBUG_OUTPUT_KHR 0x92E0 +#define GL_CONTEXT_FLAG_DEBUG_BIT_KHR 0x00000002 +#define GL_STACK_OVERFLOW_KHR 0x0503 +#define GL_STACK_UNDERFLOW_KHR 0x0504 +typedef void (GL_APIENTRYP PFNGLDEBUGMESSAGECONTROLKHRPROC) (GLenum source, GLenum type, GLenum severity, GLsizei count, const GLuint *ids, GLboolean enabled); +typedef void (GL_APIENTRYP PFNGLDEBUGMESSAGEINSERTKHRPROC) (GLenum source, GLenum type, GLuint id, GLenum severity, GLsizei length, const GLchar *buf); +typedef void (GL_APIENTRYP PFNGLDEBUGMESSAGECALLBACKKHRPROC) (GLDEBUGPROCKHR callback, const void *userParam); +typedef GLuint (GL_APIENTRYP PFNGLGETDEBUGMESSAGELOGKHRPROC) (GLuint count, GLsizei bufSize, GLenum *sources, GLenum *types, GLuint *ids, GLenum *severities, GLsizei *lengths, GLchar *messageLog); +typedef void (GL_APIENTRYP PFNGLPUSHDEBUGGROUPKHRPROC) (GLenum source, GLuint id, GLsizei length, const GLchar *message); +typedef void (GL_APIENTRYP PFNGLPOPDEBUGGROUPKHRPROC) (void); +typedef void (GL_APIENTRYP PFNGLOBJECTLABELKHRPROC) (GLenum identifier, GLuint name, GLsizei length, const GLchar *label); +typedef void (GL_APIENTRYP PFNGLGETOBJECTLABELKHRPROC) (GLenum identifier, GLuint name, GLsizei bufSize, GLsizei *length, GLchar *label); +typedef void (GL_APIENTRYP PFNGLOBJECTPTRLABELKHRPROC) (const void *ptr, GLsizei length, const GLchar *label); +typedef void (GL_APIENTRYP PFNGLGETOBJECTPTRLABELKHRPROC) (const void *ptr, GLsizei bufSize, GLsizei *length, GLchar *label); +typedef void (GL_APIENTRYP PFNGLGETPOINTERVKHRPROC) (GLenum pname, void **params); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glDebugMessageControlKHR (GLenum source, GLenum type, GLenum severity, GLsizei count, const GLuint *ids, GLboolean enabled); +GL_APICALL void GL_APIENTRY glDebugMessageInsertKHR (GLenum source, GLenum type, GLuint id, GLenum severity, GLsizei length, const GLchar *buf); +GL_APICALL void GL_APIENTRY glDebugMessageCallbackKHR (GLDEBUGPROCKHR callback, const void *userParam); +GL_APICALL GLuint GL_APIENTRY glGetDebugMessageLogKHR (GLuint count, GLsizei bufSize, GLenum *sources, GLenum *types, GLuint *ids, GLenum *severities, GLsizei *lengths, GLchar *messageLog); +GL_APICALL void GL_APIENTRY glPushDebugGroupKHR (GLenum source, GLuint id, GLsizei length, const GLchar *message); +GL_APICALL void GL_APIENTRY 
glPopDebugGroupKHR (void); +GL_APICALL void GL_APIENTRY glObjectLabelKHR (GLenum identifier, GLuint name, GLsizei length, const GLchar *label); +GL_APICALL void GL_APIENTRY glGetObjectLabelKHR (GLenum identifier, GLuint name, GLsizei bufSize, GLsizei *length, GLchar *label); +GL_APICALL void GL_APIENTRY glObjectPtrLabelKHR (const void *ptr, GLsizei length, const GLchar *label); +GL_APICALL void GL_APIENTRY glGetObjectPtrLabelKHR (const void *ptr, GLsizei bufSize, GLsizei *length, GLchar *label); +GL_APICALL void GL_APIENTRY glGetPointervKHR (GLenum pname, void **params); +#endif +#endif /* GL_KHR_debug */ + +#ifndef GL_KHR_no_error +#define GL_KHR_no_error 1 +#define GL_CONTEXT_FLAG_NO_ERROR_BIT_KHR 0x00000008 +#endif /* GL_KHR_no_error */ + +#ifndef GL_KHR_robust_buffer_access_behavior +#define GL_KHR_robust_buffer_access_behavior 1 +#endif /* GL_KHR_robust_buffer_access_behavior */ + +#ifndef GL_KHR_robustness +#define GL_KHR_robustness 1 +#define GL_CONTEXT_ROBUST_ACCESS_KHR 0x90F3 +#define GL_LOSE_CONTEXT_ON_RESET_KHR 0x8252 +#define GL_GUILTY_CONTEXT_RESET_KHR 0x8253 +#define GL_INNOCENT_CONTEXT_RESET_KHR 0x8254 +#define GL_UNKNOWN_CONTEXT_RESET_KHR 0x8255 +#define GL_RESET_NOTIFICATION_STRATEGY_KHR 0x8256 +#define GL_NO_RESET_NOTIFICATION_KHR 0x8261 +#define GL_CONTEXT_LOST_KHR 0x0507 +typedef GLenum (GL_APIENTRYP PFNGLGETGRAPHICSRESETSTATUSKHRPROC) (void); +typedef void (GL_APIENTRYP PFNGLREADNPIXELSKHRPROC) (GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, GLsizei bufSize, void *data); +typedef void (GL_APIENTRYP PFNGLGETNUNIFORMFVKHRPROC) (GLuint program, GLint location, GLsizei bufSize, GLfloat *params); +typedef void (GL_APIENTRYP PFNGLGETNUNIFORMIVKHRPROC) (GLuint program, GLint location, GLsizei bufSize, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETNUNIFORMUIVKHRPROC) (GLuint program, GLint location, GLsizei bufSize, GLuint *params); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL GLenum GL_APIENTRY glGetGraphicsResetStatusKHR (void); +GL_APICALL void GL_APIENTRY glReadnPixelsKHR (GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, GLsizei bufSize, void *data); +GL_APICALL void GL_APIENTRY glGetnUniformfvKHR (GLuint program, GLint location, GLsizei bufSize, GLfloat *params); +GL_APICALL void GL_APIENTRY glGetnUniformivKHR (GLuint program, GLint location, GLsizei bufSize, GLint *params); +GL_APICALL void GL_APIENTRY glGetnUniformuivKHR (GLuint program, GLint location, GLsizei bufSize, GLuint *params); +#endif +#endif /* GL_KHR_robustness */ + +#ifndef GL_KHR_texture_compression_astc_hdr +#define GL_KHR_texture_compression_astc_hdr 1 +#define GL_COMPRESSED_RGBA_ASTC_4x4_KHR 0x93B0 +#define GL_COMPRESSED_RGBA_ASTC_5x4_KHR 0x93B1 +#define GL_COMPRESSED_RGBA_ASTC_5x5_KHR 0x93B2 +#define GL_COMPRESSED_RGBA_ASTC_6x5_KHR 0x93B3 +#define GL_COMPRESSED_RGBA_ASTC_6x6_KHR 0x93B4 +#define GL_COMPRESSED_RGBA_ASTC_8x5_KHR 0x93B5 +#define GL_COMPRESSED_RGBA_ASTC_8x6_KHR 0x93B6 +#define GL_COMPRESSED_RGBA_ASTC_8x8_KHR 0x93B7 +#define GL_COMPRESSED_RGBA_ASTC_10x5_KHR 0x93B8 +#define GL_COMPRESSED_RGBA_ASTC_10x6_KHR 0x93B9 +#define GL_COMPRESSED_RGBA_ASTC_10x8_KHR 0x93BA +#define GL_COMPRESSED_RGBA_ASTC_10x10_KHR 0x93BB +#define GL_COMPRESSED_RGBA_ASTC_12x10_KHR 0x93BC +#define GL_COMPRESSED_RGBA_ASTC_12x12_KHR 0x93BD +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4_KHR 0x93D0 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x4_KHR 0x93D1 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5_KHR 0x93D2 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x5_KHR 0x93D3 
+#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6_KHR 0x93D4 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x5_KHR 0x93D5 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x6_KHR 0x93D6 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x8_KHR 0x93D7 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x5_KHR 0x93D8 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x6_KHR 0x93D9 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x8_KHR 0x93DA +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x10_KHR 0x93DB +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x10_KHR 0x93DC +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x12_KHR 0x93DD +#endif /* GL_KHR_texture_compression_astc_hdr */ + +#ifndef GL_KHR_texture_compression_astc_ldr +#define GL_KHR_texture_compression_astc_ldr 1 +#endif /* GL_KHR_texture_compression_astc_ldr */ + +#ifndef GL_KHR_texture_compression_astc_sliced_3d +#define GL_KHR_texture_compression_astc_sliced_3d 1 +#endif /* GL_KHR_texture_compression_astc_sliced_3d */ + +#ifndef GL_OES_EGL_image +#define GL_OES_EGL_image 1 +typedef void *GLeglImageOES; +typedef void (GL_APIENTRYP PFNGLEGLIMAGETARGETTEXTURE2DOESPROC) (GLenum target, GLeglImageOES image); +typedef void (GL_APIENTRYP PFNGLEGLIMAGETARGETRENDERBUFFERSTORAGEOESPROC) (GLenum target, GLeglImageOES image); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glEGLImageTargetTexture2DOES (GLenum target, GLeglImageOES image); +GL_APICALL void GL_APIENTRY glEGLImageTargetRenderbufferStorageOES (GLenum target, GLeglImageOES image); +#endif +#endif /* GL_OES_EGL_image */ + +#ifndef GL_OES_EGL_image_external +#define GL_OES_EGL_image_external 1 +#define GL_TEXTURE_EXTERNAL_OES 0x8D65 +#define GL_TEXTURE_BINDING_EXTERNAL_OES 0x8D67 +#define GL_REQUIRED_TEXTURE_IMAGE_UNITS_OES 0x8D68 +#define GL_SAMPLER_EXTERNAL_OES 0x8D66 +#endif /* GL_OES_EGL_image_external */ + +#ifndef GL_OES_EGL_image_external_essl3 +#define GL_OES_EGL_image_external_essl3 1 +#endif /* GL_OES_EGL_image_external_essl3 */ + +#ifndef GL_OES_compressed_ETC1_RGB8_sub_texture +#define GL_OES_compressed_ETC1_RGB8_sub_texture 1 +#endif /* GL_OES_compressed_ETC1_RGB8_sub_texture */ + +#ifndef GL_OES_compressed_ETC1_RGB8_texture +#define GL_OES_compressed_ETC1_RGB8_texture 1 +#define GL_ETC1_RGB8_OES 0x8D64 +#endif /* GL_OES_compressed_ETC1_RGB8_texture */ + +#ifndef GL_OES_compressed_paletted_texture +#define GL_OES_compressed_paletted_texture 1 +#define GL_PALETTE4_RGB8_OES 0x8B90 +#define GL_PALETTE4_RGBA8_OES 0x8B91 +#define GL_PALETTE4_R5_G6_B5_OES 0x8B92 +#define GL_PALETTE4_RGBA4_OES 0x8B93 +#define GL_PALETTE4_RGB5_A1_OES 0x8B94 +#define GL_PALETTE8_RGB8_OES 0x8B95 +#define GL_PALETTE8_RGBA8_OES 0x8B96 +#define GL_PALETTE8_R5_G6_B5_OES 0x8B97 +#define GL_PALETTE8_RGBA4_OES 0x8B98 +#define GL_PALETTE8_RGB5_A1_OES 0x8B99 +#endif /* GL_OES_compressed_paletted_texture */ + +#ifndef GL_OES_copy_image +#define GL_OES_copy_image 1 +typedef void (GL_APIENTRYP PFNGLCOPYIMAGESUBDATAOESPROC) (GLuint srcName, GLenum srcTarget, GLint srcLevel, GLint srcX, GLint srcY, GLint srcZ, GLuint dstName, GLenum dstTarget, GLint dstLevel, GLint dstX, GLint dstY, GLint dstZ, GLsizei srcWidth, GLsizei srcHeight, GLsizei srcDepth); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glCopyImageSubDataOES (GLuint srcName, GLenum srcTarget, GLint srcLevel, GLint srcX, GLint srcY, GLint srcZ, GLuint dstName, GLenum dstTarget, GLint dstLevel, GLint dstX, GLint dstY, GLint dstZ, GLsizei srcWidth, GLsizei srcHeight, GLsizei srcDepth); +#endif +#endif /* GL_OES_copy_image */ + +#ifndef GL_OES_depth24 +#define GL_OES_depth24 1 +#define 
GL_DEPTH_COMPONENT24_OES 0x81A6 +#endif /* GL_OES_depth24 */ + +#ifndef GL_OES_depth32 +#define GL_OES_depth32 1 +#define GL_DEPTH_COMPONENT32_OES 0x81A7 +#endif /* GL_OES_depth32 */ + +#ifndef GL_OES_depth_texture +#define GL_OES_depth_texture 1 +#endif /* GL_OES_depth_texture */ + +#ifndef GL_OES_draw_buffers_indexed +#define GL_OES_draw_buffers_indexed 1 +#define GL_MIN 0x8007 +#define GL_MAX 0x8008 +typedef void (GL_APIENTRYP PFNGLENABLEIOESPROC) (GLenum target, GLuint index); +typedef void (GL_APIENTRYP PFNGLDISABLEIOESPROC) (GLenum target, GLuint index); +typedef void (GL_APIENTRYP PFNGLBLENDEQUATIONIOESPROC) (GLuint buf, GLenum mode); +typedef void (GL_APIENTRYP PFNGLBLENDEQUATIONSEPARATEIOESPROC) (GLuint buf, GLenum modeRGB, GLenum modeAlpha); +typedef void (GL_APIENTRYP PFNGLBLENDFUNCIOESPROC) (GLuint buf, GLenum src, GLenum dst); +typedef void (GL_APIENTRYP PFNGLBLENDFUNCSEPARATEIOESPROC) (GLuint buf, GLenum srcRGB, GLenum dstRGB, GLenum srcAlpha, GLenum dstAlpha); +typedef void (GL_APIENTRYP PFNGLCOLORMASKIOESPROC) (GLuint index, GLboolean r, GLboolean g, GLboolean b, GLboolean a); +typedef GLboolean (GL_APIENTRYP PFNGLISENABLEDIOESPROC) (GLenum target, GLuint index); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glEnableiOES (GLenum target, GLuint index); +GL_APICALL void GL_APIENTRY glDisableiOES (GLenum target, GLuint index); +GL_APICALL void GL_APIENTRY glBlendEquationiOES (GLuint buf, GLenum mode); +GL_APICALL void GL_APIENTRY glBlendEquationSeparateiOES (GLuint buf, GLenum modeRGB, GLenum modeAlpha); +GL_APICALL void GL_APIENTRY glBlendFunciOES (GLuint buf, GLenum src, GLenum dst); +GL_APICALL void GL_APIENTRY glBlendFuncSeparateiOES (GLuint buf, GLenum srcRGB, GLenum dstRGB, GLenum srcAlpha, GLenum dstAlpha); +GL_APICALL void GL_APIENTRY glColorMaskiOES (GLuint index, GLboolean r, GLboolean g, GLboolean b, GLboolean a); +GL_APICALL GLboolean GL_APIENTRY glIsEnablediOES (GLenum target, GLuint index); +#endif +#endif /* GL_OES_draw_buffers_indexed */ + +#ifndef GL_OES_draw_elements_base_vertex +#define GL_OES_draw_elements_base_vertex 1 +typedef void (GL_APIENTRYP PFNGLDRAWELEMENTSBASEVERTEXOESPROC) (GLenum mode, GLsizei count, GLenum type, const void *indices, GLint basevertex); +typedef void (GL_APIENTRYP PFNGLDRAWRANGEELEMENTSBASEVERTEXOESPROC) (GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum type, const void *indices, GLint basevertex); +typedef void (GL_APIENTRYP PFNGLDRAWELEMENTSINSTANCEDBASEVERTEXOESPROC) (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount, GLint basevertex); +typedef void (GL_APIENTRYP PFNGLMULTIDRAWELEMENTSBASEVERTEXOESPROC) (GLenum mode, const GLsizei *count, GLenum type, const void *const*indices, GLsizei primcount, const GLint *basevertex); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glDrawElementsBaseVertexOES (GLenum mode, GLsizei count, GLenum type, const void *indices, GLint basevertex); +GL_APICALL void GL_APIENTRY glDrawRangeElementsBaseVertexOES (GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum type, const void *indices, GLint basevertex); +GL_APICALL void GL_APIENTRY glDrawElementsInstancedBaseVertexOES (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount, GLint basevertex); +GL_APICALL void GL_APIENTRY glMultiDrawElementsBaseVertexOES (GLenum mode, const GLsizei *count, GLenum type, const void *const*indices, GLsizei primcount, const GLint *basevertex); +#endif +#endif /* GL_OES_draw_elements_base_vertex */ + +#ifndef 
GL_OES_element_index_uint +#define GL_OES_element_index_uint 1 +#endif /* GL_OES_element_index_uint */ + +#ifndef GL_OES_fbo_render_mipmap +#define GL_OES_fbo_render_mipmap 1 +#endif /* GL_OES_fbo_render_mipmap */ + +#ifndef GL_OES_fragment_precision_high +#define GL_OES_fragment_precision_high 1 +#endif /* GL_OES_fragment_precision_high */ + +#ifndef GL_OES_geometry_point_size +#define GL_OES_geometry_point_size 1 +#endif /* GL_OES_geometry_point_size */ + +#ifndef GL_OES_geometry_shader +#define GL_OES_geometry_shader 1 +#define GL_GEOMETRY_SHADER_OES 0x8DD9 +#define GL_GEOMETRY_SHADER_BIT_OES 0x00000004 +#define GL_GEOMETRY_LINKED_VERTICES_OUT_OES 0x8916 +#define GL_GEOMETRY_LINKED_INPUT_TYPE_OES 0x8917 +#define GL_GEOMETRY_LINKED_OUTPUT_TYPE_OES 0x8918 +#define GL_GEOMETRY_SHADER_INVOCATIONS_OES 0x887F +#define GL_LAYER_PROVOKING_VERTEX_OES 0x825E +#define GL_LINES_ADJACENCY_OES 0x000A +#define GL_LINE_STRIP_ADJACENCY_OES 0x000B +#define GL_TRIANGLES_ADJACENCY_OES 0x000C +#define GL_TRIANGLE_STRIP_ADJACENCY_OES 0x000D +#define GL_MAX_GEOMETRY_UNIFORM_COMPONENTS_OES 0x8DDF +#define GL_MAX_GEOMETRY_UNIFORM_BLOCKS_OES 0x8A2C +#define GL_MAX_COMBINED_GEOMETRY_UNIFORM_COMPONENTS_OES 0x8A32 +#define GL_MAX_GEOMETRY_INPUT_COMPONENTS_OES 0x9123 +#define GL_MAX_GEOMETRY_OUTPUT_COMPONENTS_OES 0x9124 +#define GL_MAX_GEOMETRY_OUTPUT_VERTICES_OES 0x8DE0 +#define GL_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS_OES 0x8DE1 +#define GL_MAX_GEOMETRY_SHADER_INVOCATIONS_OES 0x8E5A +#define GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS_OES 0x8C29 +#define GL_MAX_GEOMETRY_ATOMIC_COUNTER_BUFFERS_OES 0x92CF +#define GL_MAX_GEOMETRY_ATOMIC_COUNTERS_OES 0x92D5 +#define GL_MAX_GEOMETRY_IMAGE_UNIFORMS_OES 0x90CD +#define GL_MAX_GEOMETRY_SHADER_STORAGE_BLOCKS_OES 0x90D7 +#define GL_FIRST_VERTEX_CONVENTION_OES 0x8E4D +#define GL_LAST_VERTEX_CONVENTION_OES 0x8E4E +#define GL_UNDEFINED_VERTEX_OES 0x8260 +#define GL_PRIMITIVES_GENERATED_OES 0x8C87 +#define GL_FRAMEBUFFER_DEFAULT_LAYERS_OES 0x9312 +#define GL_MAX_FRAMEBUFFER_LAYERS_OES 0x9317 +#define GL_FRAMEBUFFER_INCOMPLETE_LAYER_TARGETS_OES 0x8DA8 +#define GL_FRAMEBUFFER_ATTACHMENT_LAYERED_OES 0x8DA7 +#define GL_REFERENCED_BY_GEOMETRY_SHADER_OES 0x9309 +typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERTEXTUREOESPROC) (GLenum target, GLenum attachment, GLuint texture, GLint level); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glFramebufferTextureOES (GLenum target, GLenum attachment, GLuint texture, GLint level); +#endif +#endif /* GL_OES_geometry_shader */ + +#ifndef GL_OES_get_program_binary +#define GL_OES_get_program_binary 1 +#define GL_PROGRAM_BINARY_LENGTH_OES 0x8741 +#define GL_NUM_PROGRAM_BINARY_FORMATS_OES 0x87FE +#define GL_PROGRAM_BINARY_FORMATS_OES 0x87FF +typedef void (GL_APIENTRYP PFNGLGETPROGRAMBINARYOESPROC) (GLuint program, GLsizei bufSize, GLsizei *length, GLenum *binaryFormat, void *binary); +typedef void (GL_APIENTRYP PFNGLPROGRAMBINARYOESPROC) (GLuint program, GLenum binaryFormat, const void *binary, GLint length); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glGetProgramBinaryOES (GLuint program, GLsizei bufSize, GLsizei *length, GLenum *binaryFormat, void *binary); +GL_APICALL void GL_APIENTRY glProgramBinaryOES (GLuint program, GLenum binaryFormat, const void *binary, GLint length); +#endif +#endif /* GL_OES_get_program_binary */ + +#ifndef GL_OES_gpu_shader5 +#define GL_OES_gpu_shader5 1 +#endif /* GL_OES_gpu_shader5 */ + +#ifndef GL_OES_mapbuffer +#define GL_OES_mapbuffer 1 +#define GL_WRITE_ONLY_OES 0x88B9 +#define GL_BUFFER_ACCESS_OES 
0x88BB +#define GL_BUFFER_MAPPED_OES 0x88BC +#define GL_BUFFER_MAP_POINTER_OES 0x88BD +typedef void *(GL_APIENTRYP PFNGLMAPBUFFEROESPROC) (GLenum target, GLenum access); +typedef GLboolean (GL_APIENTRYP PFNGLUNMAPBUFFEROESPROC) (GLenum target); +typedef void (GL_APIENTRYP PFNGLGETBUFFERPOINTERVOESPROC) (GLenum target, GLenum pname, void **params); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void *GL_APIENTRY glMapBufferOES (GLenum target, GLenum access); +GL_APICALL GLboolean GL_APIENTRY glUnmapBufferOES (GLenum target); +GL_APICALL void GL_APIENTRY glGetBufferPointervOES (GLenum target, GLenum pname, void **params); +#endif +#endif /* GL_OES_mapbuffer */ + +#ifndef GL_OES_packed_depth_stencil +#define GL_OES_packed_depth_stencil 1 +#define GL_DEPTH_STENCIL_OES 0x84F9 +#define GL_UNSIGNED_INT_24_8_OES 0x84FA +#define GL_DEPTH24_STENCIL8_OES 0x88F0 +#endif /* GL_OES_packed_depth_stencil */ + +#ifndef GL_OES_primitive_bounding_box +#define GL_OES_primitive_bounding_box 1 +#define GL_PRIMITIVE_BOUNDING_BOX_OES 0x92BE +typedef void (GL_APIENTRYP PFNGLPRIMITIVEBOUNDINGBOXOESPROC) (GLfloat minX, GLfloat minY, GLfloat minZ, GLfloat minW, GLfloat maxX, GLfloat maxY, GLfloat maxZ, GLfloat maxW); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glPrimitiveBoundingBoxOES (GLfloat minX, GLfloat minY, GLfloat minZ, GLfloat minW, GLfloat maxX, GLfloat maxY, GLfloat maxZ, GLfloat maxW); +#endif +#endif /* GL_OES_primitive_bounding_box */ + +#ifndef GL_OES_required_internalformat +#define GL_OES_required_internalformat 1 +#define GL_ALPHA8_OES 0x803C +#define GL_DEPTH_COMPONENT16_OES 0x81A5 +#define GL_LUMINANCE4_ALPHA4_OES 0x8043 +#define GL_LUMINANCE8_ALPHA8_OES 0x8045 +#define GL_LUMINANCE8_OES 0x8040 +#define GL_RGBA4_OES 0x8056 +#define GL_RGB5_A1_OES 0x8057 +#define GL_RGB565_OES 0x8D62 +#define GL_RGB8_OES 0x8051 +#define GL_RGBA8_OES 0x8058 +#define GL_RGB10_EXT 0x8052 +#define GL_RGB10_A2_EXT 0x8059 +#endif /* GL_OES_required_internalformat */ + +#ifndef GL_OES_rgb8_rgba8 +#define GL_OES_rgb8_rgba8 1 +#endif /* GL_OES_rgb8_rgba8 */ + +#ifndef GL_OES_sample_shading +#define GL_OES_sample_shading 1 +#define GL_SAMPLE_SHADING_OES 0x8C36 +#define GL_MIN_SAMPLE_SHADING_VALUE_OES 0x8C37 +typedef void (GL_APIENTRYP PFNGLMINSAMPLESHADINGOESPROC) (GLfloat value); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glMinSampleShadingOES (GLfloat value); +#endif +#endif /* GL_OES_sample_shading */ + +#ifndef GL_OES_sample_variables +#define GL_OES_sample_variables 1 +#endif /* GL_OES_sample_variables */ + +#ifndef GL_OES_shader_image_atomic +#define GL_OES_shader_image_atomic 1 +#endif /* GL_OES_shader_image_atomic */ + +#ifndef GL_OES_shader_io_blocks +#define GL_OES_shader_io_blocks 1 +#endif /* GL_OES_shader_io_blocks */ + +#ifndef GL_OES_shader_multisample_interpolation +#define GL_OES_shader_multisample_interpolation 1 +#define GL_MIN_FRAGMENT_INTERPOLATION_OFFSET_OES 0x8E5B +#define GL_MAX_FRAGMENT_INTERPOLATION_OFFSET_OES 0x8E5C +#define GL_FRAGMENT_INTERPOLATION_OFFSET_BITS_OES 0x8E5D +#endif /* GL_OES_shader_multisample_interpolation */ + +#ifndef GL_OES_standard_derivatives +#define GL_OES_standard_derivatives 1 +#define GL_FRAGMENT_SHADER_DERIVATIVE_HINT_OES 0x8B8B +#endif /* GL_OES_standard_derivatives */ + +#ifndef GL_OES_stencil1 +#define GL_OES_stencil1 1 +#define GL_STENCIL_INDEX1_OES 0x8D46 +#endif /* GL_OES_stencil1 */ + +#ifndef GL_OES_stencil4 +#define GL_OES_stencil4 1 +#define GL_STENCIL_INDEX4_OES 0x8D47 +#endif /* GL_OES_stencil4 */ + +#ifndef GL_OES_surfaceless_context 
+#define GL_OES_surfaceless_context 1 +#define GL_FRAMEBUFFER_UNDEFINED_OES 0x8219 +#endif /* GL_OES_surfaceless_context */ + +#ifndef GL_OES_tessellation_point_size +#define GL_OES_tessellation_point_size 1 +#endif /* GL_OES_tessellation_point_size */ + +#ifndef GL_OES_tessellation_shader +#define GL_OES_tessellation_shader 1 +#define GL_PATCHES_OES 0x000E +#define GL_PATCH_VERTICES_OES 0x8E72 +#define GL_TESS_CONTROL_OUTPUT_VERTICES_OES 0x8E75 +#define GL_TESS_GEN_MODE_OES 0x8E76 +#define GL_TESS_GEN_SPACING_OES 0x8E77 +#define GL_TESS_GEN_VERTEX_ORDER_OES 0x8E78 +#define GL_TESS_GEN_POINT_MODE_OES 0x8E79 +#define GL_ISOLINES_OES 0x8E7A +#define GL_QUADS_OES 0x0007 +#define GL_FRACTIONAL_ODD_OES 0x8E7B +#define GL_FRACTIONAL_EVEN_OES 0x8E7C +#define GL_MAX_PATCH_VERTICES_OES 0x8E7D +#define GL_MAX_TESS_GEN_LEVEL_OES 0x8E7E +#define GL_MAX_TESS_CONTROL_UNIFORM_COMPONENTS_OES 0x8E7F +#define GL_MAX_TESS_EVALUATION_UNIFORM_COMPONENTS_OES 0x8E80 +#define GL_MAX_TESS_CONTROL_TEXTURE_IMAGE_UNITS_OES 0x8E81 +#define GL_MAX_TESS_EVALUATION_TEXTURE_IMAGE_UNITS_OES 0x8E82 +#define GL_MAX_TESS_CONTROL_OUTPUT_COMPONENTS_OES 0x8E83 +#define GL_MAX_TESS_PATCH_COMPONENTS_OES 0x8E84 +#define GL_MAX_TESS_CONTROL_TOTAL_OUTPUT_COMPONENTS_OES 0x8E85 +#define GL_MAX_TESS_EVALUATION_OUTPUT_COMPONENTS_OES 0x8E86 +#define GL_MAX_TESS_CONTROL_UNIFORM_BLOCKS_OES 0x8E89 +#define GL_MAX_TESS_EVALUATION_UNIFORM_BLOCKS_OES 0x8E8A +#define GL_MAX_TESS_CONTROL_INPUT_COMPONENTS_OES 0x886C +#define GL_MAX_TESS_EVALUATION_INPUT_COMPONENTS_OES 0x886D +#define GL_MAX_COMBINED_TESS_CONTROL_UNIFORM_COMPONENTS_OES 0x8E1E +#define GL_MAX_COMBINED_TESS_EVALUATION_UNIFORM_COMPONENTS_OES 0x8E1F +#define GL_MAX_TESS_CONTROL_ATOMIC_COUNTER_BUFFERS_OES 0x92CD +#define GL_MAX_TESS_EVALUATION_ATOMIC_COUNTER_BUFFERS_OES 0x92CE +#define GL_MAX_TESS_CONTROL_ATOMIC_COUNTERS_OES 0x92D3 +#define GL_MAX_TESS_EVALUATION_ATOMIC_COUNTERS_OES 0x92D4 +#define GL_MAX_TESS_CONTROL_IMAGE_UNIFORMS_OES 0x90CB +#define GL_MAX_TESS_EVALUATION_IMAGE_UNIFORMS_OES 0x90CC +#define GL_MAX_TESS_CONTROL_SHADER_STORAGE_BLOCKS_OES 0x90D8 +#define GL_MAX_TESS_EVALUATION_SHADER_STORAGE_BLOCKS_OES 0x90D9 +#define GL_PRIMITIVE_RESTART_FOR_PATCHES_SUPPORTED_OES 0x8221 +#define GL_IS_PER_PATCH_OES 0x92E7 +#define GL_REFERENCED_BY_TESS_CONTROL_SHADER_OES 0x9307 +#define GL_REFERENCED_BY_TESS_EVALUATION_SHADER_OES 0x9308 +#define GL_TESS_CONTROL_SHADER_OES 0x8E88 +#define GL_TESS_EVALUATION_SHADER_OES 0x8E87 +#define GL_TESS_CONTROL_SHADER_BIT_OES 0x00000008 +#define GL_TESS_EVALUATION_SHADER_BIT_OES 0x00000010 +typedef void (GL_APIENTRYP PFNGLPATCHPARAMETERIOESPROC) (GLenum pname, GLint value); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glPatchParameteriOES (GLenum pname, GLint value); +#endif +#endif /* GL_OES_tessellation_shader */ + +#ifndef GL_OES_texture_3D +#define GL_OES_texture_3D 1 +#define GL_TEXTURE_WRAP_R_OES 0x8072 +#define GL_TEXTURE_3D_OES 0x806F +#define GL_TEXTURE_BINDING_3D_OES 0x806A +#define GL_MAX_3D_TEXTURE_SIZE_OES 0x8073 +#define GL_SAMPLER_3D_OES 0x8B5F +#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_3D_ZOFFSET_OES 0x8CD4 +typedef void (GL_APIENTRYP PFNGLTEXIMAGE3DOESPROC) (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const void *pixels); +typedef void (GL_APIENTRYP PFNGLTEXSUBIMAGE3DOESPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const 
void *pixels); +typedef void (GL_APIENTRYP PFNGLCOPYTEXSUBIMAGE3DOESPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLint x, GLint y, GLsizei width, GLsizei height); +typedef void (GL_APIENTRYP PFNGLCOMPRESSEDTEXIMAGE3DOESPROC) (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLsizei imageSize, const void *data); +typedef void (GL_APIENTRYP PFNGLCOMPRESSEDTEXSUBIMAGE3DOESPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLsizei imageSize, const void *data); +typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERTEXTURE3DOESPROC) (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLint zoffset); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glTexImage3DOES (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const void *pixels); +GL_APICALL void GL_APIENTRY glTexSubImage3DOES (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const void *pixels); +GL_APICALL void GL_APIENTRY glCopyTexSubImage3DOES (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLint x, GLint y, GLsizei width, GLsizei height); +GL_APICALL void GL_APIENTRY glCompressedTexImage3DOES (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLsizei imageSize, const void *data); +GL_APICALL void GL_APIENTRY glCompressedTexSubImage3DOES (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLsizei imageSize, const void *data); +GL_APICALL void GL_APIENTRY glFramebufferTexture3DOES (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLint zoffset); +#endif +#endif /* GL_OES_texture_3D */ + +#ifndef GL_OES_texture_border_clamp +#define GL_OES_texture_border_clamp 1 +#define GL_TEXTURE_BORDER_COLOR_OES 0x1004 +#define GL_CLAMP_TO_BORDER_OES 0x812D +typedef void (GL_APIENTRYP PFNGLTEXPARAMETERIIVOESPROC) (GLenum target, GLenum pname, const GLint *params); +typedef void (GL_APIENTRYP PFNGLTEXPARAMETERIUIVOESPROC) (GLenum target, GLenum pname, const GLuint *params); +typedef void (GL_APIENTRYP PFNGLGETTEXPARAMETERIIVOESPROC) (GLenum target, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETTEXPARAMETERIUIVOESPROC) (GLenum target, GLenum pname, GLuint *params); +typedef void (GL_APIENTRYP PFNGLSAMPLERPARAMETERIIVOESPROC) (GLuint sampler, GLenum pname, const GLint *param); +typedef void (GL_APIENTRYP PFNGLSAMPLERPARAMETERIUIVOESPROC) (GLuint sampler, GLenum pname, const GLuint *param); +typedef void (GL_APIENTRYP PFNGLGETSAMPLERPARAMETERIIVOESPROC) (GLuint sampler, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETSAMPLERPARAMETERIUIVOESPROC) (GLuint sampler, GLenum pname, GLuint *params); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glTexParameterIivOES (GLenum target, GLenum pname, const GLint *params); +GL_APICALL void GL_APIENTRY glTexParameterIuivOES (GLenum target, GLenum pname, const GLuint *params); +GL_APICALL void GL_APIENTRY glGetTexParameterIivOES (GLenum target, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetTexParameterIuivOES (GLenum target, GLenum pname, GLuint *params); 
+GL_APICALL void GL_APIENTRY glSamplerParameterIivOES (GLuint sampler, GLenum pname, const GLint *param); +GL_APICALL void GL_APIENTRY glSamplerParameterIuivOES (GLuint sampler, GLenum pname, const GLuint *param); +GL_APICALL void GL_APIENTRY glGetSamplerParameterIivOES (GLuint sampler, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetSamplerParameterIuivOES (GLuint sampler, GLenum pname, GLuint *params); +#endif +#endif /* GL_OES_texture_border_clamp */ + +#ifndef GL_OES_texture_buffer +#define GL_OES_texture_buffer 1 +#define GL_TEXTURE_BUFFER_OES 0x8C2A +#define GL_TEXTURE_BUFFER_BINDING_OES 0x8C2A +#define GL_MAX_TEXTURE_BUFFER_SIZE_OES 0x8C2B +#define GL_TEXTURE_BINDING_BUFFER_OES 0x8C2C +#define GL_TEXTURE_BUFFER_DATA_STORE_BINDING_OES 0x8C2D +#define GL_TEXTURE_BUFFER_OFFSET_ALIGNMENT_OES 0x919F +#define GL_SAMPLER_BUFFER_OES 0x8DC2 +#define GL_INT_SAMPLER_BUFFER_OES 0x8DD0 +#define GL_UNSIGNED_INT_SAMPLER_BUFFER_OES 0x8DD8 +#define GL_IMAGE_BUFFER_OES 0x9051 +#define GL_INT_IMAGE_BUFFER_OES 0x905C +#define GL_UNSIGNED_INT_IMAGE_BUFFER_OES 0x9067 +#define GL_TEXTURE_BUFFER_OFFSET_OES 0x919D +#define GL_TEXTURE_BUFFER_SIZE_OES 0x919E +typedef void (GL_APIENTRYP PFNGLTEXBUFFEROESPROC) (GLenum target, GLenum internalformat, GLuint buffer); +typedef void (GL_APIENTRYP PFNGLTEXBUFFERRANGEOESPROC) (GLenum target, GLenum internalformat, GLuint buffer, GLintptr offset, GLsizeiptr size); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glTexBufferOES (GLenum target, GLenum internalformat, GLuint buffer); +GL_APICALL void GL_APIENTRY glTexBufferRangeOES (GLenum target, GLenum internalformat, GLuint buffer, GLintptr offset, GLsizeiptr size); +#endif +#endif /* GL_OES_texture_buffer */ + +#ifndef GL_OES_texture_compression_astc +#define GL_OES_texture_compression_astc 1 +#define GL_COMPRESSED_RGBA_ASTC_3x3x3_OES 0x93C0 +#define GL_COMPRESSED_RGBA_ASTC_4x3x3_OES 0x93C1 +#define GL_COMPRESSED_RGBA_ASTC_4x4x3_OES 0x93C2 +#define GL_COMPRESSED_RGBA_ASTC_4x4x4_OES 0x93C3 +#define GL_COMPRESSED_RGBA_ASTC_5x4x4_OES 0x93C4 +#define GL_COMPRESSED_RGBA_ASTC_5x5x4_OES 0x93C5 +#define GL_COMPRESSED_RGBA_ASTC_5x5x5_OES 0x93C6 +#define GL_COMPRESSED_RGBA_ASTC_6x5x5_OES 0x93C7 +#define GL_COMPRESSED_RGBA_ASTC_6x6x5_OES 0x93C8 +#define GL_COMPRESSED_RGBA_ASTC_6x6x6_OES 0x93C9 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_3x3x3_OES 0x93E0 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x3x3_OES 0x93E1 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4x3_OES 0x93E2 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4x4_OES 0x93E3 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x4x4_OES 0x93E4 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5x4_OES 0x93E5 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5x5_OES 0x93E6 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x5x5_OES 0x93E7 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6x5_OES 0x93E8 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6x6_OES 0x93E9 +#endif /* GL_OES_texture_compression_astc */ + +#ifndef GL_OES_texture_cube_map_array +#define GL_OES_texture_cube_map_array 1 +#define GL_TEXTURE_CUBE_MAP_ARRAY_OES 0x9009 +#define GL_TEXTURE_BINDING_CUBE_MAP_ARRAY_OES 0x900A +#define GL_SAMPLER_CUBE_MAP_ARRAY_OES 0x900C +#define GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW_OES 0x900D +#define GL_INT_SAMPLER_CUBE_MAP_ARRAY_OES 0x900E +#define GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY_OES 0x900F +#define GL_IMAGE_CUBE_MAP_ARRAY_OES 0x9054 +#define GL_INT_IMAGE_CUBE_MAP_ARRAY_OES 0x905F +#define GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY_OES 0x906A +#endif /* GL_OES_texture_cube_map_array */ + +#ifndef 
GL_OES_texture_float +#define GL_OES_texture_float 1 +#endif /* GL_OES_texture_float */ + +#ifndef GL_OES_texture_float_linear +#define GL_OES_texture_float_linear 1 +#endif /* GL_OES_texture_float_linear */ + +#ifndef GL_OES_texture_half_float +#define GL_OES_texture_half_float 1 +#define GL_HALF_FLOAT_OES 0x8D61 +#endif /* GL_OES_texture_half_float */ + +#ifndef GL_OES_texture_half_float_linear +#define GL_OES_texture_half_float_linear 1 +#endif /* GL_OES_texture_half_float_linear */ + +#ifndef GL_OES_texture_npot +#define GL_OES_texture_npot 1 +#endif /* GL_OES_texture_npot */ + +#ifndef GL_OES_texture_stencil8 +#define GL_OES_texture_stencil8 1 +#define GL_STENCIL_INDEX_OES 0x1901 +#define GL_STENCIL_INDEX8_OES 0x8D48 +#endif /* GL_OES_texture_stencil8 */ + +#ifndef GL_OES_texture_storage_multisample_2d_array +#define GL_OES_texture_storage_multisample_2d_array 1 +#define GL_TEXTURE_2D_MULTISAMPLE_ARRAY_OES 0x9102 +#define GL_TEXTURE_BINDING_2D_MULTISAMPLE_ARRAY_OES 0x9105 +#define GL_SAMPLER_2D_MULTISAMPLE_ARRAY_OES 0x910B +#define GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY_OES 0x910C +#define GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY_OES 0x910D +typedef void (GL_APIENTRYP PFNGLTEXSTORAGE3DMULTISAMPLEOESPROC) (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLboolean fixedsamplelocations); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glTexStorage3DMultisampleOES (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLboolean fixedsamplelocations); +#endif +#endif /* GL_OES_texture_storage_multisample_2d_array */ + +#ifndef GL_OES_texture_view +#define GL_OES_texture_view 1 +#define GL_TEXTURE_VIEW_MIN_LEVEL_OES 0x82DB +#define GL_TEXTURE_VIEW_NUM_LEVELS_OES 0x82DC +#define GL_TEXTURE_VIEW_MIN_LAYER_OES 0x82DD +#define GL_TEXTURE_VIEW_NUM_LAYERS_OES 0x82DE +#define GL_TEXTURE_IMMUTABLE_LEVELS 0x82DF +typedef void (GL_APIENTRYP PFNGLTEXTUREVIEWOESPROC) (GLuint texture, GLenum target, GLuint origtexture, GLenum internalformat, GLuint minlevel, GLuint numlevels, GLuint minlayer, GLuint numlayers); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glTextureViewOES (GLuint texture, GLenum target, GLuint origtexture, GLenum internalformat, GLuint minlevel, GLuint numlevels, GLuint minlayer, GLuint numlayers); +#endif +#endif /* GL_OES_texture_view */ + +#ifndef GL_OES_vertex_array_object +#define GL_OES_vertex_array_object 1 +#define GL_VERTEX_ARRAY_BINDING_OES 0x85B5 +typedef void (GL_APIENTRYP PFNGLBINDVERTEXARRAYOESPROC) (GLuint array); +typedef void (GL_APIENTRYP PFNGLDELETEVERTEXARRAYSOESPROC) (GLsizei n, const GLuint *arrays); +typedef void (GL_APIENTRYP PFNGLGENVERTEXARRAYSOESPROC) (GLsizei n, GLuint *arrays); +typedef GLboolean (GL_APIENTRYP PFNGLISVERTEXARRAYOESPROC) (GLuint array); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glBindVertexArrayOES (GLuint array); +GL_APICALL void GL_APIENTRY glDeleteVertexArraysOES (GLsizei n, const GLuint *arrays); +GL_APICALL void GL_APIENTRY glGenVertexArraysOES (GLsizei n, GLuint *arrays); +GL_APICALL GLboolean GL_APIENTRY glIsVertexArrayOES (GLuint array); +#endif +#endif /* GL_OES_vertex_array_object */ + +#ifndef GL_OES_vertex_half_float +#define GL_OES_vertex_half_float 1 +#endif /* GL_OES_vertex_half_float */ + +#ifndef GL_OES_vertex_type_10_10_10_2 +#define GL_OES_vertex_type_10_10_10_2 1 +#define GL_UNSIGNED_INT_10_10_10_2_OES 0x8DF6 +#define GL_INT_10_10_10_2_OES 0x8DF7 +#endif /* GL_OES_vertex_type_10_10_10_2 
*/ + +#ifndef GL_OES_viewport_array +#define GL_OES_viewport_array 1 +#define GL_MAX_VIEWPORTS_OES 0x825B +#define GL_VIEWPORT_SUBPIXEL_BITS_OES 0x825C +#define GL_VIEWPORT_BOUNDS_RANGE_OES 0x825D +#define GL_VIEWPORT_INDEX_PROVOKING_VERTEX_OES 0x825F +typedef void (GL_APIENTRYP PFNGLVIEWPORTARRAYVOESPROC) (GLuint first, GLsizei count, const GLfloat *v); +typedef void (GL_APIENTRYP PFNGLVIEWPORTINDEXEDFOESPROC) (GLuint index, GLfloat x, GLfloat y, GLfloat w, GLfloat h); +typedef void (GL_APIENTRYP PFNGLVIEWPORTINDEXEDFVOESPROC) (GLuint index, const GLfloat *v); +typedef void (GL_APIENTRYP PFNGLSCISSORARRAYVOESPROC) (GLuint first, GLsizei count, const GLint *v); +typedef void (GL_APIENTRYP PFNGLSCISSORINDEXEDOESPROC) (GLuint index, GLint left, GLint bottom, GLsizei width, GLsizei height); +typedef void (GL_APIENTRYP PFNGLSCISSORINDEXEDVOESPROC) (GLuint index, const GLint *v); +typedef void (GL_APIENTRYP PFNGLDEPTHRANGEARRAYFVOESPROC) (GLuint first, GLsizei count, const GLfloat *v); +typedef void (GL_APIENTRYP PFNGLDEPTHRANGEINDEXEDFOESPROC) (GLuint index, GLfloat n, GLfloat f); +typedef void (GL_APIENTRYP PFNGLGETFLOATI_VOESPROC) (GLenum target, GLuint index, GLfloat *data); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glViewportArrayvOES (GLuint first, GLsizei count, const GLfloat *v); +GL_APICALL void GL_APIENTRY glViewportIndexedfOES (GLuint index, GLfloat x, GLfloat y, GLfloat w, GLfloat h); +GL_APICALL void GL_APIENTRY glViewportIndexedfvOES (GLuint index, const GLfloat *v); +GL_APICALL void GL_APIENTRY glScissorArrayvOES (GLuint first, GLsizei count, const GLint *v); +GL_APICALL void GL_APIENTRY glScissorIndexedOES (GLuint index, GLint left, GLint bottom, GLsizei width, GLsizei height); +GL_APICALL void GL_APIENTRY glScissorIndexedvOES (GLuint index, const GLint *v); +GL_APICALL void GL_APIENTRY glDepthRangeArrayfvOES (GLuint first, GLsizei count, const GLfloat *v); +GL_APICALL void GL_APIENTRY glDepthRangeIndexedfOES (GLuint index, GLfloat n, GLfloat f); +GL_APICALL void GL_APIENTRY glGetFloati_vOES (GLenum target, GLuint index, GLfloat *data); +#endif +#endif /* GL_OES_viewport_array */ + +#ifndef GL_AMD_compressed_3DC_texture +#define GL_AMD_compressed_3DC_texture 1 +#define GL_3DC_X_AMD 0x87F9 +#define GL_3DC_XY_AMD 0x87FA +#endif /* GL_AMD_compressed_3DC_texture */ + +#ifndef GL_AMD_compressed_ATC_texture +#define GL_AMD_compressed_ATC_texture 1 +#define GL_ATC_RGB_AMD 0x8C92 +#define GL_ATC_RGBA_EXPLICIT_ALPHA_AMD 0x8C93 +#define GL_ATC_RGBA_INTERPOLATED_ALPHA_AMD 0x87EE +#endif /* GL_AMD_compressed_ATC_texture */ + +#ifndef GL_AMD_performance_monitor +#define GL_AMD_performance_monitor 1 +#define GL_COUNTER_TYPE_AMD 0x8BC0 +#define GL_COUNTER_RANGE_AMD 0x8BC1 +#define GL_UNSIGNED_INT64_AMD 0x8BC2 +#define GL_PERCENTAGE_AMD 0x8BC3 +#define GL_PERFMON_RESULT_AVAILABLE_AMD 0x8BC4 +#define GL_PERFMON_RESULT_SIZE_AMD 0x8BC5 +#define GL_PERFMON_RESULT_AMD 0x8BC6 +typedef void (GL_APIENTRYP PFNGLGETPERFMONITORGROUPSAMDPROC) (GLint *numGroups, GLsizei groupsSize, GLuint *groups); +typedef void (GL_APIENTRYP PFNGLGETPERFMONITORCOUNTERSAMDPROC) (GLuint group, GLint *numCounters, GLint *maxActiveCounters, GLsizei counterSize, GLuint *counters); +typedef void (GL_APIENTRYP PFNGLGETPERFMONITORGROUPSTRINGAMDPROC) (GLuint group, GLsizei bufSize, GLsizei *length, GLchar *groupString); +typedef void (GL_APIENTRYP PFNGLGETPERFMONITORCOUNTERSTRINGAMDPROC) (GLuint group, GLuint counter, GLsizei bufSize, GLsizei *length, GLchar *counterString); +typedef void (GL_APIENTRYP 
PFNGLGETPERFMONITORCOUNTERINFOAMDPROC) (GLuint group, GLuint counter, GLenum pname, void *data); +typedef void (GL_APIENTRYP PFNGLGENPERFMONITORSAMDPROC) (GLsizei n, GLuint *monitors); +typedef void (GL_APIENTRYP PFNGLDELETEPERFMONITORSAMDPROC) (GLsizei n, GLuint *monitors); +typedef void (GL_APIENTRYP PFNGLSELECTPERFMONITORCOUNTERSAMDPROC) (GLuint monitor, GLboolean enable, GLuint group, GLint numCounters, GLuint *counterList); +typedef void (GL_APIENTRYP PFNGLBEGINPERFMONITORAMDPROC) (GLuint monitor); +typedef void (GL_APIENTRYP PFNGLENDPERFMONITORAMDPROC) (GLuint monitor); +typedef void (GL_APIENTRYP PFNGLGETPERFMONITORCOUNTERDATAAMDPROC) (GLuint monitor, GLenum pname, GLsizei dataSize, GLuint *data, GLint *bytesWritten); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glGetPerfMonitorGroupsAMD (GLint *numGroups, GLsizei groupsSize, GLuint *groups); +GL_APICALL void GL_APIENTRY glGetPerfMonitorCountersAMD (GLuint group, GLint *numCounters, GLint *maxActiveCounters, GLsizei counterSize, GLuint *counters); +GL_APICALL void GL_APIENTRY glGetPerfMonitorGroupStringAMD (GLuint group, GLsizei bufSize, GLsizei *length, GLchar *groupString); +GL_APICALL void GL_APIENTRY glGetPerfMonitorCounterStringAMD (GLuint group, GLuint counter, GLsizei bufSize, GLsizei *length, GLchar *counterString); +GL_APICALL void GL_APIENTRY glGetPerfMonitorCounterInfoAMD (GLuint group, GLuint counter, GLenum pname, void *data); +GL_APICALL void GL_APIENTRY glGenPerfMonitorsAMD (GLsizei n, GLuint *monitors); +GL_APICALL void GL_APIENTRY glDeletePerfMonitorsAMD (GLsizei n, GLuint *monitors); +GL_APICALL void GL_APIENTRY glSelectPerfMonitorCountersAMD (GLuint monitor, GLboolean enable, GLuint group, GLint numCounters, GLuint *counterList); +GL_APICALL void GL_APIENTRY glBeginPerfMonitorAMD (GLuint monitor); +GL_APICALL void GL_APIENTRY glEndPerfMonitorAMD (GLuint monitor); +GL_APICALL void GL_APIENTRY glGetPerfMonitorCounterDataAMD (GLuint monitor, GLenum pname, GLsizei dataSize, GLuint *data, GLint *bytesWritten); +#endif +#endif /* GL_AMD_performance_monitor */ + +#ifndef GL_AMD_program_binary_Z400 +#define GL_AMD_program_binary_Z400 1 +#define GL_Z400_BINARY_AMD 0x8740 +#endif /* GL_AMD_program_binary_Z400 */ + +#ifndef GL_ANDROID_extension_pack_es31a +#define GL_ANDROID_extension_pack_es31a 1 +#endif /* GL_ANDROID_extension_pack_es31a */ + +#ifndef GL_ANGLE_depth_texture +#define GL_ANGLE_depth_texture 1 +#endif /* GL_ANGLE_depth_texture */ + +#ifndef GL_ANGLE_framebuffer_blit +#define GL_ANGLE_framebuffer_blit 1 +#define GL_READ_FRAMEBUFFER_ANGLE 0x8CA8 +#define GL_DRAW_FRAMEBUFFER_ANGLE 0x8CA9 +#define GL_DRAW_FRAMEBUFFER_BINDING_ANGLE 0x8CA6 +#define GL_READ_FRAMEBUFFER_BINDING_ANGLE 0x8CAA +typedef void (GL_APIENTRYP PFNGLBLITFRAMEBUFFERANGLEPROC) (GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glBlitFramebufferANGLE (GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter); +#endif +#endif /* GL_ANGLE_framebuffer_blit */ + +#ifndef GL_ANGLE_framebuffer_multisample +#define GL_ANGLE_framebuffer_multisample 1 +#define GL_RENDERBUFFER_SAMPLES_ANGLE 0x8CAB +#define GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE_ANGLE 0x8D56 +#define GL_MAX_SAMPLES_ANGLE 0x8D57 +typedef void (GL_APIENTRYP PFNGLRENDERBUFFERSTORAGEMULTISAMPLEANGLEPROC) (GLenum target, GLsizei samples, GLenum internalformat, 
GLsizei width, GLsizei height); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glRenderbufferStorageMultisampleANGLE (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height); +#endif +#endif /* GL_ANGLE_framebuffer_multisample */ + +#ifndef GL_ANGLE_instanced_arrays +#define GL_ANGLE_instanced_arrays 1 +#define GL_VERTEX_ATTRIB_ARRAY_DIVISOR_ANGLE 0x88FE +typedef void (GL_APIENTRYP PFNGLDRAWARRAYSINSTANCEDANGLEPROC) (GLenum mode, GLint first, GLsizei count, GLsizei primcount); +typedef void (GL_APIENTRYP PFNGLDRAWELEMENTSINSTANCEDANGLEPROC) (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei primcount); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBDIVISORANGLEPROC) (GLuint index, GLuint divisor); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glDrawArraysInstancedANGLE (GLenum mode, GLint first, GLsizei count, GLsizei primcount); +GL_APICALL void GL_APIENTRY glDrawElementsInstancedANGLE (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei primcount); +GL_APICALL void GL_APIENTRY glVertexAttribDivisorANGLE (GLuint index, GLuint divisor); +#endif +#endif /* GL_ANGLE_instanced_arrays */ + +#ifndef GL_ANGLE_pack_reverse_row_order +#define GL_ANGLE_pack_reverse_row_order 1 +#define GL_PACK_REVERSE_ROW_ORDER_ANGLE 0x93A4 +#endif /* GL_ANGLE_pack_reverse_row_order */ + +#ifndef GL_ANGLE_program_binary +#define GL_ANGLE_program_binary 1 +#define GL_PROGRAM_BINARY_ANGLE 0x93A6 +#endif /* GL_ANGLE_program_binary */ + +#ifndef GL_ANGLE_texture_compression_dxt3 +#define GL_ANGLE_texture_compression_dxt3 1 +#define GL_COMPRESSED_RGBA_S3TC_DXT3_ANGLE 0x83F2 +#endif /* GL_ANGLE_texture_compression_dxt3 */ + +#ifndef GL_ANGLE_texture_compression_dxt5 +#define GL_ANGLE_texture_compression_dxt5 1 +#define GL_COMPRESSED_RGBA_S3TC_DXT5_ANGLE 0x83F3 +#endif /* GL_ANGLE_texture_compression_dxt5 */ + +#ifndef GL_ANGLE_texture_usage +#define GL_ANGLE_texture_usage 1 +#define GL_TEXTURE_USAGE_ANGLE 0x93A2 +#define GL_FRAMEBUFFER_ATTACHMENT_ANGLE 0x93A3 +#endif /* GL_ANGLE_texture_usage */ + +#ifndef GL_ANGLE_translated_shader_source +#define GL_ANGLE_translated_shader_source 1 +#define GL_TRANSLATED_SHADER_SOURCE_LENGTH_ANGLE 0x93A0 +typedef void (GL_APIENTRYP PFNGLGETTRANSLATEDSHADERSOURCEANGLEPROC) (GLuint shader, GLsizei bufsize, GLsizei *length, GLchar *source); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glGetTranslatedShaderSourceANGLE (GLuint shader, GLsizei bufsize, GLsizei *length, GLchar *source); +#endif +#endif /* GL_ANGLE_translated_shader_source */ + +#ifndef GL_APPLE_clip_distance +#define GL_APPLE_clip_distance 1 +#define GL_MAX_CLIP_DISTANCES_APPLE 0x0D32 +#define GL_CLIP_DISTANCE0_APPLE 0x3000 +#define GL_CLIP_DISTANCE1_APPLE 0x3001 +#define GL_CLIP_DISTANCE2_APPLE 0x3002 +#define GL_CLIP_DISTANCE3_APPLE 0x3003 +#define GL_CLIP_DISTANCE4_APPLE 0x3004 +#define GL_CLIP_DISTANCE5_APPLE 0x3005 +#define GL_CLIP_DISTANCE6_APPLE 0x3006 +#define GL_CLIP_DISTANCE7_APPLE 0x3007 +#endif /* GL_APPLE_clip_distance */ + +#ifndef GL_APPLE_color_buffer_packed_float +#define GL_APPLE_color_buffer_packed_float 1 +#endif /* GL_APPLE_color_buffer_packed_float */ + +#ifndef GL_APPLE_copy_texture_levels +#define GL_APPLE_copy_texture_levels 1 +typedef void (GL_APIENTRYP PFNGLCOPYTEXTURELEVELSAPPLEPROC) (GLuint destinationTexture, GLuint sourceTexture, GLint sourceBaseLevel, GLsizei sourceLevelCount); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glCopyTextureLevelsAPPLE (GLuint destinationTexture, GLuint 
sourceTexture, GLint sourceBaseLevel, GLsizei sourceLevelCount); +#endif +#endif /* GL_APPLE_copy_texture_levels */ + +#ifndef GL_APPLE_framebuffer_multisample +#define GL_APPLE_framebuffer_multisample 1 +#define GL_RENDERBUFFER_SAMPLES_APPLE 0x8CAB +#define GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE_APPLE 0x8D56 +#define GL_MAX_SAMPLES_APPLE 0x8D57 +#define GL_READ_FRAMEBUFFER_APPLE 0x8CA8 +#define GL_DRAW_FRAMEBUFFER_APPLE 0x8CA9 +#define GL_DRAW_FRAMEBUFFER_BINDING_APPLE 0x8CA6 +#define GL_READ_FRAMEBUFFER_BINDING_APPLE 0x8CAA +typedef void (GL_APIENTRYP PFNGLRENDERBUFFERSTORAGEMULTISAMPLEAPPLEPROC) (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height); +typedef void (GL_APIENTRYP PFNGLRESOLVEMULTISAMPLEFRAMEBUFFERAPPLEPROC) (void); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glRenderbufferStorageMultisampleAPPLE (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height); +GL_APICALL void GL_APIENTRY glResolveMultisampleFramebufferAPPLE (void); +#endif +#endif /* GL_APPLE_framebuffer_multisample */ + +#ifndef GL_APPLE_rgb_422 +#define GL_APPLE_rgb_422 1 +#define GL_RGB_422_APPLE 0x8A1F +#define GL_UNSIGNED_SHORT_8_8_APPLE 0x85BA +#define GL_UNSIGNED_SHORT_8_8_REV_APPLE 0x85BB +#define GL_RGB_RAW_422_APPLE 0x8A51 +#endif /* GL_APPLE_rgb_422 */ + +#ifndef GL_APPLE_sync +#define GL_APPLE_sync 1 +#define GL_SYNC_OBJECT_APPLE 0x8A53 +#define GL_MAX_SERVER_WAIT_TIMEOUT_APPLE 0x9111 +#define GL_OBJECT_TYPE_APPLE 0x9112 +#define GL_SYNC_CONDITION_APPLE 0x9113 +#define GL_SYNC_STATUS_APPLE 0x9114 +#define GL_SYNC_FLAGS_APPLE 0x9115 +#define GL_SYNC_FENCE_APPLE 0x9116 +#define GL_SYNC_GPU_COMMANDS_COMPLETE_APPLE 0x9117 +#define GL_UNSIGNALED_APPLE 0x9118 +#define GL_SIGNALED_APPLE 0x9119 +#define GL_ALREADY_SIGNALED_APPLE 0x911A +#define GL_TIMEOUT_EXPIRED_APPLE 0x911B +#define GL_CONDITION_SATISFIED_APPLE 0x911C +#define GL_WAIT_FAILED_APPLE 0x911D +#define GL_SYNC_FLUSH_COMMANDS_BIT_APPLE 0x00000001 +#define GL_TIMEOUT_IGNORED_APPLE 0xFFFFFFFFFFFFFFFFull +typedef GLsync (GL_APIENTRYP PFNGLFENCESYNCAPPLEPROC) (GLenum condition, GLbitfield flags); +typedef GLboolean (GL_APIENTRYP PFNGLISSYNCAPPLEPROC) (GLsync sync); +typedef void (GL_APIENTRYP PFNGLDELETESYNCAPPLEPROC) (GLsync sync); +typedef GLenum (GL_APIENTRYP PFNGLCLIENTWAITSYNCAPPLEPROC) (GLsync sync, GLbitfield flags, GLuint64 timeout); +typedef void (GL_APIENTRYP PFNGLWAITSYNCAPPLEPROC) (GLsync sync, GLbitfield flags, GLuint64 timeout); +typedef void (GL_APIENTRYP PFNGLGETINTEGER64VAPPLEPROC) (GLenum pname, GLint64 *params); +typedef void (GL_APIENTRYP PFNGLGETSYNCIVAPPLEPROC) (GLsync sync, GLenum pname, GLsizei bufSize, GLsizei *length, GLint *values); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL GLsync GL_APIENTRY glFenceSyncAPPLE (GLenum condition, GLbitfield flags); +GL_APICALL GLboolean GL_APIENTRY glIsSyncAPPLE (GLsync sync); +GL_APICALL void GL_APIENTRY glDeleteSyncAPPLE (GLsync sync); +GL_APICALL GLenum GL_APIENTRY glClientWaitSyncAPPLE (GLsync sync, GLbitfield flags, GLuint64 timeout); +GL_APICALL void GL_APIENTRY glWaitSyncAPPLE (GLsync sync, GLbitfield flags, GLuint64 timeout); +GL_APICALL void GL_APIENTRY glGetInteger64vAPPLE (GLenum pname, GLint64 *params); +GL_APICALL void GL_APIENTRY glGetSyncivAPPLE (GLsync sync, GLenum pname, GLsizei bufSize, GLsizei *length, GLint *values); +#endif +#endif /* GL_APPLE_sync */ + +#ifndef GL_APPLE_texture_format_BGRA8888 +#define GL_APPLE_texture_format_BGRA8888 1 +#define GL_BGRA_EXT 0x80E1 +#define GL_BGRA8_EXT 0x93A1 +#endif 
/* GL_APPLE_texture_format_BGRA8888 */ + +#ifndef GL_APPLE_texture_max_level +#define GL_APPLE_texture_max_level 1 +#define GL_TEXTURE_MAX_LEVEL_APPLE 0x813D +#endif /* GL_APPLE_texture_max_level */ + +#ifndef GL_APPLE_texture_packed_float +#define GL_APPLE_texture_packed_float 1 +#define GL_UNSIGNED_INT_10F_11F_11F_REV_APPLE 0x8C3B +#define GL_UNSIGNED_INT_5_9_9_9_REV_APPLE 0x8C3E +#define GL_R11F_G11F_B10F_APPLE 0x8C3A +#define GL_RGB9_E5_APPLE 0x8C3D +#endif /* GL_APPLE_texture_packed_float */ + +#ifndef GL_ARM_mali_program_binary +#define GL_ARM_mali_program_binary 1 +#define GL_MALI_PROGRAM_BINARY_ARM 0x8F61 +#endif /* GL_ARM_mali_program_binary */ + +#ifndef GL_ARM_mali_shader_binary +#define GL_ARM_mali_shader_binary 1 +#define GL_MALI_SHADER_BINARY_ARM 0x8F60 +#endif /* GL_ARM_mali_shader_binary */ + +#ifndef GL_ARM_rgba8 +#define GL_ARM_rgba8 1 +#endif /* GL_ARM_rgba8 */ + +#ifndef GL_ARM_shader_framebuffer_fetch +#define GL_ARM_shader_framebuffer_fetch 1 +#define GL_FETCH_PER_SAMPLE_ARM 0x8F65 +#define GL_FRAGMENT_SHADER_FRAMEBUFFER_FETCH_MRT_ARM 0x8F66 +#endif /* GL_ARM_shader_framebuffer_fetch */ + +#ifndef GL_ARM_shader_framebuffer_fetch_depth_stencil +#define GL_ARM_shader_framebuffer_fetch_depth_stencil 1 +#endif /* GL_ARM_shader_framebuffer_fetch_depth_stencil */ + +#ifndef GL_DMP_program_binary +#define GL_DMP_program_binary 1 +#define GL_SMAPHS30_PROGRAM_BINARY_DMP 0x9251 +#define GL_SMAPHS_PROGRAM_BINARY_DMP 0x9252 +#define GL_DMP_PROGRAM_BINARY_DMP 0x9253 +#endif /* GL_DMP_program_binary */ + +#ifndef GL_DMP_shader_binary +#define GL_DMP_shader_binary 1 +#define GL_SHADER_BINARY_DMP 0x9250 +#endif /* GL_DMP_shader_binary */ + +#ifndef GL_EXT_EGL_image_array +#define GL_EXT_EGL_image_array 1 +#endif /* GL_EXT_EGL_image_array */ + +#ifndef GL_EXT_YUV_target +#define GL_EXT_YUV_target 1 +#define GL_SAMPLER_EXTERNAL_2D_Y2Y_EXT 0x8BE7 +#endif /* GL_EXT_YUV_target */ + +#ifndef GL_EXT_base_instance +#define GL_EXT_base_instance 1 +typedef void (GL_APIENTRYP PFNGLDRAWARRAYSINSTANCEDBASEINSTANCEEXTPROC) (GLenum mode, GLint first, GLsizei count, GLsizei instancecount, GLuint baseinstance); +typedef void (GL_APIENTRYP PFNGLDRAWELEMENTSINSTANCEDBASEINSTANCEEXTPROC) (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount, GLuint baseinstance); +typedef void (GL_APIENTRYP PFNGLDRAWELEMENTSINSTANCEDBASEVERTEXBASEINSTANCEEXTPROC) (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount, GLint basevertex, GLuint baseinstance); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glDrawArraysInstancedBaseInstanceEXT (GLenum mode, GLint first, GLsizei count, GLsizei instancecount, GLuint baseinstance); +GL_APICALL void GL_APIENTRY glDrawElementsInstancedBaseInstanceEXT (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount, GLuint baseinstance); +GL_APICALL void GL_APIENTRY glDrawElementsInstancedBaseVertexBaseInstanceEXT (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount, GLint basevertex, GLuint baseinstance); +#endif +#endif /* GL_EXT_base_instance */ + +#ifndef GL_EXT_blend_func_extended +#define GL_EXT_blend_func_extended 1 +#define GL_SRC1_COLOR_EXT 0x88F9 +#define GL_SRC1_ALPHA_EXT 0x8589 +#define GL_ONE_MINUS_SRC1_COLOR_EXT 0x88FA +#define GL_ONE_MINUS_SRC1_ALPHA_EXT 0x88FB +#define GL_SRC_ALPHA_SATURATE_EXT 0x0308 +#define GL_LOCATION_INDEX_EXT 0x930F +#define GL_MAX_DUAL_SOURCE_DRAW_BUFFERS_EXT 0x88FC +typedef void (GL_APIENTRYP 
PFNGLBINDFRAGDATALOCATIONINDEXEDEXTPROC) (GLuint program, GLuint colorNumber, GLuint index, const GLchar *name); +typedef void (GL_APIENTRYP PFNGLBINDFRAGDATALOCATIONEXTPROC) (GLuint program, GLuint color, const GLchar *name); +typedef GLint (GL_APIENTRYP PFNGLGETPROGRAMRESOURCELOCATIONINDEXEXTPROC) (GLuint program, GLenum programInterface, const GLchar *name); +typedef GLint (GL_APIENTRYP PFNGLGETFRAGDATAINDEXEXTPROC) (GLuint program, const GLchar *name); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glBindFragDataLocationIndexedEXT (GLuint program, GLuint colorNumber, GLuint index, const GLchar *name); +GL_APICALL void GL_APIENTRY glBindFragDataLocationEXT (GLuint program, GLuint color, const GLchar *name); +GL_APICALL GLint GL_APIENTRY glGetProgramResourceLocationIndexEXT (GLuint program, GLenum programInterface, const GLchar *name); +GL_APICALL GLint GL_APIENTRY glGetFragDataIndexEXT (GLuint program, const GLchar *name); +#endif +#endif /* GL_EXT_blend_func_extended */ + +#ifndef GL_EXT_blend_minmax +#define GL_EXT_blend_minmax 1 +#define GL_MIN_EXT 0x8007 +#define GL_MAX_EXT 0x8008 +#endif /* GL_EXT_blend_minmax */ + +#ifndef GL_EXT_buffer_storage +#define GL_EXT_buffer_storage 1 +#define GL_MAP_READ_BIT 0x0001 +#define GL_MAP_WRITE_BIT 0x0002 +#define GL_MAP_PERSISTENT_BIT_EXT 0x0040 +#define GL_MAP_COHERENT_BIT_EXT 0x0080 +#define GL_DYNAMIC_STORAGE_BIT_EXT 0x0100 +#define GL_CLIENT_STORAGE_BIT_EXT 0x0200 +#define GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT_EXT 0x00004000 +#define GL_BUFFER_IMMUTABLE_STORAGE_EXT 0x821F +#define GL_BUFFER_STORAGE_FLAGS_EXT 0x8220 +typedef void (GL_APIENTRYP PFNGLBUFFERSTORAGEEXTPROC) (GLenum target, GLsizeiptr size, const void *data, GLbitfield flags); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glBufferStorageEXT (GLenum target, GLsizeiptr size, const void *data, GLbitfield flags); +#endif +#endif /* GL_EXT_buffer_storage */ + +#ifndef GL_EXT_clear_texture +#define GL_EXT_clear_texture 1 +typedef void (GL_APIENTRYP PFNGLCLEARTEXIMAGEEXTPROC) (GLuint texture, GLint level, GLenum format, GLenum type, const void *data); +typedef void (GL_APIENTRYP PFNGLCLEARTEXSUBIMAGEEXTPROC) (GLuint texture, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const void *data); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glClearTexImageEXT (GLuint texture, GLint level, GLenum format, GLenum type, const void *data); +GL_APICALL void GL_APIENTRY glClearTexSubImageEXT (GLuint texture, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const void *data); +#endif +#endif /* GL_EXT_clear_texture */ + +#ifndef GL_EXT_clip_cull_distance +#define GL_EXT_clip_cull_distance 1 +#define GL_MAX_CLIP_DISTANCES_EXT 0x0D32 +#define GL_MAX_CULL_DISTANCES_EXT 0x82F9 +#define GL_MAX_COMBINED_CLIP_AND_CULL_DISTANCES_EXT 0x82FA +#define GL_CLIP_DISTANCE0_EXT 0x3000 +#define GL_CLIP_DISTANCE1_EXT 0x3001 +#define GL_CLIP_DISTANCE2_EXT 0x3002 +#define GL_CLIP_DISTANCE3_EXT 0x3003 +#define GL_CLIP_DISTANCE4_EXT 0x3004 +#define GL_CLIP_DISTANCE5_EXT 0x3005 +#define GL_CLIP_DISTANCE6_EXT 0x3006 +#define GL_CLIP_DISTANCE7_EXT 0x3007 +#endif /* GL_EXT_clip_cull_distance */ + +#ifndef GL_EXT_color_buffer_float +#define GL_EXT_color_buffer_float 1 +#endif /* GL_EXT_color_buffer_float */ + +#ifndef GL_EXT_color_buffer_half_float +#define GL_EXT_color_buffer_half_float 1 +#define GL_RGBA16F_EXT 0x881A +#define 
GL_RGB16F_EXT 0x881B +#define GL_RG16F_EXT 0x822F +#define GL_R16F_EXT 0x822D +#define GL_FRAMEBUFFER_ATTACHMENT_COMPONENT_TYPE_EXT 0x8211 +#define GL_UNSIGNED_NORMALIZED_EXT 0x8C17 +#endif /* GL_EXT_color_buffer_half_float */ + +#ifndef GL_EXT_conservative_depth +#define GL_EXT_conservative_depth 1 +#endif /* GL_EXT_conservative_depth */ + +#ifndef GL_EXT_copy_image +#define GL_EXT_copy_image 1 +typedef void (GL_APIENTRYP PFNGLCOPYIMAGESUBDATAEXTPROC) (GLuint srcName, GLenum srcTarget, GLint srcLevel, GLint srcX, GLint srcY, GLint srcZ, GLuint dstName, GLenum dstTarget, GLint dstLevel, GLint dstX, GLint dstY, GLint dstZ, GLsizei srcWidth, GLsizei srcHeight, GLsizei srcDepth); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glCopyImageSubDataEXT (GLuint srcName, GLenum srcTarget, GLint srcLevel, GLint srcX, GLint srcY, GLint srcZ, GLuint dstName, GLenum dstTarget, GLint dstLevel, GLint dstX, GLint dstY, GLint dstZ, GLsizei srcWidth, GLsizei srcHeight, GLsizei srcDepth); +#endif +#endif /* GL_EXT_copy_image */ + +#ifndef GL_EXT_debug_label +#define GL_EXT_debug_label 1 +#define GL_PROGRAM_PIPELINE_OBJECT_EXT 0x8A4F +#define GL_PROGRAM_OBJECT_EXT 0x8B40 +#define GL_SHADER_OBJECT_EXT 0x8B48 +#define GL_BUFFER_OBJECT_EXT 0x9151 +#define GL_QUERY_OBJECT_EXT 0x9153 +#define GL_VERTEX_ARRAY_OBJECT_EXT 0x9154 +#define GL_TRANSFORM_FEEDBACK 0x8E22 +typedef void (GL_APIENTRYP PFNGLLABELOBJECTEXTPROC) (GLenum type, GLuint object, GLsizei length, const GLchar *label); +typedef void (GL_APIENTRYP PFNGLGETOBJECTLABELEXTPROC) (GLenum type, GLuint object, GLsizei bufSize, GLsizei *length, GLchar *label); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glLabelObjectEXT (GLenum type, GLuint object, GLsizei length, const GLchar *label); +GL_APICALL void GL_APIENTRY glGetObjectLabelEXT (GLenum type, GLuint object, GLsizei bufSize, GLsizei *length, GLchar *label); +#endif +#endif /* GL_EXT_debug_label */ + +#ifndef GL_EXT_debug_marker +#define GL_EXT_debug_marker 1 +typedef void (GL_APIENTRYP PFNGLINSERTEVENTMARKEREXTPROC) (GLsizei length, const GLchar *marker); +typedef void (GL_APIENTRYP PFNGLPUSHGROUPMARKEREXTPROC) (GLsizei length, const GLchar *marker); +typedef void (GL_APIENTRYP PFNGLPOPGROUPMARKEREXTPROC) (void); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glInsertEventMarkerEXT (GLsizei length, const GLchar *marker); +GL_APICALL void GL_APIENTRY glPushGroupMarkerEXT (GLsizei length, const GLchar *marker); +GL_APICALL void GL_APIENTRY glPopGroupMarkerEXT (void); +#endif +#endif /* GL_EXT_debug_marker */ + +#ifndef GL_EXT_discard_framebuffer +#define GL_EXT_discard_framebuffer 1 +#define GL_COLOR_EXT 0x1800 +#define GL_DEPTH_EXT 0x1801 +#define GL_STENCIL_EXT 0x1802 +typedef void (GL_APIENTRYP PFNGLDISCARDFRAMEBUFFEREXTPROC) (GLenum target, GLsizei numAttachments, const GLenum *attachments); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glDiscardFramebufferEXT (GLenum target, GLsizei numAttachments, const GLenum *attachments); +#endif +#endif /* GL_EXT_discard_framebuffer */ + +#ifndef GL_EXT_disjoint_timer_query +#define GL_EXT_disjoint_timer_query 1 +#define GL_QUERY_COUNTER_BITS_EXT 0x8864 +#define GL_CURRENT_QUERY_EXT 0x8865 +#define GL_QUERY_RESULT_EXT 0x8866 +#define GL_QUERY_RESULT_AVAILABLE_EXT 0x8867 +#define GL_TIME_ELAPSED_EXT 0x88BF +#define GL_TIMESTAMP_EXT 0x8E28 +#define GL_GPU_DISJOINT_EXT 0x8FBB +typedef void (GL_APIENTRYP PFNGLGENQUERIESEXTPROC) (GLsizei n, GLuint *ids); +typedef void (GL_APIENTRYP PFNGLDELETEQUERIESEXTPROC) (GLsizei n, const 
GLuint *ids);
+typedef GLboolean (GL_APIENTRYP PFNGLISQUERYEXTPROC) (GLuint id);
+typedef void (GL_APIENTRYP PFNGLBEGINQUERYEXTPROC) (GLenum target, GLuint id);
+typedef void (GL_APIENTRYP PFNGLENDQUERYEXTPROC) (GLenum target);
+typedef void (GL_APIENTRYP PFNGLQUERYCOUNTEREXTPROC) (GLuint id, GLenum target);
+typedef void (GL_APIENTRYP PFNGLGETQUERYIVEXTPROC) (GLenum target, GLenum pname, GLint *params);
+typedef void (GL_APIENTRYP PFNGLGETQUERYOBJECTIVEXTPROC) (GLuint id, GLenum pname, GLint *params);
+typedef void (GL_APIENTRYP PFNGLGETQUERYOBJECTUIVEXTPROC) (GLuint id, GLenum pname, GLuint *params);
+typedef void (GL_APIENTRYP PFNGLGETQUERYOBJECTI64VEXTPROC) (GLuint id, GLenum pname, GLint64 *params);
+typedef void (GL_APIENTRYP PFNGLGETQUERYOBJECTUI64VEXTPROC) (GLuint id, GLenum pname, GLuint64 *params);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glGenQueriesEXT (GLsizei n, GLuint *ids);
+GL_APICALL void GL_APIENTRY glDeleteQueriesEXT (GLsizei n, const GLuint *ids);
+GL_APICALL GLboolean GL_APIENTRY glIsQueryEXT (GLuint id);
+GL_APICALL void GL_APIENTRY glBeginQueryEXT (GLenum target, GLuint id);
+GL_APICALL void GL_APIENTRY glEndQueryEXT (GLenum target);
+GL_APICALL void GL_APIENTRY glQueryCounterEXT (GLuint id, GLenum target);
+GL_APICALL void GL_APIENTRY glGetQueryivEXT (GLenum target, GLenum pname, GLint *params);
+GL_APICALL void GL_APIENTRY glGetQueryObjectivEXT (GLuint id, GLenum pname, GLint *params);
+GL_APICALL void GL_APIENTRY glGetQueryObjectuivEXT (GLuint id, GLenum pname, GLuint *params);
+GL_APICALL void GL_APIENTRY glGetQueryObjecti64vEXT (GLuint id, GLenum pname, GLint64 *params);
+GL_APICALL void GL_APIENTRY glGetQueryObjectui64vEXT (GLuint id, GLenum pname, GLuint64 *params);
+#endif
+#endif /* GL_EXT_disjoint_timer_query */
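+
+/*
+ * Illustrative sketch (non-normative, not part of the Khronos registry
+ * text): on EGL platforms the GL_EXT_disjoint_timer_query entry points
+ * declared above are typically resolved at run time via eglGetProcAddress().
+ * The #if 0 block below assumes a current EGL context; the _fp suffixed
+ * pointers and the time_gpu_work() helper are hypothetical names.
+ */
+#if 0
+#include <EGL/egl.h>
+static PFNGLGENQUERIESEXTPROC glGenQueriesEXT_fp;
+static PFNGLQUERYCOUNTEREXTPROC glQueryCounterEXT_fp;
+static PFNGLGETQUERYOBJECTUI64VEXTPROC glGetQueryObjectui64vEXT_fp;
+
+static void time_gpu_work(void)
+{
+    GLuint q;
+    GLuint64 gpu_time_ns;
+    GLint disjoint = 0;
+    glGenQueriesEXT_fp = (PFNGLGENQUERIESEXTPROC)eglGetProcAddress("glGenQueriesEXT");
+    glQueryCounterEXT_fp = (PFNGLQUERYCOUNTEREXTPROC)eglGetProcAddress("glQueryCounterEXT");
+    glGetQueryObjectui64vEXT_fp = (PFNGLGETQUERYOBJECTUI64VEXTPROC)eglGetProcAddress("glGetQueryObjectui64vEXT");
+    glGenQueriesEXT_fp(1, &q);
+    glQueryCounterEXT_fp(q, GL_TIMESTAMP_EXT);     /* record a GPU timestamp */
+    /* ... issue rendering commands ... */
+    /* blocks until the result is ready; poll GL_QUERY_RESULT_AVAILABLE_EXT
+     * first to avoid stalling */
+    glGetQueryObjectui64vEXT_fp(q, GL_QUERY_RESULT_EXT, &gpu_time_ns);
+    glGetIntegerv(GL_GPU_DISJOINT_EXT, &disjoint); /* result is invalid if disjoint */
+}
+#endif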
+
+#ifndef GL_EXT_draw_buffers
+#define GL_EXT_draw_buffers 1
+#define GL_MAX_COLOR_ATTACHMENTS_EXT 0x8CDF
+#define GL_MAX_DRAW_BUFFERS_EXT 0x8824
+#define GL_DRAW_BUFFER0_EXT 0x8825
+#define GL_DRAW_BUFFER1_EXT 0x8826
+#define GL_DRAW_BUFFER2_EXT 0x8827
+#define GL_DRAW_BUFFER3_EXT 0x8828
+#define GL_DRAW_BUFFER4_EXT 0x8829
+#define GL_DRAW_BUFFER5_EXT 0x882A
+#define GL_DRAW_BUFFER6_EXT 0x882B
+#define GL_DRAW_BUFFER7_EXT 0x882C
+#define GL_DRAW_BUFFER8_EXT 0x882D
+#define GL_DRAW_BUFFER9_EXT 0x882E
+#define GL_DRAW_BUFFER10_EXT 0x882F
+#define GL_DRAW_BUFFER11_EXT 0x8830
+#define GL_DRAW_BUFFER12_EXT 0x8831
+#define GL_DRAW_BUFFER13_EXT 0x8832
+#define GL_DRAW_BUFFER14_EXT 0x8833
+#define GL_DRAW_BUFFER15_EXT 0x8834
+#define GL_COLOR_ATTACHMENT0_EXT 0x8CE0
+#define GL_COLOR_ATTACHMENT1_EXT 0x8CE1
+#define GL_COLOR_ATTACHMENT2_EXT 0x8CE2
+#define GL_COLOR_ATTACHMENT3_EXT 0x8CE3
+#define GL_COLOR_ATTACHMENT4_EXT 0x8CE4
+#define GL_COLOR_ATTACHMENT5_EXT 0x8CE5
+#define GL_COLOR_ATTACHMENT6_EXT 0x8CE6
+#define GL_COLOR_ATTACHMENT7_EXT 0x8CE7
+#define GL_COLOR_ATTACHMENT8_EXT 0x8CE8
+#define GL_COLOR_ATTACHMENT9_EXT 0x8CE9
+#define GL_COLOR_ATTACHMENT10_EXT 0x8CEA
+#define GL_COLOR_ATTACHMENT11_EXT 0x8CEB
+#define GL_COLOR_ATTACHMENT12_EXT 0x8CEC
+#define GL_COLOR_ATTACHMENT13_EXT 0x8CED
+#define GL_COLOR_ATTACHMENT14_EXT 0x8CEE
+#define GL_COLOR_ATTACHMENT15_EXT 0x8CEF
+typedef void (GL_APIENTRYP PFNGLDRAWBUFFERSEXTPROC) (GLsizei n, const GLenum *bufs);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glDrawBuffersEXT (GLsizei n, const GLenum *bufs);
+#endif
+#endif /* GL_EXT_draw_buffers */
+
+#ifndef GL_EXT_draw_buffers_indexed
+#define GL_EXT_draw_buffers_indexed 1
+typedef void (GL_APIENTRYP PFNGLENABLEIEXTPROC) (GLenum target, GLuint index);
+typedef void (GL_APIENTRYP PFNGLDISABLEIEXTPROC) (GLenum target, GLuint index);
+typedef void (GL_APIENTRYP PFNGLBLENDEQUATIONIEXTPROC) (GLuint buf, GLenum mode);
+typedef void (GL_APIENTRYP PFNGLBLENDEQUATIONSEPARATEIEXTPROC) (GLuint buf, GLenum modeRGB, GLenum modeAlpha);
+typedef void (GL_APIENTRYP PFNGLBLENDFUNCIEXTPROC) (GLuint buf, GLenum src, GLenum dst);
+typedef void (GL_APIENTRYP PFNGLBLENDFUNCSEPARATEIEXTPROC) (GLuint buf, GLenum srcRGB, GLenum dstRGB, GLenum srcAlpha, GLenum dstAlpha);
+typedef void (GL_APIENTRYP PFNGLCOLORMASKIEXTPROC) (GLuint index, GLboolean r, GLboolean g, GLboolean b, GLboolean a);
+typedef GLboolean (GL_APIENTRYP PFNGLISENABLEDIEXTPROC) (GLenum target, GLuint index);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glEnableiEXT (GLenum target, GLuint index);
+GL_APICALL void GL_APIENTRY glDisableiEXT (GLenum target, GLuint index);
+GL_APICALL void GL_APIENTRY glBlendEquationiEXT (GLuint buf, GLenum mode);
+GL_APICALL void GL_APIENTRY glBlendEquationSeparateiEXT (GLuint buf, GLenum modeRGB, GLenum modeAlpha);
+GL_APICALL void GL_APIENTRY glBlendFunciEXT (GLuint buf, GLenum src, GLenum dst);
+GL_APICALL void GL_APIENTRY glBlendFuncSeparateiEXT (GLuint buf, GLenum srcRGB, GLenum dstRGB, GLenum srcAlpha, GLenum dstAlpha);
+GL_APICALL void GL_APIENTRY glColorMaskiEXT (GLuint index, GLboolean r, GLboolean g, GLboolean b, GLboolean a);
+GL_APICALL GLboolean GL_APIENTRY glIsEnablediEXT (GLenum target, GLuint index);
+#endif
+#endif /* GL_EXT_draw_buffers_indexed */
+
+#ifndef GL_EXT_draw_elements_base_vertex
+#define GL_EXT_draw_elements_base_vertex 1
+typedef void (GL_APIENTRYP PFNGLDRAWELEMENTSBASEVERTEXEXTPROC) (GLenum mode, GLsizei count, GLenum type, const void *indices, GLint basevertex);
+typedef void (GL_APIENTRYP PFNGLDRAWRANGEELEMENTSBASEVERTEXEXTPROC) (GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum type, const void *indices, GLint basevertex);
+typedef void (GL_APIENTRYP PFNGLDRAWELEMENTSINSTANCEDBASEVERTEXEXTPROC) (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount, GLint basevertex);
+typedef void (GL_APIENTRYP PFNGLMULTIDRAWELEMENTSBASEVERTEXEXTPROC) (GLenum mode, const GLsizei *count, GLenum type, const void *const*indices, GLsizei primcount, const GLint *basevertex);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glDrawElementsBaseVertexEXT (GLenum mode, GLsizei count, GLenum type, const void *indices, GLint basevertex);
+GL_APICALL void GL_APIENTRY glDrawRangeElementsBaseVertexEXT (GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum type, const void *indices, GLint basevertex);
+GL_APICALL void GL_APIENTRY glDrawElementsInstancedBaseVertexEXT (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount, GLint basevertex);
+GL_APICALL void GL_APIENTRY glMultiDrawElementsBaseVertexEXT (GLenum mode, const GLsizei *count, GLenum type, const void *const*indices, GLsizei primcount, const GLint *basevertex);
+#endif
+#endif /* GL_EXT_draw_elements_base_vertex */
+
+#ifndef GL_EXT_draw_instanced
+#define GL_EXT_draw_instanced 1
+typedef void (GL_APIENTRYP PFNGLDRAWARRAYSINSTANCEDEXTPROC) (GLenum mode, GLint start, GLsizei count, GLsizei primcount);
+typedef void (GL_APIENTRYP PFNGLDRAWELEMENTSINSTANCEDEXTPROC) (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei primcount);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glDrawArraysInstancedEXT (GLenum mode, GLint start, GLsizei count, GLsizei primcount);
+GL_APICALL void GL_APIENTRY glDrawElementsInstancedEXT (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei primcount);
+#endif
+#endif /* GL_EXT_draw_instanced */
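+
+/*
+ * Illustrative sketch (non-normative): issuing an instanced draw with the
+ * entry point declared above, assuming eglGetProcAddress() resolution; the
+ * _fp pointer and draw_instances() helper are hypothetical names.
+ */
+#if 0
+#include <EGL/egl.h>
+static PFNGLDRAWARRAYSINSTANCEDEXTPROC glDrawArraysInstancedEXT_fp;
+
+static void draw_instances(GLsizei instance_count)
+{
+    if (glDrawArraysInstancedEXT_fp == NULL)
+        glDrawArraysInstancedEXT_fp = (PFNGLDRAWARRAYSINSTANCEDEXTPROC)
+            eglGetProcAddress("glDrawArraysInstancedEXT");
+    /* Draws the currently bound vertex arrays instance_count times;
+     * per-instance attribute rates come from GL_EXT_instanced_arrays
+     * (glVertexAttribDivisorEXT), declared further below. */
+    glDrawArraysInstancedEXT_fp(GL_TRIANGLES, 0, 6, instance_count);
+}
+#endif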
+
+#ifndef GL_EXT_draw_transform_feedback
+#define GL_EXT_draw_transform_feedback 1
+typedef void (GL_APIENTRYP PFNGLDRAWTRANSFORMFEEDBACKEXTPROC) (GLenum mode, GLuint id);
+typedef void (GL_APIENTRYP PFNGLDRAWTRANSFORMFEEDBACKINSTANCEDEXTPROC) (GLenum mode, GLuint id, GLsizei instancecount);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glDrawTransformFeedbackEXT (GLenum mode, GLuint id);
+GL_APICALL void GL_APIENTRY glDrawTransformFeedbackInstancedEXT (GLenum mode, GLuint id, GLsizei instancecount);
+#endif
+#endif /* GL_EXT_draw_transform_feedback */
+
+#ifndef GL_EXT_external_buffer
+#define GL_EXT_external_buffer 1
+typedef void *GLeglClientBufferEXT;
+typedef void (GL_APIENTRYP PFNGLBUFFERSTORAGEEXTERNALEXTPROC) (GLenum target, GLintptr offset, GLsizeiptr size, GLeglClientBufferEXT clientBuffer, GLbitfield flags);
+typedef void (GL_APIENTRYP PFNGLNAMEDBUFFERSTORAGEEXTERNALEXTPROC) (GLuint buffer, GLintptr offset, GLsizeiptr size, GLeglClientBufferEXT clientBuffer, GLbitfield flags);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glBufferStorageExternalEXT (GLenum target, GLintptr offset, GLsizeiptr size, GLeglClientBufferEXT clientBuffer, GLbitfield flags);
+GL_APICALL void GL_APIENTRY glNamedBufferStorageExternalEXT (GLuint buffer, GLintptr offset, GLsizeiptr size, GLeglClientBufferEXT clientBuffer, GLbitfield flags);
+#endif
+#endif /* GL_EXT_external_buffer */
+
+#ifndef GL_EXT_float_blend
+#define GL_EXT_float_blend 1
+#endif /* GL_EXT_float_blend */
+
+#ifndef GL_EXT_geometry_point_size
+#define GL_EXT_geometry_point_size 1
+#endif /* GL_EXT_geometry_point_size */
+
+#ifndef GL_EXT_geometry_shader
+#define GL_EXT_geometry_shader 1
+#define GL_GEOMETRY_SHADER_EXT 0x8DD9
+#define GL_GEOMETRY_SHADER_BIT_EXT 0x00000004
+#define GL_GEOMETRY_LINKED_VERTICES_OUT_EXT 0x8916
+#define GL_GEOMETRY_LINKED_INPUT_TYPE_EXT 0x8917
+#define GL_GEOMETRY_LINKED_OUTPUT_TYPE_EXT 0x8918
+#define GL_GEOMETRY_SHADER_INVOCATIONS_EXT 0x887F
+#define GL_LAYER_PROVOKING_VERTEX_EXT 0x825E
+#define GL_LINES_ADJACENCY_EXT 0x000A
+#define GL_LINE_STRIP_ADJACENCY_EXT 0x000B
+#define GL_TRIANGLES_ADJACENCY_EXT 0x000C
+#define GL_TRIANGLE_STRIP_ADJACENCY_EXT 0x000D
+#define GL_MAX_GEOMETRY_UNIFORM_COMPONENTS_EXT 0x8DDF
+#define GL_MAX_GEOMETRY_UNIFORM_BLOCKS_EXT 0x8A2C
+#define GL_MAX_COMBINED_GEOMETRY_UNIFORM_COMPONENTS_EXT 0x8A32
+#define GL_MAX_GEOMETRY_INPUT_COMPONENTS_EXT 0x9123
+#define GL_MAX_GEOMETRY_OUTPUT_COMPONENTS_EXT 0x9124
+#define GL_MAX_GEOMETRY_OUTPUT_VERTICES_EXT 0x8DE0
+#define GL_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS_EXT 0x8DE1
+#define GL_MAX_GEOMETRY_SHADER_INVOCATIONS_EXT 0x8E5A
+#define GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS_EXT 0x8C29
+#define GL_MAX_GEOMETRY_ATOMIC_COUNTER_BUFFERS_EXT 0x92CF
+#define GL_MAX_GEOMETRY_ATOMIC_COUNTERS_EXT 0x92D5
+#define GL_MAX_GEOMETRY_IMAGE_UNIFORMS_EXT 0x90CD
+#define GL_MAX_GEOMETRY_SHADER_STORAGE_BLOCKS_EXT 0x90D7
+#define GL_FIRST_VERTEX_CONVENTION_EXT 0x8E4D
+#define GL_LAST_VERTEX_CONVENTION_EXT 0x8E4E
+#define GL_UNDEFINED_VERTEX_EXT 0x8260
+#define GL_PRIMITIVES_GENERATED_EXT 0x8C87
+#define GL_FRAMEBUFFER_DEFAULT_LAYERS_EXT 0x9312
+#define GL_MAX_FRAMEBUFFER_LAYERS_EXT 0x9317
+#define GL_FRAMEBUFFER_INCOMPLETE_LAYER_TARGETS_EXT 0x8DA8
+#define GL_FRAMEBUFFER_ATTACHMENT_LAYERED_EXT 0x8DA7
+#define GL_REFERENCED_BY_GEOMETRY_SHADER_EXT 0x9309
+typedef 
void (GL_APIENTRYP PFNGLFRAMEBUFFERTEXTUREEXTPROC) (GLenum target, GLenum attachment, GLuint texture, GLint level); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glFramebufferTextureEXT (GLenum target, GLenum attachment, GLuint texture, GLint level); +#endif +#endif /* GL_EXT_geometry_shader */ + +#ifndef GL_EXT_gpu_shader5 +#define GL_EXT_gpu_shader5 1 +#endif /* GL_EXT_gpu_shader5 */ + +#ifndef GL_EXT_instanced_arrays +#define GL_EXT_instanced_arrays 1 +#define GL_VERTEX_ATTRIB_ARRAY_DIVISOR_EXT 0x88FE +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBDIVISOREXTPROC) (GLuint index, GLuint divisor); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glVertexAttribDivisorEXT (GLuint index, GLuint divisor); +#endif +#endif /* GL_EXT_instanced_arrays */ + +#ifndef GL_EXT_map_buffer_range +#define GL_EXT_map_buffer_range 1 +#define GL_MAP_READ_BIT_EXT 0x0001 +#define GL_MAP_WRITE_BIT_EXT 0x0002 +#define GL_MAP_INVALIDATE_RANGE_BIT_EXT 0x0004 +#define GL_MAP_INVALIDATE_BUFFER_BIT_EXT 0x0008 +#define GL_MAP_FLUSH_EXPLICIT_BIT_EXT 0x0010 +#define GL_MAP_UNSYNCHRONIZED_BIT_EXT 0x0020 +typedef void *(GL_APIENTRYP PFNGLMAPBUFFERRANGEEXTPROC) (GLenum target, GLintptr offset, GLsizeiptr length, GLbitfield access); +typedef void (GL_APIENTRYP PFNGLFLUSHMAPPEDBUFFERRANGEEXTPROC) (GLenum target, GLintptr offset, GLsizeiptr length); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void *GL_APIENTRY glMapBufferRangeEXT (GLenum target, GLintptr offset, GLsizeiptr length, GLbitfield access); +GL_APICALL void GL_APIENTRY glFlushMappedBufferRangeEXT (GLenum target, GLintptr offset, GLsizeiptr length); +#endif +#endif /* GL_EXT_map_buffer_range */ + +#ifndef GL_EXT_memory_object +#define GL_EXT_memory_object 1 +#define GL_TEXTURE_TILING_EXT 0x9580 +#define GL_DEDICATED_MEMORY_OBJECT_EXT 0x9581 +#define GL_PROTECTED_MEMORY_OBJECT_EXT 0x959B +#define GL_NUM_TILING_TYPES_EXT 0x9582 +#define GL_TILING_TYPES_EXT 0x9583 +#define GL_OPTIMAL_TILING_EXT 0x9584 +#define GL_LINEAR_TILING_EXT 0x9585 +#define GL_NUM_DEVICE_UUIDS_EXT 0x9596 +#define GL_DEVICE_UUID_EXT 0x9597 +#define GL_DRIVER_UUID_EXT 0x9598 +#define GL_UUID_SIZE_EXT 16 +typedef void (GL_APIENTRYP PFNGLGETUNSIGNEDBYTEVEXTPROC) (GLenum pname, GLubyte *data); +typedef void (GL_APIENTRYP PFNGLGETUNSIGNEDBYTEI_VEXTPROC) (GLenum target, GLuint index, GLubyte *data); +typedef void (GL_APIENTRYP PFNGLDELETEMEMORYOBJECTSEXTPROC) (GLsizei n, const GLuint *memoryObjects); +typedef GLboolean (GL_APIENTRYP PFNGLISMEMORYOBJECTEXTPROC) (GLuint memoryObject); +typedef void (GL_APIENTRYP PFNGLCREATEMEMORYOBJECTSEXTPROC) (GLsizei n, GLuint *memoryObjects); +typedef void (GL_APIENTRYP PFNGLMEMORYOBJECTPARAMETERIVEXTPROC) (GLuint memoryObject, GLenum pname, const GLint *params); +typedef void (GL_APIENTRYP PFNGLGETMEMORYOBJECTPARAMETERIVEXTPROC) (GLuint memoryObject, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLTEXSTORAGEMEM2DEXTPROC) (GLenum target, GLsizei levels, GLenum internalFormat, GLsizei width, GLsizei height, GLuint memory, GLuint64 offset); +typedef void (GL_APIENTRYP PFNGLTEXSTORAGEMEM2DMULTISAMPLEEXTPROC) (GLenum target, GLsizei samples, GLenum internalFormat, GLsizei width, GLsizei height, GLboolean fixedSampleLocations, GLuint memory, GLuint64 offset); +typedef void (GL_APIENTRYP PFNGLTEXSTORAGEMEM3DEXTPROC) (GLenum target, GLsizei levels, GLenum internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLuint memory, GLuint64 offset); +typedef void (GL_APIENTRYP PFNGLTEXSTORAGEMEM3DMULTISAMPLEEXTPROC) (GLenum target, GLsizei samples, 
GLenum internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLboolean fixedSampleLocations, GLuint memory, GLuint64 offset); +typedef void (GL_APIENTRYP PFNGLBUFFERSTORAGEMEMEXTPROC) (GLenum target, GLsizeiptr size, GLuint memory, GLuint64 offset); +typedef void (GL_APIENTRYP PFNGLTEXTURESTORAGEMEM2DEXTPROC) (GLuint texture, GLsizei levels, GLenum internalFormat, GLsizei width, GLsizei height, GLuint memory, GLuint64 offset); +typedef void (GL_APIENTRYP PFNGLTEXTURESTORAGEMEM2DMULTISAMPLEEXTPROC) (GLuint texture, GLsizei samples, GLenum internalFormat, GLsizei width, GLsizei height, GLboolean fixedSampleLocations, GLuint memory, GLuint64 offset); +typedef void (GL_APIENTRYP PFNGLTEXTURESTORAGEMEM3DEXTPROC) (GLuint texture, GLsizei levels, GLenum internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLuint memory, GLuint64 offset); +typedef void (GL_APIENTRYP PFNGLTEXTURESTORAGEMEM3DMULTISAMPLEEXTPROC) (GLuint texture, GLsizei samples, GLenum internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLboolean fixedSampleLocations, GLuint memory, GLuint64 offset); +typedef void (GL_APIENTRYP PFNGLNAMEDBUFFERSTORAGEMEMEXTPROC) (GLuint buffer, GLsizeiptr size, GLuint memory, GLuint64 offset); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glGetUnsignedBytevEXT (GLenum pname, GLubyte *data); +GL_APICALL void GL_APIENTRY glGetUnsignedBytei_vEXT (GLenum target, GLuint index, GLubyte *data); +GL_APICALL void GL_APIENTRY glDeleteMemoryObjectsEXT (GLsizei n, const GLuint *memoryObjects); +GL_APICALL GLboolean GL_APIENTRY glIsMemoryObjectEXT (GLuint memoryObject); +GL_APICALL void GL_APIENTRY glCreateMemoryObjectsEXT (GLsizei n, GLuint *memoryObjects); +GL_APICALL void GL_APIENTRY glMemoryObjectParameterivEXT (GLuint memoryObject, GLenum pname, const GLint *params); +GL_APICALL void GL_APIENTRY glGetMemoryObjectParameterivEXT (GLuint memoryObject, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glTexStorageMem2DEXT (GLenum target, GLsizei levels, GLenum internalFormat, GLsizei width, GLsizei height, GLuint memory, GLuint64 offset); +GL_APICALL void GL_APIENTRY glTexStorageMem2DMultisampleEXT (GLenum target, GLsizei samples, GLenum internalFormat, GLsizei width, GLsizei height, GLboolean fixedSampleLocations, GLuint memory, GLuint64 offset); +GL_APICALL void GL_APIENTRY glTexStorageMem3DEXT (GLenum target, GLsizei levels, GLenum internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLuint memory, GLuint64 offset); +GL_APICALL void GL_APIENTRY glTexStorageMem3DMultisampleEXT (GLenum target, GLsizei samples, GLenum internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLboolean fixedSampleLocations, GLuint memory, GLuint64 offset); +GL_APICALL void GL_APIENTRY glBufferStorageMemEXT (GLenum target, GLsizeiptr size, GLuint memory, GLuint64 offset); +GL_APICALL void GL_APIENTRY glTextureStorageMem2DEXT (GLuint texture, GLsizei levels, GLenum internalFormat, GLsizei width, GLsizei height, GLuint memory, GLuint64 offset); +GL_APICALL void GL_APIENTRY glTextureStorageMem2DMultisampleEXT (GLuint texture, GLsizei samples, GLenum internalFormat, GLsizei width, GLsizei height, GLboolean fixedSampleLocations, GLuint memory, GLuint64 offset); +GL_APICALL void GL_APIENTRY glTextureStorageMem3DEXT (GLuint texture, GLsizei levels, GLenum internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLuint memory, GLuint64 offset); +GL_APICALL void GL_APIENTRY glTextureStorageMem3DMultisampleEXT (GLuint texture, GLsizei samples, GLenum internalFormat, GLsizei 
width, GLsizei height, GLsizei depth, GLboolean fixedSampleLocations, GLuint memory, GLuint64 offset); +GL_APICALL void GL_APIENTRY glNamedBufferStorageMemEXT (GLuint buffer, GLsizeiptr size, GLuint memory, GLuint64 offset); +#endif +#endif /* GL_EXT_memory_object */ + +#ifndef GL_EXT_memory_object_fd +#define GL_EXT_memory_object_fd 1 +#define GL_HANDLE_TYPE_OPAQUE_FD_EXT 0x9586 +typedef void (GL_APIENTRYP PFNGLIMPORTMEMORYFDEXTPROC) (GLuint memory, GLuint64 size, GLenum handleType, GLint fd); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glImportMemoryFdEXT (GLuint memory, GLuint64 size, GLenum handleType, GLint fd); +#endif +#endif /* GL_EXT_memory_object_fd */ + +#ifndef GL_EXT_memory_object_win32 +#define GL_EXT_memory_object_win32 1 +#define GL_HANDLE_TYPE_OPAQUE_WIN32_EXT 0x9587 +#define GL_HANDLE_TYPE_OPAQUE_WIN32_KMT_EXT 0x9588 +#define GL_DEVICE_LUID_EXT 0x9599 +#define GL_DEVICE_NODE_MASK_EXT 0x959A +#define GL_LUID_SIZE_EXT 8 +#define GL_HANDLE_TYPE_D3D12_TILEPOOL_EXT 0x9589 +#define GL_HANDLE_TYPE_D3D12_RESOURCE_EXT 0x958A +#define GL_HANDLE_TYPE_D3D11_IMAGE_EXT 0x958B +#define GL_HANDLE_TYPE_D3D11_IMAGE_KMT_EXT 0x958C +typedef void (GL_APIENTRYP PFNGLIMPORTMEMORYWIN32HANDLEEXTPROC) (GLuint memory, GLuint64 size, GLenum handleType, void *handle); +typedef void (GL_APIENTRYP PFNGLIMPORTMEMORYWIN32NAMEEXTPROC) (GLuint memory, GLuint64 size, GLenum handleType, const void *name); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glImportMemoryWin32HandleEXT (GLuint memory, GLuint64 size, GLenum handleType, void *handle); +GL_APICALL void GL_APIENTRY glImportMemoryWin32NameEXT (GLuint memory, GLuint64 size, GLenum handleType, const void *name); +#endif +#endif /* GL_EXT_memory_object_win32 */ + +#ifndef GL_EXT_multi_draw_arrays +#define GL_EXT_multi_draw_arrays 1 +typedef void (GL_APIENTRYP PFNGLMULTIDRAWARRAYSEXTPROC) (GLenum mode, const GLint *first, const GLsizei *count, GLsizei primcount); +typedef void (GL_APIENTRYP PFNGLMULTIDRAWELEMENTSEXTPROC) (GLenum mode, const GLsizei *count, GLenum type, const void *const*indices, GLsizei primcount); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glMultiDrawArraysEXT (GLenum mode, const GLint *first, const GLsizei *count, GLsizei primcount); +GL_APICALL void GL_APIENTRY glMultiDrawElementsEXT (GLenum mode, const GLsizei *count, GLenum type, const void *const*indices, GLsizei primcount); +#endif +#endif /* GL_EXT_multi_draw_arrays */ + +#ifndef GL_EXT_multi_draw_indirect +#define GL_EXT_multi_draw_indirect 1 +typedef void (GL_APIENTRYP PFNGLMULTIDRAWARRAYSINDIRECTEXTPROC) (GLenum mode, const void *indirect, GLsizei drawcount, GLsizei stride); +typedef void (GL_APIENTRYP PFNGLMULTIDRAWELEMENTSINDIRECTEXTPROC) (GLenum mode, GLenum type, const void *indirect, GLsizei drawcount, GLsizei stride); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glMultiDrawArraysIndirectEXT (GLenum mode, const void *indirect, GLsizei drawcount, GLsizei stride); +GL_APICALL void GL_APIENTRY glMultiDrawElementsIndirectEXT (GLenum mode, GLenum type, const void *indirect, GLsizei drawcount, GLsizei stride); +#endif +#endif /* GL_EXT_multi_draw_indirect */ + +#ifndef GL_EXT_multisampled_compatibility +#define GL_EXT_multisampled_compatibility 1 +#define GL_MULTISAMPLE_EXT 0x809D +#define GL_SAMPLE_ALPHA_TO_ONE_EXT 0x809F +#endif /* GL_EXT_multisampled_compatibility */ + +#ifndef GL_EXT_multisampled_render_to_texture +#define GL_EXT_multisampled_render_to_texture 1 +#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_SAMPLES_EXT 0x8D6C 
+#define GL_RENDERBUFFER_SAMPLES_EXT 0x8CAB +#define GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE_EXT 0x8D56 +#define GL_MAX_SAMPLES_EXT 0x8D57 +typedef void (GL_APIENTRYP PFNGLRENDERBUFFERSTORAGEMULTISAMPLEEXTPROC) (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height); +typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERTEXTURE2DMULTISAMPLEEXTPROC) (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLsizei samples); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glRenderbufferStorageMultisampleEXT (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height); +GL_APICALL void GL_APIENTRY glFramebufferTexture2DMultisampleEXT (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLsizei samples); +#endif +#endif /* GL_EXT_multisampled_render_to_texture */ + +#ifndef GL_EXT_multiview_draw_buffers +#define GL_EXT_multiview_draw_buffers 1 +#define GL_COLOR_ATTACHMENT_EXT 0x90F0 +#define GL_MULTIVIEW_EXT 0x90F1 +#define GL_DRAW_BUFFER_EXT 0x0C01 +#define GL_READ_BUFFER_EXT 0x0C02 +#define GL_MAX_MULTIVIEW_BUFFERS_EXT 0x90F2 +typedef void (GL_APIENTRYP PFNGLREADBUFFERINDEXEDEXTPROC) (GLenum src, GLint index); +typedef void (GL_APIENTRYP PFNGLDRAWBUFFERSINDEXEDEXTPROC) (GLint n, const GLenum *location, const GLint *indices); +typedef void (GL_APIENTRYP PFNGLGETINTEGERI_VEXTPROC) (GLenum target, GLuint index, GLint *data); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glReadBufferIndexedEXT (GLenum src, GLint index); +GL_APICALL void GL_APIENTRY glDrawBuffersIndexedEXT (GLint n, const GLenum *location, const GLint *indices); +GL_APICALL void GL_APIENTRY glGetIntegeri_vEXT (GLenum target, GLuint index, GLint *data); +#endif +#endif /* GL_EXT_multiview_draw_buffers */ + +#ifndef GL_EXT_occlusion_query_boolean +#define GL_EXT_occlusion_query_boolean 1 +#define GL_ANY_SAMPLES_PASSED_EXT 0x8C2F +#define GL_ANY_SAMPLES_PASSED_CONSERVATIVE_EXT 0x8D6A +#endif /* GL_EXT_occlusion_query_boolean */ + +#ifndef GL_EXT_polygon_offset_clamp +#define GL_EXT_polygon_offset_clamp 1 +#define GL_POLYGON_OFFSET_CLAMP_EXT 0x8E1B +typedef void (GL_APIENTRYP PFNGLPOLYGONOFFSETCLAMPEXTPROC) (GLfloat factor, GLfloat units, GLfloat clamp); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glPolygonOffsetClampEXT (GLfloat factor, GLfloat units, GLfloat clamp); +#endif +#endif /* GL_EXT_polygon_offset_clamp */ + +#ifndef GL_EXT_post_depth_coverage +#define GL_EXT_post_depth_coverage 1 +#endif /* GL_EXT_post_depth_coverage */ + +#ifndef GL_EXT_primitive_bounding_box +#define GL_EXT_primitive_bounding_box 1 +#define GL_PRIMITIVE_BOUNDING_BOX_EXT 0x92BE +typedef void (GL_APIENTRYP PFNGLPRIMITIVEBOUNDINGBOXEXTPROC) (GLfloat minX, GLfloat minY, GLfloat minZ, GLfloat minW, GLfloat maxX, GLfloat maxY, GLfloat maxZ, GLfloat maxW); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glPrimitiveBoundingBoxEXT (GLfloat minX, GLfloat minY, GLfloat minZ, GLfloat minW, GLfloat maxX, GLfloat maxY, GLfloat maxZ, GLfloat maxW); +#endif +#endif /* GL_EXT_primitive_bounding_box */ + +#ifndef GL_EXT_protected_textures +#define GL_EXT_protected_textures 1 +#define GL_CONTEXT_FLAG_PROTECTED_CONTENT_BIT_EXT 0x00000010 +#define GL_TEXTURE_PROTECTED_EXT 0x8BFA +#endif /* GL_EXT_protected_textures */ + +#ifndef GL_EXT_pvrtc_sRGB +#define GL_EXT_pvrtc_sRGB 1 +#define GL_COMPRESSED_SRGB_PVRTC_2BPPV1_EXT 0x8A54 +#define GL_COMPRESSED_SRGB_PVRTC_4BPPV1_EXT 0x8A55 +#define GL_COMPRESSED_SRGB_ALPHA_PVRTC_2BPPV1_EXT 0x8A56 
+#define GL_COMPRESSED_SRGB_ALPHA_PVRTC_4BPPV1_EXT 0x8A57
+#define GL_COMPRESSED_SRGB_ALPHA_PVRTC_2BPPV2_IMG 0x93F0
+#define GL_COMPRESSED_SRGB_ALPHA_PVRTC_4BPPV2_IMG 0x93F1
+#endif /* GL_EXT_pvrtc_sRGB */
+
+#ifndef GL_EXT_raster_multisample
+#define GL_EXT_raster_multisample 1
+#define GL_RASTER_MULTISAMPLE_EXT 0x9327
+#define GL_RASTER_SAMPLES_EXT 0x9328
+#define GL_MAX_RASTER_SAMPLES_EXT 0x9329
+#define GL_RASTER_FIXED_SAMPLE_LOCATIONS_EXT 0x932A
+#define GL_MULTISAMPLE_RASTERIZATION_ALLOWED_EXT 0x932B
+#define GL_EFFECTIVE_RASTER_SAMPLES_EXT 0x932C
+typedef void (GL_APIENTRYP PFNGLRASTERSAMPLESEXTPROC) (GLuint samples, GLboolean fixedsamplelocations);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glRasterSamplesEXT (GLuint samples, GLboolean fixedsamplelocations);
+#endif
+#endif /* GL_EXT_raster_multisample */
+
+#ifndef GL_EXT_read_format_bgra
+#define GL_EXT_read_format_bgra 1
+#define GL_UNSIGNED_SHORT_4_4_4_4_REV_EXT 0x8365
+#define GL_UNSIGNED_SHORT_1_5_5_5_REV_EXT 0x8366
+#endif /* GL_EXT_read_format_bgra */
+
+#ifndef GL_EXT_render_snorm
+#define GL_EXT_render_snorm 1
+#define GL_R8_SNORM 0x8F94
+#define GL_RG8_SNORM 0x8F95
+#define GL_RGBA8_SNORM 0x8F97
+#define GL_R16_SNORM_EXT 0x8F98
+#define GL_RG16_SNORM_EXT 0x8F99
+#define GL_RGBA16_SNORM_EXT 0x8F9B
+#endif /* GL_EXT_render_snorm */
+
+#ifndef GL_EXT_robustness
+#define GL_EXT_robustness 1
+#define GL_GUILTY_CONTEXT_RESET_EXT 0x8253
+#define GL_INNOCENT_CONTEXT_RESET_EXT 0x8254
+#define GL_UNKNOWN_CONTEXT_RESET_EXT 0x8255
+#define GL_CONTEXT_ROBUST_ACCESS_EXT 0x90F3
+#define GL_RESET_NOTIFICATION_STRATEGY_EXT 0x8256
+#define GL_LOSE_CONTEXT_ON_RESET_EXT 0x8252
+#define GL_NO_RESET_NOTIFICATION_EXT 0x8261
+typedef GLenum (GL_APIENTRYP PFNGLGETGRAPHICSRESETSTATUSEXTPROC) (void);
+typedef void (GL_APIENTRYP PFNGLREADNPIXELSEXTPROC) (GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, GLsizei bufSize, void *data);
+typedef void (GL_APIENTRYP PFNGLGETNUNIFORMFVEXTPROC) (GLuint program, GLint location, GLsizei bufSize, GLfloat *params);
+typedef void (GL_APIENTRYP PFNGLGETNUNIFORMIVEXTPROC) (GLuint program, GLint location, GLsizei bufSize, GLint *params);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL GLenum GL_APIENTRY glGetGraphicsResetStatusEXT (void);
+GL_APICALL void GL_APIENTRY glReadnPixelsEXT (GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, GLsizei bufSize, void *data);
+GL_APICALL void GL_APIENTRY glGetnUniformfvEXT (GLuint program, GLint location, GLsizei bufSize, GLfloat *params);
+GL_APICALL void GL_APIENTRY glGetnUniformivEXT (GLuint program, GLint location, GLsizei bufSize, GLint *params);
+#endif
+#endif /* GL_EXT_robustness */
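+
+/*
+ * Illustrative sketch (non-normative): bounded pixel readback with the
+ * GL_EXT_robustness entry points declared above, assuming
+ * eglGetProcAddress() resolution; read_pixels_robust() is a hypothetical
+ * helper name.
+ */
+#if 0
+#include <EGL/egl.h>
+static GLenum read_pixels_robust(GLint x, GLint y, GLsizei w, GLsizei h,
+                                 void *dst, GLsizei dst_size)
+{
+    PFNGLREADNPIXELSEXTPROC readn =
+        (PFNGLREADNPIXELSEXTPROC)eglGetProcAddress("glReadnPixelsEXT");
+    PFNGLGETGRAPHICSRESETSTATUSEXTPROC reset_status =
+        (PFNGLGETGRAPHICSRESETSTATUSEXTPROC)eglGetProcAddress("glGetGraphicsResetStatusEXT");
+    /* Bounded write: the driver writes at most dst_size bytes into dst. */
+    readn(x, y, w, h, GL_RGBA, GL_UNSIGNED_BYTE, dst_size, dst);
+    /* Returns GL_NO_ERROR unless a context reset occurred. */
+    return reset_status();
+}
+#endif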
+
+#ifndef GL_EXT_sRGB
+#define GL_EXT_sRGB 1
+#define GL_SRGB_EXT 0x8C40
+#define GL_SRGB_ALPHA_EXT 0x8C42
+#define GL_SRGB8_ALPHA8_EXT 0x8C43
+#define GL_FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING_EXT 0x8210
+#endif /* GL_EXT_sRGB */
+
+#ifndef GL_EXT_sRGB_write_control
+#define GL_EXT_sRGB_write_control 1
+#define GL_FRAMEBUFFER_SRGB_EXT 0x8DB9
+#endif /* GL_EXT_sRGB_write_control */
+
+#ifndef GL_EXT_semaphore
+#define GL_EXT_semaphore 1
+#define GL_LAYOUT_GENERAL_EXT 0x958D
+#define GL_LAYOUT_COLOR_ATTACHMENT_EXT 0x958E
+#define GL_LAYOUT_DEPTH_STENCIL_ATTACHMENT_EXT 0x958F
+#define GL_LAYOUT_DEPTH_STENCIL_READ_ONLY_EXT 0x9590
+#define GL_LAYOUT_SHADER_READ_ONLY_EXT 0x9591
+#define GL_LAYOUT_TRANSFER_SRC_EXT 0x9592
+#define GL_LAYOUT_TRANSFER_DST_EXT 0x9593
+typedef void (GL_APIENTRYP PFNGLGENSEMAPHORESEXTPROC) (GLsizei n, GLuint *semaphores);
+typedef void (GL_APIENTRYP PFNGLDELETESEMAPHORESEXTPROC) (GLsizei n, const GLuint *semaphores);
+typedef GLboolean (GL_APIENTRYP PFNGLISSEMAPHOREEXTPROC) (GLuint semaphore);
+typedef void (GL_APIENTRYP PFNGLSEMAPHOREPARAMETERUI64VEXTPROC) (GLuint semaphore, GLenum pname, const GLuint64 *params);
+typedef void (GL_APIENTRYP PFNGLGETSEMAPHOREPARAMETERUI64VEXTPROC) (GLuint semaphore, GLenum pname, GLuint64 *params);
+typedef void (GL_APIENTRYP PFNGLWAITSEMAPHOREEXTPROC) (GLuint semaphore, GLuint numBufferBarriers, const GLuint *buffers, GLuint numTextureBarriers, const GLuint *textures, const GLenum *srcLayouts);
+typedef void (GL_APIENTRYP PFNGLSIGNALSEMAPHOREEXTPROC) (GLuint semaphore, GLuint numBufferBarriers, const GLuint *buffers, GLuint numTextureBarriers, const GLuint *textures, const GLenum *dstLayouts);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glGenSemaphoresEXT (GLsizei n, GLuint *semaphores);
+GL_APICALL void GL_APIENTRY glDeleteSemaphoresEXT (GLsizei n, const GLuint *semaphores);
+GL_APICALL GLboolean GL_APIENTRY glIsSemaphoreEXT (GLuint semaphore);
+GL_APICALL void GL_APIENTRY glSemaphoreParameterui64vEXT (GLuint semaphore, GLenum pname, const GLuint64 *params);
+GL_APICALL void GL_APIENTRY glGetSemaphoreParameterui64vEXT (GLuint semaphore, GLenum pname, GLuint64 *params);
+GL_APICALL void GL_APIENTRY glWaitSemaphoreEXT (GLuint semaphore, GLuint numBufferBarriers, const GLuint *buffers, GLuint numTextureBarriers, const GLuint *textures, const GLenum *srcLayouts);
+GL_APICALL void GL_APIENTRY glSignalSemaphoreEXT (GLuint semaphore, GLuint numBufferBarriers, const GLuint *buffers, GLuint numTextureBarriers, const GLuint *textures, const GLenum *dstLayouts);
+#endif
+#endif /* GL_EXT_semaphore */
+
+#ifndef GL_EXT_semaphore_fd
+#define GL_EXT_semaphore_fd 1
+typedef void (GL_APIENTRYP PFNGLIMPORTSEMAPHOREFDEXTPROC) (GLuint semaphore, GLenum handleType, GLint fd);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glImportSemaphoreFdEXT (GLuint semaphore, GLenum handleType, GLint fd);
+#endif
+#endif /* GL_EXT_semaphore_fd */
+
+#ifndef GL_EXT_semaphore_win32
+#define GL_EXT_semaphore_win32 1
+#define GL_HANDLE_TYPE_D3D12_FENCE_EXT 0x9594
+#define GL_D3D12_FENCE_VALUE_EXT 0x9595
+typedef void (GL_APIENTRYP PFNGLIMPORTSEMAPHOREWIN32HANDLEEXTPROC) (GLuint semaphore, GLenum handleType, void *handle);
+typedef void (GL_APIENTRYP PFNGLIMPORTSEMAPHOREWIN32NAMEEXTPROC) (GLuint semaphore, GLenum handleType, const void *name);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glImportSemaphoreWin32HandleEXT (GLuint semaphore, GLenum handleType, void *handle);
+GL_APICALL void GL_APIENTRY glImportSemaphoreWin32NameEXT (GLuint semaphore, GLenum handleType, const void *name);
+#endif
+#endif /* GL_EXT_semaphore_win32 */
+
+#ifndef GL_EXT_separate_shader_objects
+#define GL_EXT_separate_shader_objects 1
+#define GL_ACTIVE_PROGRAM_EXT 0x8259
+#define GL_VERTEX_SHADER_BIT_EXT 0x00000001
+#define GL_FRAGMENT_SHADER_BIT_EXT 0x00000002
+#define GL_ALL_SHADER_BITS_EXT 0xFFFFFFFF
+#define GL_PROGRAM_SEPARABLE_EXT 0x8258
+#define GL_PROGRAM_PIPELINE_BINDING_EXT 0x825A
+typedef void (GL_APIENTRYP PFNGLACTIVESHADERPROGRAMEXTPROC) (GLuint pipeline, GLuint program);
+typedef void (GL_APIENTRYP PFNGLBINDPROGRAMPIPELINEEXTPROC) (GLuint pipeline);
+typedef GLuint (GL_APIENTRYP PFNGLCREATESHADERPROGRAMVEXTPROC) (GLenum type, GLsizei count, const GLchar **strings);
+typedef void (GL_APIENTRYP PFNGLDELETEPROGRAMPIPELINESEXTPROC) (GLsizei n, const GLuint *pipelines);
+typedef void (GL_APIENTRYP PFNGLGENPROGRAMPIPELINESEXTPROC) (GLsizei n, GLuint *pipelines); +typedef void (GL_APIENTRYP PFNGLGETPROGRAMPIPELINEINFOLOGEXTPROC) (GLuint pipeline, GLsizei bufSize, GLsizei *length, GLchar *infoLog); +typedef void (GL_APIENTRYP PFNGLGETPROGRAMPIPELINEIVEXTPROC) (GLuint pipeline, GLenum pname, GLint *params); +typedef GLboolean (GL_APIENTRYP PFNGLISPROGRAMPIPELINEEXTPROC) (GLuint pipeline); +typedef void (GL_APIENTRYP PFNGLPROGRAMPARAMETERIEXTPROC) (GLuint program, GLenum pname, GLint value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM1FEXTPROC) (GLuint program, GLint location, GLfloat v0); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM1FVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM1IEXTPROC) (GLuint program, GLint location, GLint v0); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM1IVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLint *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM2FEXTPROC) (GLuint program, GLint location, GLfloat v0, GLfloat v1); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM2FVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM2IEXTPROC) (GLuint program, GLint location, GLint v0, GLint v1); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM2IVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLint *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM3FEXTPROC) (GLuint program, GLint location, GLfloat v0, GLfloat v1, GLfloat v2); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM3FVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM3IEXTPROC) (GLuint program, GLint location, GLint v0, GLint v1, GLint v2); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM3IVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLint *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM4FEXTPROC) (GLuint program, GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM4FVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM4IEXTPROC) (GLuint program, GLint location, GLint v0, GLint v1, GLint v2, GLint v3); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM4IVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLint *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX2FVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX3FVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX4FVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUSEPROGRAMSTAGESEXTPROC) (GLuint pipeline, GLbitfield stages, GLuint program); +typedef void (GL_APIENTRYP PFNGLVALIDATEPROGRAMPIPELINEEXTPROC) (GLuint pipeline); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM1UIEXTPROC) (GLuint program, GLint location, GLuint v0); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM2UIEXTPROC) (GLuint program, GLint location, GLuint v0, GLuint v1); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM3UIEXTPROC) (GLuint program, GLint location, GLuint v0, GLuint v1, GLuint v2); +typedef void (GL_APIENTRYP 
PFNGLPROGRAMUNIFORM4UIEXTPROC) (GLuint program, GLint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM1UIVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLuint *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM2UIVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLuint *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM3UIVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLuint *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM4UIVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLuint *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX2X3FVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX3X2FVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX2X4FVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX4X2FVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX3X4FVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX4X3FVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glActiveShaderProgramEXT (GLuint pipeline, GLuint program); +GL_APICALL void GL_APIENTRY glBindProgramPipelineEXT (GLuint pipeline); +GL_APICALL GLuint GL_APIENTRY glCreateShaderProgramvEXT (GLenum type, GLsizei count, const GLchar **strings); +GL_APICALL void GL_APIENTRY glDeleteProgramPipelinesEXT (GLsizei n, const GLuint *pipelines); +GL_APICALL void GL_APIENTRY glGenProgramPipelinesEXT (GLsizei n, GLuint *pipelines); +GL_APICALL void GL_APIENTRY glGetProgramPipelineInfoLogEXT (GLuint pipeline, GLsizei bufSize, GLsizei *length, GLchar *infoLog); +GL_APICALL void GL_APIENTRY glGetProgramPipelineivEXT (GLuint pipeline, GLenum pname, GLint *params); +GL_APICALL GLboolean GL_APIENTRY glIsProgramPipelineEXT (GLuint pipeline); +GL_APICALL void GL_APIENTRY glProgramParameteriEXT (GLuint program, GLenum pname, GLint value); +GL_APICALL void GL_APIENTRY glProgramUniform1fEXT (GLuint program, GLint location, GLfloat v0); +GL_APICALL void GL_APIENTRY glProgramUniform1fvEXT (GLuint program, GLint location, GLsizei count, const GLfloat *value); +GL_APICALL void GL_APIENTRY glProgramUniform1iEXT (GLuint program, GLint location, GLint v0); +GL_APICALL void GL_APIENTRY glProgramUniform1ivEXT (GLuint program, GLint location, GLsizei count, const GLint *value); +GL_APICALL void GL_APIENTRY glProgramUniform2fEXT (GLuint program, GLint location, GLfloat v0, GLfloat v1); +GL_APICALL void GL_APIENTRY glProgramUniform2fvEXT (GLuint program, GLint location, GLsizei count, const GLfloat *value); +GL_APICALL void GL_APIENTRY glProgramUniform2iEXT (GLuint program, GLint location, GLint v0, GLint v1); +GL_APICALL void GL_APIENTRY glProgramUniform2ivEXT (GLuint program, GLint location, GLsizei count, const GLint *value); +GL_APICALL void GL_APIENTRY glProgramUniform3fEXT (GLuint program, GLint location, GLfloat v0, GLfloat v1, GLfloat v2); +GL_APICALL void GL_APIENTRY glProgramUniform3fvEXT (GLuint program, 
GLint location, GLsizei count, const GLfloat *value);
+GL_APICALL void GL_APIENTRY glProgramUniform3iEXT (GLuint program, GLint location, GLint v0, GLint v1, GLint v2);
+GL_APICALL void GL_APIENTRY glProgramUniform3ivEXT (GLuint program, GLint location, GLsizei count, const GLint *value);
+GL_APICALL void GL_APIENTRY glProgramUniform4fEXT (GLuint program, GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3);
+GL_APICALL void GL_APIENTRY glProgramUniform4fvEXT (GLuint program, GLint location, GLsizei count, const GLfloat *value);
+GL_APICALL void GL_APIENTRY glProgramUniform4iEXT (GLuint program, GLint location, GLint v0, GLint v1, GLint v2, GLint v3);
+GL_APICALL void GL_APIENTRY glProgramUniform4ivEXT (GLuint program, GLint location, GLsizei count, const GLint *value);
+GL_APICALL void GL_APIENTRY glProgramUniformMatrix2fvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GL_APICALL void GL_APIENTRY glProgramUniformMatrix3fvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GL_APICALL void GL_APIENTRY glProgramUniformMatrix4fvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GL_APICALL void GL_APIENTRY glUseProgramStagesEXT (GLuint pipeline, GLbitfield stages, GLuint program);
+GL_APICALL void GL_APIENTRY glValidateProgramPipelineEXT (GLuint pipeline);
+GL_APICALL void GL_APIENTRY glProgramUniform1uiEXT (GLuint program, GLint location, GLuint v0);
+GL_APICALL void GL_APIENTRY glProgramUniform2uiEXT (GLuint program, GLint location, GLuint v0, GLuint v1);
+GL_APICALL void GL_APIENTRY glProgramUniform3uiEXT (GLuint program, GLint location, GLuint v0, GLuint v1, GLuint v2);
+GL_APICALL void GL_APIENTRY glProgramUniform4uiEXT (GLuint program, GLint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3);
+GL_APICALL void GL_APIENTRY glProgramUniform1uivEXT (GLuint program, GLint location, GLsizei count, const GLuint *value);
+GL_APICALL void GL_APIENTRY glProgramUniform2uivEXT (GLuint program, GLint location, GLsizei count, const GLuint *value);
+GL_APICALL void GL_APIENTRY glProgramUniform3uivEXT (GLuint program, GLint location, GLsizei count, const GLuint *value);
+GL_APICALL void GL_APIENTRY glProgramUniform4uivEXT (GLuint program, GLint location, GLsizei count, const GLuint *value);
+GL_APICALL void GL_APIENTRY glProgramUniformMatrix2x3fvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GL_APICALL void GL_APIENTRY glProgramUniformMatrix3x2fvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GL_APICALL void GL_APIENTRY glProgramUniformMatrix2x4fvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GL_APICALL void GL_APIENTRY glProgramUniformMatrix4x2fvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GL_APICALL void GL_APIENTRY glProgramUniformMatrix3x4fvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GL_APICALL void GL_APIENTRY glProgramUniformMatrix4x3fvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+#endif
+#endif /* GL_EXT_separate_shader_objects */
+
+#ifndef GL_EXT_shader_framebuffer_fetch
+#define GL_EXT_shader_framebuffer_fetch 1
+#define GL_FRAGMENT_SHADER_DISCARDS_SAMPLES_EXT 0x8A52
+#endif /* GL_EXT_shader_framebuffer_fetch */
+
+#ifndef GL_EXT_shader_group_vote
+#define GL_EXT_shader_group_vote 1
+#endif /* GL_EXT_shader_group_vote */
+
+#ifndef GL_EXT_shader_implicit_conversions
+#define GL_EXT_shader_implicit_conversions 1
+#endif /* GL_EXT_shader_implicit_conversions */
+
+#ifndef GL_EXT_shader_integer_mix
+#define GL_EXT_shader_integer_mix 1
+#endif /* GL_EXT_shader_integer_mix */
+
+#ifndef GL_EXT_shader_io_blocks
+#define GL_EXT_shader_io_blocks 1
+#endif /* GL_EXT_shader_io_blocks */
+
+#ifndef GL_EXT_shader_non_constant_global_initializers
+#define GL_EXT_shader_non_constant_global_initializers 1
+#endif /* GL_EXT_shader_non_constant_global_initializers */
+
+#ifndef GL_EXT_shader_pixel_local_storage
+#define GL_EXT_shader_pixel_local_storage 1
+#define GL_MAX_SHADER_PIXEL_LOCAL_STORAGE_FAST_SIZE_EXT 0x8F63
+#define GL_MAX_SHADER_PIXEL_LOCAL_STORAGE_SIZE_EXT 0x8F67
+#define GL_SHADER_PIXEL_LOCAL_STORAGE_EXT 0x8F64
+#endif /* GL_EXT_shader_pixel_local_storage */
+
+#ifndef GL_EXT_shader_pixel_local_storage2
+#define GL_EXT_shader_pixel_local_storage2 1
+#define GL_MAX_SHADER_COMBINED_LOCAL_STORAGE_FAST_SIZE_EXT 0x9650
+#define GL_MAX_SHADER_COMBINED_LOCAL_STORAGE_SIZE_EXT 0x9651
+#define GL_FRAMEBUFFER_INCOMPLETE_INSUFFICIENT_SHADER_COMBINED_LOCAL_STORAGE_EXT 0x9652
+typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERPIXELLOCALSTORAGESIZEEXTPROC) (GLuint target, GLsizei size);
+typedef GLsizei (GL_APIENTRYP PFNGLGETFRAMEBUFFERPIXELLOCALSTORAGESIZEEXTPROC) (GLuint target);
+typedef void (GL_APIENTRYP PFNGLCLEARPIXELLOCALSTORAGEUIEXTPROC) (GLsizei offset, GLsizei n, const GLuint *values);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glFramebufferPixelLocalStorageSizeEXT (GLuint target, GLsizei size);
+GL_APICALL GLsizei GL_APIENTRY glGetFramebufferPixelLocalStorageSizeEXT (GLuint target);
+GL_APICALL void GL_APIENTRY glClearPixelLocalStorageuiEXT (GLsizei offset, GLsizei n, const GLuint *values);
+#endif
+#endif /* GL_EXT_shader_pixel_local_storage2 */
+
+#ifndef GL_EXT_shader_texture_lod
+#define GL_EXT_shader_texture_lod 1
+#endif /* GL_EXT_shader_texture_lod */
+
+#ifndef GL_EXT_shadow_samplers
+#define GL_EXT_shadow_samplers 1
+#define GL_TEXTURE_COMPARE_MODE_EXT 0x884C
+#define GL_TEXTURE_COMPARE_FUNC_EXT 0x884D
+#define GL_COMPARE_REF_TO_TEXTURE_EXT 0x884E
+#define GL_SAMPLER_2D_SHADOW_EXT 0x8B62
+#endif /* GL_EXT_shadow_samplers */
+
+#ifndef GL_EXT_sparse_texture
+#define GL_EXT_sparse_texture 1
+#define GL_TEXTURE_SPARSE_EXT 0x91A6
+#define GL_VIRTUAL_PAGE_SIZE_INDEX_EXT 0x91A7
+#define GL_NUM_SPARSE_LEVELS_EXT 0x91AA
+#define GL_NUM_VIRTUAL_PAGE_SIZES_EXT 0x91A8
+#define GL_VIRTUAL_PAGE_SIZE_X_EXT 0x9195
+#define GL_VIRTUAL_PAGE_SIZE_Y_EXT 0x9196
+#define GL_VIRTUAL_PAGE_SIZE_Z_EXT 0x9197
+#define GL_TEXTURE_2D_ARRAY 0x8C1A
+#define GL_TEXTURE_3D 0x806F
+#define GL_MAX_SPARSE_TEXTURE_SIZE_EXT 0x9198
+#define GL_MAX_SPARSE_3D_TEXTURE_SIZE_EXT 0x9199
+#define GL_MAX_SPARSE_ARRAY_TEXTURE_LAYERS_EXT 0x919A
+#define GL_SPARSE_TEXTURE_FULL_ARRAY_CUBE_MIPMAPS_EXT 0x91A9
+typedef void (GL_APIENTRYP PFNGLTEXPAGECOMMITMENTEXTPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLboolean commit);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glTexPageCommitmentEXT (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLboolean commit);
+#endif
+#endif /* GL_EXT_sparse_texture */
+
+#ifndef GL_EXT_sparse_texture2
+#define GL_EXT_sparse_texture2 1
+#endif /* GL_EXT_sparse_texture2 */
+
+#ifndef GL_EXT_tessellation_point_size
+#define GL_EXT_tessellation_point_size 1
+#endif /* GL_EXT_tessellation_point_size */
+
+#ifndef GL_EXT_tessellation_shader
+#define GL_EXT_tessellation_shader 1
+#define GL_PATCHES_EXT 0x000E
+#define GL_PATCH_VERTICES_EXT 0x8E72
+#define GL_TESS_CONTROL_OUTPUT_VERTICES_EXT 0x8E75
+#define GL_TESS_GEN_MODE_EXT 0x8E76
+#define GL_TESS_GEN_SPACING_EXT 0x8E77
+#define GL_TESS_GEN_VERTEX_ORDER_EXT 0x8E78
+#define GL_TESS_GEN_POINT_MODE_EXT 0x8E79
+#define GL_ISOLINES_EXT 0x8E7A
+#define GL_QUADS_EXT 0x0007
+#define GL_FRACTIONAL_ODD_EXT 0x8E7B
+#define GL_FRACTIONAL_EVEN_EXT 0x8E7C
+#define GL_MAX_PATCH_VERTICES_EXT 0x8E7D
+#define GL_MAX_TESS_GEN_LEVEL_EXT 0x8E7E
+#define GL_MAX_TESS_CONTROL_UNIFORM_COMPONENTS_EXT 0x8E7F
+#define GL_MAX_TESS_EVALUATION_UNIFORM_COMPONENTS_EXT 0x8E80
+#define GL_MAX_TESS_CONTROL_TEXTURE_IMAGE_UNITS_EXT 0x8E81
+#define GL_MAX_TESS_EVALUATION_TEXTURE_IMAGE_UNITS_EXT 0x8E82
+#define GL_MAX_TESS_CONTROL_OUTPUT_COMPONENTS_EXT 0x8E83
+#define GL_MAX_TESS_PATCH_COMPONENTS_EXT 0x8E84
+#define GL_MAX_TESS_CONTROL_TOTAL_OUTPUT_COMPONENTS_EXT 0x8E85
+#define GL_MAX_TESS_EVALUATION_OUTPUT_COMPONENTS_EXT 0x8E86
+#define GL_MAX_TESS_CONTROL_UNIFORM_BLOCKS_EXT 0x8E89
+#define GL_MAX_TESS_EVALUATION_UNIFORM_BLOCKS_EXT 0x8E8A
+#define GL_MAX_TESS_CONTROL_INPUT_COMPONENTS_EXT 0x886C
+#define GL_MAX_TESS_EVALUATION_INPUT_COMPONENTS_EXT 0x886D
+#define GL_MAX_COMBINED_TESS_CONTROL_UNIFORM_COMPONENTS_EXT 0x8E1E
+#define GL_MAX_COMBINED_TESS_EVALUATION_UNIFORM_COMPONENTS_EXT 0x8E1F
+#define GL_MAX_TESS_CONTROL_ATOMIC_COUNTER_BUFFERS_EXT 0x92CD
+#define GL_MAX_TESS_EVALUATION_ATOMIC_COUNTER_BUFFERS_EXT 0x92CE
+#define GL_MAX_TESS_CONTROL_ATOMIC_COUNTERS_EXT 0x92D3
+#define GL_MAX_TESS_EVALUATION_ATOMIC_COUNTERS_EXT 0x92D4
+#define GL_MAX_TESS_CONTROL_IMAGE_UNIFORMS_EXT 0x90CB
+#define GL_MAX_TESS_EVALUATION_IMAGE_UNIFORMS_EXT 0x90CC
+#define GL_MAX_TESS_CONTROL_SHADER_STORAGE_BLOCKS_EXT 0x90D8
+#define GL_MAX_TESS_EVALUATION_SHADER_STORAGE_BLOCKS_EXT 0x90D9
+#define GL_PRIMITIVE_RESTART_FOR_PATCHES_SUPPORTED 0x8221
+#define GL_IS_PER_PATCH_EXT 0x92E7
+#define GL_REFERENCED_BY_TESS_CONTROL_SHADER_EXT 0x9307
+#define GL_REFERENCED_BY_TESS_EVALUATION_SHADER_EXT 0x9308
+#define GL_TESS_CONTROL_SHADER_EXT 0x8E88
+#define GL_TESS_EVALUATION_SHADER_EXT 0x8E87
+#define GL_TESS_CONTROL_SHADER_BIT_EXT 0x00000008
+#define GL_TESS_EVALUATION_SHADER_BIT_EXT 0x00000010
+typedef void (GL_APIENTRYP PFNGLPATCHPARAMETERIEXTPROC) (GLenum pname, GLint value);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glPatchParameteriEXT (GLenum pname, GLint value);
+#endif
+#endif /* GL_EXT_tessellation_shader */
+
+#ifndef GL_EXT_texture_border_clamp
+#define GL_EXT_texture_border_clamp 1
+#define GL_TEXTURE_BORDER_COLOR_EXT 0x1004
+#define GL_CLAMP_TO_BORDER_EXT 0x812D
+typedef void (GL_APIENTRYP PFNGLTEXPARAMETERIIVEXTPROC) (GLenum target, GLenum pname, const GLint *params);
+typedef void (GL_APIENTRYP PFNGLTEXPARAMETERIUIVEXTPROC) (GLenum target, GLenum pname, const GLuint *params);
+typedef void (GL_APIENTRYP PFNGLGETTEXPARAMETERIIVEXTPROC) (GLenum target, GLenum pname, GLint *params);
+typedef void (GL_APIENTRYP PFNGLGETTEXPARAMETERIUIVEXTPROC) (GLenum target, GLenum pname, GLuint *params);
+typedef void (GL_APIENTRYP PFNGLSAMPLERPARAMETERIIVEXTPROC) (GLuint sampler, GLenum pname, const GLint *param);
+typedef void (GL_APIENTRYP PFNGLSAMPLERPARAMETERIUIVEXTPROC) (GLuint sampler, GLenum pname, const GLuint *param);
+typedef void (GL_APIENTRYP PFNGLGETSAMPLERPARAMETERIIVEXTPROC) (GLuint sampler, GLenum pname, GLint *params);
+typedef void (GL_APIENTRYP PFNGLGETSAMPLERPARAMETERIUIVEXTPROC) (GLuint sampler, GLenum pname, GLuint *params);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glTexParameterIivEXT (GLenum target, GLenum pname, const GLint *params);
+GL_APICALL void GL_APIENTRY glTexParameterIuivEXT (GLenum target, GLenum pname, const GLuint *params);
+GL_APICALL void GL_APIENTRY glGetTexParameterIivEXT (GLenum target, GLenum pname, GLint *params);
+GL_APICALL void GL_APIENTRY glGetTexParameterIuivEXT (GLenum target, GLenum pname, GLuint *params);
+GL_APICALL void GL_APIENTRY glSamplerParameterIivEXT (GLuint sampler, GLenum pname, const GLint *param);
+GL_APICALL void GL_APIENTRY glSamplerParameterIuivEXT (GLuint sampler, GLenum pname, const GLuint *param);
+GL_APICALL void GL_APIENTRY glGetSamplerParameterIivEXT (GLuint sampler, GLenum pname, GLint *params);
+GL_APICALL void GL_APIENTRY glGetSamplerParameterIuivEXT (GLuint sampler, GLenum pname, GLuint *params);
+#endif
+#endif /* GL_EXT_texture_border_clamp */
+
+#ifndef GL_EXT_texture_buffer
+#define GL_EXT_texture_buffer 1
+#define GL_TEXTURE_BUFFER_EXT 0x8C2A
+#define GL_TEXTURE_BUFFER_BINDING_EXT 0x8C2A
+#define GL_MAX_TEXTURE_BUFFER_SIZE_EXT 0x8C2B
+#define GL_TEXTURE_BINDING_BUFFER_EXT 0x8C2C
+#define GL_TEXTURE_BUFFER_DATA_STORE_BINDING_EXT 0x8C2D
+#define GL_TEXTURE_BUFFER_OFFSET_ALIGNMENT_EXT 0x919F
+#define GL_SAMPLER_BUFFER_EXT 0x8DC2
+#define GL_INT_SAMPLER_BUFFER_EXT 0x8DD0
+#define GL_UNSIGNED_INT_SAMPLER_BUFFER_EXT 0x8DD8
+#define GL_IMAGE_BUFFER_EXT 0x9051
+#define GL_INT_IMAGE_BUFFER_EXT 0x905C
+#define GL_UNSIGNED_INT_IMAGE_BUFFER_EXT 0x9067
+#define GL_TEXTURE_BUFFER_OFFSET_EXT 0x919D
+#define GL_TEXTURE_BUFFER_SIZE_EXT 0x919E
+typedef void (GL_APIENTRYP PFNGLTEXBUFFEREXTPROC) (GLenum target, GLenum internalformat, GLuint buffer);
+typedef void (GL_APIENTRYP PFNGLTEXBUFFERRANGEEXTPROC) (GLenum target, GLenum internalformat, GLuint buffer, GLintptr offset, GLsizeiptr size);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glTexBufferEXT (GLenum target, GLenum internalformat, GLuint buffer);
+GL_APICALL void GL_APIENTRY glTexBufferRangeEXT (GLenum target, GLenum internalformat, GLuint buffer, GLintptr offset, GLsizeiptr size);
+#endif
+#endif /* GL_EXT_texture_buffer */
+
+#ifndef GL_EXT_texture_compression_astc_decode_mode
+#define GL_EXT_texture_compression_astc_decode_mode 1
+#define GL_TEXTURE_ASTC_DECODE_PRECISION_EXT 0x8F69
+#endif /* GL_EXT_texture_compression_astc_decode_mode */
+
+#ifndef GL_EXT_texture_compression_dxt1
+#define GL_EXT_texture_compression_dxt1 1
+#define GL_COMPRESSED_RGB_S3TC_DXT1_EXT 0x83F0
+#define GL_COMPRESSED_RGBA_S3TC_DXT1_EXT 0x83F1
+#endif /* GL_EXT_texture_compression_dxt1 */
+
+#ifndef GL_EXT_texture_compression_s3tc
+#define GL_EXT_texture_compression_s3tc 1
+#define GL_COMPRESSED_RGBA_S3TC_DXT3_EXT 0x83F2
+#define GL_COMPRESSED_RGBA_S3TC_DXT5_EXT 0x83F3
+#endif /* GL_EXT_texture_compression_s3tc */
+
+#ifndef GL_EXT_texture_cube_map_array
+#define GL_EXT_texture_cube_map_array 1
+#define GL_TEXTURE_CUBE_MAP_ARRAY_EXT 0x9009
+#define GL_TEXTURE_BINDING_CUBE_MAP_ARRAY_EXT 0x900A
+#define GL_SAMPLER_CUBE_MAP_ARRAY_EXT 0x900C
+#define GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW_EXT 0x900D
+#define GL_INT_SAMPLER_CUBE_MAP_ARRAY_EXT 0x900E
+#define GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY_EXT 0x900F
+#define GL_IMAGE_CUBE_MAP_ARRAY_EXT 0x9054
+#define GL_INT_IMAGE_CUBE_MAP_ARRAY_EXT 0x905F
+#define GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY_EXT 0x906A
+#endif /* GL_EXT_texture_cube_map_array */
+
+#ifndef GL_EXT_texture_filter_anisotropic
+#define GL_EXT_texture_filter_anisotropic 1
+#define GL_TEXTURE_MAX_ANISOTROPY_EXT 0x84FE
+#define GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT 0x84FF
+#endif /* GL_EXT_texture_filter_anisotropic */
+
+#ifndef GL_EXT_texture_filter_minmax
+#define GL_EXT_texture_filter_minmax 1
+#endif /* GL_EXT_texture_filter_minmax */
+
+#ifndef GL_EXT_texture_format_BGRA8888
+#define GL_EXT_texture_format_BGRA8888 1
+#endif /* GL_EXT_texture_format_BGRA8888 */
+
+#ifndef GL_EXT_texture_norm16
+#define GL_EXT_texture_norm16 1
+#define GL_R16_EXT 0x822A
+#define GL_RG16_EXT 0x822C
+#define GL_RGBA16_EXT 0x805B
+#define GL_RGB16_EXT 0x8054
+#define GL_RGB16_SNORM_EXT 0x8F9A
+#endif /* GL_EXT_texture_norm16 */
+
+#ifndef GL_EXT_texture_rg
+#define GL_EXT_texture_rg 1
+#define GL_RED_EXT 0x1903
+#define GL_RG_EXT 0x8227
+#define GL_R8_EXT 0x8229
+#define GL_RG8_EXT 0x822B
+#endif /* GL_EXT_texture_rg */
+
+#ifndef GL_EXT_texture_sRGB_R8
+#define GL_EXT_texture_sRGB_R8 1
+#define GL_SR8_EXT 0x8FBD
+#endif /* GL_EXT_texture_sRGB_R8 */
+
+#ifndef GL_EXT_texture_sRGB_RG8
+#define GL_EXT_texture_sRGB_RG8 1
+#define GL_SRG8_EXT 0x8FBE
+#endif /* GL_EXT_texture_sRGB_RG8 */
+
+#ifndef GL_EXT_texture_sRGB_decode
+#define GL_EXT_texture_sRGB_decode 1
+#define GL_TEXTURE_SRGB_DECODE_EXT 0x8A48
+#define GL_DECODE_EXT 0x8A49
+#define GL_SKIP_DECODE_EXT 0x8A4A
+#endif /* GL_EXT_texture_sRGB_decode */
+
+#ifndef GL_EXT_texture_storage
+#define GL_EXT_texture_storage 1
+#define GL_TEXTURE_IMMUTABLE_FORMAT_EXT 0x912F
+#define GL_ALPHA8_EXT 0x803C
+#define GL_LUMINANCE8_EXT 0x8040
+#define GL_LUMINANCE8_ALPHA8_EXT 0x8045
+#define GL_RGBA32F_EXT 0x8814
+#define GL_RGB32F_EXT 0x8815
+#define GL_ALPHA32F_EXT 0x8816
+#define GL_LUMINANCE32F_EXT 0x8818
+#define GL_LUMINANCE_ALPHA32F_EXT 0x8819
+#define GL_ALPHA16F_EXT 0x881C
+#define GL_LUMINANCE16F_EXT 0x881E
+#define GL_LUMINANCE_ALPHA16F_EXT 0x881F
+#define GL_R32F_EXT 0x822E
+#define GL_RG32F_EXT 0x8230
+typedef void (GL_APIENTRYP PFNGLTEXSTORAGE1DEXTPROC) (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width);
+typedef void (GL_APIENTRYP PFNGLTEXSTORAGE2DEXTPROC) (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height);
+typedef void (GL_APIENTRYP PFNGLTEXSTORAGE3DEXTPROC) (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth);
+typedef void (GL_APIENTRYP PFNGLTEXTURESTORAGE1DEXTPROC) (GLuint texture, GLenum target, GLsizei levels, GLenum internalformat, GLsizei width);
+typedef void (GL_APIENTRYP PFNGLTEXTURESTORAGE2DEXTPROC) (GLuint texture, GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height);
+typedef void (GL_APIENTRYP PFNGLTEXTURESTORAGE3DEXTPROC) (GLuint texture, GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glTexStorage1DEXT (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width);
+GL_APICALL void GL_APIENTRY glTexStorage2DEXT (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height);
+GL_APICALL void GL_APIENTRY glTexStorage3DEXT (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth);
+GL_APICALL void GL_APIENTRY glTextureStorage1DEXT (GLuint texture, GLenum target, GLsizei levels, GLenum internalformat, GLsizei width);
+GL_APICALL void GL_APIENTRY glTextureStorage2DEXT (GLuint texture, GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height);
+GL_APICALL void GL_APIENTRY glTextureStorage3DEXT (GLuint texture, GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth);
+#endif
+#endif /* GL_EXT_texture_storage */
+
+#ifndef GL_EXT_texture_type_2_10_10_10_REV
+#define GL_EXT_texture_type_2_10_10_10_REV 1
+#define GL_UNSIGNED_INT_2_10_10_10_REV_EXT 0x8368
+#endif /* GL_EXT_texture_type_2_10_10_10_REV */
+
+#ifndef GL_EXT_texture_view
+#define GL_EXT_texture_view 1
+#define GL_TEXTURE_VIEW_MIN_LEVEL_EXT 0x82DB
+#define GL_TEXTURE_VIEW_NUM_LEVELS_EXT 0x82DC
+#define GL_TEXTURE_VIEW_MIN_LAYER_EXT 0x82DD
+#define GL_TEXTURE_VIEW_NUM_LAYERS_EXT 0x82DE
+typedef void (GL_APIENTRYP PFNGLTEXTUREVIEWEXTPROC) (GLuint texture, GLenum target, GLuint origtexture, GLenum internalformat, GLuint minlevel, GLuint numlevels, GLuint minlayer, GLuint numlayers);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glTextureViewEXT (GLuint texture, GLenum target, GLuint origtexture, GLenum internalformat, GLuint minlevel, GLuint numlevels, GLuint minlayer, GLuint numlayers);
+#endif
+#endif /* GL_EXT_texture_view */
+
+#ifndef GL_EXT_unpack_subimage
+#define GL_EXT_unpack_subimage 1
+#define GL_UNPACK_ROW_LENGTH_EXT 0x0CF2
+#define GL_UNPACK_SKIP_ROWS_EXT 0x0CF3
+#define GL_UNPACK_SKIP_PIXELS_EXT 0x0CF4
+#endif /* GL_EXT_unpack_subimage */
+
+#ifndef GL_EXT_win32_keyed_mutex
+#define GL_EXT_win32_keyed_mutex 1
+typedef GLboolean (GL_APIENTRYP PFNGLACQUIREKEYEDMUTEXWIN32EXTPROC) (GLuint memory, GLuint64 key, GLuint timeout);
+typedef GLboolean (GL_APIENTRYP PFNGLRELEASEKEYEDMUTEXWIN32EXTPROC) (GLuint memory, GLuint64 key);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL GLboolean GL_APIENTRY glAcquireKeyedMutexWin32EXT (GLuint memory, GLuint64 key, GLuint timeout);
+GL_APICALL GLboolean GL_APIENTRY glReleaseKeyedMutexWin32EXT (GLuint memory, GLuint64 key);
+#endif
+#endif /* GL_EXT_win32_keyed_mutex */
+
+#ifndef GL_EXT_window_rectangles
+#define GL_EXT_window_rectangles 1
+#define GL_INCLUSIVE_EXT 0x8F10
+#define GL_EXCLUSIVE_EXT 0x8F11
+#define GL_WINDOW_RECTANGLE_EXT 0x8F12
+#define GL_WINDOW_RECTANGLE_MODE_EXT 0x8F13
+#define GL_MAX_WINDOW_RECTANGLES_EXT 0x8F14
+#define GL_NUM_WINDOW_RECTANGLES_EXT 0x8F15
+typedef void (GL_APIENTRYP PFNGLWINDOWRECTANGLESEXTPROC) (GLenum mode, GLsizei count, const GLint *box);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glWindowRectanglesEXT (GLenum mode, GLsizei count, const GLint *box);
+#endif
+#endif /* GL_EXT_window_rectangles */
+
+#ifndef GL_FJ_shader_binary_GCCSO
+#define GL_FJ_shader_binary_GCCSO 1
+#define GL_GCCSO_SHADER_BINARY_FJ 0x9260
+#endif /* GL_FJ_shader_binary_GCCSO */
+
+#ifndef GL_IMG_bindless_texture
+#define GL_IMG_bindless_texture 1
+typedef GLuint64 (GL_APIENTRYP PFNGLGETTEXTUREHANDLEIMGPROC) (GLuint texture);
+typedef GLuint64 (GL_APIENTRYP PFNGLGETTEXTURESAMPLERHANDLEIMGPROC) (GLuint texture, GLuint sampler);
+typedef void (GL_APIENTRYP PFNGLUNIFORMHANDLEUI64IMGPROC) (GLint location, GLuint64 value);
+typedef void (GL_APIENTRYP PFNGLUNIFORMHANDLEUI64VIMGPROC) (GLint location, GLsizei count, const GLuint64 *value);
+typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMHANDLEUI64IMGPROC) (GLuint program, GLint location, GLuint64 value);
+typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMHANDLEUI64VIMGPROC) (GLuint program, GLint location, GLsizei count, const GLuint64 *values);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL GLuint64 GL_APIENTRY glGetTextureHandleIMG (GLuint texture);
+GL_APICALL GLuint64 GL_APIENTRY glGetTextureSamplerHandleIMG (GLuint texture, GLuint sampler);
+GL_APICALL void GL_APIENTRY glUniformHandleui64IMG (GLint location, GLuint64 value);
+GL_APICALL void GL_APIENTRY glUniformHandleui64vIMG (GLint location, GLsizei count, const GLuint64 *value);
+GL_APICALL void GL_APIENTRY glProgramUniformHandleui64IMG (GLuint program, GLint location, GLuint64 value);
+GL_APICALL void GL_APIENTRY glProgramUniformHandleui64vIMG (GLuint program, GLint location, GLsizei count, const GLuint64 *values);
+#endif
+#endif /* GL_IMG_bindless_texture */
+
+#ifndef GL_IMG_framebuffer_downsample
+#define GL_IMG_framebuffer_downsample 1
+#define GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE_AND_DOWNSAMPLE_IMG 0x913C
+#define GL_NUM_DOWNSAMPLE_SCALES_IMG 0x913D
+#define GL_DOWNSAMPLE_SCALES_IMG 0x913E
+#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_SCALE_IMG 0x913F
+typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERTEXTURE2DDOWNSAMPLEIMGPROC) (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLint xscale, GLint yscale);
+typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERTEXTURELAYERDOWNSAMPLEIMGPROC) (GLenum target, GLenum attachment, GLuint texture, GLint level, GLint layer, GLint xscale, GLint yscale);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glFramebufferTexture2DDownsampleIMG (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLint xscale, GLint yscale);
+GL_APICALL void GL_APIENTRY glFramebufferTextureLayerDownsampleIMG (GLenum target, GLenum attachment, GLuint texture, GLint level, GLint layer, GLint xscale, GLint yscale);
+#endif
+#endif /* GL_IMG_framebuffer_downsample */
+
+#ifndef GL_IMG_multisampled_render_to_texture
+#define GL_IMG_multisampled_render_to_texture 1
+#define GL_RENDERBUFFER_SAMPLES_IMG 0x9133
+#define GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE_IMG 0x9134
+#define GL_MAX_SAMPLES_IMG 0x9135
+#define GL_TEXTURE_SAMPLES_IMG 0x9136
+typedef void (GL_APIENTRYP PFNGLRENDERBUFFERSTORAGEMULTISAMPLEIMGPROC) (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height);
+typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERTEXTURE2DMULTISAMPLEIMGPROC) (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLsizei samples);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glRenderbufferStorageMultisampleIMG (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height);
+GL_APICALL void GL_APIENTRY glFramebufferTexture2DMultisampleIMG (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLsizei samples);
+#endif
+#endif /* GL_IMG_multisampled_render_to_texture */
+
+#ifndef GL_IMG_program_binary
+#define GL_IMG_program_binary 1
+#define GL_SGX_PROGRAM_BINARY_IMG 0x9130
+#endif /* GL_IMG_program_binary */
+
+#ifndef GL_IMG_read_format
+#define GL_IMG_read_format 1
+#define GL_BGRA_IMG 0x80E1
+#define GL_UNSIGNED_SHORT_4_4_4_4_REV_IMG 0x8365
+#endif /* GL_IMG_read_format */
+
+#ifndef GL_IMG_shader_binary
+#define GL_IMG_shader_binary 1
+#define GL_SGX_BINARY_IMG 0x8C0A
+#endif /* GL_IMG_shader_binary */
+
+#ifndef GL_IMG_texture_compression_pvrtc
+#define GL_IMG_texture_compression_pvrtc 1
+#define GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG 0x8C00
+#define GL_COMPRESSED_RGB_PVRTC_2BPPV1_IMG 0x8C01
+#define GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG 0x8C02
+#define GL_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG 0x8C03
+#endif /* GL_IMG_texture_compression_pvrtc */
+
+#ifndef GL_IMG_texture_compression_pvrtc2
+#define GL_IMG_texture_compression_pvrtc2 1
+#define GL_COMPRESSED_RGBA_PVRTC_2BPPV2_IMG 0x9137
+#define GL_COMPRESSED_RGBA_PVRTC_4BPPV2_IMG 0x9138
+#endif /* GL_IMG_texture_compression_pvrtc2 */
+
+#ifndef GL_IMG_texture_filter_cubic
+#define GL_IMG_texture_filter_cubic 1
+#define GL_CUBIC_IMG 0x9139
+#define GL_CUBIC_MIPMAP_NEAREST_IMG 0x913A
+#define GL_CUBIC_MIPMAP_LINEAR_IMG 0x913B
+#endif /* GL_IMG_texture_filter_cubic */
+
+#ifndef GL_INTEL_conservative_rasterization
+#define GL_INTEL_conservative_rasterization 1
+#define GL_CONSERVATIVE_RASTERIZATION_INTEL 0x83FE
+#endif /* GL_INTEL_conservative_rasterization */
+
+#ifndef GL_INTEL_framebuffer_CMAA
+#define GL_INTEL_framebuffer_CMAA 1
+typedef void (GL_APIENTRYP PFNGLAPPLYFRAMEBUFFERATTACHMENTCMAAINTELPROC) (void);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glApplyFramebufferAttachmentCMAAINTEL (void);
+#endif
+#endif /* GL_INTEL_framebuffer_CMAA */
+
+#ifndef GL_INTEL_performance_query
+#define GL_INTEL_performance_query 1
+#define GL_PERFQUERY_SINGLE_CONTEXT_INTEL 0x00000000
+#define GL_PERFQUERY_GLOBAL_CONTEXT_INTEL 0x00000001
+#define GL_PERFQUERY_WAIT_INTEL 0x83FB
+#define GL_PERFQUERY_FLUSH_INTEL 0x83FA
+#define GL_PERFQUERY_DONOT_FLUSH_INTEL 0x83F9
+#define GL_PERFQUERY_COUNTER_EVENT_INTEL 0x94F0
+#define GL_PERFQUERY_COUNTER_DURATION_NORM_INTEL 0x94F1
+#define GL_PERFQUERY_COUNTER_DURATION_RAW_INTEL 0x94F2
+#define GL_PERFQUERY_COUNTER_THROUGHPUT_INTEL 0x94F3
+#define GL_PERFQUERY_COUNTER_RAW_INTEL 0x94F4
+#define GL_PERFQUERY_COUNTER_TIMESTAMP_INTEL 0x94F5
+#define GL_PERFQUERY_COUNTER_DATA_UINT32_INTEL 0x94F8
+#define GL_PERFQUERY_COUNTER_DATA_UINT64_INTEL 0x94F9
+#define GL_PERFQUERY_COUNTER_DATA_FLOAT_INTEL 0x94FA
+#define GL_PERFQUERY_COUNTER_DATA_DOUBLE_INTEL 0x94FB
+#define GL_PERFQUERY_COUNTER_DATA_BOOL32_INTEL 0x94FC
+#define GL_PERFQUERY_QUERY_NAME_LENGTH_MAX_INTEL 0x94FD
+#define GL_PERFQUERY_COUNTER_NAME_LENGTH_MAX_INTEL 0x94FE
+#define GL_PERFQUERY_COUNTER_DESC_LENGTH_MAX_INTEL 0x94FF
+#define GL_PERFQUERY_GPA_EXTENDED_COUNTERS_INTEL 0x9500
+typedef void (GL_APIENTRYP PFNGLBEGINPERFQUERYINTELPROC) (GLuint queryHandle);
+typedef void (GL_APIENTRYP PFNGLCREATEPERFQUERYINTELPROC) (GLuint queryId, GLuint *queryHandle);
+typedef void (GL_APIENTRYP PFNGLDELETEPERFQUERYINTELPROC) (GLuint queryHandle);
+typedef void (GL_APIENTRYP PFNGLENDPERFQUERYINTELPROC) (GLuint queryHandle);
+typedef void (GL_APIENTRYP PFNGLGETFIRSTPERFQUERYIDINTELPROC) (GLuint *queryId);
+typedef void (GL_APIENTRYP PFNGLGETNEXTPERFQUERYIDINTELPROC) (GLuint queryId, GLuint *nextQueryId);
+typedef void (GL_APIENTRYP PFNGLGETPERFCOUNTERINFOINTELPROC) (GLuint queryId, GLuint counterId, GLuint counterNameLength, GLchar *counterName, GLuint counterDescLength, GLchar *counterDesc, GLuint *counterOffset, GLuint *counterDataSize, GLuint *counterTypeEnum, GLuint *counterDataTypeEnum, GLuint64 *rawCounterMaxValue);
+typedef void (GL_APIENTRYP PFNGLGETPERFQUERYDATAINTELPROC) (GLuint queryHandle, GLuint flags, GLsizei dataSize, GLvoid *data, GLuint *bytesWritten);
+typedef void (GL_APIENTRYP PFNGLGETPERFQUERYIDBYNAMEINTELPROC) (GLchar *queryName, GLuint *queryId);
+typedef void (GL_APIENTRYP PFNGLGETPERFQUERYINFOINTELPROC) (GLuint queryId, GLuint queryNameLength, GLchar *queryName, GLuint *dataSize, GLuint *noCounters, GLuint *noInstances, GLuint *capsMask);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glBeginPerfQueryINTEL (GLuint queryHandle);
+GL_APICALL void GL_APIENTRY glCreatePerfQueryINTEL (GLuint queryId, GLuint *queryHandle);
+GL_APICALL void GL_APIENTRY glDeletePerfQueryINTEL (GLuint queryHandle);
+GL_APICALL void GL_APIENTRY glEndPerfQueryINTEL (GLuint queryHandle);
+GL_APICALL void GL_APIENTRY glGetFirstPerfQueryIdINTEL (GLuint *queryId);
+GL_APICALL void GL_APIENTRY glGetNextPerfQueryIdINTEL (GLuint queryId, GLuint *nextQueryId);
+GL_APICALL void GL_APIENTRY glGetPerfCounterInfoINTEL (GLuint queryId, GLuint counterId, GLuint counterNameLength, GLchar *counterName, GLuint counterDescLength, GLchar *counterDesc, GLuint *counterOffset, GLuint *counterDataSize, GLuint *counterTypeEnum, GLuint *counterDataTypeEnum, GLuint64 *rawCounterMaxValue);
+GL_APICALL void GL_APIENTRY glGetPerfQueryDataINTEL (GLuint queryHandle, GLuint flags, GLsizei dataSize, GLvoid *data, GLuint *bytesWritten);
+GL_APICALL void GL_APIENTRY glGetPerfQueryIdByNameINTEL (GLchar *queryName, GLuint *queryId);
+GL_APICALL void GL_APIENTRY glGetPerfQueryInfoINTEL (GLuint queryId, GLuint queryNameLength, GLchar *queryName, GLuint *dataSize, GLuint *noCounters, GLuint *noInstances, GLuint *capsMask);
+#endif
+#endif /* GL_INTEL_performance_query */
+
+#ifndef GL_MESA_shader_integer_functions
+#define GL_MESA_shader_integer_functions 1
+#endif /* GL_MESA_shader_integer_functions */
+
+#ifndef GL_NVX_blend_equation_advanced_multi_draw_buffers
+#define GL_NVX_blend_equation_advanced_multi_draw_buffers 1
+#endif /* GL_NVX_blend_equation_advanced_multi_draw_buffers */
+
+#ifndef GL_NV_bindless_texture
+#define GL_NV_bindless_texture 1
+typedef GLuint64 (GL_APIENTRYP PFNGLGETTEXTUREHANDLENVPROC) (GLuint texture);
+typedef GLuint64 (GL_APIENTRYP PFNGLGETTEXTURESAMPLERHANDLENVPROC) (GLuint texture, GLuint sampler);
+typedef void (GL_APIENTRYP PFNGLMAKETEXTUREHANDLERESIDENTNVPROC) (GLuint64 handle);
+typedef void (GL_APIENTRYP PFNGLMAKETEXTUREHANDLENONRESIDENTNVPROC) (GLuint64 handle);
+typedef GLuint64 (GL_APIENTRYP PFNGLGETIMAGEHANDLENVPROC) (GLuint texture, GLint level, GLboolean layered, GLint layer, GLenum format);
+typedef void (GL_APIENTRYP PFNGLMAKEIMAGEHANDLERESIDENTNVPROC) (GLuint64 handle, GLenum access);
+typedef void (GL_APIENTRYP PFNGLMAKEIMAGEHANDLENONRESIDENTNVPROC) (GLuint64 handle);
+typedef void (GL_APIENTRYP PFNGLUNIFORMHANDLEUI64NVPROC) (GLint location, GLuint64 value);
+typedef void (GL_APIENTRYP PFNGLUNIFORMHANDLEUI64VNVPROC) (GLint location, GLsizei count, const GLuint64 *value);
+typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMHANDLEUI64NVPROC) (GLuint program, GLint location, GLuint64 value);
+typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMHANDLEUI64VNVPROC) (GLuint program, GLint location, GLsizei count, const GLuint64 *values);
+typedef GLboolean (GL_APIENTRYP PFNGLISTEXTUREHANDLERESIDENTNVPROC) (GLuint64 handle);
+typedef GLboolean (GL_APIENTRYP PFNGLISIMAGEHANDLERESIDENTNVPROC) (GLuint64 handle);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL GLuint64 GL_APIENTRY glGetTextureHandleNV (GLuint texture);
+GL_APICALL GLuint64 GL_APIENTRY glGetTextureSamplerHandleNV (GLuint texture, GLuint sampler);
+GL_APICALL void GL_APIENTRY glMakeTextureHandleResidentNV (GLuint64 handle);
+GL_APICALL void GL_APIENTRY glMakeTextureHandleNonResidentNV (GLuint64 handle);
+GL_APICALL GLuint64 GL_APIENTRY glGetImageHandleNV (GLuint texture, GLint level, GLboolean layered, GLint layer, GLenum format);
+GL_APICALL void GL_APIENTRY glMakeImageHandleResidentNV (GLuint64 handle, GLenum access);
+GL_APICALL void GL_APIENTRY glMakeImageHandleNonResidentNV (GLuint64 handle);
+GL_APICALL void GL_APIENTRY glUniformHandleui64NV (GLint location, GLuint64 value);
+GL_APICALL void GL_APIENTRY glUniformHandleui64vNV (GLint location, GLsizei count, const GLuint64 *value);
+GL_APICALL void GL_APIENTRY glProgramUniformHandleui64NV (GLuint program, GLint location, GLuint64 value);
+GL_APICALL void GL_APIENTRY glProgramUniformHandleui64vNV (GLuint program, GLint location, GLsizei count, const GLuint64 *values);
+GL_APICALL GLboolean GL_APIENTRY glIsTextureHandleResidentNV (GLuint64 handle);
+GL_APICALL GLboolean GL_APIENTRY glIsImageHandleResidentNV (GLuint64 handle);
+#endif
+#endif /* GL_NV_bindless_texture */
+
+#ifndef GL_NV_blend_equation_advanced
+#define GL_NV_blend_equation_advanced 1
+#define GL_BLEND_OVERLAP_NV 0x9281
+#define GL_BLEND_PREMULTIPLIED_SRC_NV 0x9280
+#define GL_BLUE_NV 0x1905
+#define GL_COLORBURN_NV 0x929A
+#define GL_COLORDODGE_NV 0x9299
+#define GL_CONJOINT_NV 0x9284
+#define GL_CONTRAST_NV 0x92A1
+#define GL_DARKEN_NV 0x9297
+#define GL_DIFFERENCE_NV 0x929E
+#define GL_DISJOINT_NV 0x9283
+#define GL_DST_ATOP_NV 0x928F
+#define GL_DST_IN_NV 0x928B
+#define GL_DST_NV 0x9287
+#define GL_DST_OUT_NV 0x928D
+#define GL_DST_OVER_NV 0x9289
+#define GL_EXCLUSION_NV 0x92A0
+#define GL_GREEN_NV 0x1904
+#define GL_HARDLIGHT_NV 0x929B
+#define GL_HARDMIX_NV 0x92A9
+#define GL_HSL_COLOR_NV 0x92AF
+#define GL_HSL_HUE_NV 0x92AD
+#define GL_HSL_LUMINOSITY_NV 0x92B0
+#define GL_HSL_SATURATION_NV 0x92AE
+#define GL_INVERT_OVG_NV 0x92B4
+#define GL_INVERT_RGB_NV 0x92A3
+#define GL_LIGHTEN_NV 0x9298
+#define GL_LINEARBURN_NV 0x92A5
+#define GL_LINEARDODGE_NV 0x92A4
+#define GL_LINEARLIGHT_NV 0x92A7
+#define GL_MINUS_CLAMPED_NV 0x92B3
+#define GL_MINUS_NV 0x929F
+#define GL_MULTIPLY_NV 0x9294
+#define GL_OVERLAY_NV 0x9296
+#define GL_PINLIGHT_NV 0x92A8
+#define GL_PLUS_CLAMPED_ALPHA_NV 0x92B2
+#define GL_PLUS_CLAMPED_NV 0x92B1
+#define GL_PLUS_DARKER_NV 0x9292
+#define GL_PLUS_NV 0x9291
+#define GL_RED_NV 0x1903
+#define GL_SCREEN_NV 0x9295
+#define GL_SOFTLIGHT_NV 0x929C
+#define GL_SRC_ATOP_NV 0x928E
+#define GL_SRC_IN_NV 0x928A
+#define GL_SRC_NV 0x9286
+#define GL_SRC_OUT_NV 0x928C
+#define GL_SRC_OVER_NV 0x9288
+#define GL_UNCORRELATED_NV 0x9282
+#define GL_VIVIDLIGHT_NV 0x92A6
+#define GL_XOR_NV 0x1506
+typedef void (GL_APIENTRYP PFNGLBLENDPARAMETERINVPROC) (GLenum pname, GLint value);
+typedef void (GL_APIENTRYP PFNGLBLENDBARRIERNVPROC) (void);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glBlendParameteriNV (GLenum pname, GLint value);
+GL_APICALL void GL_APIENTRY glBlendBarrierNV (void);
+#endif
+#endif /* GL_NV_blend_equation_advanced */
+
+#ifndef GL_NV_blend_equation_advanced_coherent
+#define GL_NV_blend_equation_advanced_coherent 1
+#define GL_BLEND_ADVANCED_COHERENT_NV 0x9285
+#endif /* GL_NV_blend_equation_advanced_coherent */
+
+#ifndef GL_NV_conditional_render
+#define GL_NV_conditional_render 1
+#define GL_QUERY_WAIT_NV 0x8E13
+#define GL_QUERY_NO_WAIT_NV 0x8E14
+#define GL_QUERY_BY_REGION_WAIT_NV 0x8E15
+#define GL_QUERY_BY_REGION_NO_WAIT_NV 0x8E16
+typedef void (GL_APIENTRYP PFNGLBEGINCONDITIONALRENDERNVPROC) (GLuint id, GLenum mode);
+typedef void (GL_APIENTRYP PFNGLENDCONDITIONALRENDERNVPROC) (void);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glBeginConditionalRenderNV (GLuint id, GLenum mode);
+GL_APICALL void GL_APIENTRY glEndConditionalRenderNV (void);
+#endif
+#endif /* GL_NV_conditional_render */
+
+#ifndef GL_NV_conservative_raster
+#define GL_NV_conservative_raster 1
+#define GL_CONSERVATIVE_RASTERIZATION_NV 0x9346
+#define GL_SUBPIXEL_PRECISION_BIAS_X_BITS_NV 0x9347
+#define GL_SUBPIXEL_PRECISION_BIAS_Y_BITS_NV 0x9348
+#define GL_MAX_SUBPIXEL_PRECISION_BIAS_BITS_NV 0x9349
+typedef void (GL_APIENTRYP PFNGLSUBPIXELPRECISIONBIASNVPROC) (GLuint xbits, GLuint ybits);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glSubpixelPrecisionBiasNV (GLuint xbits, GLuint ybits);
+#endif
+#endif /* GL_NV_conservative_raster */
+
+#ifndef GL_NV_conservative_raster_pre_snap_triangles
+#define GL_NV_conservative_raster_pre_snap_triangles 1
+#define GL_CONSERVATIVE_RASTER_MODE_NV 0x954D
+#define GL_CONSERVATIVE_RASTER_MODE_POST_SNAP_NV 0x954E
+#define GL_CONSERVATIVE_RASTER_MODE_PRE_SNAP_TRIANGLES_NV 0x954F
+typedef void (GL_APIENTRYP PFNGLCONSERVATIVERASTERPARAMETERINVPROC) (GLenum pname, GLint param);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glConservativeRasterParameteriNV (GLenum pname, GLint param);
+#endif
+#endif /* GL_NV_conservative_raster_pre_snap_triangles */
+
+#ifndef GL_NV_copy_buffer
+#define GL_NV_copy_buffer 1
+#define GL_COPY_READ_BUFFER_NV 0x8F36
+#define GL_COPY_WRITE_BUFFER_NV 0x8F37
+typedef void (GL_APIENTRYP PFNGLCOPYBUFFERSUBDATANVPROC) (GLenum readTarget, GLenum writeTarget, GLintptr readOffset, GLintptr writeOffset, GLsizeiptr size);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glCopyBufferSubDataNV (GLenum readTarget, GLenum writeTarget, GLintptr readOffset, GLintptr writeOffset, GLsizeiptr size);
+#endif
+#endif /* GL_NV_copy_buffer */
+
+#ifndef GL_NV_coverage_sample
+#define GL_NV_coverage_sample 1
+#define GL_COVERAGE_COMPONENT_NV 0x8ED0
+#define GL_COVERAGE_COMPONENT4_NV 0x8ED1
+#define GL_COVERAGE_ATTACHMENT_NV 0x8ED2
+#define GL_COVERAGE_BUFFERS_NV 0x8ED3
+#define GL_COVERAGE_SAMPLES_NV 0x8ED4
+#define GL_COVERAGE_ALL_FRAGMENTS_NV 0x8ED5
+#define GL_COVERAGE_EDGE_FRAGMENTS_NV 0x8ED6
+#define GL_COVERAGE_AUTOMATIC_NV 0x8ED7
+#define GL_COVERAGE_BUFFER_BIT_NV 0x00008000
+typedef void (GL_APIENTRYP PFNGLCOVERAGEMASKNVPROC) (GLboolean mask);
+typedef void (GL_APIENTRYP PFNGLCOVERAGEOPERATIONNVPROC) (GLenum operation);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glCoverageMaskNV (GLboolean mask);
+GL_APICALL void GL_APIENTRY glCoverageOperationNV (GLenum operation);
+#endif
+#endif /* GL_NV_coverage_sample */
+
+#ifndef GL_NV_depth_nonlinear
+#define GL_NV_depth_nonlinear 1
+#define GL_DEPTH_COMPONENT16_NONLINEAR_NV 0x8E2C
+#endif /* GL_NV_depth_nonlinear */
+
+#ifndef GL_NV_draw_buffers
+#define GL_NV_draw_buffers 1
+#define GL_MAX_DRAW_BUFFERS_NV 0x8824
+#define GL_DRAW_BUFFER0_NV 0x8825
+#define GL_DRAW_BUFFER1_NV 0x8826
+#define GL_DRAW_BUFFER2_NV 0x8827
+#define GL_DRAW_BUFFER3_NV 0x8828
+#define GL_DRAW_BUFFER4_NV 0x8829
+#define GL_DRAW_BUFFER5_NV 0x882A
+#define GL_DRAW_BUFFER6_NV 0x882B
+#define GL_DRAW_BUFFER7_NV 0x882C
+#define GL_DRAW_BUFFER8_NV 0x882D
+#define GL_DRAW_BUFFER9_NV 0x882E
+#define GL_DRAW_BUFFER10_NV 0x882F
+#define GL_DRAW_BUFFER11_NV 0x8830
+#define GL_DRAW_BUFFER12_NV 0x8831
+#define GL_DRAW_BUFFER13_NV 0x8832
+#define GL_DRAW_BUFFER14_NV 0x8833
+#define GL_DRAW_BUFFER15_NV 0x8834
+#define GL_COLOR_ATTACHMENT0_NV 0x8CE0
+#define GL_COLOR_ATTACHMENT1_NV 0x8CE1
+#define GL_COLOR_ATTACHMENT2_NV 0x8CE2
+#define GL_COLOR_ATTACHMENT3_NV 0x8CE3
+#define GL_COLOR_ATTACHMENT4_NV 0x8CE4
+#define GL_COLOR_ATTACHMENT5_NV 0x8CE5
+#define GL_COLOR_ATTACHMENT6_NV 0x8CE6
+#define GL_COLOR_ATTACHMENT7_NV 0x8CE7
+#define GL_COLOR_ATTACHMENT8_NV 0x8CE8
+#define GL_COLOR_ATTACHMENT9_NV 0x8CE9
+#define GL_COLOR_ATTACHMENT10_NV 0x8CEA
+#define GL_COLOR_ATTACHMENT11_NV 0x8CEB
+#define GL_COLOR_ATTACHMENT12_NV 0x8CEC
+#define GL_COLOR_ATTACHMENT13_NV 0x8CED
+#define GL_COLOR_ATTACHMENT14_NV 0x8CEE
+#define GL_COLOR_ATTACHMENT15_NV 0x8CEF
+typedef void (GL_APIENTRYP PFNGLDRAWBUFFERSNVPROC) (GLsizei n, const GLenum *bufs);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glDrawBuffersNV (GLsizei n, const GLenum *bufs);
+#endif
+#endif /* GL_NV_draw_buffers */
+
+#ifndef GL_NV_draw_instanced
+#define GL_NV_draw_instanced 1
+typedef void (GL_APIENTRYP PFNGLDRAWARRAYSINSTANCEDNVPROC) (GLenum mode, GLint first, GLsizei count, GLsizei primcount);
+typedef void (GL_APIENTRYP PFNGLDRAWELEMENTSINSTANCEDNVPROC) (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei primcount);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glDrawArraysInstancedNV (GLenum mode, GLint first, GLsizei count, GLsizei primcount);
+GL_APICALL void GL_APIENTRY glDrawElementsInstancedNV (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei primcount);
+#endif
+#endif /* GL_NV_draw_instanced */
+
+#ifndef GL_NV_draw_vulkan_image
+#define GL_NV_draw_vulkan_image 1
+typedef void (GL_APIENTRY *GLVULKANPROCNV)(void);
+typedef void (GL_APIENTRYP PFNGLDRAWVKIMAGENVPROC) (GLuint64 vkImage, GLuint sampler, GLfloat x0, GLfloat y0, GLfloat x1, GLfloat y1, GLfloat z, GLfloat s0, GLfloat t0, GLfloat s1, GLfloat t1);
+typedef GLVULKANPROCNV (GL_APIENTRYP PFNGLGETVKPROCADDRNVPROC) (const GLchar *name);
+typedef void (GL_APIENTRYP PFNGLWAITVKSEMAPHORENVPROC) (GLuint64 vkSemaphore);
+typedef void (GL_APIENTRYP PFNGLSIGNALVKSEMAPHORENVPROC) (GLuint64 vkSemaphore);
+typedef void (GL_APIENTRYP PFNGLSIGNALVKFENCENVPROC) (GLuint64 vkFence);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glDrawVkImageNV (GLuint64 vkImage, GLuint sampler, GLfloat x0, GLfloat y0, GLfloat x1, GLfloat y1, GLfloat z, GLfloat s0, GLfloat t0, GLfloat s1, GLfloat t1);
+GL_APICALL GLVULKANPROCNV GL_APIENTRY glGetVkProcAddrNV (const GLchar *name);
+GL_APICALL void GL_APIENTRY glWaitVkSemaphoreNV (GLuint64 vkSemaphore);
+GL_APICALL void GL_APIENTRY glSignalVkSemaphoreNV (GLuint64 vkSemaphore);
+GL_APICALL void GL_APIENTRY glSignalVkFenceNV (GLuint64 vkFence);
+#endif
+#endif /* GL_NV_draw_vulkan_image */
+
+#ifndef GL_NV_explicit_attrib_location
+#define GL_NV_explicit_attrib_location 1
+#endif /* GL_NV_explicit_attrib_location */
+
+#ifndef GL_NV_fbo_color_attachments
+#define GL_NV_fbo_color_attachments 1
+#define GL_MAX_COLOR_ATTACHMENTS_NV 0x8CDF
+#endif /* GL_NV_fbo_color_attachments */
+
+#ifndef GL_NV_fence
+#define GL_NV_fence 1
+#define GL_ALL_COMPLETED_NV 0x84F2
+#define GL_FENCE_STATUS_NV 0x84F3
+#define GL_FENCE_CONDITION_NV 0x84F4
+typedef void (GL_APIENTRYP PFNGLDELETEFENCESNVPROC) (GLsizei n, const GLuint *fences);
+typedef void (GL_APIENTRYP PFNGLGENFENCESNVPROC) (GLsizei n, GLuint *fences);
+typedef GLboolean (GL_APIENTRYP PFNGLISFENCENVPROC) (GLuint fence);
+typedef GLboolean (GL_APIENTRYP PFNGLTESTFENCENVPROC) (GLuint fence);
+typedef void (GL_APIENTRYP PFNGLGETFENCEIVNVPROC) (GLuint fence, GLenum pname, GLint *params);
+typedef void (GL_APIENTRYP PFNGLFINISHFENCENVPROC) (GLuint fence);
+typedef void (GL_APIENTRYP PFNGLSETFENCENVPROC) (GLuint fence, GLenum condition);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glDeleteFencesNV (GLsizei n, const GLuint *fences);
+GL_APICALL void GL_APIENTRY glGenFencesNV (GLsizei n, GLuint *fences);
+GL_APICALL GLboolean GL_APIENTRY glIsFenceNV (GLuint fence);
+GL_APICALL GLboolean GL_APIENTRY glTestFenceNV (GLuint fence);
+GL_APICALL void GL_APIENTRY glGetFenceivNV (GLuint fence, GLenum pname, GLint *params);
+GL_APICALL void GL_APIENTRY glFinishFenceNV (GLuint fence);
+GL_APICALL void GL_APIENTRY glSetFenceNV (GLuint fence, GLenum condition);
+#endif
+#endif /* GL_NV_fence */
+
+#ifndef GL_NV_fill_rectangle
+#define GL_NV_fill_rectangle 1
+#define GL_FILL_RECTANGLE_NV 0x933C
+#endif /* GL_NV_fill_rectangle */
+
+#ifndef GL_NV_fragment_coverage_to_color
+#define GL_NV_fragment_coverage_to_color 1
+#define GL_FRAGMENT_COVERAGE_TO_COLOR_NV 0x92DD
+#define GL_FRAGMENT_COVERAGE_COLOR_NV 0x92DE
+typedef void (GL_APIENTRYP PFNGLFRAGMENTCOVERAGECOLORNVPROC) (GLuint color);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glFragmentCoverageColorNV (GLuint color);
+#endif
+#endif /* GL_NV_fragment_coverage_to_color */
+
+#ifndef GL_NV_fragment_shader_interlock
+#define GL_NV_fragment_shader_interlock 1
+#endif /* GL_NV_fragment_shader_interlock */
+
+#ifndef GL_NV_framebuffer_blit
+#define GL_NV_framebuffer_blit 1
+#define GL_READ_FRAMEBUFFER_NV 0x8CA8
+#define GL_DRAW_FRAMEBUFFER_NV 0x8CA9
+#define GL_DRAW_FRAMEBUFFER_BINDING_NV 0x8CA6
+#define GL_READ_FRAMEBUFFER_BINDING_NV 0x8CAA
+typedef void (GL_APIENTRYP PFNGLBLITFRAMEBUFFERNVPROC) (GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glBlitFramebufferNV (GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter);
+#endif
+#endif /* GL_NV_framebuffer_blit */
+
+#ifndef GL_NV_framebuffer_mixed_samples
+#define GL_NV_framebuffer_mixed_samples 1
+#define GL_COVERAGE_MODULATION_TABLE_NV 0x9331
+#define GL_COLOR_SAMPLES_NV 0x8E20
+#define GL_DEPTH_SAMPLES_NV 0x932D
+#define GL_STENCIL_SAMPLES_NV 0x932E
+#define GL_MIXED_DEPTH_SAMPLES_SUPPORTED_NV 0x932F
+#define GL_MIXED_STENCIL_SAMPLES_SUPPORTED_NV 0x9330
+#define GL_COVERAGE_MODULATION_NV 0x9332
+#define GL_COVERAGE_MODULATION_TABLE_SIZE_NV 0x9333
+typedef void (GL_APIENTRYP PFNGLCOVERAGEMODULATIONTABLENVPROC) (GLsizei n, const GLfloat *v);
+typedef void (GL_APIENTRYP PFNGLGETCOVERAGEMODULATIONTABLENVPROC) (GLsizei bufsize, GLfloat *v);
+typedef void (GL_APIENTRYP PFNGLCOVERAGEMODULATIONNVPROC) (GLenum components);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glCoverageModulationTableNV (GLsizei n, const GLfloat *v);
+GL_APICALL void GL_APIENTRY glGetCoverageModulationTableNV (GLsizei bufsize, GLfloat *v);
+GL_APICALL void GL_APIENTRY glCoverageModulationNV (GLenum components);
+#endif
+#endif /* GL_NV_framebuffer_mixed_samples */
+
+#ifndef GL_NV_framebuffer_multisample
+#define GL_NV_framebuffer_multisample 1
+#define GL_RENDERBUFFER_SAMPLES_NV 0x8CAB
+#define GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE_NV 0x8D56
+#define GL_MAX_SAMPLES_NV 0x8D57
+typedef void (GL_APIENTRYP PFNGLRENDERBUFFERSTORAGEMULTISAMPLENVPROC) (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glRenderbufferStorageMultisampleNV (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height);
+#endif
+#endif /* GL_NV_framebuffer_multisample */
+
+#ifndef GL_NV_generate_mipmap_sRGB
+#define GL_NV_generate_mipmap_sRGB 1
+#endif /* GL_NV_generate_mipmap_sRGB */
+
+#ifndef GL_NV_geometry_shader_passthrough
+#define GL_NV_geometry_shader_passthrough 1
+#endif /* GL_NV_geometry_shader_passthrough */
+
+#ifndef GL_NV_gpu_shader5
+#define GL_NV_gpu_shader5 1
+typedef khronos_int64_t GLint64EXT;
+typedef khronos_uint64_t GLuint64EXT;
+#define GL_INT64_NV 0x140E
+#define GL_UNSIGNED_INT64_NV 0x140F
+#define GL_INT8_NV 0x8FE0
+#define GL_INT8_VEC2_NV 0x8FE1
+#define GL_INT8_VEC3_NV 0x8FE2
+#define GL_INT8_VEC4_NV 0x8FE3
+#define GL_INT16_NV 0x8FE4
+#define GL_INT16_VEC2_NV 0x8FE5
+#define GL_INT16_VEC3_NV 0x8FE6
+#define GL_INT16_VEC4_NV 0x8FE7
+#define GL_INT64_VEC2_NV 0x8FE9
+#define GL_INT64_VEC3_NV 0x8FEA
+#define GL_INT64_VEC4_NV 0x8FEB
+#define GL_UNSIGNED_INT8_NV 0x8FEC
+#define GL_UNSIGNED_INT8_VEC2_NV 0x8FED
+#define GL_UNSIGNED_INT8_VEC3_NV 0x8FEE
+#define GL_UNSIGNED_INT8_VEC4_NV 0x8FEF
+#define GL_UNSIGNED_INT16_NV 0x8FF0
+#define GL_UNSIGNED_INT16_VEC2_NV 0x8FF1
+#define GL_UNSIGNED_INT16_VEC3_NV 0x8FF2
+#define GL_UNSIGNED_INT16_VEC4_NV 0x8FF3
+#define GL_UNSIGNED_INT64_VEC2_NV 0x8FF5
+#define GL_UNSIGNED_INT64_VEC3_NV 0x8FF6
+#define GL_UNSIGNED_INT64_VEC4_NV 0x8FF7
+#define GL_FLOAT16_NV 0x8FF8
+#define GL_FLOAT16_VEC2_NV 0x8FF9
+#define GL_FLOAT16_VEC3_NV 0x8FFA
+#define GL_FLOAT16_VEC4_NV 0x8FFB
+#define GL_PATCHES 0x000E
+typedef void (GL_APIENTRYP PFNGLUNIFORM1I64NVPROC) (GLint location, GLint64EXT x);
+typedef void (GL_APIENTRYP PFNGLUNIFORM2I64NVPROC) (GLint location, GLint64EXT x, GLint64EXT y);
+typedef void (GL_APIENTRYP PFNGLUNIFORM3I64NVPROC) (GLint location, GLint64EXT x, GLint64EXT y, GLint64EXT z);
+typedef void (GL_APIENTRYP PFNGLUNIFORM4I64NVPROC) (GLint location, GLint64EXT x, GLint64EXT y, GLint64EXT z, GLint64EXT w);
+typedef void (GL_APIENTRYP PFNGLUNIFORM1I64VNVPROC) (GLint location, GLsizei count, const GLint64EXT *value);
+typedef void (GL_APIENTRYP PFNGLUNIFORM2I64VNVPROC) (GLint location, GLsizei count, const GLint64EXT *value);
+typedef void (GL_APIENTRYP PFNGLUNIFORM3I64VNVPROC) (GLint location, GLsizei count, const GLint64EXT *value);
+typedef void (GL_APIENTRYP PFNGLUNIFORM4I64VNVPROC) (GLint location, GLsizei count, const GLint64EXT *value);
+typedef void (GL_APIENTRYP PFNGLUNIFORM1UI64NVPROC) (GLint location, GLuint64EXT x);
+typedef void (GL_APIENTRYP PFNGLUNIFORM2UI64NVPROC) (GLint location, GLuint64EXT x, GLuint64EXT y);
+typedef void (GL_APIENTRYP PFNGLUNIFORM3UI64NVPROC) (GLint location, GLuint64EXT x, GLuint64EXT y, GLuint64EXT z);
+typedef void (GL_APIENTRYP PFNGLUNIFORM4UI64NVPROC) (GLint location, GLuint64EXT x, GLuint64EXT y, GLuint64EXT z, GLuint64EXT w);
+typedef void (GL_APIENTRYP PFNGLUNIFORM1UI64VNVPROC) (GLint location, GLsizei count, const GLuint64EXT *value);
+typedef void (GL_APIENTRYP PFNGLUNIFORM2UI64VNVPROC) (GLint location, GLsizei count, const GLuint64EXT *value);
+typedef void (GL_APIENTRYP PFNGLUNIFORM3UI64VNVPROC) (GLint location, GLsizei count, const GLuint64EXT *value);
+typedef void (GL_APIENTRYP PFNGLUNIFORM4UI64VNVPROC) (GLint location, GLsizei count, const GLuint64EXT *value);
+typedef void (GL_APIENTRYP PFNGLGETUNIFORMI64VNVPROC) (GLuint program, GLint location, GLint64EXT *params);
+typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM1I64NVPROC) (GLuint program, GLint location, GLint64EXT x);
+typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM2I64NVPROC) (GLuint program, GLint location, GLint64EXT x, GLint64EXT y);
+typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM3I64NVPROC) (GLuint program, GLint location, GLint64EXT x, GLint64EXT y, GLint64EXT z);
+typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM4I64NVPROC) (GLuint program, GLint location, GLint64EXT x, GLint64EXT y, GLint64EXT z, GLint64EXT w);
+typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM1I64VNVPROC) (GLuint program, GLint location, GLsizei count, const GLint64EXT *value);
+typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM2I64VNVPROC) (GLuint program, GLint location, GLsizei count, const GLint64EXT *value);
+typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM3I64VNVPROC) (GLuint program, GLint location, GLsizei count, const GLint64EXT *value);
+typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM4I64VNVPROC) (GLuint program, GLint location, GLsizei count, const GLint64EXT *value);
+typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM1UI64NVPROC) (GLuint program, GLint location, GLuint64EXT x);
+typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM2UI64NVPROC) (GLuint program, GLint location, GLuint64EXT x, GLuint64EXT y);
+typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM3UI64NVPROC) (GLuint program, GLint location, GLuint64EXT x, GLuint64EXT y, GLuint64EXT z);
+typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM4UI64NVPROC) (GLuint program, GLint location, GLuint64EXT x, GLuint64EXT y, GLuint64EXT z, GLuint64EXT w);
+typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM1UI64VNVPROC) (GLuint program, GLint location, GLsizei count, const GLuint64EXT *value);
+typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM2UI64VNVPROC) (GLuint program, GLint location, GLsizei count, const GLuint64EXT *value);
+typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM3UI64VNVPROC) (GLuint program, GLint location, GLsizei count, const GLuint64EXT *value);
+typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM4UI64VNVPROC) (GLuint program, GLint location, GLsizei count, const GLuint64EXT *value);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glUniform1i64NV (GLint location, GLint64EXT x);
+GL_APICALL void GL_APIENTRY glUniform2i64NV (GLint location, GLint64EXT x, GLint64EXT y);
+GL_APICALL void GL_APIENTRY glUniform3i64NV (GLint location, GLint64EXT x, GLint64EXT y, GLint64EXT z);
+GL_APICALL void GL_APIENTRY glUniform4i64NV (GLint location, GLint64EXT x, GLint64EXT y, GLint64EXT z, GLint64EXT w);
+GL_APICALL void GL_APIENTRY glUniform1i64vNV (GLint location, GLsizei count, const GLint64EXT *value);
+GL_APICALL void GL_APIENTRY glUniform2i64vNV (GLint location, GLsizei count, const GLint64EXT *value);
+GL_APICALL void GL_APIENTRY glUniform3i64vNV (GLint location, GLsizei count, const GLint64EXT *value);
+GL_APICALL void GL_APIENTRY glUniform4i64vNV (GLint location, GLsizei count, const GLint64EXT *value);
+GL_APICALL void GL_APIENTRY glUniform1ui64NV (GLint location, GLuint64EXT x);
+GL_APICALL void GL_APIENTRY glUniform2ui64NV (GLint location, GLuint64EXT x, GLuint64EXT y);
+GL_APICALL void GL_APIENTRY glUniform3ui64NV (GLint location, GLuint64EXT x, GLuint64EXT y, GLuint64EXT z);
+GL_APICALL void GL_APIENTRY glUniform4ui64NV (GLint location, GLuint64EXT x, GLuint64EXT y, GLuint64EXT z, GLuint64EXT w);
+GL_APICALL void GL_APIENTRY glUniform1ui64vNV (GLint location, GLsizei count, const GLuint64EXT *value);
+GL_APICALL void GL_APIENTRY glUniform2ui64vNV (GLint location, GLsizei count, const GLuint64EXT *value);
+GL_APICALL void GL_APIENTRY glUniform3ui64vNV (GLint location, GLsizei count, const GLuint64EXT *value);
+GL_APICALL void GL_APIENTRY glUniform4ui64vNV (GLint location, GLsizei count, const GLuint64EXT *value);
+GL_APICALL void GL_APIENTRY glGetUniformi64vNV (GLuint program, GLint location, GLint64EXT *params);
+GL_APICALL void GL_APIENTRY glProgramUniform1i64NV (GLuint program, GLint location, GLint64EXT x);
+GL_APICALL void GL_APIENTRY glProgramUniform2i64NV (GLuint program, GLint location, GLint64EXT x, GLint64EXT y);
+GL_APICALL void GL_APIENTRY glProgramUniform3i64NV (GLuint program, GLint location, GLint64EXT x, GLint64EXT y, GLint64EXT z);
+GL_APICALL void GL_APIENTRY glProgramUniform4i64NV (GLuint program, GLint location, GLint64EXT x, GLint64EXT y, GLint64EXT z, GLint64EXT w);
+GL_APICALL void GL_APIENTRY glProgramUniform1i64vNV (GLuint program, GLint location, GLsizei count, const GLint64EXT *value);
+GL_APICALL void GL_APIENTRY glProgramUniform2i64vNV (GLuint program, GLint location, GLsizei count, const GLint64EXT *value);
+GL_APICALL void GL_APIENTRY glProgramUniform3i64vNV (GLuint program, GLint location, GLsizei count, const GLint64EXT *value);
+GL_APICALL void GL_APIENTRY glProgramUniform4i64vNV (GLuint program, GLint location, GLsizei count, const GLint64EXT *value);
+GL_APICALL void GL_APIENTRY glProgramUniform1ui64NV (GLuint program, GLint location, GLuint64EXT x);
+GL_APICALL void GL_APIENTRY glProgramUniform2ui64NV (GLuint program, GLint location, GLuint64EXT x, GLuint64EXT y);
+GL_APICALL void GL_APIENTRY glProgramUniform3ui64NV (GLuint program, GLint location, GLuint64EXT x, GLuint64EXT y, GLuint64EXT z);
+GL_APICALL void GL_APIENTRY glProgramUniform4ui64NV (GLuint program, GLint location, GLuint64EXT x, GLuint64EXT y, GLuint64EXT z, GLuint64EXT w);
+GL_APICALL void GL_APIENTRY glProgramUniform1ui64vNV (GLuint program, GLint location, GLsizei count, const GLuint64EXT *value);
+GL_APICALL void GL_APIENTRY glProgramUniform2ui64vNV (GLuint program, GLint location, GLsizei count, const GLuint64EXT *value);
+GL_APICALL void GL_APIENTRY glProgramUniform3ui64vNV (GLuint program, GLint location, GLsizei count, const GLuint64EXT *value);
+GL_APICALL void GL_APIENTRY glProgramUniform4ui64vNV (GLuint program, GLint location, GLsizei count, const GLuint64EXT *value);
+#endif
+#endif /* GL_NV_gpu_shader5 */
+
+#ifndef GL_NV_image_formats
+#define GL_NV_image_formats 1
+#endif /* GL_NV_image_formats */
+
+#ifndef GL_NV_instanced_arrays
+#define GL_NV_instanced_arrays 1
+#define GL_VERTEX_ATTRIB_ARRAY_DIVISOR_NV 0x88FE
+typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBDIVISORNVPROC) (GLuint index, GLuint divisor);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glVertexAttribDivisorNV (GLuint index, GLuint divisor);
+#endif
+#endif /* GL_NV_instanced_arrays */
+
+#ifndef GL_NV_internalformat_sample_query
+#define GL_NV_internalformat_sample_query 1
+#define GL_TEXTURE_2D_MULTISAMPLE 0x9100
+#define GL_TEXTURE_2D_MULTISAMPLE_ARRAY 0x9102
+#define GL_MULTISAMPLES_NV 0x9371
+#define GL_SUPERSAMPLE_SCALE_X_NV 0x9372
+#define GL_SUPERSAMPLE_SCALE_Y_NV 0x9373
+#define GL_CONFORMANT_NV 0x9374
+typedef void (GL_APIENTRYP PFNGLGETINTERNALFORMATSAMPLEIVNVPROC) (GLenum target, GLenum internalformat, GLsizei samples, GLenum pname, GLsizei bufSize, GLint *params);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glGetInternalformatSampleivNV (GLenum target, GLenum internalformat, GLsizei samples, GLenum pname, GLsizei bufSize, GLint *params);
+#endif
+#endif /* GL_NV_internalformat_sample_query */
+
+#ifndef GL_NV_non_square_matrices
+#define GL_NV_non_square_matrices 1
+#define GL_FLOAT_MAT2x3_NV 0x8B65
+#define GL_FLOAT_MAT2x4_NV 0x8B66
+#define GL_FLOAT_MAT3x2_NV 0x8B67
+#define GL_FLOAT_MAT3x4_NV 0x8B68
+#define GL_FLOAT_MAT4x2_NV 0x8B69
+#define GL_FLOAT_MAT4x3_NV 0x8B6A
+typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX2X3FVNVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX3X2FVNVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX2X4FVNVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX4X2FVNVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX3X4FVNVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX4X3FVNVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glUniformMatrix2x3fvNV (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GL_APICALL void GL_APIENTRY glUniformMatrix3x2fvNV (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GL_APICALL void GL_APIENTRY glUniformMatrix2x4fvNV (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GL_APICALL void GL_APIENTRY glUniformMatrix4x2fvNV (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GL_APICALL void GL_APIENTRY glUniformMatrix3x4fvNV (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GL_APICALL void GL_APIENTRY glUniformMatrix4x3fvNV (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+#endif
+#endif /* GL_NV_non_square_matrices */
+
+#ifndef GL_NV_path_rendering
+#define GL_NV_path_rendering 1
+#define GL_PATH_FORMAT_SVG_NV 0x9070
+#define GL_PATH_FORMAT_PS_NV 0x9071
+#define GL_STANDARD_FONT_NAME_NV 0x9072
+#define GL_SYSTEM_FONT_NAME_NV 0x9073
+#define GL_FILE_NAME_NV 0x9074
+#define GL_PATH_STROKE_WIDTH_NV 0x9075
+#define GL_PATH_END_CAPS_NV 0x9076
+#define GL_PATH_INITIAL_END_CAP_NV 0x9077
+#define GL_PATH_TERMINAL_END_CAP_NV 0x9078
+#define GL_PATH_JOIN_STYLE_NV 0x9079
+#define GL_PATH_MITER_LIMIT_NV 0x907A
+#define GL_PATH_DASH_CAPS_NV 0x907B
+#define GL_PATH_INITIAL_DASH_CAP_NV 0x907C
+#define GL_PATH_TERMINAL_DASH_CAP_NV 0x907D
+#define GL_PATH_DASH_OFFSET_NV 0x907E
+#define GL_PATH_CLIENT_LENGTH_NV 0x907F
+#define GL_PATH_FILL_MODE_NV 0x9080
+#define GL_PATH_FILL_MASK_NV 0x9081
+#define GL_PATH_FILL_COVER_MODE_NV 0x9082
+#define GL_PATH_STROKE_COVER_MODE_NV 0x9083
+#define GL_PATH_STROKE_MASK_NV 0x9084
+#define GL_COUNT_UP_NV 0x9088
+#define GL_COUNT_DOWN_NV 0x9089
+#define GL_PATH_OBJECT_BOUNDING_BOX_NV 0x908A
+#define GL_CONVEX_HULL_NV 0x908B
+#define GL_BOUNDING_BOX_NV 0x908D
+#define GL_TRANSLATE_X_NV 0x908E
+#define GL_TRANSLATE_Y_NV 0x908F
+#define GL_TRANSLATE_2D_NV 0x9090
+#define GL_TRANSLATE_3D_NV 0x9091
+#define GL_AFFINE_2D_NV 0x9092
+#define GL_AFFINE_3D_NV 0x9094
+#define GL_TRANSPOSE_AFFINE_2D_NV 0x9096
+#define GL_TRANSPOSE_AFFINE_3D_NV 0x9098
+#define GL_UTF8_NV 0x909A
+#define GL_UTF16_NV 0x909B
+#define GL_BOUNDING_BOX_OF_BOUNDING_BOXES_NV 0x909C
+#define GL_PATH_COMMAND_COUNT_NV 0x909D
+#define GL_PATH_COORD_COUNT_NV 0x909E
+#define GL_PATH_DASH_ARRAY_COUNT_NV 0x909F
+#define GL_PATH_COMPUTED_LENGTH_NV 0x90A0
+#define GL_PATH_FILL_BOUNDING_BOX_NV 0x90A1
+#define GL_PATH_STROKE_BOUNDING_BOX_NV 0x90A2
+#define GL_SQUARE_NV 0x90A3
+#define GL_ROUND_NV 0x90A4
+#define GL_TRIANGULAR_NV 0x90A5
+#define GL_BEVEL_NV 0x90A6
+#define GL_MITER_REVERT_NV 0x90A7
+#define GL_MITER_TRUNCATE_NV 0x90A8
+#define GL_SKIP_MISSING_GLYPH_NV 0x90A9
+#define GL_USE_MISSING_GLYPH_NV 0x90AA
+#define GL_PATH_ERROR_POSITION_NV 0x90AB
+#define GL_ACCUM_ADJACENT_PAIRS_NV 0x90AD
+#define GL_ADJACENT_PAIRS_NV 0x90AE
+#define GL_FIRST_TO_REST_NV 0x90AF
+#define GL_PATH_GEN_MODE_NV 0x90B0
+#define GL_PATH_GEN_COEFF_NV 0x90B1
+#define GL_PATH_GEN_COMPONENTS_NV 0x90B3
+#define GL_PATH_STENCIL_FUNC_NV 0x90B7
+#define GL_PATH_STENCIL_REF_NV 0x90B8
+#define GL_PATH_STENCIL_VALUE_MASK_NV 0x90B9
+#define GL_PATH_STENCIL_DEPTH_OFFSET_FACTOR_NV 0x90BD
+#define GL_PATH_STENCIL_DEPTH_OFFSET_UNITS_NV 0x90BE
+#define GL_PATH_COVER_DEPTH_FUNC_NV 0x90BF
+#define GL_PATH_DASH_OFFSET_RESET_NV 0x90B4
+#define GL_MOVE_TO_RESETS_NV 0x90B5
+#define GL_MOVE_TO_CONTINUES_NV 0x90B6
+#define GL_CLOSE_PATH_NV 0x00
+#define GL_MOVE_TO_NV 0x02
+#define GL_RELATIVE_MOVE_TO_NV 0x03
+#define GL_LINE_TO_NV 0x04
+#define GL_RELATIVE_LINE_TO_NV 0x05
+#define GL_HORIZONTAL_LINE_TO_NV 0x06
+#define GL_RELATIVE_HORIZONTAL_LINE_TO_NV 0x07
+#define GL_VERTICAL_LINE_TO_NV 0x08
+#define GL_RELATIVE_VERTICAL_LINE_TO_NV 0x09
+#define GL_QUADRATIC_CURVE_TO_NV 0x0A
+#define GL_RELATIVE_QUADRATIC_CURVE_TO_NV 0x0B
+#define GL_CUBIC_CURVE_TO_NV 0x0C
+#define GL_RELATIVE_CUBIC_CURVE_TO_NV 0x0D
+#define GL_SMOOTH_QUADRATIC_CURVE_TO_NV 0x0E
+#define GL_RELATIVE_SMOOTH_QUADRATIC_CURVE_TO_NV 0x0F
+#define GL_SMOOTH_CUBIC_CURVE_TO_NV 0x10
+#define GL_RELATIVE_SMOOTH_CUBIC_CURVE_TO_NV 0x11
+#define GL_SMALL_CCW_ARC_TO_NV 0x12
+#define GL_RELATIVE_SMALL_CCW_ARC_TO_NV 0x13
+#define GL_SMALL_CW_ARC_TO_NV 0x14
+#define GL_RELATIVE_SMALL_CW_ARC_TO_NV 0x15
+#define GL_LARGE_CCW_ARC_TO_NV 0x16
+#define GL_RELATIVE_LARGE_CCW_ARC_TO_NV 0x17
+#define GL_LARGE_CW_ARC_TO_NV 0x18
+#define GL_RELATIVE_LARGE_CW_ARC_TO_NV 0x19
+#define GL_RESTART_PATH_NV 0xF0
+#define GL_DUP_FIRST_CUBIC_CURVE_TO_NV 0xF2
+#define GL_DUP_LAST_CUBIC_CURVE_TO_NV 0xF4
+#define GL_RECT_NV 0xF6
+#define GL_CIRCULAR_CCW_ARC_TO_NV 0xF8
+#define GL_CIRCULAR_CW_ARC_TO_NV 0xFA
+#define GL_CIRCULAR_TANGENT_ARC_TO_NV 0xFC
+#define GL_ARC_TO_NV 0xFE
+#define GL_RELATIVE_ARC_TO_NV 0xFF
+#define GL_BOLD_BIT_NV 0x01
+#define GL_ITALIC_BIT_NV 0x02
+#define GL_GLYPH_WIDTH_BIT_NV 0x01
+#define GL_GLYPH_HEIGHT_BIT_NV 0x02
+#define GL_GLYPH_HORIZONTAL_BEARING_X_BIT_NV 0x04
+#define GL_GLYPH_HORIZONTAL_BEARING_Y_BIT_NV 0x08
+#define GL_GLYPH_HORIZONTAL_BEARING_ADVANCE_BIT_NV 0x10
+#define GL_GLYPH_VERTICAL_BEARING_X_BIT_NV 0x20
+#define GL_GLYPH_VERTICAL_BEARING_Y_BIT_NV 0x40
+#define GL_GLYPH_VERTICAL_BEARING_ADVANCE_BIT_NV 0x80
+#define GL_GLYPH_HAS_KERNING_BIT_NV 0x100
+#define GL_FONT_X_MIN_BOUNDS_BIT_NV 0x00010000
+#define GL_FONT_Y_MIN_BOUNDS_BIT_NV 0x00020000
+#define GL_FONT_X_MAX_BOUNDS_BIT_NV 0x00040000
+#define GL_FONT_Y_MAX_BOUNDS_BIT_NV 0x00080000
+#define GL_FONT_UNITS_PER_EM_BIT_NV 0x00100000
+#define GL_FONT_ASCENDER_BIT_NV 0x00200000
+#define GL_FONT_DESCENDER_BIT_NV 0x00400000
+#define GL_FONT_HEIGHT_BIT_NV 0x00800000
+#define GL_FONT_MAX_ADVANCE_WIDTH_BIT_NV 0x01000000
+#define GL_FONT_MAX_ADVANCE_HEIGHT_BIT_NV 0x02000000
+#define GL_FONT_UNDERLINE_POSITION_BIT_NV 0x04000000
+#define GL_FONT_UNDERLINE_THICKNESS_BIT_NV 0x08000000
+#define GL_FONT_HAS_KERNING_BIT_NV 0x10000000
+#define GL_ROUNDED_RECT_NV 0xE8
+#define GL_RELATIVE_ROUNDED_RECT_NV 0xE9
+#define GL_ROUNDED_RECT2_NV 0xEA
+#define GL_RELATIVE_ROUNDED_RECT2_NV 0xEB
+#define GL_ROUNDED_RECT4_NV 0xEC
+#define GL_RELATIVE_ROUNDED_RECT4_NV 0xED
+#define GL_ROUNDED_RECT8_NV 0xEE
+#define GL_RELATIVE_ROUNDED_RECT8_NV 0xEF
+#define GL_RELATIVE_RECT_NV 0xF7
+#define GL_FONT_GLYPHS_AVAILABLE_NV 0x9368
+#define GL_FONT_TARGET_UNAVAILABLE_NV 0x9369
+#define GL_FONT_UNAVAILABLE_NV 0x936A
+#define GL_FONT_UNINTELLIGIBLE_NV 0x936B
+#define GL_CONIC_CURVE_TO_NV 0x1A
+#define GL_RELATIVE_CONIC_CURVE_TO_NV 0x1B
+#define GL_FONT_NUM_GLYPH_INDICES_BIT_NV 0x20000000
+#define GL_STANDARD_FONT_FORMAT_NV 0x936C
+#define GL_PATH_PROJECTION_NV 0x1701
+#define GL_PATH_MODELVIEW_NV 0x1700
+#define GL_PATH_MODELVIEW_STACK_DEPTH_NV 0x0BA3
+#define GL_PATH_MODELVIEW_MATRIX_NV 0x0BA6
+#define GL_PATH_MAX_MODELVIEW_STACK_DEPTH_NV 0x0D36
+#define GL_PATH_TRANSPOSE_MODELVIEW_MATRIX_NV 0x84E3
+#define GL_PATH_PROJECTION_STACK_DEPTH_NV 0x0BA4
+#define GL_PATH_PROJECTION_MATRIX_NV 0x0BA7
+#define GL_PATH_MAX_PROJECTION_STACK_DEPTH_NV 0x0D38
+#define GL_PATH_TRANSPOSE_PROJECTION_MATRIX_NV 0x84E4
+#define GL_FRAGMENT_INPUT_NV 0x936D
+typedef GLuint (GL_APIENTRYP PFNGLGENPATHSNVPROC) (GLsizei range);
+typedef void (GL_APIENTRYP PFNGLDELETEPATHSNVPROC) (GLuint path, GLsizei range);
+typedef GLboolean (GL_APIENTRYP PFNGLISPATHNVPROC) (GLuint path);
+typedef void (GL_APIENTRYP PFNGLPATHCOMMANDSNVPROC) (GLuint path, GLsizei numCommands, const GLubyte *commands, GLsizei numCoords, GLenum coordType, const void *coords);
+typedef void (GL_APIENTRYP PFNGLPATHCOORDSNVPROC) (GLuint path, GLsizei numCoords, GLenum coordType, const void *coords);
+typedef void (GL_APIENTRYP PFNGLPATHSUBCOMMANDSNVPROC) (GLuint path, GLsizei commandStart, GLsizei commandsToDelete, GLsizei numCommands, const GLubyte *commands, GLsizei numCoords, GLenum coordType, const void *coords);
+typedef void (GL_APIENTRYP PFNGLPATHSUBCOORDSNVPROC) (GLuint path, GLsizei coordStart, GLsizei numCoords, GLenum coordType, const void *coords);
+typedef void (GL_APIENTRYP PFNGLPATHSTRINGNVPROC) (GLuint path, GLenum format, GLsizei length, const void *pathString);
+typedef void (GL_APIENTRYP PFNGLPATHGLYPHSNVPROC) (GLuint firstPathName, GLenum fontTarget, const void *fontName, GLbitfield fontStyle, GLsizei numGlyphs, GLenum type, const void *charcodes, GLenum handleMissingGlyphs, GLuint pathParameterTemplate, GLfloat emScale);
+typedef void (GL_APIENTRYP PFNGLPATHGLYPHRANGENVPROC) (GLuint firstPathName, GLenum fontTarget, const void *fontName, GLbitfield fontStyle, GLuint firstGlyph, GLsizei numGlyphs, GLenum handleMissingGlyphs, GLuint pathParameterTemplate, GLfloat emScale);
+typedef void (GL_APIENTRYP PFNGLWEIGHTPATHSNVPROC) (GLuint resultPath, GLsizei numPaths, const GLuint *paths, const GLfloat *weights);
+typedef void (GL_APIENTRYP PFNGLCOPYPATHNVPROC) (GLuint resultPath, GLuint srcPath);
+typedef void (GL_APIENTRYP PFNGLINTERPOLATEPATHSNVPROC) (GLuint resultPath, GLuint pathA, GLuint pathB, GLfloat weight);
+typedef void (GL_APIENTRYP PFNGLTRANSFORMPATHNVPROC) (GLuint resultPath, GLuint srcPath, GLenum transformType, const GLfloat *transformValues);
+typedef void (GL_APIENTRYP PFNGLPATHPARAMETERIVNVPROC) (GLuint path, GLenum pname, const GLint *value);
+typedef void (GL_APIENTRYP PFNGLPATHPARAMETERINVPROC) (GLuint path, GLenum pname, GLint value);
+typedef void (GL_APIENTRYP PFNGLPATHPARAMETERFVNVPROC) (GLuint path, GLenum pname, const GLfloat *value);
+typedef void (GL_APIENTRYP PFNGLPATHPARAMETERFNVPROC) (GLuint path, GLenum pname, GLfloat value);
+typedef void (GL_APIENTRYP PFNGLPATHDASHARRAYNVPROC) (GLuint path, GLsizei dashCount, const GLfloat *dashArray);
+typedef void (GL_APIENTRYP PFNGLPATHSTENCILFUNCNVPROC) (GLenum func, GLint ref, GLuint mask);
+typedef void (GL_APIENTRYP PFNGLPATHSTENCILDEPTHOFFSETNVPROC) (GLfloat factor, GLfloat units);
+typedef void (GL_APIENTRYP PFNGLSTENCILFILLPATHNVPROC) (GLuint path, GLenum fillMode, GLuint mask);
+typedef void (GL_APIENTRYP PFNGLSTENCILSTROKEPATHNVPROC) (GLuint path, GLint reference, GLuint mask);
+typedef void (GL_APIENTRYP PFNGLSTENCILFILLPATHINSTANCEDNVPROC) (GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLenum fillMode, GLuint mask, GLenum transformType, const GLfloat *transformValues);
+typedef void (GL_APIENTRYP PFNGLSTENCILSTROKEPATHINSTANCEDNVPROC) (GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLint reference, GLuint mask, GLenum transformType, const GLfloat *transformValues);
+typedef void (GL_APIENTRYP PFNGLPATHCOVERDEPTHFUNCNVPROC) (GLenum func);
+typedef void (GL_APIENTRYP PFNGLCOVERFILLPATHNVPROC) (GLuint path, GLenum coverMode);
+typedef void (GL_APIENTRYP PFNGLCOVERSTROKEPATHNVPROC) (GLuint path, GLenum coverMode);
+typedef void (GL_APIENTRYP PFNGLCOVERFILLPATHINSTANCEDNVPROC) (GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLenum coverMode, GLenum transformType, const GLfloat *transformValues);
+typedef void (GL_APIENTRYP PFNGLCOVERSTROKEPATHINSTANCEDNVPROC) (GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLenum coverMode, GLenum transformType, const GLfloat *transformValues);
+typedef void (GL_APIENTRYP PFNGLGETPATHPARAMETERIVNVPROC) (GLuint path, GLenum pname, GLint *value);
+typedef void (GL_APIENTRYP PFNGLGETPATHPARAMETERFVNVPROC) (GLuint path, GLenum pname, GLfloat *value);
+typedef void (GL_APIENTRYP PFNGLGETPATHCOMMANDSNVPROC) (GLuint path, GLubyte *commands);
+typedef void (GL_APIENTRYP PFNGLGETPATHCOORDSNVPROC) (GLuint path, GLfloat *coords);
+typedef void (GL_APIENTRYP PFNGLGETPATHDASHARRAYNVPROC) (GLuint path, GLfloat *dashArray);
+typedef void (GL_APIENTRYP PFNGLGETPATHMETRICSNVPROC) (GLbitfield metricQueryMask, GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLsizei stride, GLfloat *metrics);
+typedef void (GL_APIENTRYP PFNGLGETPATHMETRICRANGENVPROC) (GLbitfield metricQueryMask, GLuint firstPathName, GLsizei numPaths, GLsizei stride, GLfloat *metrics);
+typedef void (GL_APIENTRYP PFNGLGETPATHSPACINGNVPROC) (GLenum pathListMode, GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLfloat advanceScale, GLfloat kerningScale, GLenum transformType, GLfloat *returnedSpacing);
+typedef GLboolean (GL_APIENTRYP PFNGLISPOINTINFILLPATHNVPROC) (GLuint path, GLuint mask, GLfloat x, GLfloat y);
+typedef GLboolean (GL_APIENTRYP PFNGLISPOINTINSTROKEPATHNVPROC) (GLuint path, GLfloat x, GLfloat y);
+typedef GLfloat (GL_APIENTRYP PFNGLGETPATHLENGTHNVPROC) (GLuint path, GLsizei startSegment, GLsizei numSegments);
+typedef GLboolean (GL_APIENTRYP PFNGLPOINTALONGPATHNVPROC) (GLuint path, GLsizei startSegment, GLsizei numSegments, GLfloat distance, GLfloat *x, GLfloat *y, GLfloat *tangentX, GLfloat *tangentY);
+typedef void (GL_APIENTRYP PFNGLMATRIXLOAD3X2FNVPROC) (GLenum matrixMode, const GLfloat *m);
+typedef void (GL_APIENTRYP PFNGLMATRIXLOAD3X3FNVPROC) (GLenum matrixMode, const GLfloat *m);
+typedef void (GL_APIENTRYP PFNGLMATRIXLOADTRANSPOSE3X3FNVPROC) (GLenum matrixMode, const GLfloat *m);
+typedef void (GL_APIENTRYP PFNGLMATRIXMULT3X2FNVPROC) (GLenum matrixMode, const GLfloat *m);
+typedef void (GL_APIENTRYP PFNGLMATRIXMULT3X3FNVPROC) (GLenum matrixMode, const GLfloat *m);
+typedef void (GL_APIENTRYP PFNGLMATRIXMULTTRANSPOSE3X3FNVPROC) (GLenum matrixMode, const GLfloat *m);
+typedef void (GL_APIENTRYP PFNGLSTENCILTHENCOVERFILLPATHNVPROC) (GLuint path, GLenum fillMode, GLuint mask, GLenum coverMode);
+typedef void (GL_APIENTRYP PFNGLSTENCILTHENCOVERSTROKEPATHNVPROC) (GLuint path, GLint reference, GLuint mask, GLenum coverMode);
+typedef void (GL_APIENTRYP PFNGLSTENCILTHENCOVERFILLPATHINSTANCEDNVPROC) (GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLenum fillMode, GLuint mask, GLenum coverMode, GLenum transformType, const GLfloat *transformValues);
+typedef void (GL_APIENTRYP PFNGLSTENCILTHENCOVERSTROKEPATHINSTANCEDNVPROC) (GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLint reference, GLuint mask, GLenum coverMode, GLenum transformType, const GLfloat *transformValues);
+typedef GLenum (GL_APIENTRYP PFNGLPATHGLYPHINDEXRANGENVPROC) (GLenum fontTarget, const void *fontName, GLbitfield fontStyle, GLuint pathParameterTemplate, GLfloat emScale, GLuint baseAndCount[2]);
+typedef GLenum (GL_APIENTRYP PFNGLPATHGLYPHINDEXARRAYNVPROC) (GLuint firstPathName, GLenum fontTarget, const void *fontName, GLbitfield fontStyle, GLuint firstGlyphIndex, GLsizei numGlyphs, GLuint pathParameterTemplate, GLfloat emScale);
+typedef GLenum (GL_APIENTRYP PFNGLPATHMEMORYGLYPHINDEXARRAYNVPROC) (GLuint firstPathName, GLenum fontTarget, GLsizeiptr fontSize, const void *fontData, GLsizei faceIndex, GLuint firstGlyphIndex, GLsizei numGlyphs, GLuint pathParameterTemplate, GLfloat emScale);
+typedef void (GL_APIENTRYP PFNGLPROGRAMPATHFRAGMENTINPUTGENNVPROC) (GLuint program, GLint location, GLenum genMode, GLint components, const GLfloat *coeffs);
+typedef void (GL_APIENTRYP PFNGLGETPROGRAMRESOURCEFVNVPROC) (GLuint program, GLenum programInterface, GLuint index, GLsizei propCount, const GLenum *props, GLsizei bufSize, GLsizei *length, GLfloat *params);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL GLuint GL_APIENTRY glGenPathsNV (GLsizei range);
+GL_APICALL void GL_APIENTRY glDeletePathsNV (GLuint path, GLsizei range);
+GL_APICALL GLboolean GL_APIENTRY glIsPathNV (GLuint path);
+GL_APICALL void GL_APIENTRY glPathCommandsNV (GLuint path, GLsizei numCommands, const GLubyte *commands, GLsizei numCoords, GLenum coordType, const void *coords);
+GL_APICALL void GL_APIENTRY glPathCoordsNV (GLuint path, GLsizei numCoords, GLenum coordType, const void *coords);
+GL_APICALL void GL_APIENTRY glPathSubCommandsNV (GLuint path, GLsizei commandStart, GLsizei commandsToDelete, GLsizei numCommands, const GLubyte *commands, GLsizei numCoords, GLenum coordType, const void *coords);
+GL_APICALL void GL_APIENTRY glPathSubCoordsNV (GLuint path, GLsizei coordStart, GLsizei numCoords, GLenum coordType, const void *coords);
+GL_APICALL void GL_APIENTRY glPathStringNV (GLuint path, GLenum format, GLsizei length, const void *pathString);
+GL_APICALL void GL_APIENTRY glPathGlyphsNV (GLuint firstPathName, GLenum fontTarget, const void *fontName, GLbitfield fontStyle, GLsizei numGlyphs, GLenum type, const void *charcodes, GLenum handleMissingGlyphs, GLuint pathParameterTemplate, GLfloat emScale);
+GL_APICALL void GL_APIENTRY glPathGlyphRangeNV (GLuint firstPathName, GLenum fontTarget, const void *fontName, GLbitfield fontStyle, GLuint firstGlyph, GLsizei numGlyphs, GLenum handleMissingGlyphs, GLuint pathParameterTemplate, GLfloat emScale);
+GL_APICALL void GL_APIENTRY glWeightPathsNV (GLuint resultPath, GLsizei numPaths, const GLuint *paths, const GLfloat *weights);
+GL_APICALL void GL_APIENTRY glCopyPathNV (GLuint resultPath, GLuint srcPath);
+GL_APICALL void GL_APIENTRY glInterpolatePathsNV (GLuint
resultPath, GLuint pathA, GLuint pathB, GLfloat weight); +GL_APICALL void GL_APIENTRY glTransformPathNV (GLuint resultPath, GLuint srcPath, GLenum transformType, const GLfloat *transformValues); +GL_APICALL void GL_APIENTRY glPathParameterivNV (GLuint path, GLenum pname, const GLint *value); +GL_APICALL void GL_APIENTRY glPathParameteriNV (GLuint path, GLenum pname, GLint value); +GL_APICALL void GL_APIENTRY glPathParameterfvNV (GLuint path, GLenum pname, const GLfloat *value); +GL_APICALL void GL_APIENTRY glPathParameterfNV (GLuint path, GLenum pname, GLfloat value); +GL_APICALL void GL_APIENTRY glPathDashArrayNV (GLuint path, GLsizei dashCount, const GLfloat *dashArray); +GL_APICALL void GL_APIENTRY glPathStencilFuncNV (GLenum func, GLint ref, GLuint mask); +GL_APICALL void GL_APIENTRY glPathStencilDepthOffsetNV (GLfloat factor, GLfloat units); +GL_APICALL void GL_APIENTRY glStencilFillPathNV (GLuint path, GLenum fillMode, GLuint mask); +GL_APICALL void GL_APIENTRY glStencilStrokePathNV (GLuint path, GLint reference, GLuint mask); +GL_APICALL void GL_APIENTRY glStencilFillPathInstancedNV (GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLenum fillMode, GLuint mask, GLenum transformType, const GLfloat *transformValues); +GL_APICALL void GL_APIENTRY glStencilStrokePathInstancedNV (GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLint reference, GLuint mask, GLenum transformType, const GLfloat *transformValues); +GL_APICALL void GL_APIENTRY glPathCoverDepthFuncNV (GLenum func); +GL_APICALL void GL_APIENTRY glCoverFillPathNV (GLuint path, GLenum coverMode); +GL_APICALL void GL_APIENTRY glCoverStrokePathNV (GLuint path, GLenum coverMode); +GL_APICALL void GL_APIENTRY glCoverFillPathInstancedNV (GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLenum coverMode, GLenum transformType, const GLfloat *transformValues); +GL_APICALL void GL_APIENTRY glCoverStrokePathInstancedNV (GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLenum coverMode, GLenum transformType, const GLfloat *transformValues); +GL_APICALL void GL_APIENTRY glGetPathParameterivNV (GLuint path, GLenum pname, GLint *value); +GL_APICALL void GL_APIENTRY glGetPathParameterfvNV (GLuint path, GLenum pname, GLfloat *value); +GL_APICALL void GL_APIENTRY glGetPathCommandsNV (GLuint path, GLubyte *commands); +GL_APICALL void GL_APIENTRY glGetPathCoordsNV (GLuint path, GLfloat *coords); +GL_APICALL void GL_APIENTRY glGetPathDashArrayNV (GLuint path, GLfloat *dashArray); +GL_APICALL void GL_APIENTRY glGetPathMetricsNV (GLbitfield metricQueryMask, GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLsizei stride, GLfloat *metrics); +GL_APICALL void GL_APIENTRY glGetPathMetricRangeNV (GLbitfield metricQueryMask, GLuint firstPathName, GLsizei numPaths, GLsizei stride, GLfloat *metrics); +GL_APICALL void GL_APIENTRY glGetPathSpacingNV (GLenum pathListMode, GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLfloat advanceScale, GLfloat kerningScale, GLenum transformType, GLfloat *returnedSpacing); +GL_APICALL GLboolean GL_APIENTRY glIsPointInFillPathNV (GLuint path, GLuint mask, GLfloat x, GLfloat y); +GL_APICALL GLboolean GL_APIENTRY glIsPointInStrokePathNV (GLuint path, GLfloat x, GLfloat y); +GL_APICALL GLfloat GL_APIENTRY glGetPathLengthNV (GLuint path, GLsizei startSegment, GLsizei numSegments); +GL_APICALL GLboolean GL_APIENTRY glPointAlongPathNV (GLuint path, GLsizei startSegment, 
GLsizei numSegments, GLfloat distance, GLfloat *x, GLfloat *y, GLfloat *tangentX, GLfloat *tangentY); +GL_APICALL void GL_APIENTRY glMatrixLoad3x2fNV (GLenum matrixMode, const GLfloat *m); +GL_APICALL void GL_APIENTRY glMatrixLoad3x3fNV (GLenum matrixMode, const GLfloat *m); +GL_APICALL void GL_APIENTRY glMatrixLoadTranspose3x3fNV (GLenum matrixMode, const GLfloat *m); +GL_APICALL void GL_APIENTRY glMatrixMult3x2fNV (GLenum matrixMode, const GLfloat *m); +GL_APICALL void GL_APIENTRY glMatrixMult3x3fNV (GLenum matrixMode, const GLfloat *m); +GL_APICALL void GL_APIENTRY glMatrixMultTranspose3x3fNV (GLenum matrixMode, const GLfloat *m); +GL_APICALL void GL_APIENTRY glStencilThenCoverFillPathNV (GLuint path, GLenum fillMode, GLuint mask, GLenum coverMode); +GL_APICALL void GL_APIENTRY glStencilThenCoverStrokePathNV (GLuint path, GLint reference, GLuint mask, GLenum coverMode); +GL_APICALL void GL_APIENTRY glStencilThenCoverFillPathInstancedNV (GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLenum fillMode, GLuint mask, GLenum coverMode, GLenum transformType, const GLfloat *transformValues); +GL_APICALL void GL_APIENTRY glStencilThenCoverStrokePathInstancedNV (GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLint reference, GLuint mask, GLenum coverMode, GLenum transformType, const GLfloat *transformValues); +GL_APICALL GLenum GL_APIENTRY glPathGlyphIndexRangeNV (GLenum fontTarget, const void *fontName, GLbitfield fontStyle, GLuint pathParameterTemplate, GLfloat emScale, GLuint baseAndCount[2]); +GL_APICALL GLenum GL_APIENTRY glPathGlyphIndexArrayNV (GLuint firstPathName, GLenum fontTarget, const void *fontName, GLbitfield fontStyle, GLuint firstGlyphIndex, GLsizei numGlyphs, GLuint pathParameterTemplate, GLfloat emScale); +GL_APICALL GLenum GL_APIENTRY glPathMemoryGlyphIndexArrayNV (GLuint firstPathName, GLenum fontTarget, GLsizeiptr fontSize, const void *fontData, GLsizei faceIndex, GLuint firstGlyphIndex, GLsizei numGlyphs, GLuint pathParameterTemplate, GLfloat emScale); +GL_APICALL void GL_APIENTRY glProgramPathFragmentInputGenNV (GLuint program, GLint location, GLenum genMode, GLint components, const GLfloat *coeffs); +GL_APICALL void GL_APIENTRY glGetProgramResourcefvNV (GLuint program, GLenum programInterface, GLuint index, GLsizei propCount, const GLenum *props, GLsizei bufSize, GLsizei *length, GLfloat *params); +#endif +#endif /* GL_NV_path_rendering */ + +#ifndef GL_NV_path_rendering_shared_edge +#define GL_NV_path_rendering_shared_edge 1 +#define GL_SHARED_EDGE_NV 0xC0 +#endif /* GL_NV_path_rendering_shared_edge */ + +#ifndef GL_NV_polygon_mode +#define GL_NV_polygon_mode 1 +#define GL_POLYGON_MODE_NV 0x0B40 +#define GL_POLYGON_OFFSET_POINT_NV 0x2A01 +#define GL_POLYGON_OFFSET_LINE_NV 0x2A02 +#define GL_POINT_NV 0x1B00 +#define GL_LINE_NV 0x1B01 +#define GL_FILL_NV 0x1B02 +typedef void (GL_APIENTRYP PFNGLPOLYGONMODENVPROC) (GLenum face, GLenum mode); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glPolygonModeNV (GLenum face, GLenum mode); +#endif +#endif /* GL_NV_polygon_mode */ + +#ifndef GL_NV_read_buffer +#define GL_NV_read_buffer 1 +#define GL_READ_BUFFER_NV 0x0C02 +typedef void (GL_APIENTRYP PFNGLREADBUFFERNVPROC) (GLenum mode); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glReadBufferNV (GLenum mode); +#endif +#endif /* GL_NV_read_buffer */ + +#ifndef GL_NV_read_buffer_front +#define GL_NV_read_buffer_front 1 +#endif /* GL_NV_read_buffer_front */ + +#ifndef GL_NV_read_depth +#define GL_NV_read_depth 
1 +#endif /* GL_NV_read_depth */ + +#ifndef GL_NV_read_depth_stencil +#define GL_NV_read_depth_stencil 1 +#endif /* GL_NV_read_depth_stencil */ + +#ifndef GL_NV_read_stencil +#define GL_NV_read_stencil 1 +#endif /* GL_NV_read_stencil */ + +#ifndef GL_NV_sRGB_formats +#define GL_NV_sRGB_formats 1 +#define GL_SLUMINANCE_NV 0x8C46 +#define GL_SLUMINANCE_ALPHA_NV 0x8C44 +#define GL_SRGB8_NV 0x8C41 +#define GL_SLUMINANCE8_NV 0x8C47 +#define GL_SLUMINANCE8_ALPHA8_NV 0x8C45 +#define GL_COMPRESSED_SRGB_S3TC_DXT1_NV 0x8C4C +#define GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_NV 0x8C4D +#define GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_NV 0x8C4E +#define GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_NV 0x8C4F +#define GL_ETC1_SRGB8_NV 0x88EE +#endif /* GL_NV_sRGB_formats */ + +#ifndef GL_NV_sample_locations +#define GL_NV_sample_locations 1 +#define GL_SAMPLE_LOCATION_SUBPIXEL_BITS_NV 0x933D +#define GL_SAMPLE_LOCATION_PIXEL_GRID_WIDTH_NV 0x933E +#define GL_SAMPLE_LOCATION_PIXEL_GRID_HEIGHT_NV 0x933F +#define GL_PROGRAMMABLE_SAMPLE_LOCATION_TABLE_SIZE_NV 0x9340 +#define GL_SAMPLE_LOCATION_NV 0x8E50 +#define GL_PROGRAMMABLE_SAMPLE_LOCATION_NV 0x9341 +#define GL_FRAMEBUFFER_PROGRAMMABLE_SAMPLE_LOCATIONS_NV 0x9342 +#define GL_FRAMEBUFFER_SAMPLE_LOCATION_PIXEL_GRID_NV 0x9343 +typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERSAMPLELOCATIONSFVNVPROC) (GLenum target, GLuint start, GLsizei count, const GLfloat *v); +typedef void (GL_APIENTRYP PFNGLNAMEDFRAMEBUFFERSAMPLELOCATIONSFVNVPROC) (GLuint framebuffer, GLuint start, GLsizei count, const GLfloat *v); +typedef void (GL_APIENTRYP PFNGLRESOLVEDEPTHVALUESNVPROC) (void); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glFramebufferSampleLocationsfvNV (GLenum target, GLuint start, GLsizei count, const GLfloat *v); +GL_APICALL void GL_APIENTRY glNamedFramebufferSampleLocationsfvNV (GLuint framebuffer, GLuint start, GLsizei count, const GLfloat *v); +GL_APICALL void GL_APIENTRY glResolveDepthValuesNV (void); +#endif +#endif /* GL_NV_sample_locations */ + +#ifndef GL_NV_sample_mask_override_coverage +#define GL_NV_sample_mask_override_coverage 1 +#endif /* GL_NV_sample_mask_override_coverage */ + +#ifndef GL_NV_shader_atomic_fp16_vector +#define GL_NV_shader_atomic_fp16_vector 1 +#endif /* GL_NV_shader_atomic_fp16_vector */ + +#ifndef GL_NV_shader_noperspective_interpolation +#define GL_NV_shader_noperspective_interpolation 1 +#endif /* GL_NV_shader_noperspective_interpolation */ + +#ifndef GL_NV_shadow_samplers_array +#define GL_NV_shadow_samplers_array 1 +#define GL_SAMPLER_2D_ARRAY_SHADOW_NV 0x8DC4 +#endif /* GL_NV_shadow_samplers_array */ + +#ifndef GL_NV_shadow_samplers_cube +#define GL_NV_shadow_samplers_cube 1 +#define GL_SAMPLER_CUBE_SHADOW_NV 0x8DC5 +#endif /* GL_NV_shadow_samplers_cube */ + +#ifndef GL_NV_texture_border_clamp +#define GL_NV_texture_border_clamp 1 +#define GL_TEXTURE_BORDER_COLOR_NV 0x1004 +#define GL_CLAMP_TO_BORDER_NV 0x812D +#endif /* GL_NV_texture_border_clamp */ + +#ifndef GL_NV_texture_compression_s3tc_update +#define GL_NV_texture_compression_s3tc_update 1 +#endif /* GL_NV_texture_compression_s3tc_update */ + +#ifndef GL_NV_texture_npot_2D_mipmap +#define GL_NV_texture_npot_2D_mipmap 1 +#endif /* GL_NV_texture_npot_2D_mipmap */ + +#ifndef GL_NV_viewport_array +#define GL_NV_viewport_array 1 +#define GL_MAX_VIEWPORTS_NV 0x825B +#define GL_VIEWPORT_SUBPIXEL_BITS_NV 0x825C +#define GL_VIEWPORT_BOUNDS_RANGE_NV 0x825D +#define GL_VIEWPORT_INDEX_PROVOKING_VERTEX_NV 0x825F +typedef void (GL_APIENTRYP PFNGLVIEWPORTARRAYVNVPROC) (GLuint first, GLsizei count, 
const GLfloat *v); +typedef void (GL_APIENTRYP PFNGLVIEWPORTINDEXEDFNVPROC) (GLuint index, GLfloat x, GLfloat y, GLfloat w, GLfloat h); +typedef void (GL_APIENTRYP PFNGLVIEWPORTINDEXEDFVNVPROC) (GLuint index, const GLfloat *v); +typedef void (GL_APIENTRYP PFNGLSCISSORARRAYVNVPROC) (GLuint first, GLsizei count, const GLint *v); +typedef void (GL_APIENTRYP PFNGLSCISSORINDEXEDNVPROC) (GLuint index, GLint left, GLint bottom, GLsizei width, GLsizei height); +typedef void (GL_APIENTRYP PFNGLSCISSORINDEXEDVNVPROC) (GLuint index, const GLint *v); +typedef void (GL_APIENTRYP PFNGLDEPTHRANGEARRAYFVNVPROC) (GLuint first, GLsizei count, const GLfloat *v); +typedef void (GL_APIENTRYP PFNGLDEPTHRANGEINDEXEDFNVPROC) (GLuint index, GLfloat n, GLfloat f); +typedef void (GL_APIENTRYP PFNGLGETFLOATI_VNVPROC) (GLenum target, GLuint index, GLfloat *data); +typedef void (GL_APIENTRYP PFNGLENABLEINVPROC) (GLenum target, GLuint index); +typedef void (GL_APIENTRYP PFNGLDISABLEINVPROC) (GLenum target, GLuint index); +typedef GLboolean (GL_APIENTRYP PFNGLISENABLEDINVPROC) (GLenum target, GLuint index); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glViewportArrayvNV (GLuint first, GLsizei count, const GLfloat *v); +GL_APICALL void GL_APIENTRY glViewportIndexedfNV (GLuint index, GLfloat x, GLfloat y, GLfloat w, GLfloat h); +GL_APICALL void GL_APIENTRY glViewportIndexedfvNV (GLuint index, const GLfloat *v); +GL_APICALL void GL_APIENTRY glScissorArrayvNV (GLuint first, GLsizei count, const GLint *v); +GL_APICALL void GL_APIENTRY glScissorIndexedNV (GLuint index, GLint left, GLint bottom, GLsizei width, GLsizei height); +GL_APICALL void GL_APIENTRY glScissorIndexedvNV (GLuint index, const GLint *v); +GL_APICALL void GL_APIENTRY glDepthRangeArrayfvNV (GLuint first, GLsizei count, const GLfloat *v); +GL_APICALL void GL_APIENTRY glDepthRangeIndexedfNV (GLuint index, GLfloat n, GLfloat f); +GL_APICALL void GL_APIENTRY glGetFloati_vNV (GLenum target, GLuint index, GLfloat *data); +GL_APICALL void GL_APIENTRY glEnableiNV (GLenum target, GLuint index); +GL_APICALL void GL_APIENTRY glDisableiNV (GLenum target, GLuint index); +GL_APICALL GLboolean GL_APIENTRY glIsEnablediNV (GLenum target, GLuint index); +#endif +#endif /* GL_NV_viewport_array */ + +#ifndef GL_NV_viewport_array2 +#define GL_NV_viewport_array2 1 +#endif /* GL_NV_viewport_array2 */ + +#ifndef GL_NV_viewport_swizzle +#define GL_NV_viewport_swizzle 1 +#define GL_VIEWPORT_SWIZZLE_POSITIVE_X_NV 0x9350 +#define GL_VIEWPORT_SWIZZLE_NEGATIVE_X_NV 0x9351 +#define GL_VIEWPORT_SWIZZLE_POSITIVE_Y_NV 0x9352 +#define GL_VIEWPORT_SWIZZLE_NEGATIVE_Y_NV 0x9353 +#define GL_VIEWPORT_SWIZZLE_POSITIVE_Z_NV 0x9354 +#define GL_VIEWPORT_SWIZZLE_NEGATIVE_Z_NV 0x9355 +#define GL_VIEWPORT_SWIZZLE_POSITIVE_W_NV 0x9356 +#define GL_VIEWPORT_SWIZZLE_NEGATIVE_W_NV 0x9357 +#define GL_VIEWPORT_SWIZZLE_X_NV 0x9358 +#define GL_VIEWPORT_SWIZZLE_Y_NV 0x9359 +#define GL_VIEWPORT_SWIZZLE_Z_NV 0x935A +#define GL_VIEWPORT_SWIZZLE_W_NV 0x935B +typedef void (GL_APIENTRYP PFNGLVIEWPORTSWIZZLENVPROC) (GLuint index, GLenum swizzlex, GLenum swizzley, GLenum swizzlez, GLenum swizzlew); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glViewportSwizzleNV (GLuint index, GLenum swizzlex, GLenum swizzley, GLenum swizzlez, GLenum swizzlew); +#endif +#endif /* GL_NV_viewport_swizzle */ + +#ifndef GL_OVR_multiview +#define GL_OVR_multiview 1 +#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_NUM_VIEWS_OVR 0x9630 +#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_BASE_VIEW_INDEX_OVR 0x9632 +#define GL_MAX_VIEWS_OVR 
0x9631 +#define GL_FRAMEBUFFER_INCOMPLETE_VIEW_TARGETS_OVR 0x9633 +typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERTEXTUREMULTIVIEWOVRPROC) (GLenum target, GLenum attachment, GLuint texture, GLint level, GLint baseViewIndex, GLsizei numViews); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glFramebufferTextureMultiviewOVR (GLenum target, GLenum attachment, GLuint texture, GLint level, GLint baseViewIndex, GLsizei numViews); +#endif +#endif /* GL_OVR_multiview */ + +#ifndef GL_OVR_multiview2 +#define GL_OVR_multiview2 1 +#endif /* GL_OVR_multiview2 */ + +#ifndef GL_OVR_multiview_multisampled_render_to_texture +#define GL_OVR_multiview_multisampled_render_to_texture 1 +typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERTEXTUREMULTISAMPLEMULTIVIEWOVRPROC) (GLenum target, GLenum attachment, GLuint texture, GLint level, GLsizei samples, GLint baseViewIndex, GLsizei numViews); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glFramebufferTextureMultisampleMultiviewOVR (GLenum target, GLenum attachment, GLuint texture, GLint level, GLsizei samples, GLint baseViewIndex, GLsizei numViews); +#endif +#endif /* GL_OVR_multiview_multisampled_render_to_texture */ + +#ifndef GL_QCOM_alpha_test +#define GL_QCOM_alpha_test 1 +#define GL_ALPHA_TEST_QCOM 0x0BC0 +#define GL_ALPHA_TEST_FUNC_QCOM 0x0BC1 +#define GL_ALPHA_TEST_REF_QCOM 0x0BC2 +typedef void (GL_APIENTRYP PFNGLALPHAFUNCQCOMPROC) (GLenum func, GLclampf ref); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glAlphaFuncQCOM (GLenum func, GLclampf ref); +#endif +#endif /* GL_QCOM_alpha_test */ + +#ifndef GL_QCOM_binning_control +#define GL_QCOM_binning_control 1 +#define GL_BINNING_CONTROL_HINT_QCOM 0x8FB0 +#define GL_CPU_OPTIMIZED_QCOM 0x8FB1 +#define GL_GPU_OPTIMIZED_QCOM 0x8FB2 +#define GL_RENDER_DIRECT_TO_FRAMEBUFFER_QCOM 0x8FB3 +#endif /* GL_QCOM_binning_control */ + +#ifndef GL_QCOM_driver_control +#define GL_QCOM_driver_control 1 +typedef void (GL_APIENTRYP PFNGLGETDRIVERCONTROLSQCOMPROC) (GLint *num, GLsizei size, GLuint *driverControls); +typedef void (GL_APIENTRYP PFNGLGETDRIVERCONTROLSTRINGQCOMPROC) (GLuint driverControl, GLsizei bufSize, GLsizei *length, GLchar *driverControlString); +typedef void (GL_APIENTRYP PFNGLENABLEDRIVERCONTROLQCOMPROC) (GLuint driverControl); +typedef void (GL_APIENTRYP PFNGLDISABLEDRIVERCONTROLQCOMPROC) (GLuint driverControl); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glGetDriverControlsQCOM (GLint *num, GLsizei size, GLuint *driverControls); +GL_APICALL void GL_APIENTRY glGetDriverControlStringQCOM (GLuint driverControl, GLsizei bufSize, GLsizei *length, GLchar *driverControlString); +GL_APICALL void GL_APIENTRY glEnableDriverControlQCOM (GLuint driverControl); +GL_APICALL void GL_APIENTRY glDisableDriverControlQCOM (GLuint driverControl); +#endif +#endif /* GL_QCOM_driver_control */ + +#ifndef GL_QCOM_extended_get +#define GL_QCOM_extended_get 1 +#define GL_TEXTURE_WIDTH_QCOM 0x8BD2 +#define GL_TEXTURE_HEIGHT_QCOM 0x8BD3 +#define GL_TEXTURE_DEPTH_QCOM 0x8BD4 +#define GL_TEXTURE_INTERNAL_FORMAT_QCOM 0x8BD5 +#define GL_TEXTURE_FORMAT_QCOM 0x8BD6 +#define GL_TEXTURE_TYPE_QCOM 0x8BD7 +#define GL_TEXTURE_IMAGE_VALID_QCOM 0x8BD8 +#define GL_TEXTURE_NUM_LEVELS_QCOM 0x8BD9 +#define GL_TEXTURE_TARGET_QCOM 0x8BDA +#define GL_TEXTURE_OBJECT_VALID_QCOM 0x8BDB +#define GL_STATE_RESTORE 0x8BDC +typedef void (GL_APIENTRYP PFNGLEXTGETTEXTURESQCOMPROC) (GLuint *textures, GLint maxTextures, GLint *numTextures); +typedef void (GL_APIENTRYP PFNGLEXTGETBUFFERSQCOMPROC) (GLuint *buffers, GLint maxBuffers, 
GLint *numBuffers); +typedef void (GL_APIENTRYP PFNGLEXTGETRENDERBUFFERSQCOMPROC) (GLuint *renderbuffers, GLint maxRenderbuffers, GLint *numRenderbuffers); +typedef void (GL_APIENTRYP PFNGLEXTGETFRAMEBUFFERSQCOMPROC) (GLuint *framebuffers, GLint maxFramebuffers, GLint *numFramebuffers); +typedef void (GL_APIENTRYP PFNGLEXTGETTEXLEVELPARAMETERIVQCOMPROC) (GLuint texture, GLenum face, GLint level, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLEXTTEXOBJECTSTATEOVERRIDEIQCOMPROC) (GLenum target, GLenum pname, GLint param); +typedef void (GL_APIENTRYP PFNGLEXTGETTEXSUBIMAGEQCOMPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, void *texels); +typedef void (GL_APIENTRYP PFNGLEXTGETBUFFERPOINTERVQCOMPROC) (GLenum target, void **params); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glExtGetTexturesQCOM (GLuint *textures, GLint maxTextures, GLint *numTextures); +GL_APICALL void GL_APIENTRY glExtGetBuffersQCOM (GLuint *buffers, GLint maxBuffers, GLint *numBuffers); +GL_APICALL void GL_APIENTRY glExtGetRenderbuffersQCOM (GLuint *renderbuffers, GLint maxRenderbuffers, GLint *numRenderbuffers); +GL_APICALL void GL_APIENTRY glExtGetFramebuffersQCOM (GLuint *framebuffers, GLint maxFramebuffers, GLint *numFramebuffers); +GL_APICALL void GL_APIENTRY glExtGetTexLevelParameterivQCOM (GLuint texture, GLenum face, GLint level, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glExtTexObjectStateOverrideiQCOM (GLenum target, GLenum pname, GLint param); +GL_APICALL void GL_APIENTRY glExtGetTexSubImageQCOM (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, void *texels); +GL_APICALL void GL_APIENTRY glExtGetBufferPointervQCOM (GLenum target, void **params); +#endif +#endif /* GL_QCOM_extended_get */ + +#ifndef GL_QCOM_extended_get2 +#define GL_QCOM_extended_get2 1 +typedef void (GL_APIENTRYP PFNGLEXTGETSHADERSQCOMPROC) (GLuint *shaders, GLint maxShaders, GLint *numShaders); +typedef void (GL_APIENTRYP PFNGLEXTGETPROGRAMSQCOMPROC) (GLuint *programs, GLint maxPrograms, GLint *numPrograms); +typedef GLboolean (GL_APIENTRYP PFNGLEXTISPROGRAMBINARYQCOMPROC) (GLuint program); +typedef void (GL_APIENTRYP PFNGLEXTGETPROGRAMBINARYSOURCEQCOMPROC) (GLuint program, GLenum shadertype, GLchar *source, GLint *length); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glExtGetShadersQCOM (GLuint *shaders, GLint maxShaders, GLint *numShaders); +GL_APICALL void GL_APIENTRY glExtGetProgramsQCOM (GLuint *programs, GLint maxPrograms, GLint *numPrograms); +GL_APICALL GLboolean GL_APIENTRY glExtIsProgramBinaryQCOM (GLuint program); +GL_APICALL void GL_APIENTRY glExtGetProgramBinarySourceQCOM (GLuint program, GLenum shadertype, GLchar *source, GLint *length); +#endif +#endif /* GL_QCOM_extended_get2 */ + +#ifndef GL_QCOM_framebuffer_foveated +#define GL_QCOM_framebuffer_foveated 1 +#define GL_FOVEATION_ENABLE_BIT_QCOM 0x00000001 +#define GL_FOVEATION_SCALED_BIN_METHOD_BIT_QCOM 0x00000002 +typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERFOVEATIONCONFIGQCOMPROC) (GLuint framebuffer, GLuint numLayers, GLuint focalPointsPerLayer, GLuint requestedFeatures, GLuint *providedFeatures); +typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERFOVEATIONPARAMETERSQCOMPROC) (GLuint framebuffer, GLuint layer, GLuint focalPoint, GLfloat focalX, GLfloat focalY, GLfloat gainX, GLfloat gainY, GLfloat foveaArea); +#ifdef 
GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glFramebufferFoveationConfigQCOM (GLuint framebuffer, GLuint numLayers, GLuint focalPointsPerLayer, GLuint requestedFeatures, GLuint *providedFeatures); +GL_APICALL void GL_APIENTRY glFramebufferFoveationParametersQCOM (GLuint framebuffer, GLuint layer, GLuint focalPoint, GLfloat focalX, GLfloat focalY, GLfloat gainX, GLfloat gainY, GLfloat foveaArea); +#endif +#endif /* GL_QCOM_framebuffer_foveated */ + +#ifndef GL_QCOM_perfmon_global_mode +#define GL_QCOM_perfmon_global_mode 1 +#define GL_PERFMON_GLOBAL_MODE_QCOM 0x8FA0 +#endif /* GL_QCOM_perfmon_global_mode */ + +#ifndef GL_QCOM_shader_framebuffer_fetch_noncoherent +#define GL_QCOM_shader_framebuffer_fetch_noncoherent 1 +#define GL_FRAMEBUFFER_FETCH_NONCOHERENT_QCOM 0x96A2 +typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERFETCHBARRIERQCOMPROC) (void); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glFramebufferFetchBarrierQCOM (void); +#endif +#endif /* GL_QCOM_shader_framebuffer_fetch_noncoherent */ + +#ifndef GL_QCOM_tiled_rendering +#define GL_QCOM_tiled_rendering 1 +#define GL_COLOR_BUFFER_BIT0_QCOM 0x00000001 +#define GL_COLOR_BUFFER_BIT1_QCOM 0x00000002 +#define GL_COLOR_BUFFER_BIT2_QCOM 0x00000004 +#define GL_COLOR_BUFFER_BIT3_QCOM 0x00000008 +#define GL_COLOR_BUFFER_BIT4_QCOM 0x00000010 +#define GL_COLOR_BUFFER_BIT5_QCOM 0x00000020 +#define GL_COLOR_BUFFER_BIT6_QCOM 0x00000040 +#define GL_COLOR_BUFFER_BIT7_QCOM 0x00000080 +#define GL_DEPTH_BUFFER_BIT0_QCOM 0x00000100 +#define GL_DEPTH_BUFFER_BIT1_QCOM 0x00000200 +#define GL_DEPTH_BUFFER_BIT2_QCOM 0x00000400 +#define GL_DEPTH_BUFFER_BIT3_QCOM 0x00000800 +#define GL_DEPTH_BUFFER_BIT4_QCOM 0x00001000 +#define GL_DEPTH_BUFFER_BIT5_QCOM 0x00002000 +#define GL_DEPTH_BUFFER_BIT6_QCOM 0x00004000 +#define GL_DEPTH_BUFFER_BIT7_QCOM 0x00008000 +#define GL_STENCIL_BUFFER_BIT0_QCOM 0x00010000 +#define GL_STENCIL_BUFFER_BIT1_QCOM 0x00020000 +#define GL_STENCIL_BUFFER_BIT2_QCOM 0x00040000 +#define GL_STENCIL_BUFFER_BIT3_QCOM 0x00080000 +#define GL_STENCIL_BUFFER_BIT4_QCOM 0x00100000 +#define GL_STENCIL_BUFFER_BIT5_QCOM 0x00200000 +#define GL_STENCIL_BUFFER_BIT6_QCOM 0x00400000 +#define GL_STENCIL_BUFFER_BIT7_QCOM 0x00800000 +#define GL_MULTISAMPLE_BUFFER_BIT0_QCOM 0x01000000 +#define GL_MULTISAMPLE_BUFFER_BIT1_QCOM 0x02000000 +#define GL_MULTISAMPLE_BUFFER_BIT2_QCOM 0x04000000 +#define GL_MULTISAMPLE_BUFFER_BIT3_QCOM 0x08000000 +#define GL_MULTISAMPLE_BUFFER_BIT4_QCOM 0x10000000 +#define GL_MULTISAMPLE_BUFFER_BIT5_QCOM 0x20000000 +#define GL_MULTISAMPLE_BUFFER_BIT6_QCOM 0x40000000 +#define GL_MULTISAMPLE_BUFFER_BIT7_QCOM 0x80000000 +typedef void (GL_APIENTRYP PFNGLSTARTTILINGQCOMPROC) (GLuint x, GLuint y, GLuint width, GLuint height, GLbitfield preserveMask); +typedef void (GL_APIENTRYP PFNGLENDTILINGQCOMPROC) (GLbitfield preserveMask); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glStartTilingQCOM (GLuint x, GLuint y, GLuint width, GLuint height, GLbitfield preserveMask); +GL_APICALL void GL_APIENTRY glEndTilingQCOM (GLbitfield preserveMask); +#endif +#endif /* GL_QCOM_tiled_rendering */ + +#ifndef GL_QCOM_writeonly_rendering +#define GL_QCOM_writeonly_rendering 1 +#define GL_WRITEONLY_RENDERING_QCOM 0x8823 +#endif /* GL_QCOM_writeonly_rendering */ + +#ifndef GL_VIV_shader_binary +#define GL_VIV_shader_binary 1 +#define GL_SHADER_BINARY_VIV 0x8FC4 +#endif /* GL_VIV_shader_binary */ + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/sources/khronos/GLES2/gl2platform.h b/sources/khronos/GLES2/gl2platform.h new file mode 
100755 index 00000000..4fb8ccf2 --- /dev/null +++ b/sources/khronos/GLES2/gl2platform.h @@ -0,0 +1,38 @@ +#ifndef __gl2platform_h_ +#define __gl2platform_h_ + +/* +** Copyright (c) 2017 The Khronos Group Inc. +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +/* Platform-specific types and definitions for OpenGL ES 2.X gl2.h + * + * Adopters may modify khrplatform.h and this file to suit their platform. + * Please contribute modifications back to Khronos as pull requests on the + * public github repository: + * https://github.com/KhronosGroup/OpenGL-Registry + */ + +#include <KHR/khrplatform.h> + +#ifndef GL_APICALL +#define GL_APICALL KHRONOS_APICALL +#endif + +#ifndef GL_APIENTRY +#define GL_APIENTRY KHRONOS_APIENTRY +#endif + +#endif /* __gl2platform_h_ */ diff --git a/sources/khronos/GLES3/gl3.h b/sources/khronos/GLES3/gl3.h new file mode 100755 index 00000000..ec0e062c --- /dev/null +++ b/sources/khronos/GLES3/gl3.h @@ -0,0 +1,1211 @@ +#ifndef __gl3_h_ +#define __gl3_h_ 1 + +#ifdef __cplusplus +extern "C" { +#endif + +/* +** Copyright (c) 2013-2017 The Khronos Group Inc. +** +** Permission is hereby granted, free of charge, to any person obtaining a +** copy of this software and/or associated documentation files (the +** "Materials"), to deal in the Materials without restriction, including +** without limitation the rights to use, copy, modify, merge, publish, +** distribute, sublicense, and/or sell copies of the Materials, and to +** permit persons to whom the Materials are furnished to do so, subject to +** the following conditions: +** +** The above copyright notice and this permission notice shall be included +** in all copies or substantial portions of the Materials. +** +** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +** MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS. +*/ +/* +** This header is generated from the Khronos OpenGL / OpenGL ES XML +** API Registry.
The current version of the Registry, generator scripts +** used to make the header, and the header can be found at +** https://github.com/KhronosGroup/OpenGL-Registry +*/ + +#include <GLES3/gl3platform.h> + +#ifndef GL_APIENTRYP +#define GL_APIENTRYP GL_APIENTRY* +#endif + +#ifndef GL_GLES_PROTOTYPES +#define GL_GLES_PROTOTYPES 1 +#endif + +/* Generated on date 20170613 */ + +/* Generated C header for: + * API: gles2 + * Profile: common + * Versions considered: 2\.[0-9]|3\.0 + * Versions emitted: .* + * Default extensions included: None + * Additional extensions included: _nomatch_^ + * Extensions removed: _nomatch_^ + */ + +#ifndef GL_ES_VERSION_2_0 +#define GL_ES_VERSION_2_0 1 +#include <KHR/khrplatform.h> +typedef khronos_int8_t GLbyte; +typedef khronos_float_t GLclampf; +typedef khronos_int32_t GLfixed; +typedef short GLshort; +typedef unsigned short GLushort; +typedef void GLvoid; +typedef struct __GLsync *GLsync; +typedef khronos_int64_t GLint64; +typedef khronos_uint64_t GLuint64; +typedef unsigned int GLenum; +typedef unsigned int GLuint; +typedef char GLchar; +typedef khronos_float_t GLfloat; +typedef khronos_ssize_t GLsizeiptr; +typedef khronos_intptr_t GLintptr; +typedef unsigned int GLbitfield; +typedef int GLint; +typedef unsigned char GLboolean; +typedef int GLsizei; +typedef khronos_uint8_t GLubyte; +#define GL_DEPTH_BUFFER_BIT 0x00000100 +#define GL_STENCIL_BUFFER_BIT 0x00000400 +#define GL_COLOR_BUFFER_BIT 0x00004000 +#define GL_FALSE 0 +#define GL_TRUE 1 +#define GL_POINTS 0x0000 +#define GL_LINES 0x0001 +#define GL_LINE_LOOP 0x0002 +#define GL_LINE_STRIP 0x0003 +#define GL_TRIANGLES 0x0004 +#define GL_TRIANGLE_STRIP 0x0005 +#define GL_TRIANGLE_FAN 0x0006 +#define GL_ZERO 0 +#define GL_ONE 1 +#define GL_SRC_COLOR 0x0300 +#define GL_ONE_MINUS_SRC_COLOR 0x0301 +#define GL_SRC_ALPHA 0x0302 +#define GL_ONE_MINUS_SRC_ALPHA 0x0303 +#define GL_DST_ALPHA 0x0304 +#define GL_ONE_MINUS_DST_ALPHA 0x0305 +#define GL_DST_COLOR 0x0306 +#define GL_ONE_MINUS_DST_COLOR 0x0307 +#define GL_SRC_ALPHA_SATURATE 0x0308 +#define GL_FUNC_ADD 0x8006 +#define GL_BLEND_EQUATION 0x8009 +#define GL_BLEND_EQUATION_RGB 0x8009 +#define GL_BLEND_EQUATION_ALPHA 0x883D +#define GL_FUNC_SUBTRACT 0x800A +#define GL_FUNC_REVERSE_SUBTRACT 0x800B +#define GL_BLEND_DST_RGB 0x80C8 +#define GL_BLEND_SRC_RGB 0x80C9 +#define GL_BLEND_DST_ALPHA 0x80CA +#define GL_BLEND_SRC_ALPHA 0x80CB +#define GL_CONSTANT_COLOR 0x8001 +#define GL_ONE_MINUS_CONSTANT_COLOR 0x8002 +#define GL_CONSTANT_ALPHA 0x8003 +#define GL_ONE_MINUS_CONSTANT_ALPHA 0x8004 +#define GL_BLEND_COLOR 0x8005 +#define GL_ARRAY_BUFFER 0x8892 +#define GL_ELEMENT_ARRAY_BUFFER 0x8893 +#define GL_ARRAY_BUFFER_BINDING 0x8894 +#define GL_ELEMENT_ARRAY_BUFFER_BINDING 0x8895 +#define GL_STREAM_DRAW 0x88E0 +#define GL_STATIC_DRAW 0x88E4 +#define GL_DYNAMIC_DRAW 0x88E8 +#define GL_BUFFER_SIZE 0x8764 +#define GL_BUFFER_USAGE 0x8765 +#define GL_CURRENT_VERTEX_ATTRIB 0x8626 +#define GL_FRONT 0x0404 +#define GL_BACK 0x0405 +#define GL_FRONT_AND_BACK 0x0408 +#define GL_TEXTURE_2D 0x0DE1 +#define GL_CULL_FACE 0x0B44 +#define GL_BLEND 0x0BE2 +#define GL_DITHER 0x0BD0 +#define GL_STENCIL_TEST 0x0B90 +#define GL_DEPTH_TEST 0x0B71 +#define GL_SCISSOR_TEST 0x0C11 +#define GL_POLYGON_OFFSET_FILL 0x8037 +#define GL_SAMPLE_ALPHA_TO_COVERAGE 0x809E +#define GL_SAMPLE_COVERAGE 0x80A0 +#define GL_NO_ERROR 0 +#define GL_INVALID_ENUM 0x0500 +#define GL_INVALID_VALUE 0x0501 +#define GL_INVALID_OPERATION 0x0502 +#define GL_OUT_OF_MEMORY 0x0505 +#define GL_CW 0x0900 +#define GL_CCW 0x0901 +#define GL_LINE_WIDTH 0x0B21 +#define 
GL_ALIASED_POINT_SIZE_RANGE 0x846D +#define GL_ALIASED_LINE_WIDTH_RANGE 0x846E +#define GL_CULL_FACE_MODE 0x0B45 +#define GL_FRONT_FACE 0x0B46 +#define GL_DEPTH_RANGE 0x0B70 +#define GL_DEPTH_WRITEMASK 0x0B72 +#define GL_DEPTH_CLEAR_VALUE 0x0B73 +#define GL_DEPTH_FUNC 0x0B74 +#define GL_STENCIL_CLEAR_VALUE 0x0B91 +#define GL_STENCIL_FUNC 0x0B92 +#define GL_STENCIL_FAIL 0x0B94 +#define GL_STENCIL_PASS_DEPTH_FAIL 0x0B95 +#define GL_STENCIL_PASS_DEPTH_PASS 0x0B96 +#define GL_STENCIL_REF 0x0B97 +#define GL_STENCIL_VALUE_MASK 0x0B93 +#define GL_STENCIL_WRITEMASK 0x0B98 +#define GL_STENCIL_BACK_FUNC 0x8800 +#define GL_STENCIL_BACK_FAIL 0x8801 +#define GL_STENCIL_BACK_PASS_DEPTH_FAIL 0x8802 +#define GL_STENCIL_BACK_PASS_DEPTH_PASS 0x8803 +#define GL_STENCIL_BACK_REF 0x8CA3 +#define GL_STENCIL_BACK_VALUE_MASK 0x8CA4 +#define GL_STENCIL_BACK_WRITEMASK 0x8CA5 +#define GL_VIEWPORT 0x0BA2 +#define GL_SCISSOR_BOX 0x0C10 +#define GL_COLOR_CLEAR_VALUE 0x0C22 +#define GL_COLOR_WRITEMASK 0x0C23 +#define GL_UNPACK_ALIGNMENT 0x0CF5 +#define GL_PACK_ALIGNMENT 0x0D05 +#define GL_MAX_TEXTURE_SIZE 0x0D33 +#define GL_MAX_VIEWPORT_DIMS 0x0D3A +#define GL_SUBPIXEL_BITS 0x0D50 +#define GL_RED_BITS 0x0D52 +#define GL_GREEN_BITS 0x0D53 +#define GL_BLUE_BITS 0x0D54 +#define GL_ALPHA_BITS 0x0D55 +#define GL_DEPTH_BITS 0x0D56 +#define GL_STENCIL_BITS 0x0D57 +#define GL_POLYGON_OFFSET_UNITS 0x2A00 +#define GL_POLYGON_OFFSET_FACTOR 0x8038 +#define GL_TEXTURE_BINDING_2D 0x8069 +#define GL_SAMPLE_BUFFERS 0x80A8 +#define GL_SAMPLES 0x80A9 +#define GL_SAMPLE_COVERAGE_VALUE 0x80AA +#define GL_SAMPLE_COVERAGE_INVERT 0x80AB +#define GL_NUM_COMPRESSED_TEXTURE_FORMATS 0x86A2 +#define GL_COMPRESSED_TEXTURE_FORMATS 0x86A3 +#define GL_DONT_CARE 0x1100 +#define GL_FASTEST 0x1101 +#define GL_NICEST 0x1102 +#define GL_GENERATE_MIPMAP_HINT 0x8192 +#define GL_BYTE 0x1400 +#define GL_UNSIGNED_BYTE 0x1401 +#define GL_SHORT 0x1402 +#define GL_UNSIGNED_SHORT 0x1403 +#define GL_INT 0x1404 +#define GL_UNSIGNED_INT 0x1405 +#define GL_FLOAT 0x1406 +#define GL_FIXED 0x140C +#define GL_DEPTH_COMPONENT 0x1902 +#define GL_ALPHA 0x1906 +#define GL_RGB 0x1907 +#define GL_RGBA 0x1908 +#define GL_LUMINANCE 0x1909 +#define GL_LUMINANCE_ALPHA 0x190A +#define GL_UNSIGNED_SHORT_4_4_4_4 0x8033 +#define GL_UNSIGNED_SHORT_5_5_5_1 0x8034 +#define GL_UNSIGNED_SHORT_5_6_5 0x8363 +#define GL_FRAGMENT_SHADER 0x8B30 +#define GL_VERTEX_SHADER 0x8B31 +#define GL_MAX_VERTEX_ATTRIBS 0x8869 +#define GL_MAX_VERTEX_UNIFORM_VECTORS 0x8DFB +#define GL_MAX_VARYING_VECTORS 0x8DFC +#define GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS 0x8B4D +#define GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS 0x8B4C +#define GL_MAX_TEXTURE_IMAGE_UNITS 0x8872 +#define GL_MAX_FRAGMENT_UNIFORM_VECTORS 0x8DFD +#define GL_SHADER_TYPE 0x8B4F +#define GL_DELETE_STATUS 0x8B80 +#define GL_LINK_STATUS 0x8B82 +#define GL_VALIDATE_STATUS 0x8B83 +#define GL_ATTACHED_SHADERS 0x8B85 +#define GL_ACTIVE_UNIFORMS 0x8B86 +#define GL_ACTIVE_UNIFORM_MAX_LENGTH 0x8B87 +#define GL_ACTIVE_ATTRIBUTES 0x8B89 +#define GL_ACTIVE_ATTRIBUTE_MAX_LENGTH 0x8B8A +#define GL_SHADING_LANGUAGE_VERSION 0x8B8C +#define GL_CURRENT_PROGRAM 0x8B8D +#define GL_NEVER 0x0200 +#define GL_LESS 0x0201 +#define GL_EQUAL 0x0202 +#define GL_LEQUAL 0x0203 +#define GL_GREATER 0x0204 +#define GL_NOTEQUAL 0x0205 +#define GL_GEQUAL 0x0206 +#define GL_ALWAYS 0x0207 +#define GL_KEEP 0x1E00 +#define GL_REPLACE 0x1E01 +#define GL_INCR 0x1E02 +#define GL_DECR 0x1E03 +#define GL_INVERT 0x150A +#define GL_INCR_WRAP 0x8507 +#define GL_DECR_WRAP 0x8508 +#define GL_VENDOR 0x1F00 
+#define GL_RENDERER 0x1F01 +#define GL_VERSION 0x1F02 +#define GL_EXTENSIONS 0x1F03 +#define GL_NEAREST 0x2600 +#define GL_LINEAR 0x2601 +#define GL_NEAREST_MIPMAP_NEAREST 0x2700 +#define GL_LINEAR_MIPMAP_NEAREST 0x2701 +#define GL_NEAREST_MIPMAP_LINEAR 0x2702 +#define GL_LINEAR_MIPMAP_LINEAR 0x2703 +#define GL_TEXTURE_MAG_FILTER 0x2800 +#define GL_TEXTURE_MIN_FILTER 0x2801 +#define GL_TEXTURE_WRAP_S 0x2802 +#define GL_TEXTURE_WRAP_T 0x2803 +#define GL_TEXTURE 0x1702 +#define GL_TEXTURE_CUBE_MAP 0x8513 +#define GL_TEXTURE_BINDING_CUBE_MAP 0x8514 +#define GL_TEXTURE_CUBE_MAP_POSITIVE_X 0x8515 +#define GL_TEXTURE_CUBE_MAP_NEGATIVE_X 0x8516 +#define GL_TEXTURE_CUBE_MAP_POSITIVE_Y 0x8517 +#define GL_TEXTURE_CUBE_MAP_NEGATIVE_Y 0x8518 +#define GL_TEXTURE_CUBE_MAP_POSITIVE_Z 0x8519 +#define GL_TEXTURE_CUBE_MAP_NEGATIVE_Z 0x851A +#define GL_MAX_CUBE_MAP_TEXTURE_SIZE 0x851C +#define GL_TEXTURE0 0x84C0 +#define GL_TEXTURE1 0x84C1 +#define GL_TEXTURE2 0x84C2 +#define GL_TEXTURE3 0x84C3 +#define GL_TEXTURE4 0x84C4 +#define GL_TEXTURE5 0x84C5 +#define GL_TEXTURE6 0x84C6 +#define GL_TEXTURE7 0x84C7 +#define GL_TEXTURE8 0x84C8 +#define GL_TEXTURE9 0x84C9 +#define GL_TEXTURE10 0x84CA +#define GL_TEXTURE11 0x84CB +#define GL_TEXTURE12 0x84CC +#define GL_TEXTURE13 0x84CD +#define GL_TEXTURE14 0x84CE +#define GL_TEXTURE15 0x84CF +#define GL_TEXTURE16 0x84D0 +#define GL_TEXTURE17 0x84D1 +#define GL_TEXTURE18 0x84D2 +#define GL_TEXTURE19 0x84D3 +#define GL_TEXTURE20 0x84D4 +#define GL_TEXTURE21 0x84D5 +#define GL_TEXTURE22 0x84D6 +#define GL_TEXTURE23 0x84D7 +#define GL_TEXTURE24 0x84D8 +#define GL_TEXTURE25 0x84D9 +#define GL_TEXTURE26 0x84DA +#define GL_TEXTURE27 0x84DB +#define GL_TEXTURE28 0x84DC +#define GL_TEXTURE29 0x84DD +#define GL_TEXTURE30 0x84DE +#define GL_TEXTURE31 0x84DF +#define GL_ACTIVE_TEXTURE 0x84E0 +#define GL_REPEAT 0x2901 +#define GL_CLAMP_TO_EDGE 0x812F +#define GL_MIRRORED_REPEAT 0x8370 +#define GL_FLOAT_VEC2 0x8B50 +#define GL_FLOAT_VEC3 0x8B51 +#define GL_FLOAT_VEC4 0x8B52 +#define GL_INT_VEC2 0x8B53 +#define GL_INT_VEC3 0x8B54 +#define GL_INT_VEC4 0x8B55 +#define GL_BOOL 0x8B56 +#define GL_BOOL_VEC2 0x8B57 +#define GL_BOOL_VEC3 0x8B58 +#define GL_BOOL_VEC4 0x8B59 +#define GL_FLOAT_MAT2 0x8B5A +#define GL_FLOAT_MAT3 0x8B5B +#define GL_FLOAT_MAT4 0x8B5C +#define GL_SAMPLER_2D 0x8B5E +#define GL_SAMPLER_CUBE 0x8B60 +#define GL_VERTEX_ATTRIB_ARRAY_ENABLED 0x8622 +#define GL_VERTEX_ATTRIB_ARRAY_SIZE 0x8623 +#define GL_VERTEX_ATTRIB_ARRAY_STRIDE 0x8624 +#define GL_VERTEX_ATTRIB_ARRAY_TYPE 0x8625 +#define GL_VERTEX_ATTRIB_ARRAY_NORMALIZED 0x886A +#define GL_VERTEX_ATTRIB_ARRAY_POINTER 0x8645 +#define GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING 0x889F +#define GL_IMPLEMENTATION_COLOR_READ_TYPE 0x8B9A +#define GL_IMPLEMENTATION_COLOR_READ_FORMAT 0x8B9B +#define GL_COMPILE_STATUS 0x8B81 +#define GL_INFO_LOG_LENGTH 0x8B84 +#define GL_SHADER_SOURCE_LENGTH 0x8B88 +#define GL_SHADER_COMPILER 0x8DFA +#define GL_SHADER_BINARY_FORMATS 0x8DF8 +#define GL_NUM_SHADER_BINARY_FORMATS 0x8DF9 +#define GL_LOW_FLOAT 0x8DF0 +#define GL_MEDIUM_FLOAT 0x8DF1 +#define GL_HIGH_FLOAT 0x8DF2 +#define GL_LOW_INT 0x8DF3 +#define GL_MEDIUM_INT 0x8DF4 +#define GL_HIGH_INT 0x8DF5 +#define GL_FRAMEBUFFER 0x8D40 +#define GL_RENDERBUFFER 0x8D41 +#define GL_RGBA4 0x8056 +#define GL_RGB5_A1 0x8057 +#define GL_RGB565 0x8D62 +#define GL_DEPTH_COMPONENT16 0x81A5 +#define GL_STENCIL_INDEX8 0x8D48 +#define GL_RENDERBUFFER_WIDTH 0x8D42 +#define GL_RENDERBUFFER_HEIGHT 0x8D43 +#define GL_RENDERBUFFER_INTERNAL_FORMAT 0x8D44 +#define 
GL_RENDERBUFFER_RED_SIZE 0x8D50 +#define GL_RENDERBUFFER_GREEN_SIZE 0x8D51 +#define GL_RENDERBUFFER_BLUE_SIZE 0x8D52 +#define GL_RENDERBUFFER_ALPHA_SIZE 0x8D53 +#define GL_RENDERBUFFER_DEPTH_SIZE 0x8D54 +#define GL_RENDERBUFFER_STENCIL_SIZE 0x8D55 +#define GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE 0x8CD0 +#define GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME 0x8CD1 +#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL 0x8CD2 +#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE 0x8CD3 +#define GL_COLOR_ATTACHMENT0 0x8CE0 +#define GL_DEPTH_ATTACHMENT 0x8D00 +#define GL_STENCIL_ATTACHMENT 0x8D20 +#define GL_NONE 0 +#define GL_FRAMEBUFFER_COMPLETE 0x8CD5 +#define GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT 0x8CD6 +#define GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT 0x8CD7 +#define GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS 0x8CD9 +#define GL_FRAMEBUFFER_UNSUPPORTED 0x8CDD +#define GL_FRAMEBUFFER_BINDING 0x8CA6 +#define GL_RENDERBUFFER_BINDING 0x8CA7 +#define GL_MAX_RENDERBUFFER_SIZE 0x84E8 +#define GL_INVALID_FRAMEBUFFER_OPERATION 0x0506 +typedef void (GL_APIENTRYP PFNGLACTIVETEXTUREPROC) (GLenum texture); +typedef void (GL_APIENTRYP PFNGLATTACHSHADERPROC) (GLuint program, GLuint shader); +typedef void (GL_APIENTRYP PFNGLBINDATTRIBLOCATIONPROC) (GLuint program, GLuint index, const GLchar *name); +typedef void (GL_APIENTRYP PFNGLBINDBUFFERPROC) (GLenum target, GLuint buffer); +typedef void (GL_APIENTRYP PFNGLBINDFRAMEBUFFERPROC) (GLenum target, GLuint framebuffer); +typedef void (GL_APIENTRYP PFNGLBINDRENDERBUFFERPROC) (GLenum target, GLuint renderbuffer); +typedef void (GL_APIENTRYP PFNGLBINDTEXTUREPROC) (GLenum target, GLuint texture); +typedef void (GL_APIENTRYP PFNGLBLENDCOLORPROC) (GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha); +typedef void (GL_APIENTRYP PFNGLBLENDEQUATIONPROC) (GLenum mode); +typedef void (GL_APIENTRYP PFNGLBLENDEQUATIONSEPARATEPROC) (GLenum modeRGB, GLenum modeAlpha); +typedef void (GL_APIENTRYP PFNGLBLENDFUNCPROC) (GLenum sfactor, GLenum dfactor); +typedef void (GL_APIENTRYP PFNGLBLENDFUNCSEPARATEPROC) (GLenum sfactorRGB, GLenum dfactorRGB, GLenum sfactorAlpha, GLenum dfactorAlpha); +typedef void (GL_APIENTRYP PFNGLBUFFERDATAPROC) (GLenum target, GLsizeiptr size, const void *data, GLenum usage); +typedef void (GL_APIENTRYP PFNGLBUFFERSUBDATAPROC) (GLenum target, GLintptr offset, GLsizeiptr size, const void *data); +typedef GLenum (GL_APIENTRYP PFNGLCHECKFRAMEBUFFERSTATUSPROC) (GLenum target); +typedef void (GL_APIENTRYP PFNGLCLEARPROC) (GLbitfield mask); +typedef void (GL_APIENTRYP PFNGLCLEARCOLORPROC) (GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha); +typedef void (GL_APIENTRYP PFNGLCLEARDEPTHFPROC) (GLfloat d); +typedef void (GL_APIENTRYP PFNGLCLEARSTENCILPROC) (GLint s); +typedef void (GL_APIENTRYP PFNGLCOLORMASKPROC) (GLboolean red, GLboolean green, GLboolean blue, GLboolean alpha); +typedef void (GL_APIENTRYP PFNGLCOMPILESHADERPROC) (GLuint shader); +typedef void (GL_APIENTRYP PFNGLCOMPRESSEDTEXIMAGE2DPROC) (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLsizei imageSize, const void *data); +typedef void (GL_APIENTRYP PFNGLCOMPRESSEDTEXSUBIMAGE2DPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, const void *data); +typedef void (GL_APIENTRYP PFNGLCOPYTEXIMAGE2DPROC) (GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLsizei height, GLint border); +typedef void (GL_APIENTRYP PFNGLCOPYTEXSUBIMAGE2DPROC) (GLenum 
target, GLint level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height); +typedef GLuint (GL_APIENTRYP PFNGLCREATEPROGRAMPROC) (void); +typedef GLuint (GL_APIENTRYP PFNGLCREATESHADERPROC) (GLenum type); +typedef void (GL_APIENTRYP PFNGLCULLFACEPROC) (GLenum mode); +typedef void (GL_APIENTRYP PFNGLDELETEBUFFERSPROC) (GLsizei n, const GLuint *buffers); +typedef void (GL_APIENTRYP PFNGLDELETEFRAMEBUFFERSPROC) (GLsizei n, const GLuint *framebuffers); +typedef void (GL_APIENTRYP PFNGLDELETEPROGRAMPROC) (GLuint program); +typedef void (GL_APIENTRYP PFNGLDELETERENDERBUFFERSPROC) (GLsizei n, const GLuint *renderbuffers); +typedef void (GL_APIENTRYP PFNGLDELETESHADERPROC) (GLuint shader); +typedef void (GL_APIENTRYP PFNGLDELETETEXTURESPROC) (GLsizei n, const GLuint *textures); +typedef void (GL_APIENTRYP PFNGLDEPTHFUNCPROC) (GLenum func); +typedef void (GL_APIENTRYP PFNGLDEPTHMASKPROC) (GLboolean flag); +typedef void (GL_APIENTRYP PFNGLDEPTHRANGEFPROC) (GLfloat n, GLfloat f); +typedef void (GL_APIENTRYP PFNGLDETACHSHADERPROC) (GLuint program, GLuint shader); +typedef void (GL_APIENTRYP PFNGLDISABLEPROC) (GLenum cap); +typedef void (GL_APIENTRYP PFNGLDISABLEVERTEXATTRIBARRAYPROC) (GLuint index); +typedef void (GL_APIENTRYP PFNGLDRAWARRAYSPROC) (GLenum mode, GLint first, GLsizei count); +typedef void (GL_APIENTRYP PFNGLDRAWELEMENTSPROC) (GLenum mode, GLsizei count, GLenum type, const void *indices); +typedef void (GL_APIENTRYP PFNGLENABLEPROC) (GLenum cap); +typedef void (GL_APIENTRYP PFNGLENABLEVERTEXATTRIBARRAYPROC) (GLuint index); +typedef void (GL_APIENTRYP PFNGLFINISHPROC) (void); +typedef void (GL_APIENTRYP PFNGLFLUSHPROC) (void); +typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERRENDERBUFFERPROC) (GLenum target, GLenum attachment, GLenum renderbuffertarget, GLuint renderbuffer); +typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERTEXTURE2DPROC) (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level); +typedef void (GL_APIENTRYP PFNGLFRONTFACEPROC) (GLenum mode); +typedef void (GL_APIENTRYP PFNGLGENBUFFERSPROC) (GLsizei n, GLuint *buffers); +typedef void (GL_APIENTRYP PFNGLGENERATEMIPMAPPROC) (GLenum target); +typedef void (GL_APIENTRYP PFNGLGENFRAMEBUFFERSPROC) (GLsizei n, GLuint *framebuffers); +typedef void (GL_APIENTRYP PFNGLGENRENDERBUFFERSPROC) (GLsizei n, GLuint *renderbuffers); +typedef void (GL_APIENTRYP PFNGLGENTEXTURESPROC) (GLsizei n, GLuint *textures); +typedef void (GL_APIENTRYP PFNGLGETACTIVEATTRIBPROC) (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLint *size, GLenum *type, GLchar *name); +typedef void (GL_APIENTRYP PFNGLGETACTIVEUNIFORMPROC) (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLint *size, GLenum *type, GLchar *name); +typedef void (GL_APIENTRYP PFNGLGETATTACHEDSHADERSPROC) (GLuint program, GLsizei maxCount, GLsizei *count, GLuint *shaders); +typedef GLint (GL_APIENTRYP PFNGLGETATTRIBLOCATIONPROC) (GLuint program, const GLchar *name); +typedef void (GL_APIENTRYP PFNGLGETBOOLEANVPROC) (GLenum pname, GLboolean *data); +typedef void (GL_APIENTRYP PFNGLGETBUFFERPARAMETERIVPROC) (GLenum target, GLenum pname, GLint *params); +typedef GLenum (GL_APIENTRYP PFNGLGETERRORPROC) (void); +typedef void (GL_APIENTRYP PFNGLGETFLOATVPROC) (GLenum pname, GLfloat *data); +typedef void (GL_APIENTRYP PFNGLGETFRAMEBUFFERATTACHMENTPARAMETERIVPROC) (GLenum target, GLenum attachment, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETINTEGERVPROC) (GLenum pname, GLint *data); +typedef void 
(GL_APIENTRYP PFNGLGETPROGRAMIVPROC) (GLuint program, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETPROGRAMINFOLOGPROC) (GLuint program, GLsizei bufSize, GLsizei *length, GLchar *infoLog); +typedef void (GL_APIENTRYP PFNGLGETRENDERBUFFERPARAMETERIVPROC) (GLenum target, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETSHADERIVPROC) (GLuint shader, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETSHADERINFOLOGPROC) (GLuint shader, GLsizei bufSize, GLsizei *length, GLchar *infoLog); +typedef void (GL_APIENTRYP PFNGLGETSHADERPRECISIONFORMATPROC) (GLenum shadertype, GLenum precisiontype, GLint *range, GLint *precision); +typedef void (GL_APIENTRYP PFNGLGETSHADERSOURCEPROC) (GLuint shader, GLsizei bufSize, GLsizei *length, GLchar *source); +typedef const GLubyte *(GL_APIENTRYP PFNGLGETSTRINGPROC) (GLenum name); +typedef void (GL_APIENTRYP PFNGLGETTEXPARAMETERFVPROC) (GLenum target, GLenum pname, GLfloat *params); +typedef void (GL_APIENTRYP PFNGLGETTEXPARAMETERIVPROC) (GLenum target, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETUNIFORMFVPROC) (GLuint program, GLint location, GLfloat *params); +typedef void (GL_APIENTRYP PFNGLGETUNIFORMIVPROC) (GLuint program, GLint location, GLint *params); +typedef GLint (GL_APIENTRYP PFNGLGETUNIFORMLOCATIONPROC) (GLuint program, const GLchar *name); +typedef void (GL_APIENTRYP PFNGLGETVERTEXATTRIBFVPROC) (GLuint index, GLenum pname, GLfloat *params); +typedef void (GL_APIENTRYP PFNGLGETVERTEXATTRIBIVPROC) (GLuint index, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETVERTEXATTRIBPOINTERVPROC) (GLuint index, GLenum pname, void **pointer); +typedef void (GL_APIENTRYP PFNGLHINTPROC) (GLenum target, GLenum mode); +typedef GLboolean (GL_APIENTRYP PFNGLISBUFFERPROC) (GLuint buffer); +typedef GLboolean (GL_APIENTRYP PFNGLISENABLEDPROC) (GLenum cap); +typedef GLboolean (GL_APIENTRYP PFNGLISFRAMEBUFFERPROC) (GLuint framebuffer); +typedef GLboolean (GL_APIENTRYP PFNGLISPROGRAMPROC) (GLuint program); +typedef GLboolean (GL_APIENTRYP PFNGLISRENDERBUFFERPROC) (GLuint renderbuffer); +typedef GLboolean (GL_APIENTRYP PFNGLISSHADERPROC) (GLuint shader); +typedef GLboolean (GL_APIENTRYP PFNGLISTEXTUREPROC) (GLuint texture); +typedef void (GL_APIENTRYP PFNGLLINEWIDTHPROC) (GLfloat width); +typedef void (GL_APIENTRYP PFNGLLINKPROGRAMPROC) (GLuint program); +typedef void (GL_APIENTRYP PFNGLPIXELSTOREIPROC) (GLenum pname, GLint param); +typedef void (GL_APIENTRYP PFNGLPOLYGONOFFSETPROC) (GLfloat factor, GLfloat units); +typedef void (GL_APIENTRYP PFNGLREADPIXELSPROC) (GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, void *pixels); +typedef void (GL_APIENTRYP PFNGLRELEASESHADERCOMPILERPROC) (void); +typedef void (GL_APIENTRYP PFNGLRENDERBUFFERSTORAGEPROC) (GLenum target, GLenum internalformat, GLsizei width, GLsizei height); +typedef void (GL_APIENTRYP PFNGLSAMPLECOVERAGEPROC) (GLfloat value, GLboolean invert); +typedef void (GL_APIENTRYP PFNGLSCISSORPROC) (GLint x, GLint y, GLsizei width, GLsizei height); +typedef void (GL_APIENTRYP PFNGLSHADERBINARYPROC) (GLsizei count, const GLuint *shaders, GLenum binaryformat, const void *binary, GLsizei length); +typedef void (GL_APIENTRYP PFNGLSHADERSOURCEPROC) (GLuint shader, GLsizei count, const GLchar *const*string, const GLint *length); +typedef void (GL_APIENTRYP PFNGLSTENCILFUNCPROC) (GLenum func, GLint ref, GLuint mask); +typedef void (GL_APIENTRYP PFNGLSTENCILFUNCSEPARATEPROC) (GLenum face, GLenum func, GLint ref, 
GLuint mask); +typedef void (GL_APIENTRYP PFNGLSTENCILMASKPROC) (GLuint mask); +typedef void (GL_APIENTRYP PFNGLSTENCILMASKSEPARATEPROC) (GLenum face, GLuint mask); +typedef void (GL_APIENTRYP PFNGLSTENCILOPPROC) (GLenum fail, GLenum zfail, GLenum zpass); +typedef void (GL_APIENTRYP PFNGLSTENCILOPSEPARATEPROC) (GLenum face, GLenum sfail, GLenum dpfail, GLenum dppass); +typedef void (GL_APIENTRYP PFNGLTEXIMAGE2DPROC) (GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, const void *pixels); +typedef void (GL_APIENTRYP PFNGLTEXPARAMETERFPROC) (GLenum target, GLenum pname, GLfloat param); +typedef void (GL_APIENTRYP PFNGLTEXPARAMETERFVPROC) (GLenum target, GLenum pname, const GLfloat *params); +typedef void (GL_APIENTRYP PFNGLTEXPARAMETERIPROC) (GLenum target, GLenum pname, GLint param); +typedef void (GL_APIENTRYP PFNGLTEXPARAMETERIVPROC) (GLenum target, GLenum pname, const GLint *params); +typedef void (GL_APIENTRYP PFNGLTEXSUBIMAGE2DPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, const void *pixels); +typedef void (GL_APIENTRYP PFNGLUNIFORM1FPROC) (GLint location, GLfloat v0); +typedef void (GL_APIENTRYP PFNGLUNIFORM1FVPROC) (GLint location, GLsizei count, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM1IPROC) (GLint location, GLint v0); +typedef void (GL_APIENTRYP PFNGLUNIFORM1IVPROC) (GLint location, GLsizei count, const GLint *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM2FPROC) (GLint location, GLfloat v0, GLfloat v1); +typedef void (GL_APIENTRYP PFNGLUNIFORM2FVPROC) (GLint location, GLsizei count, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM2IPROC) (GLint location, GLint v0, GLint v1); +typedef void (GL_APIENTRYP PFNGLUNIFORM2IVPROC) (GLint location, GLsizei count, const GLint *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM3FPROC) (GLint location, GLfloat v0, GLfloat v1, GLfloat v2); +typedef void (GL_APIENTRYP PFNGLUNIFORM3FVPROC) (GLint location, GLsizei count, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM3IPROC) (GLint location, GLint v0, GLint v1, GLint v2); +typedef void (GL_APIENTRYP PFNGLUNIFORM3IVPROC) (GLint location, GLsizei count, const GLint *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM4FPROC) (GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3); +typedef void (GL_APIENTRYP PFNGLUNIFORM4FVPROC) (GLint location, GLsizei count, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM4IPROC) (GLint location, GLint v0, GLint v1, GLint v2, GLint v3); +typedef void (GL_APIENTRYP PFNGLUNIFORM4IVPROC) (GLint location, GLsizei count, const GLint *value); +typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX2FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX3FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX4FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUSEPROGRAMPROC) (GLuint program); +typedef void (GL_APIENTRYP PFNGLVALIDATEPROGRAMPROC) (GLuint program); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB1FPROC) (GLuint index, GLfloat x); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB1FVPROC) (GLuint index, const GLfloat *v); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB2FPROC) (GLuint index, GLfloat x, GLfloat y); +typedef void (GL_APIENTRYP 
PFNGLVERTEXATTRIB2FVPROC) (GLuint index, const GLfloat *v); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB3FPROC) (GLuint index, GLfloat x, GLfloat y, GLfloat z); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB3FVPROC) (GLuint index, const GLfloat *v); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB4FPROC) (GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB4FVPROC) (GLuint index, const GLfloat *v); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBPOINTERPROC) (GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, const void *pointer); +typedef void (GL_APIENTRYP PFNGLVIEWPORTPROC) (GLint x, GLint y, GLsizei width, GLsizei height); +#if GL_GLES_PROTOTYPES +GL_APICALL void GL_APIENTRY glActiveTexture (GLenum texture); +GL_APICALL void GL_APIENTRY glAttachShader (GLuint program, GLuint shader); +GL_APICALL void GL_APIENTRY glBindAttribLocation (GLuint program, GLuint index, const GLchar *name); +GL_APICALL void GL_APIENTRY glBindBuffer (GLenum target, GLuint buffer); +GL_APICALL void GL_APIENTRY glBindFramebuffer (GLenum target, GLuint framebuffer); +GL_APICALL void GL_APIENTRY glBindRenderbuffer (GLenum target, GLuint renderbuffer); +GL_APICALL void GL_APIENTRY glBindTexture (GLenum target, GLuint texture); +GL_APICALL void GL_APIENTRY glBlendColor (GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha); +GL_APICALL void GL_APIENTRY glBlendEquation (GLenum mode); +GL_APICALL void GL_APIENTRY glBlendEquationSeparate (GLenum modeRGB, GLenum modeAlpha); +GL_APICALL void GL_APIENTRY glBlendFunc (GLenum sfactor, GLenum dfactor); +GL_APICALL void GL_APIENTRY glBlendFuncSeparate (GLenum sfactorRGB, GLenum dfactorRGB, GLenum sfactorAlpha, GLenum dfactorAlpha); +GL_APICALL void GL_APIENTRY glBufferData (GLenum target, GLsizeiptr size, const void *data, GLenum usage); +GL_APICALL void GL_APIENTRY glBufferSubData (GLenum target, GLintptr offset, GLsizeiptr size, const void *data); +GL_APICALL GLenum GL_APIENTRY glCheckFramebufferStatus (GLenum target); +GL_APICALL void GL_APIENTRY glClear (GLbitfield mask); +GL_APICALL void GL_APIENTRY glClearColor (GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha); +GL_APICALL void GL_APIENTRY glClearDepthf (GLfloat d); +GL_APICALL void GL_APIENTRY glClearStencil (GLint s); +GL_APICALL void GL_APIENTRY glColorMask (GLboolean red, GLboolean green, GLboolean blue, GLboolean alpha); +GL_APICALL void GL_APIENTRY glCompileShader (GLuint shader); +GL_APICALL void GL_APIENTRY glCompressedTexImage2D (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLsizei imageSize, const void *data); +GL_APICALL void GL_APIENTRY glCompressedTexSubImage2D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, const void *data); +GL_APICALL void GL_APIENTRY glCopyTexImage2D (GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLsizei height, GLint border); +GL_APICALL void GL_APIENTRY glCopyTexSubImage2D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height); +GL_APICALL GLuint GL_APIENTRY glCreateProgram (void); +GL_APICALL GLuint GL_APIENTRY glCreateShader (GLenum type); +GL_APICALL void GL_APIENTRY glCullFace (GLenum mode); +GL_APICALL void GL_APIENTRY glDeleteBuffers (GLsizei n, const GLuint *buffers); +GL_APICALL void GL_APIENTRY glDeleteFramebuffers (GLsizei n, const GLuint *framebuffers); +GL_APICALL void 
GL_APIENTRY glDeleteProgram (GLuint program); +GL_APICALL void GL_APIENTRY glDeleteRenderbuffers (GLsizei n, const GLuint *renderbuffers); +GL_APICALL void GL_APIENTRY glDeleteShader (GLuint shader); +GL_APICALL void GL_APIENTRY glDeleteTextures (GLsizei n, const GLuint *textures); +GL_APICALL void GL_APIENTRY glDepthFunc (GLenum func); +GL_APICALL void GL_APIENTRY glDepthMask (GLboolean flag); +GL_APICALL void GL_APIENTRY glDepthRangef (GLfloat n, GLfloat f); +GL_APICALL void GL_APIENTRY glDetachShader (GLuint program, GLuint shader); +GL_APICALL void GL_APIENTRY glDisable (GLenum cap); +GL_APICALL void GL_APIENTRY glDisableVertexAttribArray (GLuint index); +GL_APICALL void GL_APIENTRY glDrawArrays (GLenum mode, GLint first, GLsizei count); +GL_APICALL void GL_APIENTRY glDrawElements (GLenum mode, GLsizei count, GLenum type, const void *indices); +GL_APICALL void GL_APIENTRY glEnable (GLenum cap); +GL_APICALL void GL_APIENTRY glEnableVertexAttribArray (GLuint index); +GL_APICALL void GL_APIENTRY glFinish (void); +GL_APICALL void GL_APIENTRY glFlush (void); +GL_APICALL void GL_APIENTRY glFramebufferRenderbuffer (GLenum target, GLenum attachment, GLenum renderbuffertarget, GLuint renderbuffer); +GL_APICALL void GL_APIENTRY glFramebufferTexture2D (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level); +GL_APICALL void GL_APIENTRY glFrontFace (GLenum mode); +GL_APICALL void GL_APIENTRY glGenBuffers (GLsizei n, GLuint *buffers); +GL_APICALL void GL_APIENTRY glGenerateMipmap (GLenum target); +GL_APICALL void GL_APIENTRY glGenFramebuffers (GLsizei n, GLuint *framebuffers); +GL_APICALL void GL_APIENTRY glGenRenderbuffers (GLsizei n, GLuint *renderbuffers); +GL_APICALL void GL_APIENTRY glGenTextures (GLsizei n, GLuint *textures); +GL_APICALL void GL_APIENTRY glGetActiveAttrib (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLint *size, GLenum *type, GLchar *name); +GL_APICALL void GL_APIENTRY glGetActiveUniform (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLint *size, GLenum *type, GLchar *name); +GL_APICALL void GL_APIENTRY glGetAttachedShaders (GLuint program, GLsizei maxCount, GLsizei *count, GLuint *shaders); +GL_APICALL GLint GL_APIENTRY glGetAttribLocation (GLuint program, const GLchar *name); +GL_APICALL void GL_APIENTRY glGetBooleanv (GLenum pname, GLboolean *data); +GL_APICALL void GL_APIENTRY glGetBufferParameteriv (GLenum target, GLenum pname, GLint *params); +GL_APICALL GLenum GL_APIENTRY glGetError (void); +GL_APICALL void GL_APIENTRY glGetFloatv (GLenum pname, GLfloat *data); +GL_APICALL void GL_APIENTRY glGetFramebufferAttachmentParameteriv (GLenum target, GLenum attachment, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetIntegerv (GLenum pname, GLint *data); +GL_APICALL void GL_APIENTRY glGetProgramiv (GLuint program, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetProgramInfoLog (GLuint program, GLsizei bufSize, GLsizei *length, GLchar *infoLog); +GL_APICALL void GL_APIENTRY glGetRenderbufferParameteriv (GLenum target, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetShaderiv (GLuint shader, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetShaderInfoLog (GLuint shader, GLsizei bufSize, GLsizei *length, GLchar *infoLog); +GL_APICALL void GL_APIENTRY glGetShaderPrecisionFormat (GLenum shadertype, GLenum precisiontype, GLint *range, GLint *precision); +GL_APICALL void GL_APIENTRY glGetShaderSource (GLuint shader, GLsizei bufSize, GLsizei *length, GLchar 
*source); +GL_APICALL const GLubyte *GL_APIENTRY glGetString (GLenum name); +GL_APICALL void GL_APIENTRY glGetTexParameterfv (GLenum target, GLenum pname, GLfloat *params); +GL_APICALL void GL_APIENTRY glGetTexParameteriv (GLenum target, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetUniformfv (GLuint program, GLint location, GLfloat *params); +GL_APICALL void GL_APIENTRY glGetUniformiv (GLuint program, GLint location, GLint *params); +GL_APICALL GLint GL_APIENTRY glGetUniformLocation (GLuint program, const GLchar *name); +GL_APICALL void GL_APIENTRY glGetVertexAttribfv (GLuint index, GLenum pname, GLfloat *params); +GL_APICALL void GL_APIENTRY glGetVertexAttribiv (GLuint index, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetVertexAttribPointerv (GLuint index, GLenum pname, void **pointer); +GL_APICALL void GL_APIENTRY glHint (GLenum target, GLenum mode); +GL_APICALL GLboolean GL_APIENTRY glIsBuffer (GLuint buffer); +GL_APICALL GLboolean GL_APIENTRY glIsEnabled (GLenum cap); +GL_APICALL GLboolean GL_APIENTRY glIsFramebuffer (GLuint framebuffer); +GL_APICALL GLboolean GL_APIENTRY glIsProgram (GLuint program); +GL_APICALL GLboolean GL_APIENTRY glIsRenderbuffer (GLuint renderbuffer); +GL_APICALL GLboolean GL_APIENTRY glIsShader (GLuint shader); +GL_APICALL GLboolean GL_APIENTRY glIsTexture (GLuint texture); +GL_APICALL void GL_APIENTRY glLineWidth (GLfloat width); +GL_APICALL void GL_APIENTRY glLinkProgram (GLuint program); +GL_APICALL void GL_APIENTRY glPixelStorei (GLenum pname, GLint param); +GL_APICALL void GL_APIENTRY glPolygonOffset (GLfloat factor, GLfloat units); +GL_APICALL void GL_APIENTRY glReadPixels (GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, void *pixels); +GL_APICALL void GL_APIENTRY glReleaseShaderCompiler (void); +GL_APICALL void GL_APIENTRY glRenderbufferStorage (GLenum target, GLenum internalformat, GLsizei width, GLsizei height); +GL_APICALL void GL_APIENTRY glSampleCoverage (GLfloat value, GLboolean invert); +GL_APICALL void GL_APIENTRY glScissor (GLint x, GLint y, GLsizei width, GLsizei height); +GL_APICALL void GL_APIENTRY glShaderBinary (GLsizei count, const GLuint *shaders, GLenum binaryformat, const void *binary, GLsizei length); +GL_APICALL void GL_APIENTRY glShaderSource (GLuint shader, GLsizei count, const GLchar *const*string, const GLint *length); +GL_APICALL void GL_APIENTRY glStencilFunc (GLenum func, GLint ref, GLuint mask); +GL_APICALL void GL_APIENTRY glStencilFuncSeparate (GLenum face, GLenum func, GLint ref, GLuint mask); +GL_APICALL void GL_APIENTRY glStencilMask (GLuint mask); +GL_APICALL void GL_APIENTRY glStencilMaskSeparate (GLenum face, GLuint mask); +GL_APICALL void GL_APIENTRY glStencilOp (GLenum fail, GLenum zfail, GLenum zpass); +GL_APICALL void GL_APIENTRY glStencilOpSeparate (GLenum face, GLenum sfail, GLenum dpfail, GLenum dppass); +GL_APICALL void GL_APIENTRY glTexImage2D (GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, const void *pixels); +GL_APICALL void GL_APIENTRY glTexParameterf (GLenum target, GLenum pname, GLfloat param); +GL_APICALL void GL_APIENTRY glTexParameterfv (GLenum target, GLenum pname, const GLfloat *params); +GL_APICALL void GL_APIENTRY glTexParameteri (GLenum target, GLenum pname, GLint param); +GL_APICALL void GL_APIENTRY glTexParameteriv (GLenum target, GLenum pname, const GLint *params); +GL_APICALL void GL_APIENTRY glTexSubImage2D (GLenum target, GLint level, GLint xoffset, 
GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, const void *pixels); +GL_APICALL void GL_APIENTRY glUniform1f (GLint location, GLfloat v0); +GL_APICALL void GL_APIENTRY glUniform1fv (GLint location, GLsizei count, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniform1i (GLint location, GLint v0); +GL_APICALL void GL_APIENTRY glUniform1iv (GLint location, GLsizei count, const GLint *value); +GL_APICALL void GL_APIENTRY glUniform2f (GLint location, GLfloat v0, GLfloat v1); +GL_APICALL void GL_APIENTRY glUniform2fv (GLint location, GLsizei count, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniform2i (GLint location, GLint v0, GLint v1); +GL_APICALL void GL_APIENTRY glUniform2iv (GLint location, GLsizei count, const GLint *value); +GL_APICALL void GL_APIENTRY glUniform3f (GLint location, GLfloat v0, GLfloat v1, GLfloat v2); +GL_APICALL void GL_APIENTRY glUniform3fv (GLint location, GLsizei count, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniform3i (GLint location, GLint v0, GLint v1, GLint v2); +GL_APICALL void GL_APIENTRY glUniform3iv (GLint location, GLsizei count, const GLint *value); +GL_APICALL void GL_APIENTRY glUniform4f (GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3); +GL_APICALL void GL_APIENTRY glUniform4fv (GLint location, GLsizei count, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniform4i (GLint location, GLint v0, GLint v1, GLint v2, GLint v3); +GL_APICALL void GL_APIENTRY glUniform4iv (GLint location, GLsizei count, const GLint *value); +GL_APICALL void GL_APIENTRY glUniformMatrix2fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniformMatrix3fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniformMatrix4fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUseProgram (GLuint program); +GL_APICALL void GL_APIENTRY glValidateProgram (GLuint program); +GL_APICALL void GL_APIENTRY glVertexAttrib1f (GLuint index, GLfloat x); +GL_APICALL void GL_APIENTRY glVertexAttrib1fv (GLuint index, const GLfloat *v); +GL_APICALL void GL_APIENTRY glVertexAttrib2f (GLuint index, GLfloat x, GLfloat y); +GL_APICALL void GL_APIENTRY glVertexAttrib2fv (GLuint index, const GLfloat *v); +GL_APICALL void GL_APIENTRY glVertexAttrib3f (GLuint index, GLfloat x, GLfloat y, GLfloat z); +GL_APICALL void GL_APIENTRY glVertexAttrib3fv (GLuint index, const GLfloat *v); +GL_APICALL void GL_APIENTRY glVertexAttrib4f (GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w); +GL_APICALL void GL_APIENTRY glVertexAttrib4fv (GLuint index, const GLfloat *v); +GL_APICALL void GL_APIENTRY glVertexAttribPointer (GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, const void *pointer); +GL_APICALL void GL_APIENTRY glViewport (GLint x, GLint y, GLsizei width, GLsizei height); +#endif +#endif /* GL_ES_VERSION_2_0 */ + +#ifndef GL_ES_VERSION_3_0 +#define GL_ES_VERSION_3_0 1 +typedef unsigned short GLhalf; +#define GL_READ_BUFFER 0x0C02 +#define GL_UNPACK_ROW_LENGTH 0x0CF2 +#define GL_UNPACK_SKIP_ROWS 0x0CF3 +#define GL_UNPACK_SKIP_PIXELS 0x0CF4 +#define GL_PACK_ROW_LENGTH 0x0D02 +#define GL_PACK_SKIP_ROWS 0x0D03 +#define GL_PACK_SKIP_PIXELS 0x0D04 +#define GL_COLOR 0x1800 +#define GL_DEPTH 0x1801 +#define GL_STENCIL 0x1802 +#define GL_RED 0x1903 +#define GL_RGB8 0x8051 +#define GL_RGBA8 0x8058 +#define GL_RGB10_A2 0x8059 +#define 
GL_TEXTURE_BINDING_3D 0x806A +#define GL_UNPACK_SKIP_IMAGES 0x806D +#define GL_UNPACK_IMAGE_HEIGHT 0x806E +#define GL_TEXTURE_3D 0x806F +#define GL_TEXTURE_WRAP_R 0x8072 +#define GL_MAX_3D_TEXTURE_SIZE 0x8073 +#define GL_UNSIGNED_INT_2_10_10_10_REV 0x8368 +#define GL_MAX_ELEMENTS_VERTICES 0x80E8 +#define GL_MAX_ELEMENTS_INDICES 0x80E9 +#define GL_TEXTURE_MIN_LOD 0x813A +#define GL_TEXTURE_MAX_LOD 0x813B +#define GL_TEXTURE_BASE_LEVEL 0x813C +#define GL_TEXTURE_MAX_LEVEL 0x813D +#define GL_MIN 0x8007 +#define GL_MAX 0x8008 +#define GL_DEPTH_COMPONENT24 0x81A6 +#define GL_MAX_TEXTURE_LOD_BIAS 0x84FD +#define GL_TEXTURE_COMPARE_MODE 0x884C +#define GL_TEXTURE_COMPARE_FUNC 0x884D +#define GL_CURRENT_QUERY 0x8865 +#define GL_QUERY_RESULT 0x8866 +#define GL_QUERY_RESULT_AVAILABLE 0x8867 +#define GL_BUFFER_MAPPED 0x88BC +#define GL_BUFFER_MAP_POINTER 0x88BD +#define GL_STREAM_READ 0x88E1 +#define GL_STREAM_COPY 0x88E2 +#define GL_STATIC_READ 0x88E5 +#define GL_STATIC_COPY 0x88E6 +#define GL_DYNAMIC_READ 0x88E9 +#define GL_DYNAMIC_COPY 0x88EA +#define GL_MAX_DRAW_BUFFERS 0x8824 +#define GL_DRAW_BUFFER0 0x8825 +#define GL_DRAW_BUFFER1 0x8826 +#define GL_DRAW_BUFFER2 0x8827 +#define GL_DRAW_BUFFER3 0x8828 +#define GL_DRAW_BUFFER4 0x8829 +#define GL_DRAW_BUFFER5 0x882A +#define GL_DRAW_BUFFER6 0x882B +#define GL_DRAW_BUFFER7 0x882C +#define GL_DRAW_BUFFER8 0x882D +#define GL_DRAW_BUFFER9 0x882E +#define GL_DRAW_BUFFER10 0x882F +#define GL_DRAW_BUFFER11 0x8830 +#define GL_DRAW_BUFFER12 0x8831 +#define GL_DRAW_BUFFER13 0x8832 +#define GL_DRAW_BUFFER14 0x8833 +#define GL_DRAW_BUFFER15 0x8834 +#define GL_MAX_FRAGMENT_UNIFORM_COMPONENTS 0x8B49 +#define GL_MAX_VERTEX_UNIFORM_COMPONENTS 0x8B4A +#define GL_SAMPLER_3D 0x8B5F +#define GL_SAMPLER_2D_SHADOW 0x8B62 +#define GL_FRAGMENT_SHADER_DERIVATIVE_HINT 0x8B8B +#define GL_PIXEL_PACK_BUFFER 0x88EB +#define GL_PIXEL_UNPACK_BUFFER 0x88EC +#define GL_PIXEL_PACK_BUFFER_BINDING 0x88ED +#define GL_PIXEL_UNPACK_BUFFER_BINDING 0x88EF +#define GL_FLOAT_MAT2x3 0x8B65 +#define GL_FLOAT_MAT2x4 0x8B66 +#define GL_FLOAT_MAT3x2 0x8B67 +#define GL_FLOAT_MAT3x4 0x8B68 +#define GL_FLOAT_MAT4x2 0x8B69 +#define GL_FLOAT_MAT4x3 0x8B6A +#define GL_SRGB 0x8C40 +#define GL_SRGB8 0x8C41 +#define GL_SRGB8_ALPHA8 0x8C43 +#define GL_COMPARE_REF_TO_TEXTURE 0x884E +#define GL_MAJOR_VERSION 0x821B +#define GL_MINOR_VERSION 0x821C +#define GL_NUM_EXTENSIONS 0x821D +#define GL_RGBA32F 0x8814 +#define GL_RGB32F 0x8815 +#define GL_RGBA16F 0x881A +#define GL_RGB16F 0x881B +#define GL_VERTEX_ATTRIB_ARRAY_INTEGER 0x88FD +#define GL_MAX_ARRAY_TEXTURE_LAYERS 0x88FF +#define GL_MIN_PROGRAM_TEXEL_OFFSET 0x8904 +#define GL_MAX_PROGRAM_TEXEL_OFFSET 0x8905 +#define GL_MAX_VARYING_COMPONENTS 0x8B4B +#define GL_TEXTURE_2D_ARRAY 0x8C1A +#define GL_TEXTURE_BINDING_2D_ARRAY 0x8C1D +#define GL_R11F_G11F_B10F 0x8C3A +#define GL_UNSIGNED_INT_10F_11F_11F_REV 0x8C3B +#define GL_RGB9_E5 0x8C3D +#define GL_UNSIGNED_INT_5_9_9_9_REV 0x8C3E +#define GL_TRANSFORM_FEEDBACK_VARYING_MAX_LENGTH 0x8C76 +#define GL_TRANSFORM_FEEDBACK_BUFFER_MODE 0x8C7F +#define GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_COMPONENTS 0x8C80 +#define GL_TRANSFORM_FEEDBACK_VARYINGS 0x8C83 +#define GL_TRANSFORM_FEEDBACK_BUFFER_START 0x8C84 +#define GL_TRANSFORM_FEEDBACK_BUFFER_SIZE 0x8C85 +#define GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN 0x8C88 +#define GL_RASTERIZER_DISCARD 0x8C89 +#define GL_MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS 0x8C8A +#define GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_ATTRIBS 0x8C8B +#define GL_INTERLEAVED_ATTRIBS 0x8C8C +#define 
GL_SEPARATE_ATTRIBS 0x8C8D +#define GL_TRANSFORM_FEEDBACK_BUFFER 0x8C8E +#define GL_TRANSFORM_FEEDBACK_BUFFER_BINDING 0x8C8F +#define GL_RGBA32UI 0x8D70 +#define GL_RGB32UI 0x8D71 +#define GL_RGBA16UI 0x8D76 +#define GL_RGB16UI 0x8D77 +#define GL_RGBA8UI 0x8D7C +#define GL_RGB8UI 0x8D7D +#define GL_RGBA32I 0x8D82 +#define GL_RGB32I 0x8D83 +#define GL_RGBA16I 0x8D88 +#define GL_RGB16I 0x8D89 +#define GL_RGBA8I 0x8D8E +#define GL_RGB8I 0x8D8F +#define GL_RED_INTEGER 0x8D94 +#define GL_RGB_INTEGER 0x8D98 +#define GL_RGBA_INTEGER 0x8D99 +#define GL_SAMPLER_2D_ARRAY 0x8DC1 +#define GL_SAMPLER_2D_ARRAY_SHADOW 0x8DC4 +#define GL_SAMPLER_CUBE_SHADOW 0x8DC5 +#define GL_UNSIGNED_INT_VEC2 0x8DC6 +#define GL_UNSIGNED_INT_VEC3 0x8DC7 +#define GL_UNSIGNED_INT_VEC4 0x8DC8 +#define GL_INT_SAMPLER_2D 0x8DCA +#define GL_INT_SAMPLER_3D 0x8DCB +#define GL_INT_SAMPLER_CUBE 0x8DCC +#define GL_INT_SAMPLER_2D_ARRAY 0x8DCF +#define GL_UNSIGNED_INT_SAMPLER_2D 0x8DD2 +#define GL_UNSIGNED_INT_SAMPLER_3D 0x8DD3 +#define GL_UNSIGNED_INT_SAMPLER_CUBE 0x8DD4 +#define GL_UNSIGNED_INT_SAMPLER_2D_ARRAY 0x8DD7 +#define GL_BUFFER_ACCESS_FLAGS 0x911F +#define GL_BUFFER_MAP_LENGTH 0x9120 +#define GL_BUFFER_MAP_OFFSET 0x9121 +#define GL_DEPTH_COMPONENT32F 0x8CAC +#define GL_DEPTH32F_STENCIL8 0x8CAD +#define GL_FLOAT_32_UNSIGNED_INT_24_8_REV 0x8DAD +#define GL_FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING 0x8210 +#define GL_FRAMEBUFFER_ATTACHMENT_COMPONENT_TYPE 0x8211 +#define GL_FRAMEBUFFER_ATTACHMENT_RED_SIZE 0x8212 +#define GL_FRAMEBUFFER_ATTACHMENT_GREEN_SIZE 0x8213 +#define GL_FRAMEBUFFER_ATTACHMENT_BLUE_SIZE 0x8214 +#define GL_FRAMEBUFFER_ATTACHMENT_ALPHA_SIZE 0x8215 +#define GL_FRAMEBUFFER_ATTACHMENT_DEPTH_SIZE 0x8216 +#define GL_FRAMEBUFFER_ATTACHMENT_STENCIL_SIZE 0x8217 +#define GL_FRAMEBUFFER_DEFAULT 0x8218 +#define GL_FRAMEBUFFER_UNDEFINED 0x8219 +#define GL_DEPTH_STENCIL_ATTACHMENT 0x821A +#define GL_DEPTH_STENCIL 0x84F9 +#define GL_UNSIGNED_INT_24_8 0x84FA +#define GL_DEPTH24_STENCIL8 0x88F0 +#define GL_UNSIGNED_NORMALIZED 0x8C17 +#define GL_DRAW_FRAMEBUFFER_BINDING 0x8CA6 +#define GL_READ_FRAMEBUFFER 0x8CA8 +#define GL_DRAW_FRAMEBUFFER 0x8CA9 +#define GL_READ_FRAMEBUFFER_BINDING 0x8CAA +#define GL_RENDERBUFFER_SAMPLES 0x8CAB +#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LAYER 0x8CD4 +#define GL_MAX_COLOR_ATTACHMENTS 0x8CDF +#define GL_COLOR_ATTACHMENT1 0x8CE1 +#define GL_COLOR_ATTACHMENT2 0x8CE2 +#define GL_COLOR_ATTACHMENT3 0x8CE3 +#define GL_COLOR_ATTACHMENT4 0x8CE4 +#define GL_COLOR_ATTACHMENT5 0x8CE5 +#define GL_COLOR_ATTACHMENT6 0x8CE6 +#define GL_COLOR_ATTACHMENT7 0x8CE7 +#define GL_COLOR_ATTACHMENT8 0x8CE8 +#define GL_COLOR_ATTACHMENT9 0x8CE9 +#define GL_COLOR_ATTACHMENT10 0x8CEA +#define GL_COLOR_ATTACHMENT11 0x8CEB +#define GL_COLOR_ATTACHMENT12 0x8CEC +#define GL_COLOR_ATTACHMENT13 0x8CED +#define GL_COLOR_ATTACHMENT14 0x8CEE +#define GL_COLOR_ATTACHMENT15 0x8CEF +#define GL_COLOR_ATTACHMENT16 0x8CF0 +#define GL_COLOR_ATTACHMENT17 0x8CF1 +#define GL_COLOR_ATTACHMENT18 0x8CF2 +#define GL_COLOR_ATTACHMENT19 0x8CF3 +#define GL_COLOR_ATTACHMENT20 0x8CF4 +#define GL_COLOR_ATTACHMENT21 0x8CF5 +#define GL_COLOR_ATTACHMENT22 0x8CF6 +#define GL_COLOR_ATTACHMENT23 0x8CF7 +#define GL_COLOR_ATTACHMENT24 0x8CF8 +#define GL_COLOR_ATTACHMENT25 0x8CF9 +#define GL_COLOR_ATTACHMENT26 0x8CFA +#define GL_COLOR_ATTACHMENT27 0x8CFB +#define GL_COLOR_ATTACHMENT28 0x8CFC +#define GL_COLOR_ATTACHMENT29 0x8CFD +#define GL_COLOR_ATTACHMENT30 0x8CFE +#define GL_COLOR_ATTACHMENT31 0x8CFF +#define GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE 0x8D56 
+#define GL_MAX_SAMPLES 0x8D57 +#define GL_HALF_FLOAT 0x140B +#define GL_MAP_READ_BIT 0x0001 +#define GL_MAP_WRITE_BIT 0x0002 +#define GL_MAP_INVALIDATE_RANGE_BIT 0x0004 +#define GL_MAP_INVALIDATE_BUFFER_BIT 0x0008 +#define GL_MAP_FLUSH_EXPLICIT_BIT 0x0010 +#define GL_MAP_UNSYNCHRONIZED_BIT 0x0020 +#define GL_RG 0x8227 +#define GL_RG_INTEGER 0x8228 +#define GL_R8 0x8229 +#define GL_RG8 0x822B +#define GL_R16F 0x822D +#define GL_R32F 0x822E +#define GL_RG16F 0x822F +#define GL_RG32F 0x8230 +#define GL_R8I 0x8231 +#define GL_R8UI 0x8232 +#define GL_R16I 0x8233 +#define GL_R16UI 0x8234 +#define GL_R32I 0x8235 +#define GL_R32UI 0x8236 +#define GL_RG8I 0x8237 +#define GL_RG8UI 0x8238 +#define GL_RG16I 0x8239 +#define GL_RG16UI 0x823A +#define GL_RG32I 0x823B +#define GL_RG32UI 0x823C +#define GL_VERTEX_ARRAY_BINDING 0x85B5 +#define GL_R8_SNORM 0x8F94 +#define GL_RG8_SNORM 0x8F95 +#define GL_RGB8_SNORM 0x8F96 +#define GL_RGBA8_SNORM 0x8F97 +#define GL_SIGNED_NORMALIZED 0x8F9C +#define GL_PRIMITIVE_RESTART_FIXED_INDEX 0x8D69 +#define GL_COPY_READ_BUFFER 0x8F36 +#define GL_COPY_WRITE_BUFFER 0x8F37 +#define GL_COPY_READ_BUFFER_BINDING 0x8F36 +#define GL_COPY_WRITE_BUFFER_BINDING 0x8F37 +#define GL_UNIFORM_BUFFER 0x8A11 +#define GL_UNIFORM_BUFFER_BINDING 0x8A28 +#define GL_UNIFORM_BUFFER_START 0x8A29 +#define GL_UNIFORM_BUFFER_SIZE 0x8A2A +#define GL_MAX_VERTEX_UNIFORM_BLOCKS 0x8A2B +#define GL_MAX_FRAGMENT_UNIFORM_BLOCKS 0x8A2D +#define GL_MAX_COMBINED_UNIFORM_BLOCKS 0x8A2E +#define GL_MAX_UNIFORM_BUFFER_BINDINGS 0x8A2F +#define GL_MAX_UNIFORM_BLOCK_SIZE 0x8A30 +#define GL_MAX_COMBINED_VERTEX_UNIFORM_COMPONENTS 0x8A31 +#define GL_MAX_COMBINED_FRAGMENT_UNIFORM_COMPONENTS 0x8A33 +#define GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT 0x8A34 +#define GL_ACTIVE_UNIFORM_BLOCK_MAX_NAME_LENGTH 0x8A35 +#define GL_ACTIVE_UNIFORM_BLOCKS 0x8A36 +#define GL_UNIFORM_TYPE 0x8A37 +#define GL_UNIFORM_SIZE 0x8A38 +#define GL_UNIFORM_NAME_LENGTH 0x8A39 +#define GL_UNIFORM_BLOCK_INDEX 0x8A3A +#define GL_UNIFORM_OFFSET 0x8A3B +#define GL_UNIFORM_ARRAY_STRIDE 0x8A3C +#define GL_UNIFORM_MATRIX_STRIDE 0x8A3D +#define GL_UNIFORM_IS_ROW_MAJOR 0x8A3E +#define GL_UNIFORM_BLOCK_BINDING 0x8A3F +#define GL_UNIFORM_BLOCK_DATA_SIZE 0x8A40 +#define GL_UNIFORM_BLOCK_NAME_LENGTH 0x8A41 +#define GL_UNIFORM_BLOCK_ACTIVE_UNIFORMS 0x8A42 +#define GL_UNIFORM_BLOCK_ACTIVE_UNIFORM_INDICES 0x8A43 +#define GL_UNIFORM_BLOCK_REFERENCED_BY_VERTEX_SHADER 0x8A44 +#define GL_UNIFORM_BLOCK_REFERENCED_BY_FRAGMENT_SHADER 0x8A46 +#define GL_INVALID_INDEX 0xFFFFFFFFu +#define GL_MAX_VERTEX_OUTPUT_COMPONENTS 0x9122 +#define GL_MAX_FRAGMENT_INPUT_COMPONENTS 0x9125 +#define GL_MAX_SERVER_WAIT_TIMEOUT 0x9111 +#define GL_OBJECT_TYPE 0x9112 +#define GL_SYNC_CONDITION 0x9113 +#define GL_SYNC_STATUS 0x9114 +#define GL_SYNC_FLAGS 0x9115 +#define GL_SYNC_FENCE 0x9116 +#define GL_SYNC_GPU_COMMANDS_COMPLETE 0x9117 +#define GL_UNSIGNALED 0x9118 +#define GL_SIGNALED 0x9119 +#define GL_ALREADY_SIGNALED 0x911A +#define GL_TIMEOUT_EXPIRED 0x911B +#define GL_CONDITION_SATISFIED 0x911C +#define GL_WAIT_FAILED 0x911D +#define GL_SYNC_FLUSH_COMMANDS_BIT 0x00000001 +#define GL_TIMEOUT_IGNORED 0xFFFFFFFFFFFFFFFFull +#define GL_VERTEX_ATTRIB_ARRAY_DIVISOR 0x88FE +#define GL_ANY_SAMPLES_PASSED 0x8C2F +#define GL_ANY_SAMPLES_PASSED_CONSERVATIVE 0x8D6A +#define GL_SAMPLER_BINDING 0x8919 +#define GL_RGB10_A2UI 0x906F +#define GL_TEXTURE_SWIZZLE_R 0x8E42 +#define GL_TEXTURE_SWIZZLE_G 0x8E43 +#define GL_TEXTURE_SWIZZLE_B 0x8E44 +#define GL_TEXTURE_SWIZZLE_A 0x8E45 +#define GL_GREEN 0x1904 
+#define GL_BLUE 0x1905 +#define GL_INT_2_10_10_10_REV 0x8D9F +#define GL_TRANSFORM_FEEDBACK 0x8E22 +#define GL_TRANSFORM_FEEDBACK_PAUSED 0x8E23 +#define GL_TRANSFORM_FEEDBACK_ACTIVE 0x8E24 +#define GL_TRANSFORM_FEEDBACK_BINDING 0x8E25 +#define GL_PROGRAM_BINARY_RETRIEVABLE_HINT 0x8257 +#define GL_PROGRAM_BINARY_LENGTH 0x8741 +#define GL_NUM_PROGRAM_BINARY_FORMATS 0x87FE +#define GL_PROGRAM_BINARY_FORMATS 0x87FF +#define GL_COMPRESSED_R11_EAC 0x9270 +#define GL_COMPRESSED_SIGNED_R11_EAC 0x9271 +#define GL_COMPRESSED_RG11_EAC 0x9272 +#define GL_COMPRESSED_SIGNED_RG11_EAC 0x9273 +#define GL_COMPRESSED_RGB8_ETC2 0x9274 +#define GL_COMPRESSED_SRGB8_ETC2 0x9275 +#define GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2 0x9276 +#define GL_COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2 0x9277 +#define GL_COMPRESSED_RGBA8_ETC2_EAC 0x9278 +#define GL_COMPRESSED_SRGB8_ALPHA8_ETC2_EAC 0x9279 +#define GL_TEXTURE_IMMUTABLE_FORMAT 0x912F +#define GL_MAX_ELEMENT_INDEX 0x8D6B +#define GL_NUM_SAMPLE_COUNTS 0x9380 +#define GL_TEXTURE_IMMUTABLE_LEVELS 0x82DF +typedef void (GL_APIENTRYP PFNGLREADBUFFERPROC) (GLenum src); +typedef void (GL_APIENTRYP PFNGLDRAWRANGEELEMENTSPROC) (GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum type, const void *indices); +typedef void (GL_APIENTRYP PFNGLTEXIMAGE3DPROC) (GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const void *pixels); +typedef void (GL_APIENTRYP PFNGLTEXSUBIMAGE3DPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const void *pixels); +typedef void (GL_APIENTRYP PFNGLCOPYTEXSUBIMAGE3DPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLint x, GLint y, GLsizei width, GLsizei height); +typedef void (GL_APIENTRYP PFNGLCOMPRESSEDTEXIMAGE3DPROC) (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLsizei imageSize, const void *data); +typedef void (GL_APIENTRYP PFNGLCOMPRESSEDTEXSUBIMAGE3DPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLsizei imageSize, const void *data); +typedef void (GL_APIENTRYP PFNGLGENQUERIESPROC) (GLsizei n, GLuint *ids); +typedef void (GL_APIENTRYP PFNGLDELETEQUERIESPROC) (GLsizei n, const GLuint *ids); +typedef GLboolean (GL_APIENTRYP PFNGLISQUERYPROC) (GLuint id); +typedef void (GL_APIENTRYP PFNGLBEGINQUERYPROC) (GLenum target, GLuint id); +typedef void (GL_APIENTRYP PFNGLENDQUERYPROC) (GLenum target); +typedef void (GL_APIENTRYP PFNGLGETQUERYIVPROC) (GLenum target, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETQUERYOBJECTUIVPROC) (GLuint id, GLenum pname, GLuint *params); +typedef GLboolean (GL_APIENTRYP PFNGLUNMAPBUFFERPROC) (GLenum target); +typedef void (GL_APIENTRYP PFNGLGETBUFFERPOINTERVPROC) (GLenum target, GLenum pname, void **params); +typedef void (GL_APIENTRYP PFNGLDRAWBUFFERSPROC) (GLsizei n, const GLenum *bufs); +typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX2X3FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX3X2FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX2X4FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP 
PFNGLUNIFORMMATRIX4X2FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX3X4FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX4X3FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLBLITFRAMEBUFFERPROC) (GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter); +typedef void (GL_APIENTRYP PFNGLRENDERBUFFERSTORAGEMULTISAMPLEPROC) (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height); +typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERTEXTURELAYERPROC) (GLenum target, GLenum attachment, GLuint texture, GLint level, GLint layer); +typedef void *(GL_APIENTRYP PFNGLMAPBUFFERRANGEPROC) (GLenum target, GLintptr offset, GLsizeiptr length, GLbitfield access); +typedef void (GL_APIENTRYP PFNGLFLUSHMAPPEDBUFFERRANGEPROC) (GLenum target, GLintptr offset, GLsizeiptr length); +typedef void (GL_APIENTRYP PFNGLBINDVERTEXARRAYPROC) (GLuint array); +typedef void (GL_APIENTRYP PFNGLDELETEVERTEXARRAYSPROC) (GLsizei n, const GLuint *arrays); +typedef void (GL_APIENTRYP PFNGLGENVERTEXARRAYSPROC) (GLsizei n, GLuint *arrays); +typedef GLboolean (GL_APIENTRYP PFNGLISVERTEXARRAYPROC) (GLuint array); +typedef void (GL_APIENTRYP PFNGLGETINTEGERI_VPROC) (GLenum target, GLuint index, GLint *data); +typedef void (GL_APIENTRYP PFNGLBEGINTRANSFORMFEEDBACKPROC) (GLenum primitiveMode); +typedef void (GL_APIENTRYP PFNGLENDTRANSFORMFEEDBACKPROC) (void); +typedef void (GL_APIENTRYP PFNGLBINDBUFFERRANGEPROC) (GLenum target, GLuint index, GLuint buffer, GLintptr offset, GLsizeiptr size); +typedef void (GL_APIENTRYP PFNGLBINDBUFFERBASEPROC) (GLenum target, GLuint index, GLuint buffer); +typedef void (GL_APIENTRYP PFNGLTRANSFORMFEEDBACKVARYINGSPROC) (GLuint program, GLsizei count, const GLchar *const*varyings, GLenum bufferMode); +typedef void (GL_APIENTRYP PFNGLGETTRANSFORMFEEDBACKVARYINGPROC) (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLsizei *size, GLenum *type, GLchar *name); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBIPOINTERPROC) (GLuint index, GLint size, GLenum type, GLsizei stride, const void *pointer); +typedef void (GL_APIENTRYP PFNGLGETVERTEXATTRIBIIVPROC) (GLuint index, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETVERTEXATTRIBIUIVPROC) (GLuint index, GLenum pname, GLuint *params); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBI4IPROC) (GLuint index, GLint x, GLint y, GLint z, GLint w); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBI4UIPROC) (GLuint index, GLuint x, GLuint y, GLuint z, GLuint w); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBI4IVPROC) (GLuint index, const GLint *v); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBI4UIVPROC) (GLuint index, const GLuint *v); +typedef void (GL_APIENTRYP PFNGLGETUNIFORMUIVPROC) (GLuint program, GLint location, GLuint *params); +typedef GLint (GL_APIENTRYP PFNGLGETFRAGDATALOCATIONPROC) (GLuint program, const GLchar *name); +typedef void (GL_APIENTRYP PFNGLUNIFORM1UIPROC) (GLint location, GLuint v0); +typedef void (GL_APIENTRYP PFNGLUNIFORM2UIPROC) (GLint location, GLuint v0, GLuint v1); +typedef void (GL_APIENTRYP PFNGLUNIFORM3UIPROC) (GLint location, GLuint v0, GLuint v1, GLuint v2); +typedef void (GL_APIENTRYP PFNGLUNIFORM4UIPROC) (GLint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3); +typedef void 
(GL_APIENTRYP PFNGLUNIFORM1UIVPROC) (GLint location, GLsizei count, const GLuint *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM2UIVPROC) (GLint location, GLsizei count, const GLuint *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM3UIVPROC) (GLint location, GLsizei count, const GLuint *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM4UIVPROC) (GLint location, GLsizei count, const GLuint *value); +typedef void (GL_APIENTRYP PFNGLCLEARBUFFERIVPROC) (GLenum buffer, GLint drawbuffer, const GLint *value); +typedef void (GL_APIENTRYP PFNGLCLEARBUFFERUIVPROC) (GLenum buffer, GLint drawbuffer, const GLuint *value); +typedef void (GL_APIENTRYP PFNGLCLEARBUFFERFVPROC) (GLenum buffer, GLint drawbuffer, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLCLEARBUFFERFIPROC) (GLenum buffer, GLint drawbuffer, GLfloat depth, GLint stencil); +typedef const GLubyte *(GL_APIENTRYP PFNGLGETSTRINGIPROC) (GLenum name, GLuint index); +typedef void (GL_APIENTRYP PFNGLCOPYBUFFERSUBDATAPROC) (GLenum readTarget, GLenum writeTarget, GLintptr readOffset, GLintptr writeOffset, GLsizeiptr size); +typedef void (GL_APIENTRYP PFNGLGETUNIFORMINDICESPROC) (GLuint program, GLsizei uniformCount, const GLchar *const*uniformNames, GLuint *uniformIndices); +typedef void (GL_APIENTRYP PFNGLGETACTIVEUNIFORMSIVPROC) (GLuint program, GLsizei uniformCount, const GLuint *uniformIndices, GLenum pname, GLint *params); +typedef GLuint (GL_APIENTRYP PFNGLGETUNIFORMBLOCKINDEXPROC) (GLuint program, const GLchar *uniformBlockName); +typedef void (GL_APIENTRYP PFNGLGETACTIVEUNIFORMBLOCKIVPROC) (GLuint program, GLuint uniformBlockIndex, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETACTIVEUNIFORMBLOCKNAMEPROC) (GLuint program, GLuint uniformBlockIndex, GLsizei bufSize, GLsizei *length, GLchar *uniformBlockName); +typedef void (GL_APIENTRYP PFNGLUNIFORMBLOCKBINDINGPROC) (GLuint program, GLuint uniformBlockIndex, GLuint uniformBlockBinding); +typedef void (GL_APIENTRYP PFNGLDRAWARRAYSINSTANCEDPROC) (GLenum mode, GLint first, GLsizei count, GLsizei instancecount); +typedef void (GL_APIENTRYP PFNGLDRAWELEMENTSINSTANCEDPROC) (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount); +typedef GLsync (GL_APIENTRYP PFNGLFENCESYNCPROC) (GLenum condition, GLbitfield flags); +typedef GLboolean (GL_APIENTRYP PFNGLISSYNCPROC) (GLsync sync); +typedef void (GL_APIENTRYP PFNGLDELETESYNCPROC) (GLsync sync); +typedef GLenum (GL_APIENTRYP PFNGLCLIENTWAITSYNCPROC) (GLsync sync, GLbitfield flags, GLuint64 timeout); +typedef void (GL_APIENTRYP PFNGLWAITSYNCPROC) (GLsync sync, GLbitfield flags, GLuint64 timeout); +typedef void (GL_APIENTRYP PFNGLGETINTEGER64VPROC) (GLenum pname, GLint64 *data); +typedef void (GL_APIENTRYP PFNGLGETSYNCIVPROC) (GLsync sync, GLenum pname, GLsizei bufSize, GLsizei *length, GLint *values); +typedef void (GL_APIENTRYP PFNGLGETINTEGER64I_VPROC) (GLenum target, GLuint index, GLint64 *data); +typedef void (GL_APIENTRYP PFNGLGETBUFFERPARAMETERI64VPROC) (GLenum target, GLenum pname, GLint64 *params); +typedef void (GL_APIENTRYP PFNGLGENSAMPLERSPROC) (GLsizei count, GLuint *samplers); +typedef void (GL_APIENTRYP PFNGLDELETESAMPLERSPROC) (GLsizei count, const GLuint *samplers); +typedef GLboolean (GL_APIENTRYP PFNGLISSAMPLERPROC) (GLuint sampler); +typedef void (GL_APIENTRYP PFNGLBINDSAMPLERPROC) (GLuint unit, GLuint sampler); +typedef void (GL_APIENTRYP PFNGLSAMPLERPARAMETERIPROC) (GLuint sampler, GLenum pname, GLint param); +typedef void (GL_APIENTRYP PFNGLSAMPLERPARAMETERIVPROC) (GLuint 
sampler, GLenum pname, const GLint *param); +typedef void (GL_APIENTRYP PFNGLSAMPLERPARAMETERFPROC) (GLuint sampler, GLenum pname, GLfloat param); +typedef void (GL_APIENTRYP PFNGLSAMPLERPARAMETERFVPROC) (GLuint sampler, GLenum pname, const GLfloat *param); +typedef void (GL_APIENTRYP PFNGLGETSAMPLERPARAMETERIVPROC) (GLuint sampler, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETSAMPLERPARAMETERFVPROC) (GLuint sampler, GLenum pname, GLfloat *params); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBDIVISORPROC) (GLuint index, GLuint divisor); +typedef void (GL_APIENTRYP PFNGLBINDTRANSFORMFEEDBACKPROC) (GLenum target, GLuint id); +typedef void (GL_APIENTRYP PFNGLDELETETRANSFORMFEEDBACKSPROC) (GLsizei n, const GLuint *ids); +typedef void (GL_APIENTRYP PFNGLGENTRANSFORMFEEDBACKSPROC) (GLsizei n, GLuint *ids); +typedef GLboolean (GL_APIENTRYP PFNGLISTRANSFORMFEEDBACKPROC) (GLuint id); +typedef void (GL_APIENTRYP PFNGLPAUSETRANSFORMFEEDBACKPROC) (void); +typedef void (GL_APIENTRYP PFNGLRESUMETRANSFORMFEEDBACKPROC) (void); +typedef void (GL_APIENTRYP PFNGLGETPROGRAMBINARYPROC) (GLuint program, GLsizei bufSize, GLsizei *length, GLenum *binaryFormat, void *binary); +typedef void (GL_APIENTRYP PFNGLPROGRAMBINARYPROC) (GLuint program, GLenum binaryFormat, const void *binary, GLsizei length); +typedef void (GL_APIENTRYP PFNGLPROGRAMPARAMETERIPROC) (GLuint program, GLenum pname, GLint value); +typedef void (GL_APIENTRYP PFNGLINVALIDATEFRAMEBUFFERPROC) (GLenum target, GLsizei numAttachments, const GLenum *attachments); +typedef void (GL_APIENTRYP PFNGLINVALIDATESUBFRAMEBUFFERPROC) (GLenum target, GLsizei numAttachments, const GLenum *attachments, GLint x, GLint y, GLsizei width, GLsizei height); +typedef void (GL_APIENTRYP PFNGLTEXSTORAGE2DPROC) (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height); +typedef void (GL_APIENTRYP PFNGLTEXSTORAGE3DPROC) (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth); +typedef void (GL_APIENTRYP PFNGLGETINTERNALFORMATIVPROC) (GLenum target, GLenum internalformat, GLenum pname, GLsizei bufSize, GLint *params); +#if GL_GLES_PROTOTYPES +GL_APICALL void GL_APIENTRY glReadBuffer (GLenum src); +GL_APICALL void GL_APIENTRY glDrawRangeElements (GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum type, const void *indices); +GL_APICALL void GL_APIENTRY glTexImage3D (GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const void *pixels); +GL_APICALL void GL_APIENTRY glTexSubImage3D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const void *pixels); +GL_APICALL void GL_APIENTRY glCopyTexSubImage3D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLint x, GLint y, GLsizei width, GLsizei height); +GL_APICALL void GL_APIENTRY glCompressedTexImage3D (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLsizei imageSize, const void *data); +GL_APICALL void GL_APIENTRY glCompressedTexSubImage3D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLsizei imageSize, const void *data); +GL_APICALL void GL_APIENTRY glGenQueries (GLsizei n, GLuint *ids); +GL_APICALL void GL_APIENTRY glDeleteQueries (GLsizei n, const GLuint *ids); 
+GL_APICALL GLboolean GL_APIENTRY glIsQuery (GLuint id); +GL_APICALL void GL_APIENTRY glBeginQuery (GLenum target, GLuint id); +GL_APICALL void GL_APIENTRY glEndQuery (GLenum target); +GL_APICALL void GL_APIENTRY glGetQueryiv (GLenum target, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetQueryObjectuiv (GLuint id, GLenum pname, GLuint *params); +GL_APICALL GLboolean GL_APIENTRY glUnmapBuffer (GLenum target); +GL_APICALL void GL_APIENTRY glGetBufferPointerv (GLenum target, GLenum pname, void **params); +GL_APICALL void GL_APIENTRY glDrawBuffers (GLsizei n, const GLenum *bufs); +GL_APICALL void GL_APIENTRY glUniformMatrix2x3fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniformMatrix3x2fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniformMatrix2x4fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniformMatrix4x2fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniformMatrix3x4fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniformMatrix4x3fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glBlitFramebuffer (GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter); +GL_APICALL void GL_APIENTRY glRenderbufferStorageMultisample (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height); +GL_APICALL void GL_APIENTRY glFramebufferTextureLayer (GLenum target, GLenum attachment, GLuint texture, GLint level, GLint layer); +GL_APICALL void *GL_APIENTRY glMapBufferRange (GLenum target, GLintptr offset, GLsizeiptr length, GLbitfield access); +GL_APICALL void GL_APIENTRY glFlushMappedBufferRange (GLenum target, GLintptr offset, GLsizeiptr length); +GL_APICALL void GL_APIENTRY glBindVertexArray (GLuint array); +GL_APICALL void GL_APIENTRY glDeleteVertexArrays (GLsizei n, const GLuint *arrays); +GL_APICALL void GL_APIENTRY glGenVertexArrays (GLsizei n, GLuint *arrays); +GL_APICALL GLboolean GL_APIENTRY glIsVertexArray (GLuint array); +GL_APICALL void GL_APIENTRY glGetIntegeri_v (GLenum target, GLuint index, GLint *data); +GL_APICALL void GL_APIENTRY glBeginTransformFeedback (GLenum primitiveMode); +GL_APICALL void GL_APIENTRY glEndTransformFeedback (void); +GL_APICALL void GL_APIENTRY glBindBufferRange (GLenum target, GLuint index, GLuint buffer, GLintptr offset, GLsizeiptr size); +GL_APICALL void GL_APIENTRY glBindBufferBase (GLenum target, GLuint index, GLuint buffer); +GL_APICALL void GL_APIENTRY glTransformFeedbackVaryings (GLuint program, GLsizei count, const GLchar *const*varyings, GLenum bufferMode); +GL_APICALL void GL_APIENTRY glGetTransformFeedbackVarying (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLsizei *size, GLenum *type, GLchar *name); +GL_APICALL void GL_APIENTRY glVertexAttribIPointer (GLuint index, GLint size, GLenum type, GLsizei stride, const void *pointer); +GL_APICALL void GL_APIENTRY glGetVertexAttribIiv (GLuint index, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetVertexAttribIuiv (GLuint index, GLenum pname, GLuint *params); +GL_APICALL void GL_APIENTRY glVertexAttribI4i (GLuint index, GLint x, GLint y, GLint z, GLint w); +GL_APICALL void GL_APIENTRY 
glVertexAttribI4ui (GLuint index, GLuint x, GLuint y, GLuint z, GLuint w); +GL_APICALL void GL_APIENTRY glVertexAttribI4iv (GLuint index, const GLint *v); +GL_APICALL void GL_APIENTRY glVertexAttribI4uiv (GLuint index, const GLuint *v); +GL_APICALL void GL_APIENTRY glGetUniformuiv (GLuint program, GLint location, GLuint *params); +GL_APICALL GLint GL_APIENTRY glGetFragDataLocation (GLuint program, const GLchar *name); +GL_APICALL void GL_APIENTRY glUniform1ui (GLint location, GLuint v0); +GL_APICALL void GL_APIENTRY glUniform2ui (GLint location, GLuint v0, GLuint v1); +GL_APICALL void GL_APIENTRY glUniform3ui (GLint location, GLuint v0, GLuint v1, GLuint v2); +GL_APICALL void GL_APIENTRY glUniform4ui (GLint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3); +GL_APICALL void GL_APIENTRY glUniform1uiv (GLint location, GLsizei count, const GLuint *value); +GL_APICALL void GL_APIENTRY glUniform2uiv (GLint location, GLsizei count, const GLuint *value); +GL_APICALL void GL_APIENTRY glUniform3uiv (GLint location, GLsizei count, const GLuint *value); +GL_APICALL void GL_APIENTRY glUniform4uiv (GLint location, GLsizei count, const GLuint *value); +GL_APICALL void GL_APIENTRY glClearBufferiv (GLenum buffer, GLint drawbuffer, const GLint *value); +GL_APICALL void GL_APIENTRY glClearBufferuiv (GLenum buffer, GLint drawbuffer, const GLuint *value); +GL_APICALL void GL_APIENTRY glClearBufferfv (GLenum buffer, GLint drawbuffer, const GLfloat *value); +GL_APICALL void GL_APIENTRY glClearBufferfi (GLenum buffer, GLint drawbuffer, GLfloat depth, GLint stencil); +GL_APICALL const GLubyte *GL_APIENTRY glGetStringi (GLenum name, GLuint index); +GL_APICALL void GL_APIENTRY glCopyBufferSubData (GLenum readTarget, GLenum writeTarget, GLintptr readOffset, GLintptr writeOffset, GLsizeiptr size); +GL_APICALL void GL_APIENTRY glGetUniformIndices (GLuint program, GLsizei uniformCount, const GLchar *const*uniformNames, GLuint *uniformIndices); +GL_APICALL void GL_APIENTRY glGetActiveUniformsiv (GLuint program, GLsizei uniformCount, const GLuint *uniformIndices, GLenum pname, GLint *params); +GL_APICALL GLuint GL_APIENTRY glGetUniformBlockIndex (GLuint program, const GLchar *uniformBlockName); +GL_APICALL void GL_APIENTRY glGetActiveUniformBlockiv (GLuint program, GLuint uniformBlockIndex, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetActiveUniformBlockName (GLuint program, GLuint uniformBlockIndex, GLsizei bufSize, GLsizei *length, GLchar *uniformBlockName); +GL_APICALL void GL_APIENTRY glUniformBlockBinding (GLuint program, GLuint uniformBlockIndex, GLuint uniformBlockBinding); +GL_APICALL void GL_APIENTRY glDrawArraysInstanced (GLenum mode, GLint first, GLsizei count, GLsizei instancecount); +GL_APICALL void GL_APIENTRY glDrawElementsInstanced (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount); +GL_APICALL GLsync GL_APIENTRY glFenceSync (GLenum condition, GLbitfield flags); +GL_APICALL GLboolean GL_APIENTRY glIsSync (GLsync sync); +GL_APICALL void GL_APIENTRY glDeleteSync (GLsync sync); +GL_APICALL GLenum GL_APIENTRY glClientWaitSync (GLsync sync, GLbitfield flags, GLuint64 timeout); +GL_APICALL void GL_APIENTRY glWaitSync (GLsync sync, GLbitfield flags, GLuint64 timeout); +GL_APICALL void GL_APIENTRY glGetInteger64v (GLenum pname, GLint64 *data); +GL_APICALL void GL_APIENTRY glGetSynciv (GLsync sync, GLenum pname, GLsizei bufSize, GLsizei *length, GLint *values); +GL_APICALL void GL_APIENTRY glGetInteger64i_v (GLenum target, GLuint index, GLint64 *data); 
+GL_APICALL void GL_APIENTRY glGetBufferParameteri64v (GLenum target, GLenum pname, GLint64 *params);
+GL_APICALL void GL_APIENTRY glGenSamplers (GLsizei count, GLuint *samplers);
+GL_APICALL void GL_APIENTRY glDeleteSamplers (GLsizei count, const GLuint *samplers);
+GL_APICALL GLboolean GL_APIENTRY glIsSampler (GLuint sampler);
+GL_APICALL void GL_APIENTRY glBindSampler (GLuint unit, GLuint sampler);
+GL_APICALL void GL_APIENTRY glSamplerParameteri (GLuint sampler, GLenum pname, GLint param);
+GL_APICALL void GL_APIENTRY glSamplerParameteriv (GLuint sampler, GLenum pname, const GLint *param);
+GL_APICALL void GL_APIENTRY glSamplerParameterf (GLuint sampler, GLenum pname, GLfloat param);
+GL_APICALL void GL_APIENTRY glSamplerParameterfv (GLuint sampler, GLenum pname, const GLfloat *param);
+GL_APICALL void GL_APIENTRY glGetSamplerParameteriv (GLuint sampler, GLenum pname, GLint *params);
+GL_APICALL void GL_APIENTRY glGetSamplerParameterfv (GLuint sampler, GLenum pname, GLfloat *params);
+GL_APICALL void GL_APIENTRY glVertexAttribDivisor (GLuint index, GLuint divisor);
+GL_APICALL void GL_APIENTRY glBindTransformFeedback (GLenum target, GLuint id);
+GL_APICALL void GL_APIENTRY glDeleteTransformFeedbacks (GLsizei n, const GLuint *ids);
+GL_APICALL void GL_APIENTRY glGenTransformFeedbacks (GLsizei n, GLuint *ids);
+GL_APICALL GLboolean GL_APIENTRY glIsTransformFeedback (GLuint id);
+GL_APICALL void GL_APIENTRY glPauseTransformFeedback (void);
+GL_APICALL void GL_APIENTRY glResumeTransformFeedback (void);
+GL_APICALL void GL_APIENTRY glGetProgramBinary (GLuint program, GLsizei bufSize, GLsizei *length, GLenum *binaryFormat, void *binary);
+GL_APICALL void GL_APIENTRY glProgramBinary (GLuint program, GLenum binaryFormat, const void *binary, GLsizei length);
+GL_APICALL void GL_APIENTRY glProgramParameteri (GLuint program, GLenum pname, GLint value);
+GL_APICALL void GL_APIENTRY glInvalidateFramebuffer (GLenum target, GLsizei numAttachments, const GLenum *attachments);
+GL_APICALL void GL_APIENTRY glInvalidateSubFramebuffer (GLenum target, GLsizei numAttachments, const GLenum *attachments, GLint x, GLint y, GLsizei width, GLsizei height);
+GL_APICALL void GL_APIENTRY glTexStorage2D (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height);
+GL_APICALL void GL_APIENTRY glTexStorage3D (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth);
+GL_APICALL void GL_APIENTRY glGetInternalformativ (GLenum target, GLenum internalformat, GLenum pname, GLsizei bufSize, GLint *params);
+#endif
+#endif /* GL_ES_VERSION_3_0 */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/sources/khronos/GLES3/gl31.h b/sources/khronos/GLES3/gl31.h
new file mode 100755
index 00000000..ae443a02
--- /dev/null
+++ b/sources/khronos/GLES3/gl31.h
@@ -0,0 +1,1528 @@
+#ifndef __gl31_h_
+#define __gl31_h_ 1
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+** Copyright (c) 2013-2016 The Khronos Group Inc.
+**
+** Permission is hereby granted, free of charge, to any person obtaining a
+** copy of this software and/or associated documentation files (the
+** "Materials"), to deal in the Materials without restriction, including
+** without limitation the rights to use, copy, modify, merge, publish,
+** distribute, sublicense, and/or sell copies of the Materials, and to
+** permit persons to whom the Materials are furnished to do so, subject to
+** the following conditions:
+**
+** The above copyright notice and this permission notice shall be included
+** in all copies or substantial portions of the Materials.
+**
+** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+** MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+*/
+/*
+** This header is generated from the Khronos OpenGL / OpenGL ES XML
+** API Registry. The current version of the Registry, generator scripts
+** used to make the header, and the header can be found at
+** http://www.opengl.org/registry/
+**
+** Khronos $Revision$ on $Date$
+*/
+
+#include <GLES3/gl3platform.h>
+
+#ifndef GL_APIENTRYP
+#define GL_APIENTRYP GL_APIENTRY*
+#endif
+
+#ifndef GL_GLES_PROTOTYPES
+#define GL_GLES_PROTOTYPES 1
+#endif
+
+/* Generated on date 20161024 */
+
+/* Generated C header for:
+ * API: gles2
+ * Profile: common
+ * Versions considered: 2\.[0-9]|3\.[01]
+ * Versions emitted: .*
+ * Default extensions included: None
+ * Additional extensions included: _nomatch_^
+ * Extensions removed: _nomatch_^
+ */
+
+#ifndef GL_ES_VERSION_2_0
+#define GL_ES_VERSION_2_0 1
+#include <KHR/khrplatform.h>
+typedef khronos_int8_t GLbyte;
+typedef khronos_float_t GLclampf;
+typedef khronos_int32_t GLfixed;
+typedef short GLshort;
+typedef unsigned short GLushort;
+typedef void GLvoid;
+typedef struct __GLsync *GLsync;
+typedef khronos_int64_t GLint64;
+typedef khronos_uint64_t GLuint64;
+typedef unsigned int GLenum;
+typedef unsigned int GLuint;
+typedef char GLchar;
+typedef khronos_float_t GLfloat;
+typedef khronos_ssize_t GLsizeiptr;
+typedef khronos_intptr_t GLintptr;
+typedef unsigned int GLbitfield;
+typedef int GLint;
+typedef unsigned char GLboolean;
+typedef int GLsizei;
+typedef khronos_uint8_t GLubyte;
+#define GL_DEPTH_BUFFER_BIT 0x00000100
+#define GL_STENCIL_BUFFER_BIT 0x00000400
+#define GL_COLOR_BUFFER_BIT 0x00004000
+#define GL_FALSE 0
+#define GL_TRUE 1
+#define GL_POINTS 0x0000
+#define GL_LINES 0x0001
+#define GL_LINE_LOOP 0x0002
+#define GL_LINE_STRIP 0x0003
+#define GL_TRIANGLES 0x0004
+#define GL_TRIANGLE_STRIP 0x0005
+#define GL_TRIANGLE_FAN 0x0006
+#define GL_ZERO 0
+#define GL_ONE 1
+#define GL_SRC_COLOR 0x0300
+#define GL_ONE_MINUS_SRC_COLOR 0x0301
+#define GL_SRC_ALPHA 0x0302
+#define GL_ONE_MINUS_SRC_ALPHA 0x0303
+#define GL_DST_ALPHA 0x0304
+#define GL_ONE_MINUS_DST_ALPHA 0x0305
+#define GL_DST_COLOR 0x0306
+#define GL_ONE_MINUS_DST_COLOR 0x0307
+#define GL_SRC_ALPHA_SATURATE 0x0308
+#define GL_FUNC_ADD 0x8006
+#define GL_BLEND_EQUATION 0x8009
+#define GL_BLEND_EQUATION_RGB 0x8009
+#define GL_BLEND_EQUATION_ALPHA 0x883D
+#define GL_FUNC_SUBTRACT 0x800A
+#define GL_FUNC_REVERSE_SUBTRACT 0x800B
+#define GL_BLEND_DST_RGB 0x80C8
+#define GL_BLEND_SRC_RGB 0x80C9
+#define GL_BLEND_DST_ALPHA 0x80CA
+#define GL_BLEND_SRC_ALPHA 0x80CB
+#define GL_CONSTANT_COLOR 0x8001
+#define GL_ONE_MINUS_CONSTANT_COLOR 0x8002
+#define GL_CONSTANT_ALPHA 0x8003
+#define GL_ONE_MINUS_CONSTANT_ALPHA 0x8004
+#define GL_BLEND_COLOR 0x8005
+#define GL_ARRAY_BUFFER 0x8892
+#define GL_ELEMENT_ARRAY_BUFFER 0x8893
+#define GL_ARRAY_BUFFER_BINDING 0x8894
+#define GL_ELEMENT_ARRAY_BUFFER_BINDING 0x8895
+#define GL_STREAM_DRAW 0x88E0
+#define GL_STATIC_DRAW 0x88E4
+#define GL_DYNAMIC_DRAW 0x88E8
+#define GL_BUFFER_SIZE 0x8764
+#define GL_BUFFER_USAGE 0x8765
+#define GL_CURRENT_VERTEX_ATTRIB 0x8626
+#define GL_FRONT 0x0404
+#define GL_BACK 0x0405
+#define GL_FRONT_AND_BACK 0x0408
+#define GL_TEXTURE_2D 0x0DE1
+#define GL_CULL_FACE 0x0B44
+#define GL_BLEND 0x0BE2
+#define GL_DITHER 0x0BD0
+#define GL_STENCIL_TEST 0x0B90
+#define GL_DEPTH_TEST 0x0B71
+#define GL_SCISSOR_TEST 0x0C11
+#define GL_POLYGON_OFFSET_FILL 0x8037
+#define GL_SAMPLE_ALPHA_TO_COVERAGE 0x809E
+#define GL_SAMPLE_COVERAGE 0x80A0
+#define GL_NO_ERROR 0
+#define GL_INVALID_ENUM 0x0500
+#define GL_INVALID_VALUE 0x0501
+#define GL_INVALID_OPERATION 0x0502
+#define GL_OUT_OF_MEMORY 0x0505
+#define GL_CW 0x0900
+#define GL_CCW 0x0901
+#define GL_LINE_WIDTH 0x0B21
+#define GL_ALIASED_POINT_SIZE_RANGE 0x846D
+#define GL_ALIASED_LINE_WIDTH_RANGE 0x846E
+#define GL_CULL_FACE_MODE 0x0B45
+#define GL_FRONT_FACE 0x0B46
+#define GL_DEPTH_RANGE 0x0B70
+#define GL_DEPTH_WRITEMASK 0x0B72
+#define GL_DEPTH_CLEAR_VALUE 0x0B73
+#define GL_DEPTH_FUNC 0x0B74
+#define GL_STENCIL_CLEAR_VALUE 0x0B91
+#define GL_STENCIL_FUNC 0x0B92
+#define GL_STENCIL_FAIL 0x0B94
+#define GL_STENCIL_PASS_DEPTH_FAIL 0x0B95
+#define GL_STENCIL_PASS_DEPTH_PASS 0x0B96
+#define GL_STENCIL_REF 0x0B97
+#define GL_STENCIL_VALUE_MASK 0x0B93
+#define GL_STENCIL_WRITEMASK 0x0B98
+#define GL_STENCIL_BACK_FUNC 0x8800
+#define GL_STENCIL_BACK_FAIL 0x8801
+#define GL_STENCIL_BACK_PASS_DEPTH_FAIL 0x8802
+#define GL_STENCIL_BACK_PASS_DEPTH_PASS 0x8803
+#define GL_STENCIL_BACK_REF 0x8CA3
+#define GL_STENCIL_BACK_VALUE_MASK 0x8CA4
+#define GL_STENCIL_BACK_WRITEMASK 0x8CA5
+#define GL_VIEWPORT 0x0BA2
+#define GL_SCISSOR_BOX 0x0C10
+#define GL_COLOR_CLEAR_VALUE 0x0C22
+#define GL_COLOR_WRITEMASK 0x0C23
+#define GL_UNPACK_ALIGNMENT 0x0CF5
+#define GL_PACK_ALIGNMENT 0x0D05
+#define GL_MAX_TEXTURE_SIZE 0x0D33
+#define GL_MAX_VIEWPORT_DIMS 0x0D3A
+#define GL_SUBPIXEL_BITS 0x0D50
+#define GL_RED_BITS 0x0D52
+#define GL_GREEN_BITS 0x0D53
+#define GL_BLUE_BITS 0x0D54
+#define GL_ALPHA_BITS 0x0D55
+#define GL_DEPTH_BITS 0x0D56
+#define GL_STENCIL_BITS 0x0D57
+#define GL_POLYGON_OFFSET_UNITS 0x2A00
+#define GL_POLYGON_OFFSET_FACTOR 0x8038
+#define GL_TEXTURE_BINDING_2D 0x8069
+#define GL_SAMPLE_BUFFERS 0x80A8
+#define GL_SAMPLES 0x80A9
+#define GL_SAMPLE_COVERAGE_VALUE 0x80AA
+#define GL_SAMPLE_COVERAGE_INVERT 0x80AB
+#define GL_NUM_COMPRESSED_TEXTURE_FORMATS 0x86A2
+#define GL_COMPRESSED_TEXTURE_FORMATS 0x86A3
+#define GL_DONT_CARE 0x1100
+#define GL_FASTEST 0x1101
+#define GL_NICEST 0x1102
+#define GL_GENERATE_MIPMAP_HINT 0x8192
+#define GL_BYTE 0x1400
+#define GL_UNSIGNED_BYTE 0x1401
+#define GL_SHORT 0x1402
+#define GL_UNSIGNED_SHORT 0x1403
+#define GL_INT 0x1404
+#define GL_UNSIGNED_INT 0x1405
+#define GL_FLOAT 0x1406
+#define GL_FIXED 0x140C
+#define GL_DEPTH_COMPONENT 0x1902
+#define GL_ALPHA 0x1906
+#define GL_RGB 0x1907
+#define GL_RGBA 0x1908
+#define GL_LUMINANCE 0x1909
+#define GL_LUMINANCE_ALPHA 0x190A
+#define GL_UNSIGNED_SHORT_4_4_4_4 0x8033
+#define GL_UNSIGNED_SHORT_5_5_5_1 0x8034
+#define GL_UNSIGNED_SHORT_5_6_5 0x8363
+#define GL_FRAGMENT_SHADER 0x8B30
+#define GL_VERTEX_SHADER 0x8B31
+#define GL_MAX_VERTEX_ATTRIBS 0x8869
+#define GL_MAX_VERTEX_UNIFORM_VECTORS 0x8DFB
+#define GL_MAX_VARYING_VECTORS 0x8DFC
+#define GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS 0x8B4D
+#define GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS 0x8B4C
+#define GL_MAX_TEXTURE_IMAGE_UNITS 0x8872
+#define GL_MAX_FRAGMENT_UNIFORM_VECTORS 0x8DFD
+#define GL_SHADER_TYPE 0x8B4F
+#define GL_DELETE_STATUS 0x8B80
+#define GL_LINK_STATUS 0x8B82
+#define GL_VALIDATE_STATUS 0x8B83
+#define GL_ATTACHED_SHADERS 0x8B85
+#define GL_ACTIVE_UNIFORMS 0x8B86
+#define GL_ACTIVE_UNIFORM_MAX_LENGTH 0x8B87
+#define GL_ACTIVE_ATTRIBUTES 0x8B89
+#define GL_ACTIVE_ATTRIBUTE_MAX_LENGTH 0x8B8A
+#define GL_SHADING_LANGUAGE_VERSION 0x8B8C
+#define GL_CURRENT_PROGRAM 0x8B8D
+#define GL_NEVER 0x0200
+#define GL_LESS 0x0201
+#define GL_EQUAL 0x0202
+#define GL_LEQUAL 0x0203
+#define GL_GREATER 0x0204
+#define GL_NOTEQUAL 0x0205
+#define GL_GEQUAL 0x0206
+#define GL_ALWAYS 0x0207
+#define GL_KEEP 0x1E00
+#define GL_REPLACE 0x1E01
+#define GL_INCR 0x1E02
+#define GL_DECR 0x1E03
+#define GL_INVERT 0x150A
+#define GL_INCR_WRAP 0x8507
+#define GL_DECR_WRAP 0x8508
+#define GL_VENDOR 0x1F00
+#define GL_RENDERER 0x1F01
+#define GL_VERSION 0x1F02
+#define GL_EXTENSIONS 0x1F03
+#define GL_NEAREST 0x2600
+#define GL_LINEAR 0x2601
+#define GL_NEAREST_MIPMAP_NEAREST 0x2700
+#define GL_LINEAR_MIPMAP_NEAREST 0x2701
+#define GL_NEAREST_MIPMAP_LINEAR 0x2702
+#define GL_LINEAR_MIPMAP_LINEAR 0x2703
+#define GL_TEXTURE_MAG_FILTER 0x2800
+#define GL_TEXTURE_MIN_FILTER 0x2801
+#define GL_TEXTURE_WRAP_S 0x2802
+#define GL_TEXTURE_WRAP_T 0x2803
+#define GL_TEXTURE 0x1702
+#define GL_TEXTURE_CUBE_MAP 0x8513
+#define GL_TEXTURE_BINDING_CUBE_MAP 0x8514
+#define GL_TEXTURE_CUBE_MAP_POSITIVE_X 0x8515
+#define GL_TEXTURE_CUBE_MAP_NEGATIVE_X 0x8516
+#define GL_TEXTURE_CUBE_MAP_POSITIVE_Y 0x8517
+#define GL_TEXTURE_CUBE_MAP_NEGATIVE_Y 0x8518
+#define GL_TEXTURE_CUBE_MAP_POSITIVE_Z 0x8519
+#define GL_TEXTURE_CUBE_MAP_NEGATIVE_Z 0x851A
+#define GL_MAX_CUBE_MAP_TEXTURE_SIZE 0x851C
+#define GL_TEXTURE0 0x84C0
+#define GL_TEXTURE1 0x84C1
+#define GL_TEXTURE2 0x84C2
+#define GL_TEXTURE3 0x84C3
+#define GL_TEXTURE4 0x84C4
+#define GL_TEXTURE5 0x84C5
+#define GL_TEXTURE6 0x84C6
+#define GL_TEXTURE7 0x84C7
+#define GL_TEXTURE8 0x84C8
+#define GL_TEXTURE9 0x84C9
+#define GL_TEXTURE10 0x84CA
+#define GL_TEXTURE11 0x84CB
+#define GL_TEXTURE12 0x84CC
+#define GL_TEXTURE13 0x84CD
+#define GL_TEXTURE14 0x84CE
+#define GL_TEXTURE15 0x84CF
+#define GL_TEXTURE16 0x84D0
+#define GL_TEXTURE17 0x84D1
+#define GL_TEXTURE18 0x84D2
+#define GL_TEXTURE19 0x84D3
+#define GL_TEXTURE20 0x84D4
+#define GL_TEXTURE21 0x84D5
+#define GL_TEXTURE22 0x84D6
+#define GL_TEXTURE23 0x84D7
+#define GL_TEXTURE24 0x84D8
+#define GL_TEXTURE25 0x84D9
+#define GL_TEXTURE26 0x84DA
+#define GL_TEXTURE27 0x84DB
+#define GL_TEXTURE28 0x84DC
+#define GL_TEXTURE29 0x84DD
+#define GL_TEXTURE30 0x84DE
+#define GL_TEXTURE31 0x84DF
+#define GL_ACTIVE_TEXTURE 0x84E0
+#define GL_REPEAT 0x2901
+#define GL_CLAMP_TO_EDGE 0x812F
+#define GL_MIRRORED_REPEAT 0x8370
+#define GL_FLOAT_VEC2 0x8B50
+#define GL_FLOAT_VEC3 0x8B51
+#define GL_FLOAT_VEC4 0x8B52
+#define GL_INT_VEC2 0x8B53
+#define GL_INT_VEC3 0x8B54
+#define GL_INT_VEC4 0x8B55
+#define GL_BOOL 0x8B56
+#define GL_BOOL_VEC2 0x8B57
+#define GL_BOOL_VEC3 0x8B58
+#define GL_BOOL_VEC4 0x8B59
+#define GL_FLOAT_MAT2 0x8B5A
+#define GL_FLOAT_MAT3 0x8B5B
+#define GL_FLOAT_MAT4 0x8B5C
+#define GL_SAMPLER_2D 0x8B5E
+#define GL_SAMPLER_CUBE 0x8B60
+#define GL_VERTEX_ATTRIB_ARRAY_ENABLED 0x8622
+#define GL_VERTEX_ATTRIB_ARRAY_SIZE 0x8623
+#define GL_VERTEX_ATTRIB_ARRAY_STRIDE 0x8624
+#define GL_VERTEX_ATTRIB_ARRAY_TYPE 0x8625
+#define GL_VERTEX_ATTRIB_ARRAY_NORMALIZED 0x886A
+#define GL_VERTEX_ATTRIB_ARRAY_POINTER 0x8645
+#define GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING 0x889F
+#define GL_IMPLEMENTATION_COLOR_READ_TYPE 0x8B9A
+#define GL_IMPLEMENTATION_COLOR_READ_FORMAT 0x8B9B
+#define GL_COMPILE_STATUS 0x8B81
+#define GL_INFO_LOG_LENGTH 0x8B84
+#define GL_SHADER_SOURCE_LENGTH 0x8B88
+#define GL_SHADER_COMPILER 0x8DFA
+#define GL_SHADER_BINARY_FORMATS 0x8DF8
+#define GL_NUM_SHADER_BINARY_FORMATS 0x8DF9
+#define GL_LOW_FLOAT 0x8DF0
+#define GL_MEDIUM_FLOAT 0x8DF1
+#define GL_HIGH_FLOAT 0x8DF2
+#define GL_LOW_INT 0x8DF3
+#define GL_MEDIUM_INT 0x8DF4
+#define GL_HIGH_INT 0x8DF5
+#define GL_FRAMEBUFFER 0x8D40
+#define GL_RENDERBUFFER 0x8D41
+#define GL_RGBA4 0x8056
+#define GL_RGB5_A1 0x8057
+#define GL_RGB565 0x8D62
+#define GL_DEPTH_COMPONENT16 0x81A5
+#define GL_STENCIL_INDEX8 0x8D48
+#define GL_RENDERBUFFER_WIDTH 0x8D42
+#define GL_RENDERBUFFER_HEIGHT 0x8D43
+#define GL_RENDERBUFFER_INTERNAL_FORMAT 0x8D44
+#define GL_RENDERBUFFER_RED_SIZE 0x8D50
+#define GL_RENDERBUFFER_GREEN_SIZE 0x8D51
+#define GL_RENDERBUFFER_BLUE_SIZE 0x8D52
+#define GL_RENDERBUFFER_ALPHA_SIZE 0x8D53
+#define GL_RENDERBUFFER_DEPTH_SIZE 0x8D54
+#define GL_RENDERBUFFER_STENCIL_SIZE 0x8D55
+#define GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE 0x8CD0
+#define GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME 0x8CD1
+#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL 0x8CD2
+#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE 0x8CD3
+#define GL_COLOR_ATTACHMENT0 0x8CE0
+#define GL_DEPTH_ATTACHMENT 0x8D00
+#define GL_STENCIL_ATTACHMENT 0x8D20
+#define GL_NONE 0
+#define GL_FRAMEBUFFER_COMPLETE 0x8CD5
+#define GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT 0x8CD6
+#define GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT 0x8CD7
+#define GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS 0x8CD9
+#define GL_FRAMEBUFFER_UNSUPPORTED 0x8CDD
+#define GL_FRAMEBUFFER_BINDING 0x8CA6
+#define GL_RENDERBUFFER_BINDING 0x8CA7
+#define GL_MAX_RENDERBUFFER_SIZE 0x84E8
+#define GL_INVALID_FRAMEBUFFER_OPERATION 0x0506
+typedef void (GL_APIENTRYP PFNGLACTIVETEXTUREPROC) (GLenum texture);
+typedef void (GL_APIENTRYP PFNGLATTACHSHADERPROC) (GLuint program, GLuint shader);
+typedef void (GL_APIENTRYP PFNGLBINDATTRIBLOCATIONPROC) (GLuint program, GLuint index, const GLchar *name);
+typedef void (GL_APIENTRYP PFNGLBINDBUFFERPROC) (GLenum target, GLuint buffer);
+typedef void (GL_APIENTRYP PFNGLBINDFRAMEBUFFERPROC) (GLenum target, GLuint framebuffer);
+typedef void (GL_APIENTRYP PFNGLBINDRENDERBUFFERPROC) (GLenum target, GLuint renderbuffer);
+typedef void (GL_APIENTRYP PFNGLBINDTEXTUREPROC) (GLenum target, GLuint texture);
+typedef void (GL_APIENTRYP PFNGLBLENDCOLORPROC) (GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha);
+typedef void (GL_APIENTRYP PFNGLBLENDEQUATIONPROC) (GLenum mode);
+typedef void (GL_APIENTRYP PFNGLBLENDEQUATIONSEPARATEPROC) (GLenum modeRGB, GLenum modeAlpha);
+typedef void (GL_APIENTRYP PFNGLBLENDFUNCPROC) (GLenum sfactor, GLenum dfactor);
+typedef void (GL_APIENTRYP PFNGLBLENDFUNCSEPARATEPROC) (GLenum sfactorRGB, GLenum dfactorRGB, GLenum sfactorAlpha, GLenum dfactorAlpha);
+typedef void (GL_APIENTRYP PFNGLBUFFERDATAPROC) (GLenum target, GLsizeiptr size, const void *data, GLenum usage);
+typedef void (GL_APIENTRYP PFNGLBUFFERSUBDATAPROC) (GLenum target, GLintptr offset, GLsizeiptr size, const void *data);
+typedef GLenum (GL_APIENTRYP PFNGLCHECKFRAMEBUFFERSTATUSPROC) (GLenum target);
+typedef void (GL_APIENTRYP PFNGLCLEARPROC) (GLbitfield mask);
+typedef void (GL_APIENTRYP PFNGLCLEARCOLORPROC) (GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha);
+typedef void (GL_APIENTRYP PFNGLCLEARDEPTHFPROC) (GLfloat d);
+typedef void (GL_APIENTRYP PFNGLCLEARSTENCILPROC) (GLint s);
+typedef void (GL_APIENTRYP PFNGLCOLORMASKPROC) (GLboolean red, GLboolean green, GLboolean blue, GLboolean alpha);
+typedef void (GL_APIENTRYP PFNGLCOMPILESHADERPROC) (GLuint shader);
+typedef void (GL_APIENTRYP PFNGLCOMPRESSEDTEXIMAGE2DPROC) (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLsizei imageSize, const void *data);
+typedef void (GL_APIENTRYP PFNGLCOMPRESSEDTEXSUBIMAGE2DPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, const void *data);
+typedef void (GL_APIENTRYP PFNGLCOPYTEXIMAGE2DPROC) (GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLsizei height, GLint border);
+typedef void (GL_APIENTRYP PFNGLCOPYTEXSUBIMAGE2DPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height);
+typedef GLuint (GL_APIENTRYP PFNGLCREATEPROGRAMPROC) (void);
+typedef GLuint (GL_APIENTRYP PFNGLCREATESHADERPROC) (GLenum type);
+typedef void (GL_APIENTRYP PFNGLCULLFACEPROC) (GLenum mode);
+typedef void (GL_APIENTRYP PFNGLDELETEBUFFERSPROC) (GLsizei n, const GLuint *buffers);
+typedef void (GL_APIENTRYP PFNGLDELETEFRAMEBUFFERSPROC) (GLsizei n, const GLuint *framebuffers);
+typedef void (GL_APIENTRYP PFNGLDELETEPROGRAMPROC) (GLuint program);
+typedef void (GL_APIENTRYP PFNGLDELETERENDERBUFFERSPROC) (GLsizei n, const GLuint *renderbuffers);
+typedef void (GL_APIENTRYP PFNGLDELETESHADERPROC) (GLuint shader);
+typedef void (GL_APIENTRYP PFNGLDELETETEXTURESPROC) (GLsizei n, const GLuint *textures);
+typedef void (GL_APIENTRYP PFNGLDEPTHFUNCPROC) (GLenum func);
+typedef void (GL_APIENTRYP PFNGLDEPTHMASKPROC) (GLboolean flag);
+typedef void (GL_APIENTRYP PFNGLDEPTHRANGEFPROC) (GLfloat n, GLfloat f);
+typedef void (GL_APIENTRYP PFNGLDETACHSHADERPROC) (GLuint program, GLuint shader);
+typedef void (GL_APIENTRYP PFNGLDISABLEPROC) (GLenum cap);
+typedef void (GL_APIENTRYP PFNGLDISABLEVERTEXATTRIBARRAYPROC) (GLuint index);
+typedef void (GL_APIENTRYP PFNGLDRAWARRAYSPROC) (GLenum mode, GLint first, GLsizei count);
+typedef void (GL_APIENTRYP PFNGLDRAWELEMENTSPROC) (GLenum mode, GLsizei count, GLenum type, const void *indices);
+typedef void (GL_APIENTRYP PFNGLENABLEPROC) (GLenum cap);
+typedef void (GL_APIENTRYP PFNGLENABLEVERTEXATTRIBARRAYPROC) (GLuint index);
+typedef void (GL_APIENTRYP PFNGLFINISHPROC) (void);
+typedef void (GL_APIENTRYP PFNGLFLUSHPROC) (void);
+typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERRENDERBUFFERPROC) (GLenum target, GLenum attachment, GLenum renderbuffertarget, GLuint renderbuffer);
+typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERTEXTURE2DPROC) (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level);
+typedef void (GL_APIENTRYP PFNGLFRONTFACEPROC) (GLenum mode);
+typedef void (GL_APIENTRYP PFNGLGENBUFFERSPROC) (GLsizei n, GLuint *buffers);
+typedef void (GL_APIENTRYP PFNGLGENERATEMIPMAPPROC) (GLenum target);
+typedef void (GL_APIENTRYP PFNGLGENFRAMEBUFFERSPROC) (GLsizei n, GLuint *framebuffers);
+typedef void (GL_APIENTRYP PFNGLGENRENDERBUFFERSPROC) (GLsizei n, GLuint *renderbuffers);
+typedef void (GL_APIENTRYP PFNGLGENTEXTURESPROC) (GLsizei n, GLuint *textures);
+typedef void (GL_APIENTRYP PFNGLGETACTIVEATTRIBPROC) (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLint *size, GLenum *type, GLchar *name);
+typedef void (GL_APIENTRYP PFNGLGETACTIVEUNIFORMPROC) (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLint *size, GLenum *type, GLchar *name);
+typedef void (GL_APIENTRYP PFNGLGETATTACHEDSHADERSPROC) (GLuint program, GLsizei maxCount, GLsizei *count, GLuint *shaders);
+typedef GLint (GL_APIENTRYP PFNGLGETATTRIBLOCATIONPROC) (GLuint program, const GLchar *name);
+typedef void (GL_APIENTRYP PFNGLGETBOOLEANVPROC) (GLenum pname, GLboolean *data);
+typedef void (GL_APIENTRYP PFNGLGETBUFFERPARAMETERIVPROC) (GLenum target, GLenum pname, GLint *params);
+typedef GLenum (GL_APIENTRYP PFNGLGETERRORPROC) (void);
+typedef void (GL_APIENTRYP PFNGLGETFLOATVPROC) (GLenum pname, GLfloat *data);
+typedef void (GL_APIENTRYP PFNGLGETFRAMEBUFFERATTACHMENTPARAMETERIVPROC) (GLenum target, GLenum attachment, GLenum pname, GLint *params);
+typedef void (GL_APIENTRYP PFNGLGETINTEGERVPROC) (GLenum pname, GLint *data);
+typedef void (GL_APIENTRYP PFNGLGETPROGRAMIVPROC) (GLuint program, GLenum pname, GLint *params);
+typedef void (GL_APIENTRYP PFNGLGETPROGRAMINFOLOGPROC) (GLuint program, GLsizei bufSize, GLsizei *length, GLchar *infoLog);
+typedef void (GL_APIENTRYP PFNGLGETRENDERBUFFERPARAMETERIVPROC) (GLenum target, GLenum pname, GLint *params);
+typedef void (GL_APIENTRYP PFNGLGETSHADERIVPROC) (GLuint shader, GLenum pname, GLint *params);
+typedef void (GL_APIENTRYP PFNGLGETSHADERINFOLOGPROC) (GLuint shader, GLsizei bufSize, GLsizei *length, GLchar *infoLog);
+typedef void (GL_APIENTRYP PFNGLGETSHADERPRECISIONFORMATPROC) (GLenum shadertype, GLenum precisiontype, GLint *range, GLint *precision);
+typedef void (GL_APIENTRYP PFNGLGETSHADERSOURCEPROC) (GLuint shader, GLsizei bufSize, GLsizei *length, GLchar *source);
+typedef const GLubyte *(GL_APIENTRYP PFNGLGETSTRINGPROC) (GLenum name);
+typedef void (GL_APIENTRYP PFNGLGETTEXPARAMETERFVPROC) (GLenum target, GLenum pname, GLfloat *params);
+typedef void (GL_APIENTRYP PFNGLGETTEXPARAMETERIVPROC) (GLenum target, GLenum pname, GLint *params);
+typedef void (GL_APIENTRYP PFNGLGETUNIFORMFVPROC) (GLuint program, GLint location, GLfloat *params);
+typedef void (GL_APIENTRYP PFNGLGETUNIFORMIVPROC) (GLuint program, GLint location, GLint *params);
+typedef GLint (GL_APIENTRYP PFNGLGETUNIFORMLOCATIONPROC) (GLuint program, const GLchar *name);
+typedef void (GL_APIENTRYP PFNGLGETVERTEXATTRIBFVPROC) (GLuint index, GLenum pname, GLfloat *params);
+typedef void (GL_APIENTRYP PFNGLGETVERTEXATTRIBIVPROC) (GLuint index, GLenum pname, GLint *params);
+typedef void (GL_APIENTRYP PFNGLGETVERTEXATTRIBPOINTERVPROC) (GLuint index, GLenum pname, void **pointer);
+typedef void (GL_APIENTRYP PFNGLHINTPROC) (GLenum target, GLenum mode);
+typedef GLboolean (GL_APIENTRYP PFNGLISBUFFERPROC) (GLuint buffer);
+typedef GLboolean (GL_APIENTRYP PFNGLISENABLEDPROC) (GLenum cap);
+typedef GLboolean (GL_APIENTRYP PFNGLISFRAMEBUFFERPROC) (GLuint framebuffer);
+typedef GLboolean (GL_APIENTRYP PFNGLISPROGRAMPROC) (GLuint program);
+typedef GLboolean (GL_APIENTRYP PFNGLISRENDERBUFFERPROC) (GLuint renderbuffer);
+typedef GLboolean (GL_APIENTRYP PFNGLISSHADERPROC) (GLuint shader);
+typedef GLboolean (GL_APIENTRYP PFNGLISTEXTUREPROC) (GLuint texture);
+typedef void (GL_APIENTRYP PFNGLLINEWIDTHPROC) (GLfloat width);
+typedef void (GL_APIENTRYP PFNGLLINKPROGRAMPROC) (GLuint program);
+typedef void (GL_APIENTRYP PFNGLPIXELSTOREIPROC) (GLenum pname, GLint param);
+typedef void (GL_APIENTRYP PFNGLPOLYGONOFFSETPROC) (GLfloat factor, GLfloat units);
+typedef void (GL_APIENTRYP PFNGLREADPIXELSPROC) (GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, void *pixels);
+typedef void (GL_APIENTRYP PFNGLRELEASESHADERCOMPILERPROC) (void);
+typedef void (GL_APIENTRYP PFNGLRENDERBUFFERSTORAGEPROC) (GLenum target, GLenum internalformat, GLsizei width, GLsizei height);
+typedef void (GL_APIENTRYP PFNGLSAMPLECOVERAGEPROC) (GLfloat value, GLboolean invert);
+typedef void (GL_APIENTRYP PFNGLSCISSORPROC) (GLint x, GLint y, GLsizei width, GLsizei height);
+typedef void (GL_APIENTRYP PFNGLSHADERBINARYPROC) (GLsizei count, const GLuint *shaders, GLenum binaryformat, const void *binary, GLsizei length);
+typedef void (GL_APIENTRYP PFNGLSHADERSOURCEPROC) (GLuint shader, GLsizei count, const GLchar *const*string, const GLint *length);
+typedef void (GL_APIENTRYP PFNGLSTENCILFUNCPROC) (GLenum func, GLint ref, GLuint mask);
+typedef void (GL_APIENTRYP PFNGLSTENCILFUNCSEPARATEPROC) (GLenum face, GLenum func, GLint ref, GLuint mask);
+typedef void (GL_APIENTRYP PFNGLSTENCILMASKPROC) (GLuint mask);
+typedef void (GL_APIENTRYP PFNGLSTENCILMASKSEPARATEPROC) (GLenum face, GLuint mask);
+typedef void (GL_APIENTRYP PFNGLSTENCILOPPROC) (GLenum fail, GLenum zfail, GLenum zpass);
+typedef void (GL_APIENTRYP PFNGLSTENCILOPSEPARATEPROC) (GLenum face, GLenum sfail, GLenum dpfail, GLenum dppass);
+typedef void (GL_APIENTRYP PFNGLTEXIMAGE2DPROC) (GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, const void *pixels);
+typedef void (GL_APIENTRYP PFNGLTEXPARAMETERFPROC) (GLenum target, GLenum pname, GLfloat param);
+typedef void (GL_APIENTRYP PFNGLTEXPARAMETERFVPROC) (GLenum target, GLenum pname, const GLfloat *params);
+typedef void (GL_APIENTRYP PFNGLTEXPARAMETERIPROC) (GLenum target, GLenum pname, GLint param);
+typedef void (GL_APIENTRYP PFNGLTEXPARAMETERIVPROC) (GLenum target, GLenum pname, const GLint *params);
+typedef void (GL_APIENTRYP PFNGLTEXSUBIMAGE2DPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, const void *pixels);
+typedef void (GL_APIENTRYP PFNGLUNIFORM1FPROC) (GLint location, GLfloat v0);
+typedef void (GL_APIENTRYP PFNGLUNIFORM1FVPROC) (GLint location, GLsizei count, const GLfloat *value);
+typedef void (GL_APIENTRYP PFNGLUNIFORM1IPROC) (GLint location, GLint v0);
+typedef void (GL_APIENTRYP PFNGLUNIFORM1IVPROC) (GLint location, GLsizei count, const GLint *value);
+typedef void (GL_APIENTRYP PFNGLUNIFORM2FPROC) (GLint location, GLfloat v0, GLfloat v1);
+typedef void (GL_APIENTRYP PFNGLUNIFORM2FVPROC) (GLint location, GLsizei count, const GLfloat *value);
+typedef void (GL_APIENTRYP PFNGLUNIFORM2IPROC) (GLint location, GLint v0, GLint v1);
+typedef void (GL_APIENTRYP PFNGLUNIFORM2IVPROC) (GLint location, GLsizei count, const GLint *value);
+typedef void (GL_APIENTRYP PFNGLUNIFORM3FPROC) (GLint location, GLfloat v0, GLfloat v1, GLfloat v2);
+typedef void (GL_APIENTRYP PFNGLUNIFORM3FVPROC) (GLint location, GLsizei count, const GLfloat *value);
+typedef void (GL_APIENTRYP PFNGLUNIFORM3IPROC) (GLint location, GLint v0, GLint v1, GLint v2);
+typedef void (GL_APIENTRYP PFNGLUNIFORM3IVPROC) (GLint location, GLsizei count, const GLint *value);
+typedef void (GL_APIENTRYP PFNGLUNIFORM4FPROC) (GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3);
+typedef void (GL_APIENTRYP PFNGLUNIFORM4FVPROC) (GLint location, GLsizei count, const GLfloat *value);
+typedef void (GL_APIENTRYP PFNGLUNIFORM4IPROC) (GLint location, GLint v0, GLint v1, GLint v2, GLint v3);
+typedef void (GL_APIENTRYP PFNGLUNIFORM4IVPROC) (GLint location, GLsizei count, const GLint *value);
+typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX2FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX3FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX4FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+typedef void (GL_APIENTRYP PFNGLUSEPROGRAMPROC) (GLuint program);
+typedef void (GL_APIENTRYP PFNGLVALIDATEPROGRAMPROC) (GLuint program);
+typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB1FPROC) (GLuint index, GLfloat x);
+typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB1FVPROC) (GLuint index, const GLfloat *v);
+typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB2FPROC) (GLuint index, GLfloat x, GLfloat y);
+typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB2FVPROC) (GLuint index, const GLfloat *v);
+typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB3FPROC) (GLuint index, GLfloat x, GLfloat y, GLfloat z);
+typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB3FVPROC) (GLuint index, const GLfloat *v);
+typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB4FPROC) (GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w);
+typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB4FVPROC) (GLuint index, const GLfloat *v);
+typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBPOINTERPROC) (GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, const void *pointer);
+typedef void (GL_APIENTRYP PFNGLVIEWPORTPROC) (GLint x, GLint y, GLsizei width, GLsizei height);
+#if GL_GLES_PROTOTYPES
+GL_APICALL void GL_APIENTRY glActiveTexture (GLenum texture);
+GL_APICALL void GL_APIENTRY glAttachShader (GLuint program, GLuint shader);
+GL_APICALL void GL_APIENTRY glBindAttribLocation (GLuint program, GLuint index, const GLchar *name);
+GL_APICALL void GL_APIENTRY glBindBuffer (GLenum target, GLuint buffer);
+GL_APICALL void GL_APIENTRY glBindFramebuffer (GLenum target, GLuint framebuffer);
+GL_APICALL void GL_APIENTRY glBindRenderbuffer (GLenum target, GLuint renderbuffer);
+GL_APICALL void GL_APIENTRY glBindTexture (GLenum target, GLuint texture);
+GL_APICALL void GL_APIENTRY glBlendColor (GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha);
+GL_APICALL void GL_APIENTRY glBlendEquation (GLenum mode);
+GL_APICALL void GL_APIENTRY glBlendEquationSeparate (GLenum modeRGB, GLenum modeAlpha);
+GL_APICALL void GL_APIENTRY glBlendFunc (GLenum sfactor, GLenum dfactor);
+GL_APICALL void GL_APIENTRY glBlendFuncSeparate (GLenum sfactorRGB, GLenum dfactorRGB, GLenum sfactorAlpha, GLenum dfactorAlpha);
+GL_APICALL void GL_APIENTRY glBufferData (GLenum target, GLsizeiptr size, const void *data, GLenum usage);
+GL_APICALL void GL_APIENTRY glBufferSubData (GLenum target, GLintptr offset, GLsizeiptr size, const void *data);
+GL_APICALL GLenum GL_APIENTRY glCheckFramebufferStatus (GLenum target);
+GL_APICALL void GL_APIENTRY glClear (GLbitfield mask);
+GL_APICALL void GL_APIENTRY glClearColor (GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha);
+GL_APICALL void GL_APIENTRY glClearDepthf (GLfloat d);
+GL_APICALL void GL_APIENTRY glClearStencil (GLint s);
+GL_APICALL void GL_APIENTRY glColorMask (GLboolean red, GLboolean green, GLboolean blue, GLboolean alpha);
+GL_APICALL void GL_APIENTRY glCompileShader (GLuint shader);
+GL_APICALL void GL_APIENTRY glCompressedTexImage2D (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLsizei imageSize, const void *data);
+GL_APICALL void GL_APIENTRY glCompressedTexSubImage2D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, const void *data);
+GL_APICALL void GL_APIENTRY glCopyTexImage2D (GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLsizei height, GLint border);
+GL_APICALL void GL_APIENTRY glCopyTexSubImage2D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height);
+GL_APICALL GLuint GL_APIENTRY glCreateProgram (void);
+GL_APICALL GLuint GL_APIENTRY glCreateShader (GLenum type);
+GL_APICALL void GL_APIENTRY glCullFace (GLenum mode);
+GL_APICALL void GL_APIENTRY glDeleteBuffers (GLsizei n, const GLuint *buffers);
+GL_APICALL void GL_APIENTRY glDeleteFramebuffers (GLsizei n, const GLuint *framebuffers);
+GL_APICALL void GL_APIENTRY glDeleteProgram (GLuint program);
+GL_APICALL void GL_APIENTRY glDeleteRenderbuffers (GLsizei n, const GLuint *renderbuffers);
+GL_APICALL void GL_APIENTRY glDeleteShader (GLuint shader);
+GL_APICALL void GL_APIENTRY glDeleteTextures (GLsizei n, const GLuint *textures);
+GL_APICALL void GL_APIENTRY glDepthFunc (GLenum func);
+GL_APICALL void GL_APIENTRY glDepthMask (GLboolean flag);
+GL_APICALL void GL_APIENTRY glDepthRangef (GLfloat n, GLfloat f);
+GL_APICALL void GL_APIENTRY glDetachShader (GLuint program, GLuint shader);
+GL_APICALL void GL_APIENTRY glDisable (GLenum cap);
+GL_APICALL void GL_APIENTRY glDisableVertexAttribArray (GLuint index);
+GL_APICALL void GL_APIENTRY glDrawArrays (GLenum mode, GLint first, GLsizei count);
+GL_APICALL void GL_APIENTRY glDrawElements (GLenum mode, GLsizei count, GLenum type, const void *indices);
+GL_APICALL void GL_APIENTRY glEnable (GLenum cap);
+GL_APICALL void GL_APIENTRY glEnableVertexAttribArray (GLuint index);
+GL_APICALL void GL_APIENTRY glFinish (void);
+GL_APICALL void GL_APIENTRY glFlush (void);
+GL_APICALL void GL_APIENTRY glFramebufferRenderbuffer (GLenum target, GLenum attachment, GLenum renderbuffertarget, GLuint renderbuffer);
+GL_APICALL void GL_APIENTRY glFramebufferTexture2D (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level);
+GL_APICALL void GL_APIENTRY glFrontFace (GLenum mode);
+GL_APICALL void GL_APIENTRY glGenBuffers (GLsizei n, GLuint *buffers);
+GL_APICALL void GL_APIENTRY glGenerateMipmap (GLenum target);
+GL_APICALL void GL_APIENTRY glGenFramebuffers (GLsizei n, GLuint *framebuffers);
+GL_APICALL void GL_APIENTRY glGenRenderbuffers (GLsizei n, GLuint *renderbuffers);
+GL_APICALL void GL_APIENTRY glGenTextures (GLsizei n, GLuint *textures);
+GL_APICALL void GL_APIENTRY glGetActiveAttrib (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLint *size, GLenum *type, GLchar *name);
+GL_APICALL void GL_APIENTRY glGetActiveUniform (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLint *size, GLenum *type, GLchar *name);
+GL_APICALL void GL_APIENTRY glGetAttachedShaders (GLuint program, GLsizei maxCount, GLsizei *count, GLuint *shaders);
+GL_APICALL GLint GL_APIENTRY glGetAttribLocation (GLuint program, const GLchar *name);
+GL_APICALL void GL_APIENTRY glGetBooleanv (GLenum pname, GLboolean *data);
+GL_APICALL void GL_APIENTRY glGetBufferParameteriv (GLenum target, GLenum pname, GLint *params);
+GL_APICALL GLenum GL_APIENTRY glGetError (void);
+GL_APICALL void GL_APIENTRY glGetFloatv (GLenum pname, GLfloat *data);
+GL_APICALL void GL_APIENTRY glGetFramebufferAttachmentParameteriv (GLenum target, GLenum attachment, GLenum pname, GLint *params);
+GL_APICALL void GL_APIENTRY glGetIntegerv (GLenum pname, GLint *data);
+GL_APICALL void GL_APIENTRY glGetProgramiv (GLuint program, GLenum pname, GLint *params);
+GL_APICALL void GL_APIENTRY glGetProgramInfoLog (GLuint program, GLsizei bufSize, GLsizei *length, GLchar *infoLog);
+GL_APICALL void GL_APIENTRY glGetRenderbufferParameteriv (GLenum target, GLenum pname, GLint *params);
+GL_APICALL void GL_APIENTRY glGetShaderiv (GLuint shader, GLenum pname, GLint *params);
+GL_APICALL void GL_APIENTRY glGetShaderInfoLog (GLuint shader, GLsizei bufSize, GLsizei *length, GLchar *infoLog);
+GL_APICALL void GL_APIENTRY glGetShaderPrecisionFormat (GLenum shadertype, GLenum precisiontype, GLint *range, GLint *precision);
+GL_APICALL void GL_APIENTRY glGetShaderSource (GLuint shader, GLsizei bufSize, GLsizei *length, GLchar *source);
+GL_APICALL const GLubyte *GL_APIENTRY glGetString (GLenum name);
+GL_APICALL void GL_APIENTRY glGetTexParameterfv (GLenum target, GLenum pname, GLfloat *params);
+GL_APICALL void GL_APIENTRY glGetTexParameteriv (GLenum target, GLenum pname, GLint *params);
+GL_APICALL void GL_APIENTRY glGetUniformfv (GLuint program, GLint location, GLfloat *params);
+GL_APICALL void GL_APIENTRY glGetUniformiv (GLuint program, GLint location, GLint *params);
+GL_APICALL GLint GL_APIENTRY glGetUniformLocation (GLuint program, const GLchar *name);
+GL_APICALL void GL_APIENTRY glGetVertexAttribfv (GLuint index, GLenum pname, GLfloat *params);
+GL_APICALL void GL_APIENTRY glGetVertexAttribiv (GLuint index, GLenum pname, GLint *params);
+GL_APICALL void GL_APIENTRY glGetVertexAttribPointerv (GLuint index, GLenum pname, void **pointer);
+GL_APICALL void GL_APIENTRY glHint (GLenum target, GLenum mode);
+GL_APICALL GLboolean GL_APIENTRY glIsBuffer (GLuint buffer);
+GL_APICALL GLboolean GL_APIENTRY glIsEnabled (GLenum cap);
+GL_APICALL GLboolean GL_APIENTRY glIsFramebuffer (GLuint framebuffer);
+GL_APICALL GLboolean GL_APIENTRY glIsProgram (GLuint program);
+GL_APICALL GLboolean GL_APIENTRY glIsRenderbuffer (GLuint renderbuffer);
+GL_APICALL GLboolean GL_APIENTRY glIsShader (GLuint shader);
+GL_APICALL GLboolean GL_APIENTRY glIsTexture (GLuint texture);
+GL_APICALL void GL_APIENTRY glLineWidth (GLfloat width);
+GL_APICALL void GL_APIENTRY glLinkProgram (GLuint program);
+GL_APICALL void GL_APIENTRY glPixelStorei (GLenum pname, GLint param);
+GL_APICALL void GL_APIENTRY glPolygonOffset (GLfloat factor, GLfloat units);
+GL_APICALL void GL_APIENTRY glReadPixels (GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, void *pixels);
+GL_APICALL void GL_APIENTRY glReleaseShaderCompiler (void);
+GL_APICALL void GL_APIENTRY glRenderbufferStorage (GLenum target, GLenum internalformat, GLsizei width, GLsizei height);
+GL_APICALL void GL_APIENTRY glSampleCoverage (GLfloat value, GLboolean invert);
+GL_APICALL void GL_APIENTRY glScissor (GLint x, GLint y, GLsizei width, GLsizei height);
+GL_APICALL void GL_APIENTRY glShaderBinary (GLsizei count, const GLuint *shaders, GLenum binaryformat, const void *binary, GLsizei length);
+GL_APICALL void GL_APIENTRY glShaderSource (GLuint shader, GLsizei count, const GLchar *const*string, const GLint *length);
+GL_APICALL void GL_APIENTRY glStencilFunc (GLenum func, GLint ref, GLuint mask);
+GL_APICALL void GL_APIENTRY glStencilFuncSeparate (GLenum face, GLenum func, GLint ref, GLuint mask);
+GL_APICALL void GL_APIENTRY glStencilMask (GLuint mask);
+GL_APICALL void GL_APIENTRY glStencilMaskSeparate (GLenum face, GLuint mask);
+GL_APICALL void GL_APIENTRY glStencilOp (GLenum fail, GLenum zfail, GLenum zpass);
+GL_APICALL void GL_APIENTRY glStencilOpSeparate (GLenum face, GLenum sfail, GLenum dpfail, GLenum dppass);
+GL_APICALL void GL_APIENTRY glTexImage2D (GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, const void *pixels);
+GL_APICALL void GL_APIENTRY glTexParameterf (GLenum target, GLenum pname, GLfloat param);
+GL_APICALL void GL_APIENTRY glTexParameterfv (GLenum target, GLenum pname, const GLfloat *params);
+GL_APICALL void GL_APIENTRY glTexParameteri (GLenum target, GLenum pname, GLint param);
+GL_APICALL void GL_APIENTRY glTexParameteriv (GLenum target, GLenum pname, const GLint *params);
+GL_APICALL void GL_APIENTRY glTexSubImage2D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, const void *pixels);
+GL_APICALL void GL_APIENTRY glUniform1f (GLint location, GLfloat v0);
+GL_APICALL void GL_APIENTRY glUniform1fv (GLint location, GLsizei count, const GLfloat *value);
+GL_APICALL void GL_APIENTRY glUniform1i (GLint location, GLint v0);
+GL_APICALL void GL_APIENTRY glUniform1iv (GLint location, GLsizei count, const GLint *value);
+GL_APICALL void GL_APIENTRY glUniform2f (GLint location, GLfloat v0, GLfloat v1);
+GL_APICALL void GL_APIENTRY glUniform2fv (GLint location, GLsizei count, const GLfloat *value);
+GL_APICALL void GL_APIENTRY glUniform2i (GLint location, GLint v0, GLint v1);
+GL_APICALL void GL_APIENTRY glUniform2iv (GLint location, GLsizei count, const GLint *value);
+GL_APICALL void GL_APIENTRY glUniform3f (GLint location, GLfloat v0, GLfloat v1, GLfloat v2);
+GL_APICALL void GL_APIENTRY glUniform3fv (GLint location, GLsizei count, const GLfloat *value);
+GL_APICALL void GL_APIENTRY glUniform3i (GLint location, GLint v0, GLint v1, GLint v2);
+GL_APICALL void GL_APIENTRY glUniform3iv (GLint location, GLsizei count, const GLint *value);
+GL_APICALL void GL_APIENTRY glUniform4f (GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3);
+GL_APICALL void GL_APIENTRY glUniform4fv (GLint location, GLsizei count, const GLfloat *value);
+GL_APICALL void GL_APIENTRY glUniform4i (GLint location, GLint v0, GLint v1, GLint v2, GLint v3);
+GL_APICALL void GL_APIENTRY glUniform4iv (GLint location, GLsizei count, const GLint *value);
+GL_APICALL void GL_APIENTRY glUniformMatrix2fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GL_APICALL void GL_APIENTRY glUniformMatrix3fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GL_APICALL void GL_APIENTRY glUniformMatrix4fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GL_APICALL void GL_APIENTRY glUseProgram (GLuint program);
+GL_APICALL void GL_APIENTRY glValidateProgram (GLuint program);
+GL_APICALL void GL_APIENTRY glVertexAttrib1f (GLuint index, GLfloat x);
+GL_APICALL void GL_APIENTRY glVertexAttrib1fv (GLuint index, const GLfloat *v);
+GL_APICALL void GL_APIENTRY glVertexAttrib2f (GLuint index, GLfloat x, GLfloat y);
+GL_APICALL void GL_APIENTRY glVertexAttrib2fv (GLuint index, const GLfloat *v);
+GL_APICALL void GL_APIENTRY glVertexAttrib3f (GLuint index, GLfloat x, GLfloat y, GLfloat z);
+GL_APICALL void GL_APIENTRY glVertexAttrib3fv (GLuint index, const GLfloat *v);
+GL_APICALL void GL_APIENTRY glVertexAttrib4f (GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w);
+GL_APICALL void GL_APIENTRY glVertexAttrib4fv (GLuint index, const GLfloat *v);
+GL_APICALL void GL_APIENTRY glVertexAttribPointer (GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, const void *pointer);
+GL_APICALL void GL_APIENTRY glViewport (GLint x, GLint y, GLsizei width, GLsizei height);
+#endif
+#endif /* GL_ES_VERSION_2_0 */
+
+#ifndef GL_ES_VERSION_3_0
+#define GL_ES_VERSION_3_0 1
+typedef unsigned short GLhalf;
+#define GL_READ_BUFFER 0x0C02
+#define GL_UNPACK_ROW_LENGTH 0x0CF2
+#define GL_UNPACK_SKIP_ROWS 0x0CF3
+#define GL_UNPACK_SKIP_PIXELS 0x0CF4
+#define GL_PACK_ROW_LENGTH 0x0D02
+#define GL_PACK_SKIP_ROWS 0x0D03
+#define GL_PACK_SKIP_PIXELS 0x0D04
+#define GL_COLOR 0x1800
+#define GL_DEPTH 0x1801
+#define GL_STENCIL 0x1802
+#define GL_RED 0x1903
+#define GL_RGB8 0x8051
+#define GL_RGBA8 0x8058
+#define GL_RGB10_A2 0x8059
+#define GL_TEXTURE_BINDING_3D 0x806A
+#define GL_UNPACK_SKIP_IMAGES 0x806D
+#define GL_UNPACK_IMAGE_HEIGHT 0x806E
+#define GL_TEXTURE_3D 0x806F
+#define GL_TEXTURE_WRAP_R 0x8072
+#define GL_MAX_3D_TEXTURE_SIZE 0x8073
+#define GL_UNSIGNED_INT_2_10_10_10_REV 0x8368
+#define GL_MAX_ELEMENTS_VERTICES 0x80E8
+#define GL_MAX_ELEMENTS_INDICES 0x80E9
+#define GL_TEXTURE_MIN_LOD 0x813A
+#define GL_TEXTURE_MAX_LOD 0x813B
+#define GL_TEXTURE_BASE_LEVEL 0x813C
+#define GL_TEXTURE_MAX_LEVEL 0x813D
+#define GL_MIN 0x8007
+#define GL_MAX 0x8008
+#define GL_DEPTH_COMPONENT24 0x81A6
+#define GL_MAX_TEXTURE_LOD_BIAS 0x84FD
+#define GL_TEXTURE_COMPARE_MODE 0x884C
+#define GL_TEXTURE_COMPARE_FUNC 0x884D
+#define GL_CURRENT_QUERY 0x8865
+#define GL_QUERY_RESULT 0x8866
+#define GL_QUERY_RESULT_AVAILABLE 0x8867
+#define GL_BUFFER_MAPPED 0x88BC
+#define GL_BUFFER_MAP_POINTER 0x88BD
+#define GL_STREAM_READ 0x88E1
+#define GL_STREAM_COPY 0x88E2
+#define GL_STATIC_READ 0x88E5
+#define GL_STATIC_COPY 0x88E6
+#define GL_DYNAMIC_READ 0x88E9
+#define GL_DYNAMIC_COPY 0x88EA
+#define GL_MAX_DRAW_BUFFERS 0x8824
+#define GL_DRAW_BUFFER0 0x8825
+#define GL_DRAW_BUFFER1 0x8826
+#define GL_DRAW_BUFFER2 0x8827
+#define GL_DRAW_BUFFER3 0x8828
+#define GL_DRAW_BUFFER4 0x8829
+#define GL_DRAW_BUFFER5 0x882A
+#define GL_DRAW_BUFFER6 0x882B
+#define GL_DRAW_BUFFER7 0x882C
+#define GL_DRAW_BUFFER8 0x882D
+#define GL_DRAW_BUFFER9 0x882E
+#define GL_DRAW_BUFFER10 0x882F
+#define GL_DRAW_BUFFER11 0x8830
+#define GL_DRAW_BUFFER12 0x8831
+#define GL_DRAW_BUFFER13 0x8832
+#define GL_DRAW_BUFFER14 0x8833
+#define GL_DRAW_BUFFER15 0x8834
+#define GL_MAX_FRAGMENT_UNIFORM_COMPONENTS 0x8B49
+#define GL_MAX_VERTEX_UNIFORM_COMPONENTS 0x8B4A
+#define GL_SAMPLER_3D 0x8B5F
+#define GL_SAMPLER_2D_SHADOW 0x8B62
+#define GL_FRAGMENT_SHADER_DERIVATIVE_HINT 0x8B8B
+#define GL_PIXEL_PACK_BUFFER 0x88EB
+#define GL_PIXEL_UNPACK_BUFFER 0x88EC
+#define GL_PIXEL_PACK_BUFFER_BINDING 0x88ED
+#define GL_PIXEL_UNPACK_BUFFER_BINDING 0x88EF
+#define GL_FLOAT_MAT2x3 0x8B65
+#define GL_FLOAT_MAT2x4 0x8B66
+#define GL_FLOAT_MAT3x2 0x8B67
+#define GL_FLOAT_MAT3x4 0x8B68
+#define GL_FLOAT_MAT4x2 0x8B69
+#define GL_FLOAT_MAT4x3 0x8B6A
+#define GL_SRGB 0x8C40
+#define GL_SRGB8 0x8C41
+#define GL_SRGB8_ALPHA8 0x8C43
+#define GL_COMPARE_REF_TO_TEXTURE 0x884E
+#define GL_MAJOR_VERSION 0x821B
+#define GL_MINOR_VERSION 0x821C
+#define GL_NUM_EXTENSIONS 0x821D
+#define GL_RGBA32F 0x8814
+#define GL_RGB32F 0x8815
+#define GL_RGBA16F 0x881A
+#define GL_RGB16F 0x881B
+#define GL_VERTEX_ATTRIB_ARRAY_INTEGER 0x88FD
+#define GL_MAX_ARRAY_TEXTURE_LAYERS 0x88FF
+#define GL_MIN_PROGRAM_TEXEL_OFFSET 0x8904
+#define GL_MAX_PROGRAM_TEXEL_OFFSET 0x8905
+#define GL_MAX_VARYING_COMPONENTS 0x8B4B
+#define GL_TEXTURE_2D_ARRAY 0x8C1A
+#define GL_TEXTURE_BINDING_2D_ARRAY 0x8C1D
+#define GL_R11F_G11F_B10F 0x8C3A
+#define GL_UNSIGNED_INT_10F_11F_11F_REV 0x8C3B
+#define GL_RGB9_E5 0x8C3D
+#define GL_UNSIGNED_INT_5_9_9_9_REV 0x8C3E
+#define GL_TRANSFORM_FEEDBACK_VARYING_MAX_LENGTH 0x8C76
+#define GL_TRANSFORM_FEEDBACK_BUFFER_MODE 0x8C7F
+#define GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_COMPONENTS 0x8C80
+#define GL_TRANSFORM_FEEDBACK_VARYINGS 0x8C83
+#define GL_TRANSFORM_FEEDBACK_BUFFER_START 0x8C84
+#define GL_TRANSFORM_FEEDBACK_BUFFER_SIZE 0x8C85
+#define GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN 0x8C88
+#define GL_RASTERIZER_DISCARD 0x8C89
+#define GL_MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS 0x8C8A
+#define GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_ATTRIBS 0x8C8B
+#define GL_INTERLEAVED_ATTRIBS 0x8C8C
+#define GL_SEPARATE_ATTRIBS 0x8C8D
+#define GL_TRANSFORM_FEEDBACK_BUFFER 0x8C8E
+#define GL_TRANSFORM_FEEDBACK_BUFFER_BINDING 0x8C8F
+#define GL_RGBA32UI 0x8D70
+#define GL_RGB32UI 0x8D71
+#define GL_RGBA16UI 0x8D76
+#define GL_RGB16UI 0x8D77
+#define GL_RGBA8UI 0x8D7C
+#define GL_RGB8UI 0x8D7D
+#define GL_RGBA32I 0x8D82
+#define GL_RGB32I 0x8D83
+#define GL_RGBA16I 0x8D88
+#define GL_RGB16I 0x8D89
+#define GL_RGBA8I 0x8D8E
+#define GL_RGB8I 0x8D8F
+#define GL_RED_INTEGER 0x8D94
+#define GL_RGB_INTEGER 0x8D98
+#define GL_RGBA_INTEGER 0x8D99
+#define GL_SAMPLER_2D_ARRAY 0x8DC1
+#define GL_SAMPLER_2D_ARRAY_SHADOW 0x8DC4
+#define GL_SAMPLER_CUBE_SHADOW 0x8DC5
+#define GL_UNSIGNED_INT_VEC2 0x8DC6
+#define GL_UNSIGNED_INT_VEC3 0x8DC7
+#define GL_UNSIGNED_INT_VEC4 0x8DC8
+#define GL_INT_SAMPLER_2D 0x8DCA
+#define GL_INT_SAMPLER_3D 0x8DCB
+#define GL_INT_SAMPLER_CUBE 0x8DCC
+#define GL_INT_SAMPLER_2D_ARRAY 0x8DCF
+#define GL_UNSIGNED_INT_SAMPLER_2D 0x8DD2
+#define GL_UNSIGNED_INT_SAMPLER_3D 0x8DD3
+#define GL_UNSIGNED_INT_SAMPLER_CUBE 0x8DD4
+#define GL_UNSIGNED_INT_SAMPLER_2D_ARRAY 0x8DD7
+#define GL_BUFFER_ACCESS_FLAGS 0x911F
+#define GL_BUFFER_MAP_LENGTH 0x9120
+#define GL_BUFFER_MAP_OFFSET 0x9121
+#define GL_DEPTH_COMPONENT32F 0x8CAC
+#define GL_DEPTH32F_STENCIL8 0x8CAD
+#define GL_FLOAT_32_UNSIGNED_INT_24_8_REV 0x8DAD
+#define GL_FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING 0x8210
+#define GL_FRAMEBUFFER_ATTACHMENT_COMPONENT_TYPE 0x8211
+#define GL_FRAMEBUFFER_ATTACHMENT_RED_SIZE 0x8212
+#define GL_FRAMEBUFFER_ATTACHMENT_GREEN_SIZE 0x8213
+#define GL_FRAMEBUFFER_ATTACHMENT_BLUE_SIZE 0x8214
+#define GL_FRAMEBUFFER_ATTACHMENT_ALPHA_SIZE 0x8215
+#define GL_FRAMEBUFFER_ATTACHMENT_DEPTH_SIZE 0x8216
+#define GL_FRAMEBUFFER_ATTACHMENT_STENCIL_SIZE 0x8217
+#define GL_FRAMEBUFFER_DEFAULT 0x8218
+#define GL_FRAMEBUFFER_UNDEFINED 0x8219
+#define GL_DEPTH_STENCIL_ATTACHMENT 0x821A
+#define GL_DEPTH_STENCIL 0x84F9
+#define GL_UNSIGNED_INT_24_8 0x84FA
+#define GL_DEPTH24_STENCIL8 0x88F0
+#define GL_UNSIGNED_NORMALIZED 0x8C17
+#define GL_DRAW_FRAMEBUFFER_BINDING 0x8CA6
+#define GL_READ_FRAMEBUFFER 0x8CA8
+#define GL_DRAW_FRAMEBUFFER 0x8CA9
+#define GL_READ_FRAMEBUFFER_BINDING 0x8CAA
+#define GL_RENDERBUFFER_SAMPLES 0x8CAB
+#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LAYER 0x8CD4
+#define GL_MAX_COLOR_ATTACHMENTS 0x8CDF
+#define GL_COLOR_ATTACHMENT1 0x8CE1
+#define GL_COLOR_ATTACHMENT2 0x8CE2
+#define GL_COLOR_ATTACHMENT3 0x8CE3
+#define GL_COLOR_ATTACHMENT4 0x8CE4
+#define GL_COLOR_ATTACHMENT5 0x8CE5
+#define GL_COLOR_ATTACHMENT6 0x8CE6
+#define GL_COLOR_ATTACHMENT7 0x8CE7
+#define GL_COLOR_ATTACHMENT8 0x8CE8
+#define GL_COLOR_ATTACHMENT9 0x8CE9
+#define GL_COLOR_ATTACHMENT10 0x8CEA
+#define GL_COLOR_ATTACHMENT11 0x8CEB
+#define GL_COLOR_ATTACHMENT12 0x8CEC
+#define GL_COLOR_ATTACHMENT13 0x8CED
+#define GL_COLOR_ATTACHMENT14 0x8CEE
+#define GL_COLOR_ATTACHMENT15 0x8CEF
+#define GL_COLOR_ATTACHMENT16 0x8CF0
+#define GL_COLOR_ATTACHMENT17 0x8CF1
+#define GL_COLOR_ATTACHMENT18 0x8CF2
+#define GL_COLOR_ATTACHMENT19 0x8CF3
+#define GL_COLOR_ATTACHMENT20 0x8CF4
+#define GL_COLOR_ATTACHMENT21 0x8CF5
+#define GL_COLOR_ATTACHMENT22 0x8CF6
+#define GL_COLOR_ATTACHMENT23 0x8CF7
+#define GL_COLOR_ATTACHMENT24 0x8CF8
+#define GL_COLOR_ATTACHMENT25 0x8CF9
+#define GL_COLOR_ATTACHMENT26 0x8CFA
+#define GL_COLOR_ATTACHMENT27 0x8CFB
+#define GL_COLOR_ATTACHMENT28 0x8CFC
+#define GL_COLOR_ATTACHMENT29 0x8CFD
+#define GL_COLOR_ATTACHMENT30 0x8CFE
+#define GL_COLOR_ATTACHMENT31 0x8CFF
+#define GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE 0x8D56
+#define GL_MAX_SAMPLES 0x8D57
+#define GL_HALF_FLOAT 0x140B
+#define GL_MAP_READ_BIT 0x0001
+#define GL_MAP_WRITE_BIT 0x0002
+#define GL_MAP_INVALIDATE_RANGE_BIT 0x0004
+#define GL_MAP_INVALIDATE_BUFFER_BIT 0x0008
+#define GL_MAP_FLUSH_EXPLICIT_BIT 0x0010
+#define GL_MAP_UNSYNCHRONIZED_BIT 0x0020
+#define GL_RG 0x8227
+#define GL_RG_INTEGER 0x8228
+#define GL_R8 0x8229
+#define GL_RG8 0x822B
+#define GL_R16F 0x822D
+#define GL_R32F 0x822E
+#define GL_RG16F 0x822F
+#define GL_RG32F 0x8230
+#define GL_R8I 0x8231
+#define GL_R8UI 0x8232
+#define GL_R16I 0x8233
+#define GL_R16UI 0x8234
+#define GL_R32I 0x8235
+#define GL_R32UI 0x8236
+#define GL_RG8I 0x8237
+#define GL_RG8UI 0x8238
+#define GL_RG16I 0x8239
+#define GL_RG16UI 0x823A
+#define GL_RG32I 0x823B
+#define GL_RG32UI 0x823C
+#define GL_VERTEX_ARRAY_BINDING 0x85B5
+#define GL_R8_SNORM 0x8F94
+#define GL_RG8_SNORM 0x8F95
+#define GL_RGB8_SNORM 0x8F96
+#define GL_RGBA8_SNORM 0x8F97
+#define GL_SIGNED_NORMALIZED 0x8F9C
+#define GL_PRIMITIVE_RESTART_FIXED_INDEX 0x8D69
+#define GL_COPY_READ_BUFFER 0x8F36
+#define GL_COPY_WRITE_BUFFER 0x8F37
+#define GL_COPY_READ_BUFFER_BINDING 0x8F36
+#define GL_COPY_WRITE_BUFFER_BINDING 0x8F37
+#define GL_UNIFORM_BUFFER 0x8A11
+#define GL_UNIFORM_BUFFER_BINDING 0x8A28
+#define GL_UNIFORM_BUFFER_START 0x8A29
+#define GL_UNIFORM_BUFFER_SIZE 0x8A2A
+#define GL_MAX_VERTEX_UNIFORM_BLOCKS 0x8A2B
+#define GL_MAX_FRAGMENT_UNIFORM_BLOCKS 0x8A2D
+#define GL_MAX_COMBINED_UNIFORM_BLOCKS 0x8A2E
+#define GL_MAX_UNIFORM_BUFFER_BINDINGS 0x8A2F
+#define GL_MAX_UNIFORM_BLOCK_SIZE 0x8A30
+#define GL_MAX_COMBINED_VERTEX_UNIFORM_COMPONENTS 0x8A31
+#define GL_MAX_COMBINED_FRAGMENT_UNIFORM_COMPONENTS 0x8A33
+#define GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT 0x8A34
+#define GL_ACTIVE_UNIFORM_BLOCK_MAX_NAME_LENGTH 0x8A35
+#define GL_ACTIVE_UNIFORM_BLOCKS 0x8A36
+#define GL_UNIFORM_TYPE 0x8A37
+#define GL_UNIFORM_SIZE 0x8A38
+#define GL_UNIFORM_NAME_LENGTH 0x8A39
+#define GL_UNIFORM_BLOCK_INDEX 0x8A3A
+#define GL_UNIFORM_OFFSET 0x8A3B
+#define GL_UNIFORM_ARRAY_STRIDE 0x8A3C
+#define GL_UNIFORM_MATRIX_STRIDE 0x8A3D
+#define GL_UNIFORM_IS_ROW_MAJOR 0x8A3E
+#define GL_UNIFORM_BLOCK_BINDING 0x8A3F
+#define GL_UNIFORM_BLOCK_DATA_SIZE 0x8A40
+#define GL_UNIFORM_BLOCK_NAME_LENGTH 0x8A41
+#define GL_UNIFORM_BLOCK_ACTIVE_UNIFORMS 0x8A42
+#define GL_UNIFORM_BLOCK_ACTIVE_UNIFORM_INDICES 0x8A43
+#define GL_UNIFORM_BLOCK_REFERENCED_BY_VERTEX_SHADER 0x8A44
+#define GL_UNIFORM_BLOCK_REFERENCED_BY_FRAGMENT_SHADER 0x8A46
+#define GL_INVALID_INDEX 0xFFFFFFFFu
+#define GL_MAX_VERTEX_OUTPUT_COMPONENTS 0x9122
+#define GL_MAX_FRAGMENT_INPUT_COMPONENTS 0x9125
+#define GL_MAX_SERVER_WAIT_TIMEOUT 0x9111
+#define GL_OBJECT_TYPE 0x9112
+#define GL_SYNC_CONDITION 0x9113
+#define GL_SYNC_STATUS 0x9114
+#define GL_SYNC_FLAGS 0x9115
+#define GL_SYNC_FENCE 0x9116
+#define GL_SYNC_GPU_COMMANDS_COMPLETE 0x9117
+#define GL_UNSIGNALED 0x9118
+#define GL_SIGNALED 0x9119
+#define GL_ALREADY_SIGNALED 0x911A
+#define GL_TIMEOUT_EXPIRED 0x911B
+#define GL_CONDITION_SATISFIED 0x911C
+#define GL_WAIT_FAILED 0x911D
+#define GL_SYNC_FLUSH_COMMANDS_BIT 0x00000001
+#define GL_TIMEOUT_IGNORED 0xFFFFFFFFFFFFFFFFull
+#define GL_VERTEX_ATTRIB_ARRAY_DIVISOR 0x88FE
+#define GL_ANY_SAMPLES_PASSED 0x8C2F
+#define GL_ANY_SAMPLES_PASSED_CONSERVATIVE 0x8D6A
+#define GL_SAMPLER_BINDING 0x8919
+#define GL_RGB10_A2UI 0x906F
+#define GL_TEXTURE_SWIZZLE_R 0x8E42
+#define GL_TEXTURE_SWIZZLE_G 0x8E43
+#define GL_TEXTURE_SWIZZLE_B 0x8E44
+#define GL_TEXTURE_SWIZZLE_A 0x8E45
+#define GL_GREEN 0x1904
+#define GL_BLUE 0x1905
+#define GL_INT_2_10_10_10_REV 0x8D9F
+#define GL_TRANSFORM_FEEDBACK 0x8E22
+#define GL_TRANSFORM_FEEDBACK_PAUSED 0x8E23
+#define GL_TRANSFORM_FEEDBACK_ACTIVE 0x8E24
+#define GL_TRANSFORM_FEEDBACK_BINDING 0x8E25
+#define GL_PROGRAM_BINARY_RETRIEVABLE_HINT 0x8257
+#define GL_PROGRAM_BINARY_LENGTH 0x8741
+#define GL_NUM_PROGRAM_BINARY_FORMATS 0x87FE
+#define GL_PROGRAM_BINARY_FORMATS 0x87FF
+#define GL_COMPRESSED_R11_EAC 0x9270
+#define GL_COMPRESSED_SIGNED_R11_EAC 0x9271
+#define GL_COMPRESSED_RG11_EAC 0x9272
+#define GL_COMPRESSED_SIGNED_RG11_EAC 0x9273
+#define GL_COMPRESSED_RGB8_ETC2 0x9274
+#define GL_COMPRESSED_SRGB8_ETC2 0x9275
+#define GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2 0x9276
+#define GL_COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2 0x9277
+#define GL_COMPRESSED_RGBA8_ETC2_EAC 0x9278
+#define GL_COMPRESSED_SRGB8_ALPHA8_ETC2_EAC 0x9279
+#define GL_TEXTURE_IMMUTABLE_FORMAT 0x912F
+#define GL_MAX_ELEMENT_INDEX 0x8D6B
+#define GL_NUM_SAMPLE_COUNTS 0x9380
+#define GL_TEXTURE_IMMUTABLE_LEVELS 0x82DF
+typedef void (GL_APIENTRYP PFNGLREADBUFFERPROC) (GLenum src);
+typedef void (GL_APIENTRYP PFNGLDRAWRANGEELEMENTSPROC) (GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum type, const void *indices);
+typedef void (GL_APIENTRYP PFNGLTEXIMAGE3DPROC) (GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const void *pixels);
+typedef void (GL_APIENTRYP PFNGLTEXSUBIMAGE3DPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const void *pixels);
+typedef void (GL_APIENTRYP PFNGLCOPYTEXSUBIMAGE3DPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLint x, GLint y, GLsizei width, GLsizei height);
+typedef void (GL_APIENTRYP PFNGLCOMPRESSEDTEXIMAGE3DPROC) (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLsizei imageSize, const void *data);
+typedef void (GL_APIENTRYP PFNGLCOMPRESSEDTEXSUBIMAGE3DPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLsizei imageSize, const void *data);
+typedef void (GL_APIENTRYP PFNGLGENQUERIESPROC) (GLsizei n, GLuint *ids);
+typedef void (GL_APIENTRYP PFNGLDELETEQUERIESPROC) (GLsizei n, const GLuint *ids);
+typedef GLboolean (GL_APIENTRYP PFNGLISQUERYPROC) (GLuint id);
+typedef void (GL_APIENTRYP PFNGLBEGINQUERYPROC) (GLenum target, GLuint id);
+typedef void (GL_APIENTRYP PFNGLENDQUERYPROC) (GLenum target);
+typedef void (GL_APIENTRYP PFNGLGETQUERYIVPROC) (GLenum target, GLenum pname, GLint *params);
+typedef void (GL_APIENTRYP PFNGLGETQUERYOBJECTUIVPROC) (GLuint id, GLenum pname, GLuint *params);
+typedef GLboolean (GL_APIENTRYP PFNGLUNMAPBUFFERPROC) (GLenum target);
+typedef void (GL_APIENTRYP PFNGLGETBUFFERPOINTERVPROC) (GLenum target, GLenum pname, void **params);
+typedef void (GL_APIENTRYP PFNGLDRAWBUFFERSPROC) (GLsizei n, const GLenum *bufs);
+typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX2X3FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX3X2FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX2X4FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX4X2FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX3X4FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX4X3FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+typedef void (GL_APIENTRYP PFNGLBLITFRAMEBUFFERPROC) (GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter);
+typedef void (GL_APIENTRYP PFNGLRENDERBUFFERSTORAGEMULTISAMPLEPROC) (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height);
+typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERTEXTURELAYERPROC) (GLenum target, GLenum attachment, GLuint texture, GLint level, GLint layer);
+typedef void *(GL_APIENTRYP PFNGLMAPBUFFERRANGEPROC) (GLenum target, GLintptr offset, GLsizeiptr length, GLbitfield access);
+typedef void (GL_APIENTRYP PFNGLFLUSHMAPPEDBUFFERRANGEPROC) (GLenum target, GLintptr offset, GLsizeiptr length);
+typedef void (GL_APIENTRYP PFNGLBINDVERTEXARRAYPROC) (GLuint array);
+typedef void (GL_APIENTRYP PFNGLDELETEVERTEXARRAYSPROC) (GLsizei n, const GLuint *arrays);
+typedef void (GL_APIENTRYP PFNGLGENVERTEXARRAYSPROC) (GLsizei n, GLuint *arrays);
+typedef GLboolean (GL_APIENTRYP PFNGLISVERTEXARRAYPROC) (GLuint array);
+typedef void (GL_APIENTRYP PFNGLGETINTEGERI_VPROC) (GLenum target, GLuint index, GLint *data);
+typedef void (GL_APIENTRYP PFNGLBEGINTRANSFORMFEEDBACKPROC) (GLenum primitiveMode);
+typedef void (GL_APIENTRYP PFNGLENDTRANSFORMFEEDBACKPROC) (void);
+typedef void (GL_APIENTRYP PFNGLBINDBUFFERRANGEPROC) (GLenum target, GLuint index, GLuint buffer, GLintptr offset, GLsizeiptr size);
+typedef void (GL_APIENTRYP PFNGLBINDBUFFERBASEPROC) (GLenum target, GLuint index, GLuint buffer);
+typedef void (GL_APIENTRYP PFNGLTRANSFORMFEEDBACKVARYINGSPROC) (GLuint program, GLsizei count, const GLchar *const*varyings, GLenum bufferMode);
+typedef void (GL_APIENTRYP PFNGLGETTRANSFORMFEEDBACKVARYINGPROC) (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLsizei *size, GLenum *type, GLchar *name);
+typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBIPOINTERPROC) (GLuint index, GLint size, GLenum type, GLsizei stride, const void *pointer);
+typedef void (GL_APIENTRYP PFNGLGETVERTEXATTRIBIIVPROC) (GLuint index, GLenum pname, GLint *params);
+typedef void (GL_APIENTRYP PFNGLGETVERTEXATTRIBIUIVPROC) (GLuint index, GLenum pname, GLuint *params);
+typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBI4IPROC) (GLuint index, GLint x, GLint y, GLint z, GLint w);
+typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBI4UIPROC) (GLuint index, GLuint x, GLuint y, GLuint z, GLuint w);
+typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBI4IVPROC) (GLuint index, const GLint *v);
+typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBI4UIVPROC) (GLuint index, const GLuint *v);
+typedef void (GL_APIENTRYP PFNGLGETUNIFORMUIVPROC) (GLuint program, GLint location, GLuint *params);
+typedef GLint (GL_APIENTRYP PFNGLGETFRAGDATALOCATIONPROC) (GLuint program, const GLchar *name);
+typedef void (GL_APIENTRYP PFNGLUNIFORM1UIPROC) (GLint location, GLuint v0);
+typedef void (GL_APIENTRYP PFNGLUNIFORM2UIPROC) (GLint location, GLuint v0, GLuint v1);
+typedef void (GL_APIENTRYP PFNGLUNIFORM3UIPROC) (GLint location, GLuint v0, GLuint v1, GLuint v2);
+typedef void (GL_APIENTRYP PFNGLUNIFORM4UIPROC) (GLint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3);
+typedef void (GL_APIENTRYP PFNGLUNIFORM1UIVPROC) (GLint location, GLsizei count, const GLuint *value);
+typedef void (GL_APIENTRYP PFNGLUNIFORM2UIVPROC) (GLint location, GLsizei count, const GLuint *value);
+typedef void (GL_APIENTRYP PFNGLUNIFORM3UIVPROC) (GLint location, GLsizei count, const GLuint *value);
+typedef void (GL_APIENTRYP PFNGLUNIFORM4UIVPROC) (GLint location, GLsizei count, const GLuint *value);
+typedef void (GL_APIENTRYP PFNGLCLEARBUFFERIVPROC) (GLenum buffer, GLint drawbuffer, const GLint *value);
+typedef void (GL_APIENTRYP PFNGLCLEARBUFFERUIVPROC) (GLenum buffer, GLint drawbuffer, const GLuint *value);
+typedef void (GL_APIENTRYP PFNGLCLEARBUFFERFVPROC) (GLenum buffer, GLint drawbuffer, const GLfloat *value);
+typedef void (GL_APIENTRYP PFNGLCLEARBUFFERFIPROC) (GLenum buffer, GLint drawbuffer, GLfloat depth, GLint stencil);
+typedef const GLubyte *(GL_APIENTRYP PFNGLGETSTRINGIPROC) (GLenum name, GLuint index);
+typedef void (GL_APIENTRYP PFNGLCOPYBUFFERSUBDATAPROC) (GLenum readTarget, GLenum writeTarget, GLintptr readOffset, GLintptr writeOffset, GLsizeiptr size);
+typedef void (GL_APIENTRYP PFNGLGETUNIFORMINDICESPROC) (GLuint program, GLsizei uniformCount, const GLchar *const*uniformNames, GLuint *uniformIndices);
+typedef void (GL_APIENTRYP PFNGLGETACTIVEUNIFORMSIVPROC) (GLuint program, GLsizei uniformCount, const GLuint *uniformIndices, GLenum pname, GLint *params);
+typedef GLuint (GL_APIENTRYP PFNGLGETUNIFORMBLOCKINDEXPROC) (GLuint program, const GLchar *uniformBlockName);
+typedef void (GL_APIENTRYP PFNGLGETACTIVEUNIFORMBLOCKIVPROC) (GLuint program, GLuint uniformBlockIndex, GLenum pname, GLint *params);
+typedef void (GL_APIENTRYP PFNGLGETACTIVEUNIFORMBLOCKNAMEPROC) (GLuint program, GLuint uniformBlockIndex, GLsizei bufSize, GLsizei *length, GLchar *uniformBlockName);
+typedef void (GL_APIENTRYP PFNGLUNIFORMBLOCKBINDINGPROC) (GLuint program, GLuint uniformBlockIndex, GLuint uniformBlockBinding);
+typedef void (GL_APIENTRYP PFNGLDRAWARRAYSINSTANCEDPROC) (GLenum mode, GLint first, GLsizei count, GLsizei instancecount);
+typedef void (GL_APIENTRYP PFNGLDRAWELEMENTSINSTANCEDPROC) (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount);
+typedef GLsync (GL_APIENTRYP PFNGLFENCESYNCPROC) (GLenum condition, GLbitfield flags);
+typedef GLboolean (GL_APIENTRYP PFNGLISSYNCPROC) (GLsync sync);
+typedef void (GL_APIENTRYP PFNGLDELETESYNCPROC) (GLsync sync);
+typedef GLenum (GL_APIENTRYP PFNGLCLIENTWAITSYNCPROC) (GLsync sync, GLbitfield flags, GLuint64 timeout);
+typedef void (GL_APIENTRYP PFNGLWAITSYNCPROC) (GLsync sync, GLbitfield flags, GLuint64 timeout);
+typedef void (GL_APIENTRYP PFNGLGETINTEGER64VPROC) (GLenum pname, GLint64 *data);
+typedef void (GL_APIENTRYP PFNGLGETSYNCIVPROC) (GLsync sync, GLenum pname, GLsizei bufSize, GLsizei *length, GLint *values);
+typedef void (GL_APIENTRYP PFNGLGETINTEGER64I_VPROC) (GLenum target, GLuint index, GLint64 *data);
+typedef void (GL_APIENTRYP PFNGLGETBUFFERPARAMETERI64VPROC) (GLenum target, GLenum pname, GLint64 *params);
+typedef void (GL_APIENTRYP PFNGLGENSAMPLERSPROC) (GLsizei count, GLuint *samplers);
+typedef void (GL_APIENTRYP PFNGLDELETESAMPLERSPROC) (GLsizei count, const GLuint *samplers);
+typedef GLboolean (GL_APIENTRYP PFNGLISSAMPLERPROC) (GLuint sampler);
+typedef void (GL_APIENTRYP PFNGLBINDSAMPLERPROC) (GLuint unit, GLuint sampler);
+typedef void (GL_APIENTRYP PFNGLSAMPLERPARAMETERIPROC) (GLuint sampler, GLenum pname, GLint param);
+typedef void (GL_APIENTRYP PFNGLSAMPLERPARAMETERIVPROC) (GLuint sampler, GLenum pname, const GLint *param);
+typedef void (GL_APIENTRYP PFNGLSAMPLERPARAMETERFPROC) (GLuint sampler, GLenum pname, GLfloat param);
+typedef void (GL_APIENTRYP PFNGLSAMPLERPARAMETERFVPROC) (GLuint sampler, GLenum pname, const GLfloat *param);
+typedef void (GL_APIENTRYP PFNGLGETSAMPLERPARAMETERIVPROC) (GLuint sampler, GLenum pname, GLint *params);
+typedef void (GL_APIENTRYP PFNGLGETSAMPLERPARAMETERFVPROC) (GLuint sampler, GLenum pname, GLfloat *params);
+typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBDIVISORPROC) (GLuint index, GLuint divisor);
+typedef void (GL_APIENTRYP PFNGLBINDTRANSFORMFEEDBACKPROC) (GLenum target, GLuint id);
+typedef void (GL_APIENTRYP PFNGLDELETETRANSFORMFEEDBACKSPROC) (GLsizei n, const GLuint *ids);
+typedef void (GL_APIENTRYP PFNGLGENTRANSFORMFEEDBACKSPROC) (GLsizei n, GLuint *ids);
+typedef GLboolean (GL_APIENTRYP PFNGLISTRANSFORMFEEDBACKPROC) (GLuint id);
+typedef void (GL_APIENTRYP PFNGLPAUSETRANSFORMFEEDBACKPROC) (void);
+typedef void (GL_APIENTRYP PFNGLRESUMETRANSFORMFEEDBACKPROC) (void);
+typedef void (GL_APIENTRYP PFNGLGETPROGRAMBINARYPROC) (GLuint program, GLsizei bufSize, GLsizei *length, GLenum *binaryFormat, void *binary);
+typedef void (GL_APIENTRYP PFNGLPROGRAMBINARYPROC) (GLuint program, GLenum binaryFormat, const void *binary, GLsizei length);
+typedef void (GL_APIENTRYP PFNGLPROGRAMPARAMETERIPROC) (GLuint program, GLenum pname, GLint value);
+typedef void (GL_APIENTRYP PFNGLINVALIDATEFRAMEBUFFERPROC) (GLenum target, GLsizei numAttachments, const GLenum *attachments);
+typedef void (GL_APIENTRYP PFNGLINVALIDATESUBFRAMEBUFFERPROC) (GLenum target, GLsizei numAttachments, const GLenum *attachments, GLint x, GLint y, GLsizei width, GLsizei height);
+typedef void (GL_APIENTRYP PFNGLTEXSTORAGE2DPROC) (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height);
+typedef void (GL_APIENTRYP PFNGLTEXSTORAGE3DPROC) (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth);
+typedef void (GL_APIENTRYP PFNGLGETINTERNALFORMATIVPROC) (GLenum target, GLenum internalformat, GLenum pname, GLsizei bufSize, GLint *params);
+#if GL_GLES_PROTOTYPES
+GL_APICALL void GL_APIENTRY glReadBuffer (GLenum src);
+GL_APICALL void GL_APIENTRY glDrawRangeElements (GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum type, const void *indices);
+GL_APICALL void GL_APIENTRY glTexImage3D (GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const void *pixels);
+GL_APICALL void GL_APIENTRY glTexSubImage3D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const void *pixels);
+GL_APICALL void GL_APIENTRY glCopyTexSubImage3D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLint x, GLint y, GLsizei width, GLsizei height);
+GL_APICALL void GL_APIENTRY glCompressedTexImage3D (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLsizei imageSize, const void *data);
+GL_APICALL void GL_APIENTRY glCompressedTexSubImage3D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLsizei imageSize, const void *data);
+GL_APICALL void GL_APIENTRY glGenQueries (GLsizei n, GLuint *ids);
+GL_APICALL void GL_APIENTRY glDeleteQueries (GLsizei n, const GLuint *ids);
+GL_APICALL GLboolean GL_APIENTRY glIsQuery (GLuint id);
+GL_APICALL void GL_APIENTRY glBeginQuery (GLenum target, GLuint id);
+GL_APICALL void GL_APIENTRY glEndQuery (GLenum target);
+GL_APICALL void GL_APIENTRY glGetQueryiv (GLenum target, GLenum pname, GLint *params);
+GL_APICALL void GL_APIENTRY glGetQueryObjectuiv (GLuint id, GLenum pname, GLuint *params);
+GL_APICALL GLboolean GL_APIENTRY glUnmapBuffer (GLenum target);
+GL_APICALL void GL_APIENTRY glGetBufferPointerv (GLenum target, GLenum pname, void **params);
+GL_APICALL void GL_APIENTRY glDrawBuffers (GLsizei n, const GLenum *bufs);
+GL_APICALL void GL_APIENTRY glUniformMatrix2x3fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GL_APICALL void GL_APIENTRY glUniformMatrix3x2fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GL_APICALL void GL_APIENTRY glUniformMatrix2x4fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GL_APICALL void GL_APIENTRY glUniformMatrix4x2fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GL_APICALL void GL_APIENTRY glUniformMatrix3x4fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GL_APICALL void GL_APIENTRY glUniformMatrix4x3fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GL_APICALL void GL_APIENTRY glBlitFramebuffer (GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter);
+GL_APICALL void GL_APIENTRY glRenderbufferStorageMultisample (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height);
+GL_APICALL void GL_APIENTRY glFramebufferTextureLayer (GLenum target, GLenum attachment, GLuint texture, GLint level, GLint layer);
+GL_APICALL void *GL_APIENTRY glMapBufferRange (GLenum target, GLintptr offset, GLsizeiptr length, GLbitfield access);
+GL_APICALL void GL_APIENTRY glFlushMappedBufferRange (GLenum target, GLintptr offset, GLsizeiptr length);
+GL_APICALL void GL_APIENTRY glBindVertexArray (GLuint array);
+GL_APICALL void GL_APIENTRY glDeleteVertexArrays (GLsizei n, const GLuint *arrays);
+GL_APICALL void GL_APIENTRY glGenVertexArrays (GLsizei n, GLuint *arrays);
+GL_APICALL GLboolean GL_APIENTRY glIsVertexArray (GLuint array);
+GL_APICALL void GL_APIENTRY glGetIntegeri_v (GLenum target, GLuint index, GLint *data);
+GL_APICALL void GL_APIENTRY glBeginTransformFeedback (GLenum primitiveMode);
+GL_APICALL void GL_APIENTRY glEndTransformFeedback (void);
+GL_APICALL void GL_APIENTRY glBindBufferRange (GLenum target, GLuint index, GLuint buffer, GLintptr offset, GLsizeiptr size);
+GL_APICALL void GL_APIENTRY glBindBufferBase (GLenum target, GLuint index, GLuint buffer);
+GL_APICALL void GL_APIENTRY glTransformFeedbackVaryings (GLuint program, GLsizei count, const GLchar *const*varyings, GLenum bufferMode);
+GL_APICALL void GL_APIENTRY glGetTransformFeedbackVarying (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLsizei *size, GLenum *type, GLchar *name);
+GL_APICALL void GL_APIENTRY glVertexAttribIPointer (GLuint index, GLint size, GLenum type, GLsizei stride, const void *pointer);
+GL_APICALL void GL_APIENTRY glGetVertexAttribIiv (GLuint index, GLenum pname, GLint *params);
+GL_APICALL void GL_APIENTRY glGetVertexAttribIuiv (GLuint index, GLenum pname, GLuint *params);
+GL_APICALL void GL_APIENTRY glVertexAttribI4i (GLuint index, GLint x, GLint y, GLint z, GLint w);
+GL_APICALL void GL_APIENTRY glVertexAttribI4ui (GLuint index, GLuint x, GLuint y, GLuint z, GLuint w);
+GL_APICALL void GL_APIENTRY glVertexAttribI4iv (GLuint index, const GLint *v);
+GL_APICALL void GL_APIENTRY glVertexAttribI4uiv (GLuint index, const GLuint *v);
+GL_APICALL void GL_APIENTRY glGetUniformuiv (GLuint program, GLint location, GLuint *params);
+GL_APICALL GLint GL_APIENTRY glGetFragDataLocation (GLuint program, const GLchar *name);
+GL_APICALL void GL_APIENTRY glUniform1ui (GLint location, GLuint v0);
+GL_APICALL void GL_APIENTRY glUniform2ui (GLint location, GLuint v0, GLuint v1);
+GL_APICALL void GL_APIENTRY glUniform3ui (GLint location, GLuint v0, GLuint v1, GLuint v2);
+GL_APICALL void GL_APIENTRY glUniform4ui (GLint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3);
+GL_APICALL void GL_APIENTRY glUniform1uiv (GLint location, GLsizei count, const GLuint *value);
+GL_APICALL void GL_APIENTRY glUniform2uiv (GLint location, GLsizei count, const GLuint *value);
+GL_APICALL void GL_APIENTRY glUniform3uiv (GLint location, GLsizei count, const GLuint *value);
+GL_APICALL void GL_APIENTRY glUniform4uiv (GLint location, GLsizei count, const GLuint *value);
+GL_APICALL void GL_APIENTRY glClearBufferiv (GLenum buffer, GLint drawbuffer, const GLint *value);
+GL_APICALL void GL_APIENTRY glClearBufferuiv (GLenum buffer, GLint drawbuffer, const GLuint *value);
+GL_APICALL void GL_APIENTRY glClearBufferfv (GLenum buffer, GLint drawbuffer, const GLfloat *value);
+GL_APICALL void GL_APIENTRY glClearBufferfi (GLenum buffer, GLint drawbuffer, GLfloat depth, GLint stencil);
+GL_APICALL const GLubyte *GL_APIENTRY glGetStringi (GLenum name, GLuint index);
+GL_APICALL void GL_APIENTRY glCopyBufferSubData (GLenum readTarget, GLenum writeTarget, GLintptr readOffset, GLintptr writeOffset, GLsizeiptr size);
+GL_APICALL void GL_APIENTRY glGetUniformIndices (GLuint program, GLsizei uniformCount, const GLchar *const*uniformNames, GLuint *uniformIndices);
+GL_APICALL void GL_APIENTRY glGetActiveUniformsiv (GLuint program, GLsizei uniformCount, const GLuint *uniformIndices, GLenum pname, GLint *params);
+GL_APICALL GLuint GL_APIENTRY glGetUniformBlockIndex (GLuint program, const GLchar *uniformBlockName);
+GL_APICALL void GL_APIENTRY glGetActiveUniformBlockiv (GLuint program, GLuint uniformBlockIndex, GLenum pname, GLint *params);
+GL_APICALL void GL_APIENTRY glGetActiveUniformBlockName (GLuint program, GLuint uniformBlockIndex, GLsizei bufSize, GLsizei *length, GLchar *uniformBlockName);
+GL_APICALL void GL_APIENTRY glUniformBlockBinding (GLuint program, GLuint uniformBlockIndex, GLuint uniformBlockBinding);
+GL_APICALL void GL_APIENTRY glDrawArraysInstanced (GLenum mode, GLint first, GLsizei count, GLsizei instancecount);
+GL_APICALL void GL_APIENTRY glDrawElementsInstanced (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount);
+GL_APICALL GLsync GL_APIENTRY glFenceSync (GLenum condition, GLbitfield flags);
+GL_APICALL GLboolean GL_APIENTRY glIsSync (GLsync sync);
+GL_APICALL void GL_APIENTRY glDeleteSync (GLsync sync);
+GL_APICALL GLenum GL_APIENTRY glClientWaitSync (GLsync sync, GLbitfield flags, GLuint64 timeout);
+GL_APICALL void GL_APIENTRY glWaitSync (GLsync sync, GLbitfield flags, GLuint64 timeout);
+GL_APICALL void GL_APIENTRY glGetInteger64v (GLenum pname, GLint64 *data);
+GL_APICALL void GL_APIENTRY glGetSynciv (GLsync sync, GLenum pname, GLsizei bufSize, GLsizei *length, GLint *values);
+GL_APICALL void GL_APIENTRY glGetInteger64i_v (GLenum target, GLuint index, GLint64 *data);
+GL_APICALL void GL_APIENTRY glGetBufferParameteri64v (GLenum target, GLenum pname, GLint64 *params);
+GL_APICALL void GL_APIENTRY glGenSamplers (GLsizei count, GLuint *samplers);
+GL_APICALL void GL_APIENTRY glDeleteSamplers (GLsizei count, const GLuint *samplers);
+GL_APICALL GLboolean GL_APIENTRY glIsSampler (GLuint sampler);
+GL_APICALL void GL_APIENTRY glBindSampler (GLuint unit, GLuint sampler);
+GL_APICALL void GL_APIENTRY glSamplerParameteri (GLuint sampler, GLenum pname, GLint param);
+GL_APICALL void GL_APIENTRY glSamplerParameteriv (GLuint sampler, GLenum pname, const GLint *param);
+GL_APICALL void GL_APIENTRY glSamplerParameterf (GLuint sampler, GLenum pname, GLfloat param);
+GL_APICALL void GL_APIENTRY glSamplerParameterfv (GLuint sampler, GLenum pname, const GLfloat *param);
+GL_APICALL void GL_APIENTRY glGetSamplerParameteriv (GLuint sampler, GLenum pname, GLint *params);
+GL_APICALL void GL_APIENTRY glGetSamplerParameterfv (GLuint sampler, GLenum pname, GLfloat *params);
+GL_APICALL void GL_APIENTRY glVertexAttribDivisor (GLuint index, GLuint divisor);
+GL_APICALL void GL_APIENTRY glBindTransformFeedback (GLenum target, GLuint id);
+GL_APICALL void GL_APIENTRY glDeleteTransformFeedbacks (GLsizei n, const GLuint *ids);
+GL_APICALL void GL_APIENTRY glGenTransformFeedbacks (GLsizei n, GLuint *ids);
+GL_APICALL GLboolean GL_APIENTRY glIsTransformFeedback (GLuint id);
+GL_APICALL void GL_APIENTRY glPauseTransformFeedback (void);
+GL_APICALL void GL_APIENTRY glResumeTransformFeedback (void);
+GL_APICALL void GL_APIENTRY glGetProgramBinary (GLuint program, GLsizei bufSize, GLsizei *length, GLenum *binaryFormat, void *binary);
+GL_APICALL void GL_APIENTRY glProgramBinary (GLuint program, GLenum binaryFormat, const void *binary, GLsizei length);
+GL_APICALL void GL_APIENTRY glProgramParameteri (GLuint program, GLenum pname, GLint value);
+GL_APICALL void GL_APIENTRY glInvalidateFramebuffer (GLenum target, GLsizei numAttachments, const GLenum *attachments);
+GL_APICALL void GL_APIENTRY glInvalidateSubFramebuffer (GLenum target, GLsizei numAttachments, const GLenum *attachments, GLint x, GLint y, GLsizei width, GLsizei height);
+GL_APICALL void GL_APIENTRY glTexStorage2D (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height);
+GL_APICALL void GL_APIENTRY glTexStorage3D (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth);
+GL_APICALL void GL_APIENTRY glGetInternalformativ (GLenum target, GLenum internalformat, GLenum pname, GLsizei bufSize, GLint *params);
+#endif
+#endif /* GL_ES_VERSION_3_0 */
+
+#ifndef GL_ES_VERSION_3_1
+#define GL_ES_VERSION_3_1 1
+#define GL_COMPUTE_SHADER 0x91B9
+#define GL_MAX_COMPUTE_UNIFORM_BLOCKS 0x91BB
+#define GL_MAX_COMPUTE_TEXTURE_IMAGE_UNITS 0x91BC
+#define GL_MAX_COMPUTE_IMAGE_UNIFORMS 0x91BD
+#define GL_MAX_COMPUTE_SHARED_MEMORY_SIZE 0x8262
+#define GL_MAX_COMPUTE_UNIFORM_COMPONENTS 0x8263
+#define GL_MAX_COMPUTE_ATOMIC_COUNTER_BUFFERS 0x8264
+#define GL_MAX_COMPUTE_ATOMIC_COUNTERS 0x8265
+#define GL_MAX_COMBINED_COMPUTE_UNIFORM_COMPONENTS 0x8266
+#define GL_MAX_COMPUTE_WORK_GROUP_INVOCATIONS 0x90EB
+#define GL_MAX_COMPUTE_WORK_GROUP_COUNT 0x91BE
+#define GL_MAX_COMPUTE_WORK_GROUP_SIZE 0x91BF
+#define GL_COMPUTE_WORK_GROUP_SIZE 0x8267
+#define GL_DISPATCH_INDIRECT_BUFFER 0x90EE
+#define GL_DISPATCH_INDIRECT_BUFFER_BINDING 0x90EF
+#define GL_COMPUTE_SHADER_BIT 0x00000020
+#define GL_DRAW_INDIRECT_BUFFER 0x8F3F
+#define GL_DRAW_INDIRECT_BUFFER_BINDING 0x8F43
+#define GL_MAX_UNIFORM_LOCATIONS 0x826E
+#define GL_FRAMEBUFFER_DEFAULT_WIDTH 0x9310
+#define GL_FRAMEBUFFER_DEFAULT_HEIGHT 0x9311
+#define GL_FRAMEBUFFER_DEFAULT_SAMPLES 0x9313
+#define GL_FRAMEBUFFER_DEFAULT_FIXED_SAMPLE_LOCATIONS 0x9314
+#define GL_MAX_FRAMEBUFFER_WIDTH 0x9315
+#define GL_MAX_FRAMEBUFFER_HEIGHT 0x9316
+#define GL_MAX_FRAMEBUFFER_SAMPLES 0x9318
+#define GL_UNIFORM 0x92E1
+#define GL_UNIFORM_BLOCK 0x92E2
+#define GL_PROGRAM_INPUT 0x92E3
+#define GL_PROGRAM_OUTPUT 0x92E4
+#define GL_BUFFER_VARIABLE 0x92E5
+#define GL_SHADER_STORAGE_BLOCK 0x92E6
+#define GL_ATOMIC_COUNTER_BUFFER 0x92C0
+#define GL_TRANSFORM_FEEDBACK_VARYING 0x92F4
+#define GL_ACTIVE_RESOURCES 0x92F5
+#define GL_MAX_NAME_LENGTH 0x92F6
+#define GL_MAX_NUM_ACTIVE_VARIABLES 0x92F7
+#define GL_NAME_LENGTH 0x92F9
+#define GL_TYPE 0x92FA
+#define GL_ARRAY_SIZE 0x92FB
+#define GL_OFFSET 0x92FC
+#define GL_BLOCK_INDEX 0x92FD
+#define GL_ARRAY_STRIDE 0x92FE
+#define GL_MATRIX_STRIDE 0x92FF
+#define GL_IS_ROW_MAJOR 0x9300
+#define GL_ATOMIC_COUNTER_BUFFER_INDEX 0x9301
+#define GL_BUFFER_BINDING 0x9302
+#define GL_BUFFER_DATA_SIZE 0x9303
+#define GL_NUM_ACTIVE_VARIABLES 0x9304
+#define GL_ACTIVE_VARIABLES 0x9305
+#define GL_REFERENCED_BY_VERTEX_SHADER 0x9306
+#define GL_REFERENCED_BY_FRAGMENT_SHADER 0x930A
+#define GL_REFERENCED_BY_COMPUTE_SHADER 0x930B
+#define GL_TOP_LEVEL_ARRAY_SIZE 0x930C
+#define GL_TOP_LEVEL_ARRAY_STRIDE 0x930D
+#define GL_LOCATION 0x930E
+#define GL_VERTEX_SHADER_BIT 0x00000001
+#define GL_FRAGMENT_SHADER_BIT 0x00000002
+#define GL_ALL_SHADER_BITS 0xFFFFFFFF
+#define GL_PROGRAM_SEPARABLE 0x8258
+#define GL_ACTIVE_PROGRAM 0x8259
+#define GL_PROGRAM_PIPELINE_BINDING 0x825A
+#define GL_ATOMIC_COUNTER_BUFFER_BINDING 0x92C1
+#define GL_ATOMIC_COUNTER_BUFFER_START 0x92C2
+#define GL_ATOMIC_COUNTER_BUFFER_SIZE 0x92C3
+#define GL_MAX_VERTEX_ATOMIC_COUNTER_BUFFERS 0x92CC
+#define GL_MAX_FRAGMENT_ATOMIC_COUNTER_BUFFERS 0x92D0
+#define GL_MAX_COMBINED_ATOMIC_COUNTER_BUFFERS 0x92D1
+#define GL_MAX_VERTEX_ATOMIC_COUNTERS 0x92D2
+#define GL_MAX_FRAGMENT_ATOMIC_COUNTERS 0x92D6
+#define GL_MAX_COMBINED_ATOMIC_COUNTERS 0x92D7
+#define GL_MAX_ATOMIC_COUNTER_BUFFER_SIZE 0x92D8
+#define GL_MAX_ATOMIC_COUNTER_BUFFER_BINDINGS 0x92DC
+#define GL_ACTIVE_ATOMIC_COUNTER_BUFFERS 0x92D9
+#define GL_UNSIGNED_INT_ATOMIC_COUNTER 0x92DB +#define GL_MAX_IMAGE_UNITS 0x8F38 +#define GL_MAX_VERTEX_IMAGE_UNIFORMS 0x90CA +#define GL_MAX_FRAGMENT_IMAGE_UNIFORMS 0x90CE +#define GL_MAX_COMBINED_IMAGE_UNIFORMS 0x90CF +#define GL_IMAGE_BINDING_NAME 0x8F3A +#define GL_IMAGE_BINDING_LEVEL 0x8F3B +#define GL_IMAGE_BINDING_LAYERED 0x8F3C +#define GL_IMAGE_BINDING_LAYER 0x8F3D +#define GL_IMAGE_BINDING_ACCESS 0x8F3E +#define GL_IMAGE_BINDING_FORMAT 0x906E +#define GL_VERTEX_ATTRIB_ARRAY_BARRIER_BIT 0x00000001 +#define GL_ELEMENT_ARRAY_BARRIER_BIT 0x00000002 +#define GL_UNIFORM_BARRIER_BIT 0x00000004 +#define GL_TEXTURE_FETCH_BARRIER_BIT 0x00000008 +#define GL_SHADER_IMAGE_ACCESS_BARRIER_BIT 0x00000020 +#define GL_COMMAND_BARRIER_BIT 0x00000040 +#define GL_PIXEL_BUFFER_BARRIER_BIT 0x00000080 +#define GL_TEXTURE_UPDATE_BARRIER_BIT 0x00000100 +#define GL_BUFFER_UPDATE_BARRIER_BIT 0x00000200 +#define GL_FRAMEBUFFER_BARRIER_BIT 0x00000400 +#define GL_TRANSFORM_FEEDBACK_BARRIER_BIT 0x00000800 +#define GL_ATOMIC_COUNTER_BARRIER_BIT 0x00001000 +#define GL_ALL_BARRIER_BITS 0xFFFFFFFF +#define GL_IMAGE_2D 0x904D +#define GL_IMAGE_3D 0x904E +#define GL_IMAGE_CUBE 0x9050 +#define GL_IMAGE_2D_ARRAY 0x9053 +#define GL_INT_IMAGE_2D 0x9058 +#define GL_INT_IMAGE_3D 0x9059 +#define GL_INT_IMAGE_CUBE 0x905B +#define GL_INT_IMAGE_2D_ARRAY 0x905E +#define GL_UNSIGNED_INT_IMAGE_2D 0x9063 +#define GL_UNSIGNED_INT_IMAGE_3D 0x9064 +#define GL_UNSIGNED_INT_IMAGE_CUBE 0x9066 +#define GL_UNSIGNED_INT_IMAGE_2D_ARRAY 0x9069 +#define GL_IMAGE_FORMAT_COMPATIBILITY_TYPE 0x90C7 +#define GL_IMAGE_FORMAT_COMPATIBILITY_BY_SIZE 0x90C8 +#define GL_IMAGE_FORMAT_COMPATIBILITY_BY_CLASS 0x90C9 +#define GL_READ_ONLY 0x88B8 +#define GL_WRITE_ONLY 0x88B9 +#define GL_READ_WRITE 0x88BA +#define GL_SHADER_STORAGE_BUFFER 0x90D2 +#define GL_SHADER_STORAGE_BUFFER_BINDING 0x90D3 +#define GL_SHADER_STORAGE_BUFFER_START 0x90D4 +#define GL_SHADER_STORAGE_BUFFER_SIZE 0x90D5 +#define GL_MAX_VERTEX_SHADER_STORAGE_BLOCKS 0x90D6 +#define GL_MAX_FRAGMENT_SHADER_STORAGE_BLOCKS 0x90DA +#define GL_MAX_COMPUTE_SHADER_STORAGE_BLOCKS 0x90DB +#define GL_MAX_COMBINED_SHADER_STORAGE_BLOCKS 0x90DC +#define GL_MAX_SHADER_STORAGE_BUFFER_BINDINGS 0x90DD +#define GL_MAX_SHADER_STORAGE_BLOCK_SIZE 0x90DE +#define GL_SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT 0x90DF +#define GL_SHADER_STORAGE_BARRIER_BIT 0x00002000 +#define GL_MAX_COMBINED_SHADER_OUTPUT_RESOURCES 0x8F39 +#define GL_DEPTH_STENCIL_TEXTURE_MODE 0x90EA +#define GL_STENCIL_INDEX 0x1901 +#define GL_MIN_PROGRAM_TEXTURE_GATHER_OFFSET 0x8E5E +#define GL_MAX_PROGRAM_TEXTURE_GATHER_OFFSET 0x8E5F +#define GL_SAMPLE_POSITION 0x8E50 +#define GL_SAMPLE_MASK 0x8E51 +#define GL_SAMPLE_MASK_VALUE 0x8E52 +#define GL_TEXTURE_2D_MULTISAMPLE 0x9100 +#define GL_MAX_SAMPLE_MASK_WORDS 0x8E59 +#define GL_MAX_COLOR_TEXTURE_SAMPLES 0x910E +#define GL_MAX_DEPTH_TEXTURE_SAMPLES 0x910F +#define GL_MAX_INTEGER_SAMPLES 0x9110 +#define GL_TEXTURE_BINDING_2D_MULTISAMPLE 0x9104 +#define GL_TEXTURE_SAMPLES 0x9106 +#define GL_TEXTURE_FIXED_SAMPLE_LOCATIONS 0x9107 +#define GL_TEXTURE_WIDTH 0x1000 +#define GL_TEXTURE_HEIGHT 0x1001 +#define GL_TEXTURE_DEPTH 0x8071 +#define GL_TEXTURE_INTERNAL_FORMAT 0x1003 +#define GL_TEXTURE_RED_SIZE 0x805C +#define GL_TEXTURE_GREEN_SIZE 0x805D +#define GL_TEXTURE_BLUE_SIZE 0x805E +#define GL_TEXTURE_ALPHA_SIZE 0x805F +#define GL_TEXTURE_DEPTH_SIZE 0x884A +#define GL_TEXTURE_STENCIL_SIZE 0x88F1 +#define GL_TEXTURE_SHARED_SIZE 0x8C3F +#define GL_TEXTURE_RED_TYPE 0x8C10 +#define GL_TEXTURE_GREEN_TYPE 0x8C11 
+#define GL_TEXTURE_BLUE_TYPE 0x8C12 +#define GL_TEXTURE_ALPHA_TYPE 0x8C13 +#define GL_TEXTURE_DEPTH_TYPE 0x8C16 +#define GL_TEXTURE_COMPRESSED 0x86A1 +#define GL_SAMPLER_2D_MULTISAMPLE 0x9108 +#define GL_INT_SAMPLER_2D_MULTISAMPLE 0x9109 +#define GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE 0x910A +#define GL_VERTEX_ATTRIB_BINDING 0x82D4 +#define GL_VERTEX_ATTRIB_RELATIVE_OFFSET 0x82D5 +#define GL_VERTEX_BINDING_DIVISOR 0x82D6 +#define GL_VERTEX_BINDING_OFFSET 0x82D7 +#define GL_VERTEX_BINDING_STRIDE 0x82D8 +#define GL_VERTEX_BINDING_BUFFER 0x8F4F +#define GL_MAX_VERTEX_ATTRIB_RELATIVE_OFFSET 0x82D9 +#define GL_MAX_VERTEX_ATTRIB_BINDINGS 0x82DA +#define GL_MAX_VERTEX_ATTRIB_STRIDE 0x82E5 +typedef void (GL_APIENTRYP PFNGLDISPATCHCOMPUTEPROC) (GLuint num_groups_x, GLuint num_groups_y, GLuint num_groups_z); +typedef void (GL_APIENTRYP PFNGLDISPATCHCOMPUTEINDIRECTPROC) (GLintptr indirect); +typedef void (GL_APIENTRYP PFNGLDRAWARRAYSINDIRECTPROC) (GLenum mode, const void *indirect); +typedef void (GL_APIENTRYP PFNGLDRAWELEMENTSINDIRECTPROC) (GLenum mode, GLenum type, const void *indirect); +typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERPARAMETERIPROC) (GLenum target, GLenum pname, GLint param); +typedef void (GL_APIENTRYP PFNGLGETFRAMEBUFFERPARAMETERIVPROC) (GLenum target, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETPROGRAMINTERFACEIVPROC) (GLuint program, GLenum programInterface, GLenum pname, GLint *params); +typedef GLuint (GL_APIENTRYP PFNGLGETPROGRAMRESOURCEINDEXPROC) (GLuint program, GLenum programInterface, const GLchar *name); +typedef void (GL_APIENTRYP PFNGLGETPROGRAMRESOURCENAMEPROC) (GLuint program, GLenum programInterface, GLuint index, GLsizei bufSize, GLsizei *length, GLchar *name); +typedef void (GL_APIENTRYP PFNGLGETPROGRAMRESOURCEIVPROC) (GLuint program, GLenum programInterface, GLuint index, GLsizei propCount, const GLenum *props, GLsizei bufSize, GLsizei *length, GLint *params); +typedef GLint (GL_APIENTRYP PFNGLGETPROGRAMRESOURCELOCATIONPROC) (GLuint program, GLenum programInterface, const GLchar *name); +typedef void (GL_APIENTRYP PFNGLUSEPROGRAMSTAGESPROC) (GLuint pipeline, GLbitfield stages, GLuint program); +typedef void (GL_APIENTRYP PFNGLACTIVESHADERPROGRAMPROC) (GLuint pipeline, GLuint program); +typedef GLuint (GL_APIENTRYP PFNGLCREATESHADERPROGRAMVPROC) (GLenum type, GLsizei count, const GLchar *const*strings); +typedef void (GL_APIENTRYP PFNGLBINDPROGRAMPIPELINEPROC) (GLuint pipeline); +typedef void (GL_APIENTRYP PFNGLDELETEPROGRAMPIPELINESPROC) (GLsizei n, const GLuint *pipelines); +typedef void (GL_APIENTRYP PFNGLGENPROGRAMPIPELINESPROC) (GLsizei n, GLuint *pipelines); +typedef GLboolean (GL_APIENTRYP PFNGLISPROGRAMPIPELINEPROC) (GLuint pipeline); +typedef void (GL_APIENTRYP PFNGLGETPROGRAMPIPELINEIVPROC) (GLuint pipeline, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM1IPROC) (GLuint program, GLint location, GLint v0); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM2IPROC) (GLuint program, GLint location, GLint v0, GLint v1); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM3IPROC) (GLuint program, GLint location, GLint v0, GLint v1, GLint v2); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM4IPROC) (GLuint program, GLint location, GLint v0, GLint v1, GLint v2, GLint v3); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM1UIPROC) (GLuint program, GLint location, GLuint v0); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM2UIPROC) (GLuint program, GLint location, GLuint v0, GLuint v1); +typedef void (GL_APIENTRYP 
PFNGLPROGRAMUNIFORM3UIPROC) (GLuint program, GLint location, GLuint v0, GLuint v1, GLuint v2); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM4UIPROC) (GLuint program, GLint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM1FPROC) (GLuint program, GLint location, GLfloat v0); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM2FPROC) (GLuint program, GLint location, GLfloat v0, GLfloat v1); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM3FPROC) (GLuint program, GLint location, GLfloat v0, GLfloat v1, GLfloat v2); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM4FPROC) (GLuint program, GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM1IVPROC) (GLuint program, GLint location, GLsizei count, const GLint *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM2IVPROC) (GLuint program, GLint location, GLsizei count, const GLint *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM3IVPROC) (GLuint program, GLint location, GLsizei count, const GLint *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM4IVPROC) (GLuint program, GLint location, GLsizei count, const GLint *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM1UIVPROC) (GLuint program, GLint location, GLsizei count, const GLuint *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM2UIVPROC) (GLuint program, GLint location, GLsizei count, const GLuint *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM3UIVPROC) (GLuint program, GLint location, GLsizei count, const GLuint *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM4UIVPROC) (GLuint program, GLint location, GLsizei count, const GLuint *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM1FVPROC) (GLuint program, GLint location, GLsizei count, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM2FVPROC) (GLuint program, GLint location, GLsizei count, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM3FVPROC) (GLuint program, GLint location, GLsizei count, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM4FVPROC) (GLuint program, GLint location, GLsizei count, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX2FVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX3FVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX4FVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX2X3FVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX3X2FVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX2X4FVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX4X2FVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX3X4FVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX4X3FVPROC) (GLuint program, GLint 
location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLVALIDATEPROGRAMPIPELINEPROC) (GLuint pipeline); +typedef void (GL_APIENTRYP PFNGLGETPROGRAMPIPELINEINFOLOGPROC) (GLuint pipeline, GLsizei bufSize, GLsizei *length, GLchar *infoLog); +typedef void (GL_APIENTRYP PFNGLBINDIMAGETEXTUREPROC) (GLuint unit, GLuint texture, GLint level, GLboolean layered, GLint layer, GLenum access, GLenum format); +typedef void (GL_APIENTRYP PFNGLGETBOOLEANI_VPROC) (GLenum target, GLuint index, GLboolean *data); +typedef void (GL_APIENTRYP PFNGLMEMORYBARRIERPROC) (GLbitfield barriers); +typedef void (GL_APIENTRYP PFNGLMEMORYBARRIERBYREGIONPROC) (GLbitfield barriers); +typedef void (GL_APIENTRYP PFNGLTEXSTORAGE2DMULTISAMPLEPROC) (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLboolean fixedsamplelocations); +typedef void (GL_APIENTRYP PFNGLGETMULTISAMPLEFVPROC) (GLenum pname, GLuint index, GLfloat *val); +typedef void (GL_APIENTRYP PFNGLSAMPLEMASKIPROC) (GLuint maskNumber, GLbitfield mask); +typedef void (GL_APIENTRYP PFNGLGETTEXLEVELPARAMETERIVPROC) (GLenum target, GLint level, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETTEXLEVELPARAMETERFVPROC) (GLenum target, GLint level, GLenum pname, GLfloat *params); +typedef void (GL_APIENTRYP PFNGLBINDVERTEXBUFFERPROC) (GLuint bindingindex, GLuint buffer, GLintptr offset, GLsizei stride); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBFORMATPROC) (GLuint attribindex, GLint size, GLenum type, GLboolean normalized, GLuint relativeoffset); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBIFORMATPROC) (GLuint attribindex, GLint size, GLenum type, GLuint relativeoffset); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBBINDINGPROC) (GLuint attribindex, GLuint bindingindex); +typedef void (GL_APIENTRYP PFNGLVERTEXBINDINGDIVISORPROC) (GLuint bindingindex, GLuint divisor); +#if GL_GLES_PROTOTYPES +GL_APICALL void GL_APIENTRY glDispatchCompute (GLuint num_groups_x, GLuint num_groups_y, GLuint num_groups_z); +GL_APICALL void GL_APIENTRY glDispatchComputeIndirect (GLintptr indirect); +GL_APICALL void GL_APIENTRY glDrawArraysIndirect (GLenum mode, const void *indirect); +GL_APICALL void GL_APIENTRY glDrawElementsIndirect (GLenum mode, GLenum type, const void *indirect); +GL_APICALL void GL_APIENTRY glFramebufferParameteri (GLenum target, GLenum pname, GLint param); +GL_APICALL void GL_APIENTRY glGetFramebufferParameteriv (GLenum target, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetProgramInterfaceiv (GLuint program, GLenum programInterface, GLenum pname, GLint *params); +GL_APICALL GLuint GL_APIENTRY glGetProgramResourceIndex (GLuint program, GLenum programInterface, const GLchar *name); +GL_APICALL void GL_APIENTRY glGetProgramResourceName (GLuint program, GLenum programInterface, GLuint index, GLsizei bufSize, GLsizei *length, GLchar *name); +GL_APICALL void GL_APIENTRY glGetProgramResourceiv (GLuint program, GLenum programInterface, GLuint index, GLsizei propCount, const GLenum *props, GLsizei bufSize, GLsizei *length, GLint *params); +GL_APICALL GLint GL_APIENTRY glGetProgramResourceLocation (GLuint program, GLenum programInterface, const GLchar *name); +GL_APICALL void GL_APIENTRY glUseProgramStages (GLuint pipeline, GLbitfield stages, GLuint program); +GL_APICALL void GL_APIENTRY glActiveShaderProgram (GLuint pipeline, GLuint program); +GL_APICALL GLuint GL_APIENTRY glCreateShaderProgramv (GLenum type, GLsizei count, const GLchar *const*strings); +GL_APICALL 
void GL_APIENTRY glBindProgramPipeline (GLuint pipeline); +GL_APICALL void GL_APIENTRY glDeleteProgramPipelines (GLsizei n, const GLuint *pipelines); +GL_APICALL void GL_APIENTRY glGenProgramPipelines (GLsizei n, GLuint *pipelines); +GL_APICALL GLboolean GL_APIENTRY glIsProgramPipeline (GLuint pipeline); +GL_APICALL void GL_APIENTRY glGetProgramPipelineiv (GLuint pipeline, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glProgramUniform1i (GLuint program, GLint location, GLint v0); +GL_APICALL void GL_APIENTRY glProgramUniform2i (GLuint program, GLint location, GLint v0, GLint v1); +GL_APICALL void GL_APIENTRY glProgramUniform3i (GLuint program, GLint location, GLint v0, GLint v1, GLint v2); +GL_APICALL void GL_APIENTRY glProgramUniform4i (GLuint program, GLint location, GLint v0, GLint v1, GLint v2, GLint v3); +GL_APICALL void GL_APIENTRY glProgramUniform1ui (GLuint program, GLint location, GLuint v0); +GL_APICALL void GL_APIENTRY glProgramUniform2ui (GLuint program, GLint location, GLuint v0, GLuint v1); +GL_APICALL void GL_APIENTRY glProgramUniform3ui (GLuint program, GLint location, GLuint v0, GLuint v1, GLuint v2); +GL_APICALL void GL_APIENTRY glProgramUniform4ui (GLuint program, GLint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3); +GL_APICALL void GL_APIENTRY glProgramUniform1f (GLuint program, GLint location, GLfloat v0); +GL_APICALL void GL_APIENTRY glProgramUniform2f (GLuint program, GLint location, GLfloat v0, GLfloat v1); +GL_APICALL void GL_APIENTRY glProgramUniform3f (GLuint program, GLint location, GLfloat v0, GLfloat v1, GLfloat v2); +GL_APICALL void GL_APIENTRY glProgramUniform4f (GLuint program, GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3); +GL_APICALL void GL_APIENTRY glProgramUniform1iv (GLuint program, GLint location, GLsizei count, const GLint *value); +GL_APICALL void GL_APIENTRY glProgramUniform2iv (GLuint program, GLint location, GLsizei count, const GLint *value); +GL_APICALL void GL_APIENTRY glProgramUniform3iv (GLuint program, GLint location, GLsizei count, const GLint *value); +GL_APICALL void GL_APIENTRY glProgramUniform4iv (GLuint program, GLint location, GLsizei count, const GLint *value); +GL_APICALL void GL_APIENTRY glProgramUniform1uiv (GLuint program, GLint location, GLsizei count, const GLuint *value); +GL_APICALL void GL_APIENTRY glProgramUniform2uiv (GLuint program, GLint location, GLsizei count, const GLuint *value); +GL_APICALL void GL_APIENTRY glProgramUniform3uiv (GLuint program, GLint location, GLsizei count, const GLuint *value); +GL_APICALL void GL_APIENTRY glProgramUniform4uiv (GLuint program, GLint location, GLsizei count, const GLuint *value); +GL_APICALL void GL_APIENTRY glProgramUniform1fv (GLuint program, GLint location, GLsizei count, const GLfloat *value); +GL_APICALL void GL_APIENTRY glProgramUniform2fv (GLuint program, GLint location, GLsizei count, const GLfloat *value); +GL_APICALL void GL_APIENTRY glProgramUniform3fv (GLuint program, GLint location, GLsizei count, const GLfloat *value); +GL_APICALL void GL_APIENTRY glProgramUniform4fv (GLuint program, GLint location, GLsizei count, const GLfloat *value); +GL_APICALL void GL_APIENTRY glProgramUniformMatrix2fv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glProgramUniformMatrix3fv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glProgramUniformMatrix4fv (GLuint program, GLint location, GLsizei count, 
GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glProgramUniformMatrix2x3fv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glProgramUniformMatrix3x2fv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glProgramUniformMatrix2x4fv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glProgramUniformMatrix4x2fv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glProgramUniformMatrix3x4fv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glProgramUniformMatrix4x3fv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glValidateProgramPipeline (GLuint pipeline); +GL_APICALL void GL_APIENTRY glGetProgramPipelineInfoLog (GLuint pipeline, GLsizei bufSize, GLsizei *length, GLchar *infoLog); +GL_APICALL void GL_APIENTRY glBindImageTexture (GLuint unit, GLuint texture, GLint level, GLboolean layered, GLint layer, GLenum access, GLenum format); +GL_APICALL void GL_APIENTRY glGetBooleani_v (GLenum target, GLuint index, GLboolean *data); +GL_APICALL void GL_APIENTRY glMemoryBarrier (GLbitfield barriers); +GL_APICALL void GL_APIENTRY glMemoryBarrierByRegion (GLbitfield barriers); +GL_APICALL void GL_APIENTRY glTexStorage2DMultisample (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLboolean fixedsamplelocations); +GL_APICALL void GL_APIENTRY glGetMultisamplefv (GLenum pname, GLuint index, GLfloat *val); +GL_APICALL void GL_APIENTRY glSampleMaski (GLuint maskNumber, GLbitfield mask); +GL_APICALL void GL_APIENTRY glGetTexLevelParameteriv (GLenum target, GLint level, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetTexLevelParameterfv (GLenum target, GLint level, GLenum pname, GLfloat *params); +GL_APICALL void GL_APIENTRY glBindVertexBuffer (GLuint bindingindex, GLuint buffer, GLintptr offset, GLsizei stride); +GL_APICALL void GL_APIENTRY glVertexAttribFormat (GLuint attribindex, GLint size, GLenum type, GLboolean normalized, GLuint relativeoffset); +GL_APICALL void GL_APIENTRY glVertexAttribIFormat (GLuint attribindex, GLint size, GLenum type, GLuint relativeoffset); +GL_APICALL void GL_APIENTRY glVertexAttribBinding (GLuint attribindex, GLuint bindingindex); +GL_APICALL void GL_APIENTRY glVertexBindingDivisor (GLuint bindingindex, GLuint divisor); +#endif +#endif /* GL_ES_VERSION_3_1 */ + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/sources/khronos/GLES3/gl32.h b/sources/khronos/GLES3/gl32.h new file mode 100755 index 00000000..80bbbc99 --- /dev/null +++ b/sources/khronos/GLES3/gl32.h @@ -0,0 +1,1829 @@ +#ifndef __gl32_h_ +#define __gl32_h_ 1 + +#ifdef __cplusplus +extern "C" { +#endif + +/* +** Copyright (c) 2013-2016 The Khronos Group Inc. 
+** +** Permission is hereby granted, free of charge, to any person obtaining a +** copy of this software and/or associated documentation files (the +** "Materials"), to deal in the Materials without restriction, including +** without limitation the rights to use, copy, modify, merge, publish, +** distribute, sublicense, and/or sell copies of the Materials, and to +** permit persons to whom the Materials are furnished to do so, subject to +** the following conditions: +** +** The above copyright notice and this permission notice shall be included +** in all copies or substantial portions of the Materials. +** +** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +** MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS. +*/ +/* +** This header is generated from the Khronos OpenGL / OpenGL ES XML +** API Registry. The current version of the Registry, generator scripts +** used to make the header, and the header can be found at +** http://www.opengl.org/registry/ +** +** Khronos $Revision$ on $Date$ +*/ + +#include <GLES3/gl3platform.h> + +#ifndef GL_APIENTRYP +#define GL_APIENTRYP GL_APIENTRY* +#endif + +#ifndef GL_GLES_PROTOTYPES +#define GL_GLES_PROTOTYPES 1 +#endif + +/* Generated on date 20161024 */ + +/* Generated C header for: + * API: gles2 + * Profile: common + * Versions considered: 2\.[0-9]|3\.[012] + * Versions emitted: .* + * Default extensions included: None + * Additional extensions included: _nomatch_^ + * Extensions removed: _nomatch_^ + */ + +#ifndef GL_ES_VERSION_2_0 +#define GL_ES_VERSION_2_0 1 +#include <KHR/khrplatform.h> +typedef khronos_int8_t GLbyte; +typedef khronos_float_t GLclampf; +typedef khronos_int32_t GLfixed; +typedef short GLshort; +typedef unsigned short GLushort; +typedef void GLvoid; +typedef struct __GLsync *GLsync; +typedef khronos_int64_t GLint64; +typedef khronos_uint64_t GLuint64; +typedef unsigned int GLenum; +typedef unsigned int GLuint; +typedef char GLchar; +typedef khronos_float_t GLfloat; +typedef khronos_ssize_t GLsizeiptr; +typedef khronos_intptr_t GLintptr; +typedef unsigned int GLbitfield; +typedef int GLint; +typedef unsigned char GLboolean; +typedef int GLsizei; +typedef khronos_uint8_t GLubyte; +#define GL_DEPTH_BUFFER_BIT 0x00000100 +#define GL_STENCIL_BUFFER_BIT 0x00000400 +#define GL_COLOR_BUFFER_BIT 0x00004000 +#define GL_FALSE 0 +#define GL_TRUE 1 +#define GL_POINTS 0x0000 +#define GL_LINES 0x0001 +#define GL_LINE_LOOP 0x0002 +#define GL_LINE_STRIP 0x0003 +#define GL_TRIANGLES 0x0004 +#define GL_TRIANGLE_STRIP 0x0005 +#define GL_TRIANGLE_FAN 0x0006 +#define GL_ZERO 0 +#define GL_ONE 1 +#define GL_SRC_COLOR 0x0300 +#define GL_ONE_MINUS_SRC_COLOR 0x0301 +#define GL_SRC_ALPHA 0x0302 +#define GL_ONE_MINUS_SRC_ALPHA 0x0303 +#define GL_DST_ALPHA 0x0304 +#define GL_ONE_MINUS_DST_ALPHA 0x0305 +#define GL_DST_COLOR 0x0306 +#define GL_ONE_MINUS_DST_COLOR 0x0307 +#define GL_SRC_ALPHA_SATURATE 0x0308 +#define GL_FUNC_ADD 0x8006 +#define GL_BLEND_EQUATION 0x8009 +#define GL_BLEND_EQUATION_RGB 0x8009 +#define GL_BLEND_EQUATION_ALPHA 0x883D +#define GL_FUNC_SUBTRACT 0x800A +#define GL_FUNC_REVERSE_SUBTRACT 0x800B +#define GL_BLEND_DST_RGB 0x80C8 +#define GL_BLEND_SRC_RGB 0x80C9 +#define GL_BLEND_DST_ALPHA 0x80CA
+#define GL_BLEND_SRC_ALPHA 0x80CB +#define GL_CONSTANT_COLOR 0x8001 +#define GL_ONE_MINUS_CONSTANT_COLOR 0x8002 +#define GL_CONSTANT_ALPHA 0x8003 +#define GL_ONE_MINUS_CONSTANT_ALPHA 0x8004 +#define GL_BLEND_COLOR 0x8005 +#define GL_ARRAY_BUFFER 0x8892 +#define GL_ELEMENT_ARRAY_BUFFER 0x8893 +#define GL_ARRAY_BUFFER_BINDING 0x8894 +#define GL_ELEMENT_ARRAY_BUFFER_BINDING 0x8895 +#define GL_STREAM_DRAW 0x88E0 +#define GL_STATIC_DRAW 0x88E4 +#define GL_DYNAMIC_DRAW 0x88E8 +#define GL_BUFFER_SIZE 0x8764 +#define GL_BUFFER_USAGE 0x8765 +#define GL_CURRENT_VERTEX_ATTRIB 0x8626 +#define GL_FRONT 0x0404 +#define GL_BACK 0x0405 +#define GL_FRONT_AND_BACK 0x0408 +#define GL_TEXTURE_2D 0x0DE1 +#define GL_CULL_FACE 0x0B44 +#define GL_BLEND 0x0BE2 +#define GL_DITHER 0x0BD0 +#define GL_STENCIL_TEST 0x0B90 +#define GL_DEPTH_TEST 0x0B71 +#define GL_SCISSOR_TEST 0x0C11 +#define GL_POLYGON_OFFSET_FILL 0x8037 +#define GL_SAMPLE_ALPHA_TO_COVERAGE 0x809E +#define GL_SAMPLE_COVERAGE 0x80A0 +#define GL_NO_ERROR 0 +#define GL_INVALID_ENUM 0x0500 +#define GL_INVALID_VALUE 0x0501 +#define GL_INVALID_OPERATION 0x0502 +#define GL_OUT_OF_MEMORY 0x0505 +#define GL_CW 0x0900 +#define GL_CCW 0x0901 +#define GL_LINE_WIDTH 0x0B21 +#define GL_ALIASED_POINT_SIZE_RANGE 0x846D +#define GL_ALIASED_LINE_WIDTH_RANGE 0x846E +#define GL_CULL_FACE_MODE 0x0B45 +#define GL_FRONT_FACE 0x0B46 +#define GL_DEPTH_RANGE 0x0B70 +#define GL_DEPTH_WRITEMASK 0x0B72 +#define GL_DEPTH_CLEAR_VALUE 0x0B73 +#define GL_DEPTH_FUNC 0x0B74 +#define GL_STENCIL_CLEAR_VALUE 0x0B91 +#define GL_STENCIL_FUNC 0x0B92 +#define GL_STENCIL_FAIL 0x0B94 +#define GL_STENCIL_PASS_DEPTH_FAIL 0x0B95 +#define GL_STENCIL_PASS_DEPTH_PASS 0x0B96 +#define GL_STENCIL_REF 0x0B97 +#define GL_STENCIL_VALUE_MASK 0x0B93 +#define GL_STENCIL_WRITEMASK 0x0B98 +#define GL_STENCIL_BACK_FUNC 0x8800 +#define GL_STENCIL_BACK_FAIL 0x8801 +#define GL_STENCIL_BACK_PASS_DEPTH_FAIL 0x8802 +#define GL_STENCIL_BACK_PASS_DEPTH_PASS 0x8803 +#define GL_STENCIL_BACK_REF 0x8CA3 +#define GL_STENCIL_BACK_VALUE_MASK 0x8CA4 +#define GL_STENCIL_BACK_WRITEMASK 0x8CA5 +#define GL_VIEWPORT 0x0BA2 +#define GL_SCISSOR_BOX 0x0C10 +#define GL_COLOR_CLEAR_VALUE 0x0C22 +#define GL_COLOR_WRITEMASK 0x0C23 +#define GL_UNPACK_ALIGNMENT 0x0CF5 +#define GL_PACK_ALIGNMENT 0x0D05 +#define GL_MAX_TEXTURE_SIZE 0x0D33 +#define GL_MAX_VIEWPORT_DIMS 0x0D3A +#define GL_SUBPIXEL_BITS 0x0D50 +#define GL_RED_BITS 0x0D52 +#define GL_GREEN_BITS 0x0D53 +#define GL_BLUE_BITS 0x0D54 +#define GL_ALPHA_BITS 0x0D55 +#define GL_DEPTH_BITS 0x0D56 +#define GL_STENCIL_BITS 0x0D57 +#define GL_POLYGON_OFFSET_UNITS 0x2A00 +#define GL_POLYGON_OFFSET_FACTOR 0x8038 +#define GL_TEXTURE_BINDING_2D 0x8069 +#define GL_SAMPLE_BUFFERS 0x80A8 +#define GL_SAMPLES 0x80A9 +#define GL_SAMPLE_COVERAGE_VALUE 0x80AA +#define GL_SAMPLE_COVERAGE_INVERT 0x80AB +#define GL_NUM_COMPRESSED_TEXTURE_FORMATS 0x86A2 +#define GL_COMPRESSED_TEXTURE_FORMATS 0x86A3 +#define GL_DONT_CARE 0x1100 +#define GL_FASTEST 0x1101 +#define GL_NICEST 0x1102 +#define GL_GENERATE_MIPMAP_HINT 0x8192 +#define GL_BYTE 0x1400 +#define GL_UNSIGNED_BYTE 0x1401 +#define GL_SHORT 0x1402 +#define GL_UNSIGNED_SHORT 0x1403 +#define GL_INT 0x1404 +#define GL_UNSIGNED_INT 0x1405 +#define GL_FLOAT 0x1406 +#define GL_FIXED 0x140C +#define GL_DEPTH_COMPONENT 0x1902 +#define GL_ALPHA 0x1906 +#define GL_RGB 0x1907 +#define GL_RGBA 0x1908 +#define GL_LUMINANCE 0x1909 +#define GL_LUMINANCE_ALPHA 0x190A +#define GL_UNSIGNED_SHORT_4_4_4_4 0x8033 +#define GL_UNSIGNED_SHORT_5_5_5_1 0x8034 +#define 
GL_UNSIGNED_SHORT_5_6_5 0x8363 +#define GL_FRAGMENT_SHADER 0x8B30 +#define GL_VERTEX_SHADER 0x8B31 +#define GL_MAX_VERTEX_ATTRIBS 0x8869 +#define GL_MAX_VERTEX_UNIFORM_VECTORS 0x8DFB +#define GL_MAX_VARYING_VECTORS 0x8DFC +#define GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS 0x8B4D +#define GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS 0x8B4C +#define GL_MAX_TEXTURE_IMAGE_UNITS 0x8872 +#define GL_MAX_FRAGMENT_UNIFORM_VECTORS 0x8DFD +#define GL_SHADER_TYPE 0x8B4F +#define GL_DELETE_STATUS 0x8B80 +#define GL_LINK_STATUS 0x8B82 +#define GL_VALIDATE_STATUS 0x8B83 +#define GL_ATTACHED_SHADERS 0x8B85 +#define GL_ACTIVE_UNIFORMS 0x8B86 +#define GL_ACTIVE_UNIFORM_MAX_LENGTH 0x8B87 +#define GL_ACTIVE_ATTRIBUTES 0x8B89 +#define GL_ACTIVE_ATTRIBUTE_MAX_LENGTH 0x8B8A +#define GL_SHADING_LANGUAGE_VERSION 0x8B8C +#define GL_CURRENT_PROGRAM 0x8B8D +#define GL_NEVER 0x0200 +#define GL_LESS 0x0201 +#define GL_EQUAL 0x0202 +#define GL_LEQUAL 0x0203 +#define GL_GREATER 0x0204 +#define GL_NOTEQUAL 0x0205 +#define GL_GEQUAL 0x0206 +#define GL_ALWAYS 0x0207 +#define GL_KEEP 0x1E00 +#define GL_REPLACE 0x1E01 +#define GL_INCR 0x1E02 +#define GL_DECR 0x1E03 +#define GL_INVERT 0x150A +#define GL_INCR_WRAP 0x8507 +#define GL_DECR_WRAP 0x8508 +#define GL_VENDOR 0x1F00 +#define GL_RENDERER 0x1F01 +#define GL_VERSION 0x1F02 +#define GL_EXTENSIONS 0x1F03 +#define GL_NEAREST 0x2600 +#define GL_LINEAR 0x2601 +#define GL_NEAREST_MIPMAP_NEAREST 0x2700 +#define GL_LINEAR_MIPMAP_NEAREST 0x2701 +#define GL_NEAREST_MIPMAP_LINEAR 0x2702 +#define GL_LINEAR_MIPMAP_LINEAR 0x2703 +#define GL_TEXTURE_MAG_FILTER 0x2800 +#define GL_TEXTURE_MIN_FILTER 0x2801 +#define GL_TEXTURE_WRAP_S 0x2802 +#define GL_TEXTURE_WRAP_T 0x2803 +#define GL_TEXTURE 0x1702 +#define GL_TEXTURE_CUBE_MAP 0x8513 +#define GL_TEXTURE_BINDING_CUBE_MAP 0x8514 +#define GL_TEXTURE_CUBE_MAP_POSITIVE_X 0x8515 +#define GL_TEXTURE_CUBE_MAP_NEGATIVE_X 0x8516 +#define GL_TEXTURE_CUBE_MAP_POSITIVE_Y 0x8517 +#define GL_TEXTURE_CUBE_MAP_NEGATIVE_Y 0x8518 +#define GL_TEXTURE_CUBE_MAP_POSITIVE_Z 0x8519 +#define GL_TEXTURE_CUBE_MAP_NEGATIVE_Z 0x851A +#define GL_MAX_CUBE_MAP_TEXTURE_SIZE 0x851C +#define GL_TEXTURE0 0x84C0 +#define GL_TEXTURE1 0x84C1 +#define GL_TEXTURE2 0x84C2 +#define GL_TEXTURE3 0x84C3 +#define GL_TEXTURE4 0x84C4 +#define GL_TEXTURE5 0x84C5 +#define GL_TEXTURE6 0x84C6 +#define GL_TEXTURE7 0x84C7 +#define GL_TEXTURE8 0x84C8 +#define GL_TEXTURE9 0x84C9 +#define GL_TEXTURE10 0x84CA +#define GL_TEXTURE11 0x84CB +#define GL_TEXTURE12 0x84CC +#define GL_TEXTURE13 0x84CD +#define GL_TEXTURE14 0x84CE +#define GL_TEXTURE15 0x84CF +#define GL_TEXTURE16 0x84D0 +#define GL_TEXTURE17 0x84D1 +#define GL_TEXTURE18 0x84D2 +#define GL_TEXTURE19 0x84D3 +#define GL_TEXTURE20 0x84D4 +#define GL_TEXTURE21 0x84D5 +#define GL_TEXTURE22 0x84D6 +#define GL_TEXTURE23 0x84D7 +#define GL_TEXTURE24 0x84D8 +#define GL_TEXTURE25 0x84D9 +#define GL_TEXTURE26 0x84DA +#define GL_TEXTURE27 0x84DB +#define GL_TEXTURE28 0x84DC +#define GL_TEXTURE29 0x84DD +#define GL_TEXTURE30 0x84DE +#define GL_TEXTURE31 0x84DF +#define GL_ACTIVE_TEXTURE 0x84E0 +#define GL_REPEAT 0x2901 +#define GL_CLAMP_TO_EDGE 0x812F +#define GL_MIRRORED_REPEAT 0x8370 +#define GL_FLOAT_VEC2 0x8B50 +#define GL_FLOAT_VEC3 0x8B51 +#define GL_FLOAT_VEC4 0x8B52 +#define GL_INT_VEC2 0x8B53 +#define GL_INT_VEC3 0x8B54 +#define GL_INT_VEC4 0x8B55 +#define GL_BOOL 0x8B56 +#define GL_BOOL_VEC2 0x8B57 +#define GL_BOOL_VEC3 0x8B58 +#define GL_BOOL_VEC4 0x8B59 +#define GL_FLOAT_MAT2 0x8B5A +#define GL_FLOAT_MAT3 0x8B5B +#define GL_FLOAT_MAT4 0x8B5C +#define 
GL_SAMPLER_2D 0x8B5E +#define GL_SAMPLER_CUBE 0x8B60 +#define GL_VERTEX_ATTRIB_ARRAY_ENABLED 0x8622 +#define GL_VERTEX_ATTRIB_ARRAY_SIZE 0x8623 +#define GL_VERTEX_ATTRIB_ARRAY_STRIDE 0x8624 +#define GL_VERTEX_ATTRIB_ARRAY_TYPE 0x8625 +#define GL_VERTEX_ATTRIB_ARRAY_NORMALIZED 0x886A +#define GL_VERTEX_ATTRIB_ARRAY_POINTER 0x8645 +#define GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING 0x889F +#define GL_IMPLEMENTATION_COLOR_READ_TYPE 0x8B9A +#define GL_IMPLEMENTATION_COLOR_READ_FORMAT 0x8B9B +#define GL_COMPILE_STATUS 0x8B81 +#define GL_INFO_LOG_LENGTH 0x8B84 +#define GL_SHADER_SOURCE_LENGTH 0x8B88 +#define GL_SHADER_COMPILER 0x8DFA +#define GL_SHADER_BINARY_FORMATS 0x8DF8 +#define GL_NUM_SHADER_BINARY_FORMATS 0x8DF9 +#define GL_LOW_FLOAT 0x8DF0 +#define GL_MEDIUM_FLOAT 0x8DF1 +#define GL_HIGH_FLOAT 0x8DF2 +#define GL_LOW_INT 0x8DF3 +#define GL_MEDIUM_INT 0x8DF4 +#define GL_HIGH_INT 0x8DF5 +#define GL_FRAMEBUFFER 0x8D40 +#define GL_RENDERBUFFER 0x8D41 +#define GL_RGBA4 0x8056 +#define GL_RGB5_A1 0x8057 +#define GL_RGB565 0x8D62 +#define GL_DEPTH_COMPONENT16 0x81A5 +#define GL_STENCIL_INDEX8 0x8D48 +#define GL_RENDERBUFFER_WIDTH 0x8D42 +#define GL_RENDERBUFFER_HEIGHT 0x8D43 +#define GL_RENDERBUFFER_INTERNAL_FORMAT 0x8D44 +#define GL_RENDERBUFFER_RED_SIZE 0x8D50 +#define GL_RENDERBUFFER_GREEN_SIZE 0x8D51 +#define GL_RENDERBUFFER_BLUE_SIZE 0x8D52 +#define GL_RENDERBUFFER_ALPHA_SIZE 0x8D53 +#define GL_RENDERBUFFER_DEPTH_SIZE 0x8D54 +#define GL_RENDERBUFFER_STENCIL_SIZE 0x8D55 +#define GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE 0x8CD0 +#define GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME 0x8CD1 +#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL 0x8CD2 +#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE 0x8CD3 +#define GL_COLOR_ATTACHMENT0 0x8CE0 +#define GL_DEPTH_ATTACHMENT 0x8D00 +#define GL_STENCIL_ATTACHMENT 0x8D20 +#define GL_NONE 0 +#define GL_FRAMEBUFFER_COMPLETE 0x8CD5 +#define GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT 0x8CD6 +#define GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT 0x8CD7 +#define GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS 0x8CD9 +#define GL_FRAMEBUFFER_UNSUPPORTED 0x8CDD +#define GL_FRAMEBUFFER_BINDING 0x8CA6 +#define GL_RENDERBUFFER_BINDING 0x8CA7 +#define GL_MAX_RENDERBUFFER_SIZE 0x84E8 +#define GL_INVALID_FRAMEBUFFER_OPERATION 0x0506 +typedef void (GL_APIENTRYP PFNGLACTIVETEXTUREPROC) (GLenum texture); +typedef void (GL_APIENTRYP PFNGLATTACHSHADERPROC) (GLuint program, GLuint shader); +typedef void (GL_APIENTRYP PFNGLBINDATTRIBLOCATIONPROC) (GLuint program, GLuint index, const GLchar *name); +typedef void (GL_APIENTRYP PFNGLBINDBUFFERPROC) (GLenum target, GLuint buffer); +typedef void (GL_APIENTRYP PFNGLBINDFRAMEBUFFERPROC) (GLenum target, GLuint framebuffer); +typedef void (GL_APIENTRYP PFNGLBINDRENDERBUFFERPROC) (GLenum target, GLuint renderbuffer); +typedef void (GL_APIENTRYP PFNGLBINDTEXTUREPROC) (GLenum target, GLuint texture); +typedef void (GL_APIENTRYP PFNGLBLENDCOLORPROC) (GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha); +typedef void (GL_APIENTRYP PFNGLBLENDEQUATIONPROC) (GLenum mode); +typedef void (GL_APIENTRYP PFNGLBLENDEQUATIONSEPARATEPROC) (GLenum modeRGB, GLenum modeAlpha); +typedef void (GL_APIENTRYP PFNGLBLENDFUNCPROC) (GLenum sfactor, GLenum dfactor); +typedef void (GL_APIENTRYP PFNGLBLENDFUNCSEPARATEPROC) (GLenum sfactorRGB, GLenum dfactorRGB, GLenum sfactorAlpha, GLenum dfactorAlpha); +typedef void (GL_APIENTRYP PFNGLBUFFERDATAPROC) (GLenum target, GLsizeiptr size, const void *data, GLenum usage); +typedef void (GL_APIENTRYP PFNGLBUFFERSUBDATAPROC) (GLenum target, 
GLintptr offset, GLsizeiptr size, const void *data); +typedef GLenum (GL_APIENTRYP PFNGLCHECKFRAMEBUFFERSTATUSPROC) (GLenum target); +typedef void (GL_APIENTRYP PFNGLCLEARPROC) (GLbitfield mask); +typedef void (GL_APIENTRYP PFNGLCLEARCOLORPROC) (GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha); +typedef void (GL_APIENTRYP PFNGLCLEARDEPTHFPROC) (GLfloat d); +typedef void (GL_APIENTRYP PFNGLCLEARSTENCILPROC) (GLint s); +typedef void (GL_APIENTRYP PFNGLCOLORMASKPROC) (GLboolean red, GLboolean green, GLboolean blue, GLboolean alpha); +typedef void (GL_APIENTRYP PFNGLCOMPILESHADERPROC) (GLuint shader); +typedef void (GL_APIENTRYP PFNGLCOMPRESSEDTEXIMAGE2DPROC) (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLsizei imageSize, const void *data); +typedef void (GL_APIENTRYP PFNGLCOMPRESSEDTEXSUBIMAGE2DPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, const void *data); +typedef void (GL_APIENTRYP PFNGLCOPYTEXIMAGE2DPROC) (GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLsizei height, GLint border); +typedef void (GL_APIENTRYP PFNGLCOPYTEXSUBIMAGE2DPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height); +typedef GLuint (GL_APIENTRYP PFNGLCREATEPROGRAMPROC) (void); +typedef GLuint (GL_APIENTRYP PFNGLCREATESHADERPROC) (GLenum type); +typedef void (GL_APIENTRYP PFNGLCULLFACEPROC) (GLenum mode); +typedef void (GL_APIENTRYP PFNGLDELETEBUFFERSPROC) (GLsizei n, const GLuint *buffers); +typedef void (GL_APIENTRYP PFNGLDELETEFRAMEBUFFERSPROC) (GLsizei n, const GLuint *framebuffers); +typedef void (GL_APIENTRYP PFNGLDELETEPROGRAMPROC) (GLuint program); +typedef void (GL_APIENTRYP PFNGLDELETERENDERBUFFERSPROC) (GLsizei n, const GLuint *renderbuffers); +typedef void (GL_APIENTRYP PFNGLDELETESHADERPROC) (GLuint shader); +typedef void (GL_APIENTRYP PFNGLDELETETEXTURESPROC) (GLsizei n, const GLuint *textures); +typedef void (GL_APIENTRYP PFNGLDEPTHFUNCPROC) (GLenum func); +typedef void (GL_APIENTRYP PFNGLDEPTHMASKPROC) (GLboolean flag); +typedef void (GL_APIENTRYP PFNGLDEPTHRANGEFPROC) (GLfloat n, GLfloat f); +typedef void (GL_APIENTRYP PFNGLDETACHSHADERPROC) (GLuint program, GLuint shader); +typedef void (GL_APIENTRYP PFNGLDISABLEPROC) (GLenum cap); +typedef void (GL_APIENTRYP PFNGLDISABLEVERTEXATTRIBARRAYPROC) (GLuint index); +typedef void (GL_APIENTRYP PFNGLDRAWARRAYSPROC) (GLenum mode, GLint first, GLsizei count); +typedef void (GL_APIENTRYP PFNGLDRAWELEMENTSPROC) (GLenum mode, GLsizei count, GLenum type, const void *indices); +typedef void (GL_APIENTRYP PFNGLENABLEPROC) (GLenum cap); +typedef void (GL_APIENTRYP PFNGLENABLEVERTEXATTRIBARRAYPROC) (GLuint index); +typedef void (GL_APIENTRYP PFNGLFINISHPROC) (void); +typedef void (GL_APIENTRYP PFNGLFLUSHPROC) (void); +typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERRENDERBUFFERPROC) (GLenum target, GLenum attachment, GLenum renderbuffertarget, GLuint renderbuffer); +typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERTEXTURE2DPROC) (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level); +typedef void (GL_APIENTRYP PFNGLFRONTFACEPROC) (GLenum mode); +typedef void (GL_APIENTRYP PFNGLGENBUFFERSPROC) (GLsizei n, GLuint *buffers); +typedef void (GL_APIENTRYP PFNGLGENERATEMIPMAPPROC) (GLenum target); +typedef void (GL_APIENTRYP PFNGLGENFRAMEBUFFERSPROC) (GLsizei n, GLuint *framebuffers); +typedef void (GL_APIENTRYP 
PFNGLGENRENDERBUFFERSPROC) (GLsizei n, GLuint *renderbuffers); +typedef void (GL_APIENTRYP PFNGLGENTEXTURESPROC) (GLsizei n, GLuint *textures); +typedef void (GL_APIENTRYP PFNGLGETACTIVEATTRIBPROC) (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLint *size, GLenum *type, GLchar *name); +typedef void (GL_APIENTRYP PFNGLGETACTIVEUNIFORMPROC) (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLint *size, GLenum *type, GLchar *name); +typedef void (GL_APIENTRYP PFNGLGETATTACHEDSHADERSPROC) (GLuint program, GLsizei maxCount, GLsizei *count, GLuint *shaders); +typedef GLint (GL_APIENTRYP PFNGLGETATTRIBLOCATIONPROC) (GLuint program, const GLchar *name); +typedef void (GL_APIENTRYP PFNGLGETBOOLEANVPROC) (GLenum pname, GLboolean *data); +typedef void (GL_APIENTRYP PFNGLGETBUFFERPARAMETERIVPROC) (GLenum target, GLenum pname, GLint *params); +typedef GLenum (GL_APIENTRYP PFNGLGETERRORPROC) (void); +typedef void (GL_APIENTRYP PFNGLGETFLOATVPROC) (GLenum pname, GLfloat *data); +typedef void (GL_APIENTRYP PFNGLGETFRAMEBUFFERATTACHMENTPARAMETERIVPROC) (GLenum target, GLenum attachment, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETINTEGERVPROC) (GLenum pname, GLint *data); +typedef void (GL_APIENTRYP PFNGLGETPROGRAMIVPROC) (GLuint program, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETPROGRAMINFOLOGPROC) (GLuint program, GLsizei bufSize, GLsizei *length, GLchar *infoLog); +typedef void (GL_APIENTRYP PFNGLGETRENDERBUFFERPARAMETERIVPROC) (GLenum target, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETSHADERIVPROC) (GLuint shader, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETSHADERINFOLOGPROC) (GLuint shader, GLsizei bufSize, GLsizei *length, GLchar *infoLog); +typedef void (GL_APIENTRYP PFNGLGETSHADERPRECISIONFORMATPROC) (GLenum shadertype, GLenum precisiontype, GLint *range, GLint *precision); +typedef void (GL_APIENTRYP PFNGLGETSHADERSOURCEPROC) (GLuint shader, GLsizei bufSize, GLsizei *length, GLchar *source); +typedef const GLubyte *(GL_APIENTRYP PFNGLGETSTRINGPROC) (GLenum name); +typedef void (GL_APIENTRYP PFNGLGETTEXPARAMETERFVPROC) (GLenum target, GLenum pname, GLfloat *params); +typedef void (GL_APIENTRYP PFNGLGETTEXPARAMETERIVPROC) (GLenum target, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETUNIFORMFVPROC) (GLuint program, GLint location, GLfloat *params); +typedef void (GL_APIENTRYP PFNGLGETUNIFORMIVPROC) (GLuint program, GLint location, GLint *params); +typedef GLint (GL_APIENTRYP PFNGLGETUNIFORMLOCATIONPROC) (GLuint program, const GLchar *name); +typedef void (GL_APIENTRYP PFNGLGETVERTEXATTRIBFVPROC) (GLuint index, GLenum pname, GLfloat *params); +typedef void (GL_APIENTRYP PFNGLGETVERTEXATTRIBIVPROC) (GLuint index, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETVERTEXATTRIBPOINTERVPROC) (GLuint index, GLenum pname, void **pointer); +typedef void (GL_APIENTRYP PFNGLHINTPROC) (GLenum target, GLenum mode); +typedef GLboolean (GL_APIENTRYP PFNGLISBUFFERPROC) (GLuint buffer); +typedef GLboolean (GL_APIENTRYP PFNGLISENABLEDPROC) (GLenum cap); +typedef GLboolean (GL_APIENTRYP PFNGLISFRAMEBUFFERPROC) (GLuint framebuffer); +typedef GLboolean (GL_APIENTRYP PFNGLISPROGRAMPROC) (GLuint program); +typedef GLboolean (GL_APIENTRYP PFNGLISRENDERBUFFERPROC) (GLuint renderbuffer); +typedef GLboolean (GL_APIENTRYP PFNGLISSHADERPROC) (GLuint shader); +typedef GLboolean (GL_APIENTRYP PFNGLISTEXTUREPROC) (GLuint texture); +typedef void (GL_APIENTRYP 
PFNGLLINEWIDTHPROC) (GLfloat width); +typedef void (GL_APIENTRYP PFNGLLINKPROGRAMPROC) (GLuint program); +typedef void (GL_APIENTRYP PFNGLPIXELSTOREIPROC) (GLenum pname, GLint param); +typedef void (GL_APIENTRYP PFNGLPOLYGONOFFSETPROC) (GLfloat factor, GLfloat units); +typedef void (GL_APIENTRYP PFNGLREADPIXELSPROC) (GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, void *pixels); +typedef void (GL_APIENTRYP PFNGLRELEASESHADERCOMPILERPROC) (void); +typedef void (GL_APIENTRYP PFNGLRENDERBUFFERSTORAGEPROC) (GLenum target, GLenum internalformat, GLsizei width, GLsizei height); +typedef void (GL_APIENTRYP PFNGLSAMPLECOVERAGEPROC) (GLfloat value, GLboolean invert); +typedef void (GL_APIENTRYP PFNGLSCISSORPROC) (GLint x, GLint y, GLsizei width, GLsizei height); +typedef void (GL_APIENTRYP PFNGLSHADERBINARYPROC) (GLsizei count, const GLuint *shaders, GLenum binaryformat, const void *binary, GLsizei length); +typedef void (GL_APIENTRYP PFNGLSHADERSOURCEPROC) (GLuint shader, GLsizei count, const GLchar *const*string, const GLint *length); +typedef void (GL_APIENTRYP PFNGLSTENCILFUNCPROC) (GLenum func, GLint ref, GLuint mask); +typedef void (GL_APIENTRYP PFNGLSTENCILFUNCSEPARATEPROC) (GLenum face, GLenum func, GLint ref, GLuint mask); +typedef void (GL_APIENTRYP PFNGLSTENCILMASKPROC) (GLuint mask); +typedef void (GL_APIENTRYP PFNGLSTENCILMASKSEPARATEPROC) (GLenum face, GLuint mask); +typedef void (GL_APIENTRYP PFNGLSTENCILOPPROC) (GLenum fail, GLenum zfail, GLenum zpass); +typedef void (GL_APIENTRYP PFNGLSTENCILOPSEPARATEPROC) (GLenum face, GLenum sfail, GLenum dpfail, GLenum dppass); +typedef void (GL_APIENTRYP PFNGLTEXIMAGE2DPROC) (GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, const void *pixels); +typedef void (GL_APIENTRYP PFNGLTEXPARAMETERFPROC) (GLenum target, GLenum pname, GLfloat param); +typedef void (GL_APIENTRYP PFNGLTEXPARAMETERFVPROC) (GLenum target, GLenum pname, const GLfloat *params); +typedef void (GL_APIENTRYP PFNGLTEXPARAMETERIPROC) (GLenum target, GLenum pname, GLint param); +typedef void (GL_APIENTRYP PFNGLTEXPARAMETERIVPROC) (GLenum target, GLenum pname, const GLint *params); +typedef void (GL_APIENTRYP PFNGLTEXSUBIMAGE2DPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, const void *pixels); +typedef void (GL_APIENTRYP PFNGLUNIFORM1FPROC) (GLint location, GLfloat v0); +typedef void (GL_APIENTRYP PFNGLUNIFORM1FVPROC) (GLint location, GLsizei count, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM1IPROC) (GLint location, GLint v0); +typedef void (GL_APIENTRYP PFNGLUNIFORM1IVPROC) (GLint location, GLsizei count, const GLint *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM2FPROC) (GLint location, GLfloat v0, GLfloat v1); +typedef void (GL_APIENTRYP PFNGLUNIFORM2FVPROC) (GLint location, GLsizei count, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM2IPROC) (GLint location, GLint v0, GLint v1); +typedef void (GL_APIENTRYP PFNGLUNIFORM2IVPROC) (GLint location, GLsizei count, const GLint *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM3FPROC) (GLint location, GLfloat v0, GLfloat v1, GLfloat v2); +typedef void (GL_APIENTRYP PFNGLUNIFORM3FVPROC) (GLint location, GLsizei count, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM3IPROC) (GLint location, GLint v0, GLint v1, GLint v2); +typedef void (GL_APIENTRYP PFNGLUNIFORM3IVPROC) (GLint location, GLsizei count, const 
GLint *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM4FPROC) (GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3); +typedef void (GL_APIENTRYP PFNGLUNIFORM4FVPROC) (GLint location, GLsizei count, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM4IPROC) (GLint location, GLint v0, GLint v1, GLint v2, GLint v3); +typedef void (GL_APIENTRYP PFNGLUNIFORM4IVPROC) (GLint location, GLsizei count, const GLint *value); +typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX2FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX3FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX4FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUSEPROGRAMPROC) (GLuint program); +typedef void (GL_APIENTRYP PFNGLVALIDATEPROGRAMPROC) (GLuint program); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB1FPROC) (GLuint index, GLfloat x); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB1FVPROC) (GLuint index, const GLfloat *v); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB2FPROC) (GLuint index, GLfloat x, GLfloat y); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB2FVPROC) (GLuint index, const GLfloat *v); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB3FPROC) (GLuint index, GLfloat x, GLfloat y, GLfloat z); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB3FVPROC) (GLuint index, const GLfloat *v); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB4FPROC) (GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB4FVPROC) (GLuint index, const GLfloat *v); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBPOINTERPROC) (GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, const void *pointer); +typedef void (GL_APIENTRYP PFNGLVIEWPORTPROC) (GLint x, GLint y, GLsizei width, GLsizei height); +#if GL_GLES_PROTOTYPES +GL_APICALL void GL_APIENTRY glActiveTexture (GLenum texture); +GL_APICALL void GL_APIENTRY glAttachShader (GLuint program, GLuint shader); +GL_APICALL void GL_APIENTRY glBindAttribLocation (GLuint program, GLuint index, const GLchar *name); +GL_APICALL void GL_APIENTRY glBindBuffer (GLenum target, GLuint buffer); +GL_APICALL void GL_APIENTRY glBindFramebuffer (GLenum target, GLuint framebuffer); +GL_APICALL void GL_APIENTRY glBindRenderbuffer (GLenum target, GLuint renderbuffer); +GL_APICALL void GL_APIENTRY glBindTexture (GLenum target, GLuint texture); +GL_APICALL void GL_APIENTRY glBlendColor (GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha); +GL_APICALL void GL_APIENTRY glBlendEquation (GLenum mode); +GL_APICALL void GL_APIENTRY glBlendEquationSeparate (GLenum modeRGB, GLenum modeAlpha); +GL_APICALL void GL_APIENTRY glBlendFunc (GLenum sfactor, GLenum dfactor); +GL_APICALL void GL_APIENTRY glBlendFuncSeparate (GLenum sfactorRGB, GLenum dfactorRGB, GLenum sfactorAlpha, GLenum dfactorAlpha); +GL_APICALL void GL_APIENTRY glBufferData (GLenum target, GLsizeiptr size, const void *data, GLenum usage); +GL_APICALL void GL_APIENTRY glBufferSubData (GLenum target, GLintptr offset, GLsizeiptr size, const void *data); +GL_APICALL GLenum GL_APIENTRY glCheckFramebufferStatus (GLenum target); +GL_APICALL void GL_APIENTRY glClear (GLbitfield mask); +GL_APICALL void GL_APIENTRY glClearColor (GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha); +GL_APICALL void GL_APIENTRY glClearDepthf (GLfloat d); +GL_APICALL void GL_APIENTRY 
glClearStencil (GLint s); +GL_APICALL void GL_APIENTRY glColorMask (GLboolean red, GLboolean green, GLboolean blue, GLboolean alpha); +GL_APICALL void GL_APIENTRY glCompileShader (GLuint shader); +GL_APICALL void GL_APIENTRY glCompressedTexImage2D (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLsizei imageSize, const void *data); +GL_APICALL void GL_APIENTRY glCompressedTexSubImage2D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, const void *data); +GL_APICALL void GL_APIENTRY glCopyTexImage2D (GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLsizei height, GLint border); +GL_APICALL void GL_APIENTRY glCopyTexSubImage2D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height); +GL_APICALL GLuint GL_APIENTRY glCreateProgram (void); +GL_APICALL GLuint GL_APIENTRY glCreateShader (GLenum type); +GL_APICALL void GL_APIENTRY glCullFace (GLenum mode); +GL_APICALL void GL_APIENTRY glDeleteBuffers (GLsizei n, const GLuint *buffers); +GL_APICALL void GL_APIENTRY glDeleteFramebuffers (GLsizei n, const GLuint *framebuffers); +GL_APICALL void GL_APIENTRY glDeleteProgram (GLuint program); +GL_APICALL void GL_APIENTRY glDeleteRenderbuffers (GLsizei n, const GLuint *renderbuffers); +GL_APICALL void GL_APIENTRY glDeleteShader (GLuint shader); +GL_APICALL void GL_APIENTRY glDeleteTextures (GLsizei n, const GLuint *textures); +GL_APICALL void GL_APIENTRY glDepthFunc (GLenum func); +GL_APICALL void GL_APIENTRY glDepthMask (GLboolean flag); +GL_APICALL void GL_APIENTRY glDepthRangef (GLfloat n, GLfloat f); +GL_APICALL void GL_APIENTRY glDetachShader (GLuint program, GLuint shader); +GL_APICALL void GL_APIENTRY glDisable (GLenum cap); +GL_APICALL void GL_APIENTRY glDisableVertexAttribArray (GLuint index); +GL_APICALL void GL_APIENTRY glDrawArrays (GLenum mode, GLint first, GLsizei count); +GL_APICALL void GL_APIENTRY glDrawElements (GLenum mode, GLsizei count, GLenum type, const void *indices); +GL_APICALL void GL_APIENTRY glEnable (GLenum cap); +GL_APICALL void GL_APIENTRY glEnableVertexAttribArray (GLuint index); +GL_APICALL void GL_APIENTRY glFinish (void); +GL_APICALL void GL_APIENTRY glFlush (void); +GL_APICALL void GL_APIENTRY glFramebufferRenderbuffer (GLenum target, GLenum attachment, GLenum renderbuffertarget, GLuint renderbuffer); +GL_APICALL void GL_APIENTRY glFramebufferTexture2D (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level); +GL_APICALL void GL_APIENTRY glFrontFace (GLenum mode); +GL_APICALL void GL_APIENTRY glGenBuffers (GLsizei n, GLuint *buffers); +GL_APICALL void GL_APIENTRY glGenerateMipmap (GLenum target); +GL_APICALL void GL_APIENTRY glGenFramebuffers (GLsizei n, GLuint *framebuffers); +GL_APICALL void GL_APIENTRY glGenRenderbuffers (GLsizei n, GLuint *renderbuffers); +GL_APICALL void GL_APIENTRY glGenTextures (GLsizei n, GLuint *textures); +GL_APICALL void GL_APIENTRY glGetActiveAttrib (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLint *size, GLenum *type, GLchar *name); +GL_APICALL void GL_APIENTRY glGetActiveUniform (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLint *size, GLenum *type, GLchar *name); +GL_APICALL void GL_APIENTRY glGetAttachedShaders (GLuint program, GLsizei maxCount, GLsizei *count, GLuint *shaders); +GL_APICALL GLint GL_APIENTRY glGetAttribLocation (GLuint program, const GLchar 
*name); +GL_APICALL void GL_APIENTRY glGetBooleanv (GLenum pname, GLboolean *data); +GL_APICALL void GL_APIENTRY glGetBufferParameteriv (GLenum target, GLenum pname, GLint *params); +GL_APICALL GLenum GL_APIENTRY glGetError (void); +GL_APICALL void GL_APIENTRY glGetFloatv (GLenum pname, GLfloat *data); +GL_APICALL void GL_APIENTRY glGetFramebufferAttachmentParameteriv (GLenum target, GLenum attachment, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetIntegerv (GLenum pname, GLint *data); +GL_APICALL void GL_APIENTRY glGetProgramiv (GLuint program, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetProgramInfoLog (GLuint program, GLsizei bufSize, GLsizei *length, GLchar *infoLog); +GL_APICALL void GL_APIENTRY glGetRenderbufferParameteriv (GLenum target, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetShaderiv (GLuint shader, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetShaderInfoLog (GLuint shader, GLsizei bufSize, GLsizei *length, GLchar *infoLog); +GL_APICALL void GL_APIENTRY glGetShaderPrecisionFormat (GLenum shadertype, GLenum precisiontype, GLint *range, GLint *precision); +GL_APICALL void GL_APIENTRY glGetShaderSource (GLuint shader, GLsizei bufSize, GLsizei *length, GLchar *source); +GL_APICALL const GLubyte *GL_APIENTRY glGetString (GLenum name); +GL_APICALL void GL_APIENTRY glGetTexParameterfv (GLenum target, GLenum pname, GLfloat *params); +GL_APICALL void GL_APIENTRY glGetTexParameteriv (GLenum target, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetUniformfv (GLuint program, GLint location, GLfloat *params); +GL_APICALL void GL_APIENTRY glGetUniformiv (GLuint program, GLint location, GLint *params); +GL_APICALL GLint GL_APIENTRY glGetUniformLocation (GLuint program, const GLchar *name); +GL_APICALL void GL_APIENTRY glGetVertexAttribfv (GLuint index, GLenum pname, GLfloat *params); +GL_APICALL void GL_APIENTRY glGetVertexAttribiv (GLuint index, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetVertexAttribPointerv (GLuint index, GLenum pname, void **pointer); +GL_APICALL void GL_APIENTRY glHint (GLenum target, GLenum mode); +GL_APICALL GLboolean GL_APIENTRY glIsBuffer (GLuint buffer); +GL_APICALL GLboolean GL_APIENTRY glIsEnabled (GLenum cap); +GL_APICALL GLboolean GL_APIENTRY glIsFramebuffer (GLuint framebuffer); +GL_APICALL GLboolean GL_APIENTRY glIsProgram (GLuint program); +GL_APICALL GLboolean GL_APIENTRY glIsRenderbuffer (GLuint renderbuffer); +GL_APICALL GLboolean GL_APIENTRY glIsShader (GLuint shader); +GL_APICALL GLboolean GL_APIENTRY glIsTexture (GLuint texture); +GL_APICALL void GL_APIENTRY glLineWidth (GLfloat width); +GL_APICALL void GL_APIENTRY glLinkProgram (GLuint program); +GL_APICALL void GL_APIENTRY glPixelStorei (GLenum pname, GLint param); +GL_APICALL void GL_APIENTRY glPolygonOffset (GLfloat factor, GLfloat units); +GL_APICALL void GL_APIENTRY glReadPixels (GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, void *pixels); +GL_APICALL void GL_APIENTRY glReleaseShaderCompiler (void); +GL_APICALL void GL_APIENTRY glRenderbufferStorage (GLenum target, GLenum internalformat, GLsizei width, GLsizei height); +GL_APICALL void GL_APIENTRY glSampleCoverage (GLfloat value, GLboolean invert); +GL_APICALL void GL_APIENTRY glScissor (GLint x, GLint y, GLsizei width, GLsizei height); +GL_APICALL void GL_APIENTRY glShaderBinary (GLsizei count, const GLuint *shaders, GLenum binaryformat, const void *binary, GLsizei length); +GL_APICALL void GL_APIENTRY 
glShaderSource (GLuint shader, GLsizei count, const GLchar *const*string, const GLint *length); +GL_APICALL void GL_APIENTRY glStencilFunc (GLenum func, GLint ref, GLuint mask); +GL_APICALL void GL_APIENTRY glStencilFuncSeparate (GLenum face, GLenum func, GLint ref, GLuint mask); +GL_APICALL void GL_APIENTRY glStencilMask (GLuint mask); +GL_APICALL void GL_APIENTRY glStencilMaskSeparate (GLenum face, GLuint mask); +GL_APICALL void GL_APIENTRY glStencilOp (GLenum fail, GLenum zfail, GLenum zpass); +GL_APICALL void GL_APIENTRY glStencilOpSeparate (GLenum face, GLenum sfail, GLenum dpfail, GLenum dppass); +GL_APICALL void GL_APIENTRY glTexImage2D (GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, const void *pixels); +GL_APICALL void GL_APIENTRY glTexParameterf (GLenum target, GLenum pname, GLfloat param); +GL_APICALL void GL_APIENTRY glTexParameterfv (GLenum target, GLenum pname, const GLfloat *params); +GL_APICALL void GL_APIENTRY glTexParameteri (GLenum target, GLenum pname, GLint param); +GL_APICALL void GL_APIENTRY glTexParameteriv (GLenum target, GLenum pname, const GLint *params); +GL_APICALL void GL_APIENTRY glTexSubImage2D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, const void *pixels); +GL_APICALL void GL_APIENTRY glUniform1f (GLint location, GLfloat v0); +GL_APICALL void GL_APIENTRY glUniform1fv (GLint location, GLsizei count, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniform1i (GLint location, GLint v0); +GL_APICALL void GL_APIENTRY glUniform1iv (GLint location, GLsizei count, const GLint *value); +GL_APICALL void GL_APIENTRY glUniform2f (GLint location, GLfloat v0, GLfloat v1); +GL_APICALL void GL_APIENTRY glUniform2fv (GLint location, GLsizei count, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniform2i (GLint location, GLint v0, GLint v1); +GL_APICALL void GL_APIENTRY glUniform2iv (GLint location, GLsizei count, const GLint *value); +GL_APICALL void GL_APIENTRY glUniform3f (GLint location, GLfloat v0, GLfloat v1, GLfloat v2); +GL_APICALL void GL_APIENTRY glUniform3fv (GLint location, GLsizei count, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniform3i (GLint location, GLint v0, GLint v1, GLint v2); +GL_APICALL void GL_APIENTRY glUniform3iv (GLint location, GLsizei count, const GLint *value); +GL_APICALL void GL_APIENTRY glUniform4f (GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3); +GL_APICALL void GL_APIENTRY glUniform4fv (GLint location, GLsizei count, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniform4i (GLint location, GLint v0, GLint v1, GLint v2, GLint v3); +GL_APICALL void GL_APIENTRY glUniform4iv (GLint location, GLsizei count, const GLint *value); +GL_APICALL void GL_APIENTRY glUniformMatrix2fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniformMatrix3fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniformMatrix4fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUseProgram (GLuint program); +GL_APICALL void GL_APIENTRY glValidateProgram (GLuint program); +GL_APICALL void GL_APIENTRY glVertexAttrib1f (GLuint index, GLfloat x); +GL_APICALL void GL_APIENTRY glVertexAttrib1fv (GLuint index, const GLfloat *v); +GL_APICALL void GL_APIENTRY glVertexAttrib2f (GLuint index, GLfloat x, GLfloat y); 
+GL_APICALL void GL_APIENTRY glVertexAttrib2fv (GLuint index, const GLfloat *v); +GL_APICALL void GL_APIENTRY glVertexAttrib3f (GLuint index, GLfloat x, GLfloat y, GLfloat z); +GL_APICALL void GL_APIENTRY glVertexAttrib3fv (GLuint index, const GLfloat *v); +GL_APICALL void GL_APIENTRY glVertexAttrib4f (GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w); +GL_APICALL void GL_APIENTRY glVertexAttrib4fv (GLuint index, const GLfloat *v); +GL_APICALL void GL_APIENTRY glVertexAttribPointer (GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, const void *pointer); +GL_APICALL void GL_APIENTRY glViewport (GLint x, GLint y, GLsizei width, GLsizei height); +#endif +#endif /* GL_ES_VERSION_2_0 */ + +#ifndef GL_ES_VERSION_3_0 +#define GL_ES_VERSION_3_0 1 +typedef unsigned short GLhalf; +#define GL_READ_BUFFER 0x0C02 +#define GL_UNPACK_ROW_LENGTH 0x0CF2 +#define GL_UNPACK_SKIP_ROWS 0x0CF3 +#define GL_UNPACK_SKIP_PIXELS 0x0CF4 +#define GL_PACK_ROW_LENGTH 0x0D02 +#define GL_PACK_SKIP_ROWS 0x0D03 +#define GL_PACK_SKIP_PIXELS 0x0D04 +#define GL_COLOR 0x1800 +#define GL_DEPTH 0x1801 +#define GL_STENCIL 0x1802 +#define GL_RED 0x1903 +#define GL_RGB8 0x8051 +#define GL_RGBA8 0x8058 +#define GL_RGB10_A2 0x8059 +#define GL_TEXTURE_BINDING_3D 0x806A +#define GL_UNPACK_SKIP_IMAGES 0x806D +#define GL_UNPACK_IMAGE_HEIGHT 0x806E +#define GL_TEXTURE_3D 0x806F +#define GL_TEXTURE_WRAP_R 0x8072 +#define GL_MAX_3D_TEXTURE_SIZE 0x8073 +#define GL_UNSIGNED_INT_2_10_10_10_REV 0x8368 +#define GL_MAX_ELEMENTS_VERTICES 0x80E8 +#define GL_MAX_ELEMENTS_INDICES 0x80E9 +#define GL_TEXTURE_MIN_LOD 0x813A +#define GL_TEXTURE_MAX_LOD 0x813B +#define GL_TEXTURE_BASE_LEVEL 0x813C +#define GL_TEXTURE_MAX_LEVEL 0x813D +#define GL_MIN 0x8007 +#define GL_MAX 0x8008 +#define GL_DEPTH_COMPONENT24 0x81A6 +#define GL_MAX_TEXTURE_LOD_BIAS 0x84FD +#define GL_TEXTURE_COMPARE_MODE 0x884C +#define GL_TEXTURE_COMPARE_FUNC 0x884D +#define GL_CURRENT_QUERY 0x8865 +#define GL_QUERY_RESULT 0x8866 +#define GL_QUERY_RESULT_AVAILABLE 0x8867 +#define GL_BUFFER_MAPPED 0x88BC +#define GL_BUFFER_MAP_POINTER 0x88BD +#define GL_STREAM_READ 0x88E1 +#define GL_STREAM_COPY 0x88E2 +#define GL_STATIC_READ 0x88E5 +#define GL_STATIC_COPY 0x88E6 +#define GL_DYNAMIC_READ 0x88E9 +#define GL_DYNAMIC_COPY 0x88EA +#define GL_MAX_DRAW_BUFFERS 0x8824 +#define GL_DRAW_BUFFER0 0x8825 +#define GL_DRAW_BUFFER1 0x8826 +#define GL_DRAW_BUFFER2 0x8827 +#define GL_DRAW_BUFFER3 0x8828 +#define GL_DRAW_BUFFER4 0x8829 +#define GL_DRAW_BUFFER5 0x882A +#define GL_DRAW_BUFFER6 0x882B +#define GL_DRAW_BUFFER7 0x882C +#define GL_DRAW_BUFFER8 0x882D +#define GL_DRAW_BUFFER9 0x882E +#define GL_DRAW_BUFFER10 0x882F +#define GL_DRAW_BUFFER11 0x8830 +#define GL_DRAW_BUFFER12 0x8831 +#define GL_DRAW_BUFFER13 0x8832 +#define GL_DRAW_BUFFER14 0x8833 +#define GL_DRAW_BUFFER15 0x8834 +#define GL_MAX_FRAGMENT_UNIFORM_COMPONENTS 0x8B49 +#define GL_MAX_VERTEX_UNIFORM_COMPONENTS 0x8B4A +#define GL_SAMPLER_3D 0x8B5F +#define GL_SAMPLER_2D_SHADOW 0x8B62 +#define GL_FRAGMENT_SHADER_DERIVATIVE_HINT 0x8B8B +#define GL_PIXEL_PACK_BUFFER 0x88EB +#define GL_PIXEL_UNPACK_BUFFER 0x88EC +#define GL_PIXEL_PACK_BUFFER_BINDING 0x88ED +#define GL_PIXEL_UNPACK_BUFFER_BINDING 0x88EF +#define GL_FLOAT_MAT2x3 0x8B65 +#define GL_FLOAT_MAT2x4 0x8B66 +#define GL_FLOAT_MAT3x2 0x8B67 +#define GL_FLOAT_MAT3x4 0x8B68 +#define GL_FLOAT_MAT4x2 0x8B69 +#define GL_FLOAT_MAT4x3 0x8B6A +#define GL_SRGB 0x8C40 +#define GL_SRGB8 0x8C41 +#define GL_SRGB8_ALPHA8 0x8C43 +#define GL_COMPARE_REF_TO_TEXTURE 0x884E 
+#define GL_MAJOR_VERSION 0x821B +#define GL_MINOR_VERSION 0x821C +#define GL_NUM_EXTENSIONS 0x821D +#define GL_RGBA32F 0x8814 +#define GL_RGB32F 0x8815 +#define GL_RGBA16F 0x881A +#define GL_RGB16F 0x881B +#define GL_VERTEX_ATTRIB_ARRAY_INTEGER 0x88FD +#define GL_MAX_ARRAY_TEXTURE_LAYERS 0x88FF +#define GL_MIN_PROGRAM_TEXEL_OFFSET 0x8904 +#define GL_MAX_PROGRAM_TEXEL_OFFSET 0x8905 +#define GL_MAX_VARYING_COMPONENTS 0x8B4B +#define GL_TEXTURE_2D_ARRAY 0x8C1A +#define GL_TEXTURE_BINDING_2D_ARRAY 0x8C1D +#define GL_R11F_G11F_B10F 0x8C3A +#define GL_UNSIGNED_INT_10F_11F_11F_REV 0x8C3B +#define GL_RGB9_E5 0x8C3D +#define GL_UNSIGNED_INT_5_9_9_9_REV 0x8C3E +#define GL_TRANSFORM_FEEDBACK_VARYING_MAX_LENGTH 0x8C76 +#define GL_TRANSFORM_FEEDBACK_BUFFER_MODE 0x8C7F +#define GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_COMPONENTS 0x8C80 +#define GL_TRANSFORM_FEEDBACK_VARYINGS 0x8C83 +#define GL_TRANSFORM_FEEDBACK_BUFFER_START 0x8C84 +#define GL_TRANSFORM_FEEDBACK_BUFFER_SIZE 0x8C85 +#define GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN 0x8C88 +#define GL_RASTERIZER_DISCARD 0x8C89 +#define GL_MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS 0x8C8A +#define GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_ATTRIBS 0x8C8B +#define GL_INTERLEAVED_ATTRIBS 0x8C8C +#define GL_SEPARATE_ATTRIBS 0x8C8D +#define GL_TRANSFORM_FEEDBACK_BUFFER 0x8C8E +#define GL_TRANSFORM_FEEDBACK_BUFFER_BINDING 0x8C8F +#define GL_RGBA32UI 0x8D70 +#define GL_RGB32UI 0x8D71 +#define GL_RGBA16UI 0x8D76 +#define GL_RGB16UI 0x8D77 +#define GL_RGBA8UI 0x8D7C +#define GL_RGB8UI 0x8D7D +#define GL_RGBA32I 0x8D82 +#define GL_RGB32I 0x8D83 +#define GL_RGBA16I 0x8D88 +#define GL_RGB16I 0x8D89 +#define GL_RGBA8I 0x8D8E +#define GL_RGB8I 0x8D8F +#define GL_RED_INTEGER 0x8D94 +#define GL_RGB_INTEGER 0x8D98 +#define GL_RGBA_INTEGER 0x8D99 +#define GL_SAMPLER_2D_ARRAY 0x8DC1 +#define GL_SAMPLER_2D_ARRAY_SHADOW 0x8DC4 +#define GL_SAMPLER_CUBE_SHADOW 0x8DC5 +#define GL_UNSIGNED_INT_VEC2 0x8DC6 +#define GL_UNSIGNED_INT_VEC3 0x8DC7 +#define GL_UNSIGNED_INT_VEC4 0x8DC8 +#define GL_INT_SAMPLER_2D 0x8DCA +#define GL_INT_SAMPLER_3D 0x8DCB +#define GL_INT_SAMPLER_CUBE 0x8DCC +#define GL_INT_SAMPLER_2D_ARRAY 0x8DCF +#define GL_UNSIGNED_INT_SAMPLER_2D 0x8DD2 +#define GL_UNSIGNED_INT_SAMPLER_3D 0x8DD3 +#define GL_UNSIGNED_INT_SAMPLER_CUBE 0x8DD4 +#define GL_UNSIGNED_INT_SAMPLER_2D_ARRAY 0x8DD7 +#define GL_BUFFER_ACCESS_FLAGS 0x911F +#define GL_BUFFER_MAP_LENGTH 0x9120 +#define GL_BUFFER_MAP_OFFSET 0x9121 +#define GL_DEPTH_COMPONENT32F 0x8CAC +#define GL_DEPTH32F_STENCIL8 0x8CAD +#define GL_FLOAT_32_UNSIGNED_INT_24_8_REV 0x8DAD +#define GL_FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING 0x8210 +#define GL_FRAMEBUFFER_ATTACHMENT_COMPONENT_TYPE 0x8211 +#define GL_FRAMEBUFFER_ATTACHMENT_RED_SIZE 0x8212 +#define GL_FRAMEBUFFER_ATTACHMENT_GREEN_SIZE 0x8213 +#define GL_FRAMEBUFFER_ATTACHMENT_BLUE_SIZE 0x8214 +#define GL_FRAMEBUFFER_ATTACHMENT_ALPHA_SIZE 0x8215 +#define GL_FRAMEBUFFER_ATTACHMENT_DEPTH_SIZE 0x8216 +#define GL_FRAMEBUFFER_ATTACHMENT_STENCIL_SIZE 0x8217 +#define GL_FRAMEBUFFER_DEFAULT 0x8218 +#define GL_FRAMEBUFFER_UNDEFINED 0x8219 +#define GL_DEPTH_STENCIL_ATTACHMENT 0x821A +#define GL_DEPTH_STENCIL 0x84F9 +#define GL_UNSIGNED_INT_24_8 0x84FA +#define GL_DEPTH24_STENCIL8 0x88F0 +#define GL_UNSIGNED_NORMALIZED 0x8C17 +#define GL_DRAW_FRAMEBUFFER_BINDING 0x8CA6 +#define GL_READ_FRAMEBUFFER 0x8CA8 +#define GL_DRAW_FRAMEBUFFER 0x8CA9 +#define GL_READ_FRAMEBUFFER_BINDING 0x8CAA +#define GL_RENDERBUFFER_SAMPLES 0x8CAB +#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LAYER 0x8CD4 +#define 
GL_MAX_COLOR_ATTACHMENTS 0x8CDF +#define GL_COLOR_ATTACHMENT1 0x8CE1 +#define GL_COLOR_ATTACHMENT2 0x8CE2 +#define GL_COLOR_ATTACHMENT3 0x8CE3 +#define GL_COLOR_ATTACHMENT4 0x8CE4 +#define GL_COLOR_ATTACHMENT5 0x8CE5 +#define GL_COLOR_ATTACHMENT6 0x8CE6 +#define GL_COLOR_ATTACHMENT7 0x8CE7 +#define GL_COLOR_ATTACHMENT8 0x8CE8 +#define GL_COLOR_ATTACHMENT9 0x8CE9 +#define GL_COLOR_ATTACHMENT10 0x8CEA +#define GL_COLOR_ATTACHMENT11 0x8CEB +#define GL_COLOR_ATTACHMENT12 0x8CEC +#define GL_COLOR_ATTACHMENT13 0x8CED +#define GL_COLOR_ATTACHMENT14 0x8CEE +#define GL_COLOR_ATTACHMENT15 0x8CEF +#define GL_COLOR_ATTACHMENT16 0x8CF0 +#define GL_COLOR_ATTACHMENT17 0x8CF1 +#define GL_COLOR_ATTACHMENT18 0x8CF2 +#define GL_COLOR_ATTACHMENT19 0x8CF3 +#define GL_COLOR_ATTACHMENT20 0x8CF4 +#define GL_COLOR_ATTACHMENT21 0x8CF5 +#define GL_COLOR_ATTACHMENT22 0x8CF6 +#define GL_COLOR_ATTACHMENT23 0x8CF7 +#define GL_COLOR_ATTACHMENT24 0x8CF8 +#define GL_COLOR_ATTACHMENT25 0x8CF9 +#define GL_COLOR_ATTACHMENT26 0x8CFA +#define GL_COLOR_ATTACHMENT27 0x8CFB +#define GL_COLOR_ATTACHMENT28 0x8CFC +#define GL_COLOR_ATTACHMENT29 0x8CFD +#define GL_COLOR_ATTACHMENT30 0x8CFE +#define GL_COLOR_ATTACHMENT31 0x8CFF +#define GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE 0x8D56 +#define GL_MAX_SAMPLES 0x8D57 +#define GL_HALF_FLOAT 0x140B +#define GL_MAP_READ_BIT 0x0001 +#define GL_MAP_WRITE_BIT 0x0002 +#define GL_MAP_INVALIDATE_RANGE_BIT 0x0004 +#define GL_MAP_INVALIDATE_BUFFER_BIT 0x0008 +#define GL_MAP_FLUSH_EXPLICIT_BIT 0x0010 +#define GL_MAP_UNSYNCHRONIZED_BIT 0x0020 +#define GL_RG 0x8227 +#define GL_RG_INTEGER 0x8228 +#define GL_R8 0x8229 +#define GL_RG8 0x822B +#define GL_R16F 0x822D +#define GL_R32F 0x822E +#define GL_RG16F 0x822F +#define GL_RG32F 0x8230 +#define GL_R8I 0x8231 +#define GL_R8UI 0x8232 +#define GL_R16I 0x8233 +#define GL_R16UI 0x8234 +#define GL_R32I 0x8235 +#define GL_R32UI 0x8236 +#define GL_RG8I 0x8237 +#define GL_RG8UI 0x8238 +#define GL_RG16I 0x8239 +#define GL_RG16UI 0x823A +#define GL_RG32I 0x823B +#define GL_RG32UI 0x823C +#define GL_VERTEX_ARRAY_BINDING 0x85B5 +#define GL_R8_SNORM 0x8F94 +#define GL_RG8_SNORM 0x8F95 +#define GL_RGB8_SNORM 0x8F96 +#define GL_RGBA8_SNORM 0x8F97 +#define GL_SIGNED_NORMALIZED 0x8F9C +#define GL_PRIMITIVE_RESTART_FIXED_INDEX 0x8D69 +#define GL_COPY_READ_BUFFER 0x8F36 +#define GL_COPY_WRITE_BUFFER 0x8F37 +#define GL_COPY_READ_BUFFER_BINDING 0x8F36 +#define GL_COPY_WRITE_BUFFER_BINDING 0x8F37 +#define GL_UNIFORM_BUFFER 0x8A11 +#define GL_UNIFORM_BUFFER_BINDING 0x8A28 +#define GL_UNIFORM_BUFFER_START 0x8A29 +#define GL_UNIFORM_BUFFER_SIZE 0x8A2A +#define GL_MAX_VERTEX_UNIFORM_BLOCKS 0x8A2B +#define GL_MAX_FRAGMENT_UNIFORM_BLOCKS 0x8A2D +#define GL_MAX_COMBINED_UNIFORM_BLOCKS 0x8A2E +#define GL_MAX_UNIFORM_BUFFER_BINDINGS 0x8A2F +#define GL_MAX_UNIFORM_BLOCK_SIZE 0x8A30 +#define GL_MAX_COMBINED_VERTEX_UNIFORM_COMPONENTS 0x8A31 +#define GL_MAX_COMBINED_FRAGMENT_UNIFORM_COMPONENTS 0x8A33 +#define GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT 0x8A34 +#define GL_ACTIVE_UNIFORM_BLOCK_MAX_NAME_LENGTH 0x8A35 +#define GL_ACTIVE_UNIFORM_BLOCKS 0x8A36 +#define GL_UNIFORM_TYPE 0x8A37 +#define GL_UNIFORM_SIZE 0x8A38 +#define GL_UNIFORM_NAME_LENGTH 0x8A39 +#define GL_UNIFORM_BLOCK_INDEX 0x8A3A +#define GL_UNIFORM_OFFSET 0x8A3B +#define GL_UNIFORM_ARRAY_STRIDE 0x8A3C +#define GL_UNIFORM_MATRIX_STRIDE 0x8A3D +#define GL_UNIFORM_IS_ROW_MAJOR 0x8A3E +#define GL_UNIFORM_BLOCK_BINDING 0x8A3F +#define GL_UNIFORM_BLOCK_DATA_SIZE 0x8A40 +#define GL_UNIFORM_BLOCK_NAME_LENGTH 0x8A41 +#define 
GL_UNIFORM_BLOCK_ACTIVE_UNIFORMS 0x8A42 +#define GL_UNIFORM_BLOCK_ACTIVE_UNIFORM_INDICES 0x8A43 +#define GL_UNIFORM_BLOCK_REFERENCED_BY_VERTEX_SHADER 0x8A44 +#define GL_UNIFORM_BLOCK_REFERENCED_BY_FRAGMENT_SHADER 0x8A46 +#define GL_INVALID_INDEX 0xFFFFFFFFu +#define GL_MAX_VERTEX_OUTPUT_COMPONENTS 0x9122 +#define GL_MAX_FRAGMENT_INPUT_COMPONENTS 0x9125 +#define GL_MAX_SERVER_WAIT_TIMEOUT 0x9111 +#define GL_OBJECT_TYPE 0x9112 +#define GL_SYNC_CONDITION 0x9113 +#define GL_SYNC_STATUS 0x9114 +#define GL_SYNC_FLAGS 0x9115 +#define GL_SYNC_FENCE 0x9116 +#define GL_SYNC_GPU_COMMANDS_COMPLETE 0x9117 +#define GL_UNSIGNALED 0x9118 +#define GL_SIGNALED 0x9119 +#define GL_ALREADY_SIGNALED 0x911A +#define GL_TIMEOUT_EXPIRED 0x911B +#define GL_CONDITION_SATISFIED 0x911C +#define GL_WAIT_FAILED 0x911D +#define GL_SYNC_FLUSH_COMMANDS_BIT 0x00000001 +#define GL_TIMEOUT_IGNORED 0xFFFFFFFFFFFFFFFFull +#define GL_VERTEX_ATTRIB_ARRAY_DIVISOR 0x88FE +#define GL_ANY_SAMPLES_PASSED 0x8C2F +#define GL_ANY_SAMPLES_PASSED_CONSERVATIVE 0x8D6A +#define GL_SAMPLER_BINDING 0x8919 +#define GL_RGB10_A2UI 0x906F +#define GL_TEXTURE_SWIZZLE_R 0x8E42 +#define GL_TEXTURE_SWIZZLE_G 0x8E43 +#define GL_TEXTURE_SWIZZLE_B 0x8E44 +#define GL_TEXTURE_SWIZZLE_A 0x8E45 +#define GL_GREEN 0x1904 +#define GL_BLUE 0x1905 +#define GL_INT_2_10_10_10_REV 0x8D9F +#define GL_TRANSFORM_FEEDBACK 0x8E22 +#define GL_TRANSFORM_FEEDBACK_PAUSED 0x8E23 +#define GL_TRANSFORM_FEEDBACK_ACTIVE 0x8E24 +#define GL_TRANSFORM_FEEDBACK_BINDING 0x8E25 +#define GL_PROGRAM_BINARY_RETRIEVABLE_HINT 0x8257 +#define GL_PROGRAM_BINARY_LENGTH 0x8741 +#define GL_NUM_PROGRAM_BINARY_FORMATS 0x87FE +#define GL_PROGRAM_BINARY_FORMATS 0x87FF +#define GL_COMPRESSED_R11_EAC 0x9270 +#define GL_COMPRESSED_SIGNED_R11_EAC 0x9271 +#define GL_COMPRESSED_RG11_EAC 0x9272 +#define GL_COMPRESSED_SIGNED_RG11_EAC 0x9273 +#define GL_COMPRESSED_RGB8_ETC2 0x9274 +#define GL_COMPRESSED_SRGB8_ETC2 0x9275 +#define GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2 0x9276 +#define GL_COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2 0x9277 +#define GL_COMPRESSED_RGBA8_ETC2_EAC 0x9278 +#define GL_COMPRESSED_SRGB8_ALPHA8_ETC2_EAC 0x9279 +#define GL_TEXTURE_IMMUTABLE_FORMAT 0x912F +#define GL_MAX_ELEMENT_INDEX 0x8D6B +#define GL_NUM_SAMPLE_COUNTS 0x9380 +#define GL_TEXTURE_IMMUTABLE_LEVELS 0x82DF +typedef void (GL_APIENTRYP PFNGLREADBUFFERPROC) (GLenum src); +typedef void (GL_APIENTRYP PFNGLDRAWRANGEELEMENTSPROC) (GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum type, const void *indices); +typedef void (GL_APIENTRYP PFNGLTEXIMAGE3DPROC) (GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const void *pixels); +typedef void (GL_APIENTRYP PFNGLTEXSUBIMAGE3DPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const void *pixels); +typedef void (GL_APIENTRYP PFNGLCOPYTEXSUBIMAGE3DPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLint x, GLint y, GLsizei width, GLsizei height); +typedef void (GL_APIENTRYP PFNGLCOMPRESSEDTEXIMAGE3DPROC) (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLsizei imageSize, const void *data); +typedef void (GL_APIENTRYP PFNGLCOMPRESSEDTEXSUBIMAGE3DPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLsizei 
imageSize, const void *data); +typedef void (GL_APIENTRYP PFNGLGENQUERIESPROC) (GLsizei n, GLuint *ids); +typedef void (GL_APIENTRYP PFNGLDELETEQUERIESPROC) (GLsizei n, const GLuint *ids); +typedef GLboolean (GL_APIENTRYP PFNGLISQUERYPROC) (GLuint id); +typedef void (GL_APIENTRYP PFNGLBEGINQUERYPROC) (GLenum target, GLuint id); +typedef void (GL_APIENTRYP PFNGLENDQUERYPROC) (GLenum target); +typedef void (GL_APIENTRYP PFNGLGETQUERYIVPROC) (GLenum target, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETQUERYOBJECTUIVPROC) (GLuint id, GLenum pname, GLuint *params); +typedef GLboolean (GL_APIENTRYP PFNGLUNMAPBUFFERPROC) (GLenum target); +typedef void (GL_APIENTRYP PFNGLGETBUFFERPOINTERVPROC) (GLenum target, GLenum pname, void **params); +typedef void (GL_APIENTRYP PFNGLDRAWBUFFERSPROC) (GLsizei n, const GLenum *bufs); +typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX2X3FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX3X2FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX2X4FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX4X2FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX3X4FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX4X3FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLBLITFRAMEBUFFERPROC) (GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter); +typedef void (GL_APIENTRYP PFNGLRENDERBUFFERSTORAGEMULTISAMPLEPROC) (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height); +typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERTEXTURELAYERPROC) (GLenum target, GLenum attachment, GLuint texture, GLint level, GLint layer); +typedef void *(GL_APIENTRYP PFNGLMAPBUFFERRANGEPROC) (GLenum target, GLintptr offset, GLsizeiptr length, GLbitfield access); +typedef void (GL_APIENTRYP PFNGLFLUSHMAPPEDBUFFERRANGEPROC) (GLenum target, GLintptr offset, GLsizeiptr length); +typedef void (GL_APIENTRYP PFNGLBINDVERTEXARRAYPROC) (GLuint array); +typedef void (GL_APIENTRYP PFNGLDELETEVERTEXARRAYSPROC) (GLsizei n, const GLuint *arrays); +typedef void (GL_APIENTRYP PFNGLGENVERTEXARRAYSPROC) (GLsizei n, GLuint *arrays); +typedef GLboolean (GL_APIENTRYP PFNGLISVERTEXARRAYPROC) (GLuint array); +typedef void (GL_APIENTRYP PFNGLGETINTEGERI_VPROC) (GLenum target, GLuint index, GLint *data); +typedef void (GL_APIENTRYP PFNGLBEGINTRANSFORMFEEDBACKPROC) (GLenum primitiveMode); +typedef void (GL_APIENTRYP PFNGLENDTRANSFORMFEEDBACKPROC) (void); +typedef void (GL_APIENTRYP PFNGLBINDBUFFERRANGEPROC) (GLenum target, GLuint index, GLuint buffer, GLintptr offset, GLsizeiptr size); +typedef void (GL_APIENTRYP PFNGLBINDBUFFERBASEPROC) (GLenum target, GLuint index, GLuint buffer); +typedef void (GL_APIENTRYP PFNGLTRANSFORMFEEDBACKVARYINGSPROC) (GLuint program, GLsizei count, const GLchar *const*varyings, GLenum bufferMode); +typedef void (GL_APIENTRYP PFNGLGETTRANSFORMFEEDBACKVARYINGPROC) (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLsizei *size, GLenum *type, GLchar *name); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBIPOINTERPROC) 
(GLuint index, GLint size, GLenum type, GLsizei stride, const void *pointer); +typedef void (GL_APIENTRYP PFNGLGETVERTEXATTRIBIIVPROC) (GLuint index, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETVERTEXATTRIBIUIVPROC) (GLuint index, GLenum pname, GLuint *params); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBI4IPROC) (GLuint index, GLint x, GLint y, GLint z, GLint w); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBI4UIPROC) (GLuint index, GLuint x, GLuint y, GLuint z, GLuint w); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBI4IVPROC) (GLuint index, const GLint *v); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBI4UIVPROC) (GLuint index, const GLuint *v); +typedef void (GL_APIENTRYP PFNGLGETUNIFORMUIVPROC) (GLuint program, GLint location, GLuint *params); +typedef GLint (GL_APIENTRYP PFNGLGETFRAGDATALOCATIONPROC) (GLuint program, const GLchar *name); +typedef void (GL_APIENTRYP PFNGLUNIFORM1UIPROC) (GLint location, GLuint v0); +typedef void (GL_APIENTRYP PFNGLUNIFORM2UIPROC) (GLint location, GLuint v0, GLuint v1); +typedef void (GL_APIENTRYP PFNGLUNIFORM3UIPROC) (GLint location, GLuint v0, GLuint v1, GLuint v2); +typedef void (GL_APIENTRYP PFNGLUNIFORM4UIPROC) (GLint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3); +typedef void (GL_APIENTRYP PFNGLUNIFORM1UIVPROC) (GLint location, GLsizei count, const GLuint *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM2UIVPROC) (GLint location, GLsizei count, const GLuint *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM3UIVPROC) (GLint location, GLsizei count, const GLuint *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM4UIVPROC) (GLint location, GLsizei count, const GLuint *value); +typedef void (GL_APIENTRYP PFNGLCLEARBUFFERIVPROC) (GLenum buffer, GLint drawbuffer, const GLint *value); +typedef void (GL_APIENTRYP PFNGLCLEARBUFFERUIVPROC) (GLenum buffer, GLint drawbuffer, const GLuint *value); +typedef void (GL_APIENTRYP PFNGLCLEARBUFFERFVPROC) (GLenum buffer, GLint drawbuffer, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLCLEARBUFFERFIPROC) (GLenum buffer, GLint drawbuffer, GLfloat depth, GLint stencil); +typedef const GLubyte *(GL_APIENTRYP PFNGLGETSTRINGIPROC) (GLenum name, GLuint index); +typedef void (GL_APIENTRYP PFNGLCOPYBUFFERSUBDATAPROC) (GLenum readTarget, GLenum writeTarget, GLintptr readOffset, GLintptr writeOffset, GLsizeiptr size); +typedef void (GL_APIENTRYP PFNGLGETUNIFORMINDICESPROC) (GLuint program, GLsizei uniformCount, const GLchar *const*uniformNames, GLuint *uniformIndices); +typedef void (GL_APIENTRYP PFNGLGETACTIVEUNIFORMSIVPROC) (GLuint program, GLsizei uniformCount, const GLuint *uniformIndices, GLenum pname, GLint *params); +typedef GLuint (GL_APIENTRYP PFNGLGETUNIFORMBLOCKINDEXPROC) (GLuint program, const GLchar *uniformBlockName); +typedef void (GL_APIENTRYP PFNGLGETACTIVEUNIFORMBLOCKIVPROC) (GLuint program, GLuint uniformBlockIndex, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETACTIVEUNIFORMBLOCKNAMEPROC) (GLuint program, GLuint uniformBlockIndex, GLsizei bufSize, GLsizei *length, GLchar *uniformBlockName); +typedef void (GL_APIENTRYP PFNGLUNIFORMBLOCKBINDINGPROC) (GLuint program, GLuint uniformBlockIndex, GLuint uniformBlockBinding); +typedef void (GL_APIENTRYP PFNGLDRAWARRAYSINSTANCEDPROC) (GLenum mode, GLint first, GLsizei count, GLsizei instancecount); +typedef void (GL_APIENTRYP PFNGLDRAWELEMENTSINSTANCEDPROC) (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount); +typedef GLsync (GL_APIENTRYP PFNGLFENCESYNCPROC) (GLenum condition, 
GLbitfield flags); +typedef GLboolean (GL_APIENTRYP PFNGLISSYNCPROC) (GLsync sync); +typedef void (GL_APIENTRYP PFNGLDELETESYNCPROC) (GLsync sync); +typedef GLenum (GL_APIENTRYP PFNGLCLIENTWAITSYNCPROC) (GLsync sync, GLbitfield flags, GLuint64 timeout); +typedef void (GL_APIENTRYP PFNGLWAITSYNCPROC) (GLsync sync, GLbitfield flags, GLuint64 timeout); +typedef void (GL_APIENTRYP PFNGLGETINTEGER64VPROC) (GLenum pname, GLint64 *data); +typedef void (GL_APIENTRYP PFNGLGETSYNCIVPROC) (GLsync sync, GLenum pname, GLsizei bufSize, GLsizei *length, GLint *values); +typedef void (GL_APIENTRYP PFNGLGETINTEGER64I_VPROC) (GLenum target, GLuint index, GLint64 *data); +typedef void (GL_APIENTRYP PFNGLGETBUFFERPARAMETERI64VPROC) (GLenum target, GLenum pname, GLint64 *params); +typedef void (GL_APIENTRYP PFNGLGENSAMPLERSPROC) (GLsizei count, GLuint *samplers); +typedef void (GL_APIENTRYP PFNGLDELETESAMPLERSPROC) (GLsizei count, const GLuint *samplers); +typedef GLboolean (GL_APIENTRYP PFNGLISSAMPLERPROC) (GLuint sampler); +typedef void (GL_APIENTRYP PFNGLBINDSAMPLERPROC) (GLuint unit, GLuint sampler); +typedef void (GL_APIENTRYP PFNGLSAMPLERPARAMETERIPROC) (GLuint sampler, GLenum pname, GLint param); +typedef void (GL_APIENTRYP PFNGLSAMPLERPARAMETERIVPROC) (GLuint sampler, GLenum pname, const GLint *param); +typedef void (GL_APIENTRYP PFNGLSAMPLERPARAMETERFPROC) (GLuint sampler, GLenum pname, GLfloat param); +typedef void (GL_APIENTRYP PFNGLSAMPLERPARAMETERFVPROC) (GLuint sampler, GLenum pname, const GLfloat *param); +typedef void (GL_APIENTRYP PFNGLGETSAMPLERPARAMETERIVPROC) (GLuint sampler, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETSAMPLERPARAMETERFVPROC) (GLuint sampler, GLenum pname, GLfloat *params); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBDIVISORPROC) (GLuint index, GLuint divisor); +typedef void (GL_APIENTRYP PFNGLBINDTRANSFORMFEEDBACKPROC) (GLenum target, GLuint id); +typedef void (GL_APIENTRYP PFNGLDELETETRANSFORMFEEDBACKSPROC) (GLsizei n, const GLuint *ids); +typedef void (GL_APIENTRYP PFNGLGENTRANSFORMFEEDBACKSPROC) (GLsizei n, GLuint *ids); +typedef GLboolean (GL_APIENTRYP PFNGLISTRANSFORMFEEDBACKPROC) (GLuint id); +typedef void (GL_APIENTRYP PFNGLPAUSETRANSFORMFEEDBACKPROC) (void); +typedef void (GL_APIENTRYP PFNGLRESUMETRANSFORMFEEDBACKPROC) (void); +typedef void (GL_APIENTRYP PFNGLGETPROGRAMBINARYPROC) (GLuint program, GLsizei bufSize, GLsizei *length, GLenum *binaryFormat, void *binary); +typedef void (GL_APIENTRYP PFNGLPROGRAMBINARYPROC) (GLuint program, GLenum binaryFormat, const void *binary, GLsizei length); +typedef void (GL_APIENTRYP PFNGLPROGRAMPARAMETERIPROC) (GLuint program, GLenum pname, GLint value); +typedef void (GL_APIENTRYP PFNGLINVALIDATEFRAMEBUFFERPROC) (GLenum target, GLsizei numAttachments, const GLenum *attachments); +typedef void (GL_APIENTRYP PFNGLINVALIDATESUBFRAMEBUFFERPROC) (GLenum target, GLsizei numAttachments, const GLenum *attachments, GLint x, GLint y, GLsizei width, GLsizei height); +typedef void (GL_APIENTRYP PFNGLTEXSTORAGE2DPROC) (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height); +typedef void (GL_APIENTRYP PFNGLTEXSTORAGE3DPROC) (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth); +typedef void (GL_APIENTRYP PFNGLGETINTERNALFORMATIVPROC) (GLenum target, GLenum internalformat, GLenum pname, GLsizei bufSize, GLint *params); +#if GL_GLES_PROTOTYPES +GL_APICALL void GL_APIENTRY glReadBuffer (GLenum src); +GL_APICALL void GL_APIENTRY 
glDrawRangeElements (GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum type, const void *indices); +GL_APICALL void GL_APIENTRY glTexImage3D (GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const void *pixels); +GL_APICALL void GL_APIENTRY glTexSubImage3D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const void *pixels); +GL_APICALL void GL_APIENTRY glCopyTexSubImage3D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLint x, GLint y, GLsizei width, GLsizei height); +GL_APICALL void GL_APIENTRY glCompressedTexImage3D (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLsizei imageSize, const void *data); +GL_APICALL void GL_APIENTRY glCompressedTexSubImage3D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLsizei imageSize, const void *data); +GL_APICALL void GL_APIENTRY glGenQueries (GLsizei n, GLuint *ids); +GL_APICALL void GL_APIENTRY glDeleteQueries (GLsizei n, const GLuint *ids); +GL_APICALL GLboolean GL_APIENTRY glIsQuery (GLuint id); +GL_APICALL void GL_APIENTRY glBeginQuery (GLenum target, GLuint id); +GL_APICALL void GL_APIENTRY glEndQuery (GLenum target); +GL_APICALL void GL_APIENTRY glGetQueryiv (GLenum target, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetQueryObjectuiv (GLuint id, GLenum pname, GLuint *params); +GL_APICALL GLboolean GL_APIENTRY glUnmapBuffer (GLenum target); +GL_APICALL void GL_APIENTRY glGetBufferPointerv (GLenum target, GLenum pname, void **params); +GL_APICALL void GL_APIENTRY glDrawBuffers (GLsizei n, const GLenum *bufs); +GL_APICALL void GL_APIENTRY glUniformMatrix2x3fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniformMatrix3x2fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniformMatrix2x4fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniformMatrix4x2fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniformMatrix3x4fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniformMatrix4x3fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glBlitFramebuffer (GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter); +GL_APICALL void GL_APIENTRY glRenderbufferStorageMultisample (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height); +GL_APICALL void GL_APIENTRY glFramebufferTextureLayer (GLenum target, GLenum attachment, GLuint texture, GLint level, GLint layer); +GL_APICALL void *GL_APIENTRY glMapBufferRange (GLenum target, GLintptr offset, GLsizeiptr length, GLbitfield access); +GL_APICALL void GL_APIENTRY glFlushMappedBufferRange (GLenum target, GLintptr offset, GLsizeiptr length); +GL_APICALL void GL_APIENTRY glBindVertexArray (GLuint array); +GL_APICALL void GL_APIENTRY glDeleteVertexArrays (GLsizei n, const GLuint *arrays); +GL_APICALL void GL_APIENTRY glGenVertexArrays (GLsizei n, GLuint *arrays); 
+GL_APICALL GLboolean GL_APIENTRY glIsVertexArray (GLuint array); +GL_APICALL void GL_APIENTRY glGetIntegeri_v (GLenum target, GLuint index, GLint *data); +GL_APICALL void GL_APIENTRY glBeginTransformFeedback (GLenum primitiveMode); +GL_APICALL void GL_APIENTRY glEndTransformFeedback (void); +GL_APICALL void GL_APIENTRY glBindBufferRange (GLenum target, GLuint index, GLuint buffer, GLintptr offset, GLsizeiptr size); +GL_APICALL void GL_APIENTRY glBindBufferBase (GLenum target, GLuint index, GLuint buffer); +GL_APICALL void GL_APIENTRY glTransformFeedbackVaryings (GLuint program, GLsizei count, const GLchar *const*varyings, GLenum bufferMode); +GL_APICALL void GL_APIENTRY glGetTransformFeedbackVarying (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLsizei *size, GLenum *type, GLchar *name); +GL_APICALL void GL_APIENTRY glVertexAttribIPointer (GLuint index, GLint size, GLenum type, GLsizei stride, const void *pointer); +GL_APICALL void GL_APIENTRY glGetVertexAttribIiv (GLuint index, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetVertexAttribIuiv (GLuint index, GLenum pname, GLuint *params); +GL_APICALL void GL_APIENTRY glVertexAttribI4i (GLuint index, GLint x, GLint y, GLint z, GLint w); +GL_APICALL void GL_APIENTRY glVertexAttribI4ui (GLuint index, GLuint x, GLuint y, GLuint z, GLuint w); +GL_APICALL void GL_APIENTRY glVertexAttribI4iv (GLuint index, const GLint *v); +GL_APICALL void GL_APIENTRY glVertexAttribI4uiv (GLuint index, const GLuint *v); +GL_APICALL void GL_APIENTRY glGetUniformuiv (GLuint program, GLint location, GLuint *params); +GL_APICALL GLint GL_APIENTRY glGetFragDataLocation (GLuint program, const GLchar *name); +GL_APICALL void GL_APIENTRY glUniform1ui (GLint location, GLuint v0); +GL_APICALL void GL_APIENTRY glUniform2ui (GLint location, GLuint v0, GLuint v1); +GL_APICALL void GL_APIENTRY glUniform3ui (GLint location, GLuint v0, GLuint v1, GLuint v2); +GL_APICALL void GL_APIENTRY glUniform4ui (GLint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3); +GL_APICALL void GL_APIENTRY glUniform1uiv (GLint location, GLsizei count, const GLuint *value); +GL_APICALL void GL_APIENTRY glUniform2uiv (GLint location, GLsizei count, const GLuint *value); +GL_APICALL void GL_APIENTRY glUniform3uiv (GLint location, GLsizei count, const GLuint *value); +GL_APICALL void GL_APIENTRY glUniform4uiv (GLint location, GLsizei count, const GLuint *value); +GL_APICALL void GL_APIENTRY glClearBufferiv (GLenum buffer, GLint drawbuffer, const GLint *value); +GL_APICALL void GL_APIENTRY glClearBufferuiv (GLenum buffer, GLint drawbuffer, const GLuint *value); +GL_APICALL void GL_APIENTRY glClearBufferfv (GLenum buffer, GLint drawbuffer, const GLfloat *value); +GL_APICALL void GL_APIENTRY glClearBufferfi (GLenum buffer, GLint drawbuffer, GLfloat depth, GLint stencil); +GL_APICALL const GLubyte *GL_APIENTRY glGetStringi (GLenum name, GLuint index); +GL_APICALL void GL_APIENTRY glCopyBufferSubData (GLenum readTarget, GLenum writeTarget, GLintptr readOffset, GLintptr writeOffset, GLsizeiptr size); +GL_APICALL void GL_APIENTRY glGetUniformIndices (GLuint program, GLsizei uniformCount, const GLchar *const*uniformNames, GLuint *uniformIndices); +GL_APICALL void GL_APIENTRY glGetActiveUniformsiv (GLuint program, GLsizei uniformCount, const GLuint *uniformIndices, GLenum pname, GLint *params); +GL_APICALL GLuint GL_APIENTRY glGetUniformBlockIndex (GLuint program, const GLchar *uniformBlockName); +GL_APICALL void GL_APIENTRY glGetActiveUniformBlockiv (GLuint program, GLuint 
uniformBlockIndex, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetActiveUniformBlockName (GLuint program, GLuint uniformBlockIndex, GLsizei bufSize, GLsizei *length, GLchar *uniformBlockName); +GL_APICALL void GL_APIENTRY glUniformBlockBinding (GLuint program, GLuint uniformBlockIndex, GLuint uniformBlockBinding); +GL_APICALL void GL_APIENTRY glDrawArraysInstanced (GLenum mode, GLint first, GLsizei count, GLsizei instancecount); +GL_APICALL void GL_APIENTRY glDrawElementsInstanced (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount); +GL_APICALL GLsync GL_APIENTRY glFenceSync (GLenum condition, GLbitfield flags); +GL_APICALL GLboolean GL_APIENTRY glIsSync (GLsync sync); +GL_APICALL void GL_APIENTRY glDeleteSync (GLsync sync); +GL_APICALL GLenum GL_APIENTRY glClientWaitSync (GLsync sync, GLbitfield flags, GLuint64 timeout); +GL_APICALL void GL_APIENTRY glWaitSync (GLsync sync, GLbitfield flags, GLuint64 timeout); +GL_APICALL void GL_APIENTRY glGetInteger64v (GLenum pname, GLint64 *data); +GL_APICALL void GL_APIENTRY glGetSynciv (GLsync sync, GLenum pname, GLsizei bufSize, GLsizei *length, GLint *values); +GL_APICALL void GL_APIENTRY glGetInteger64i_v (GLenum target, GLuint index, GLint64 *data); +GL_APICALL void GL_APIENTRY glGetBufferParameteri64v (GLenum target, GLenum pname, GLint64 *params); +GL_APICALL void GL_APIENTRY glGenSamplers (GLsizei count, GLuint *samplers); +GL_APICALL void GL_APIENTRY glDeleteSamplers (GLsizei count, const GLuint *samplers); +GL_APICALL GLboolean GL_APIENTRY glIsSampler (GLuint sampler); +GL_APICALL void GL_APIENTRY glBindSampler (GLuint unit, GLuint sampler); +GL_APICALL void GL_APIENTRY glSamplerParameteri (GLuint sampler, GLenum pname, GLint param); +GL_APICALL void GL_APIENTRY glSamplerParameteriv (GLuint sampler, GLenum pname, const GLint *param); +GL_APICALL void GL_APIENTRY glSamplerParameterf (GLuint sampler, GLenum pname, GLfloat param); +GL_APICALL void GL_APIENTRY glSamplerParameterfv (GLuint sampler, GLenum pname, const GLfloat *param); +GL_APICALL void GL_APIENTRY glGetSamplerParameteriv (GLuint sampler, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetSamplerParameterfv (GLuint sampler, GLenum pname, GLfloat *params); +GL_APICALL void GL_APIENTRY glVertexAttribDivisor (GLuint index, GLuint divisor); +GL_APICALL void GL_APIENTRY glBindTransformFeedback (GLenum target, GLuint id); +GL_APICALL void GL_APIENTRY glDeleteTransformFeedbacks (GLsizei n, const GLuint *ids); +GL_APICALL void GL_APIENTRY glGenTransformFeedbacks (GLsizei n, GLuint *ids); +GL_APICALL GLboolean GL_APIENTRY glIsTransformFeedback (GLuint id); +GL_APICALL void GL_APIENTRY glPauseTransformFeedback (void); +GL_APICALL void GL_APIENTRY glResumeTransformFeedback (void); +GL_APICALL void GL_APIENTRY glGetProgramBinary (GLuint program, GLsizei bufSize, GLsizei *length, GLenum *binaryFormat, void *binary); +GL_APICALL void GL_APIENTRY glProgramBinary (GLuint program, GLenum binaryFormat, const void *binary, GLsizei length); +GL_APICALL void GL_APIENTRY glProgramParameteri (GLuint program, GLenum pname, GLint value); +GL_APICALL void GL_APIENTRY glInvalidateFramebuffer (GLenum target, GLsizei numAttachments, const GLenum *attachments); +GL_APICALL void GL_APIENTRY glInvalidateSubFramebuffer (GLenum target, GLsizei numAttachments, const GLenum *attachments, GLint x, GLint y, GLsizei width, GLsizei height); +GL_APICALL void GL_APIENTRY glTexStorage2D (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, 
GLsizei height); +GL_APICALL void GL_APIENTRY glTexStorage3D (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth); +GL_APICALL void GL_APIENTRY glGetInternalformativ (GLenum target, GLenum internalformat, GLenum pname, GLsizei bufSize, GLint *params); +#endif +#endif /* GL_ES_VERSION_3_0 */ + +#ifndef GL_ES_VERSION_3_1 +#define GL_ES_VERSION_3_1 1 +#define GL_COMPUTE_SHADER 0x91B9 +#define GL_MAX_COMPUTE_UNIFORM_BLOCKS 0x91BB +#define GL_MAX_COMPUTE_TEXTURE_IMAGE_UNITS 0x91BC +#define GL_MAX_COMPUTE_IMAGE_UNIFORMS 0x91BD +#define GL_MAX_COMPUTE_SHARED_MEMORY_SIZE 0x8262 +#define GL_MAX_COMPUTE_UNIFORM_COMPONENTS 0x8263 +#define GL_MAX_COMPUTE_ATOMIC_COUNTER_BUFFERS 0x8264 +#define GL_MAX_COMPUTE_ATOMIC_COUNTERS 0x8265 +#define GL_MAX_COMBINED_COMPUTE_UNIFORM_COMPONENTS 0x8266 +#define GL_MAX_COMPUTE_WORK_GROUP_INVOCATIONS 0x90EB +#define GL_MAX_COMPUTE_WORK_GROUP_COUNT 0x91BE +#define GL_MAX_COMPUTE_WORK_GROUP_SIZE 0x91BF +#define GL_COMPUTE_WORK_GROUP_SIZE 0x8267 +#define GL_DISPATCH_INDIRECT_BUFFER 0x90EE +#define GL_DISPATCH_INDIRECT_BUFFER_BINDING 0x90EF +#define GL_COMPUTE_SHADER_BIT 0x00000020 +#define GL_DRAW_INDIRECT_BUFFER 0x8F3F +#define GL_DRAW_INDIRECT_BUFFER_BINDING 0x8F43 +#define GL_MAX_UNIFORM_LOCATIONS 0x826E +#define GL_FRAMEBUFFER_DEFAULT_WIDTH 0x9310 +#define GL_FRAMEBUFFER_DEFAULT_HEIGHT 0x9311 +#define GL_FRAMEBUFFER_DEFAULT_SAMPLES 0x9313 +#define GL_FRAMEBUFFER_DEFAULT_FIXED_SAMPLE_LOCATIONS 0x9314 +#define GL_MAX_FRAMEBUFFER_WIDTH 0x9315 +#define GL_MAX_FRAMEBUFFER_HEIGHT 0x9316 +#define GL_MAX_FRAMEBUFFER_SAMPLES 0x9318 +#define GL_UNIFORM 0x92E1 +#define GL_UNIFORM_BLOCK 0x92E2 +#define GL_PROGRAM_INPUT 0x92E3 +#define GL_PROGRAM_OUTPUT 0x92E4 +#define GL_BUFFER_VARIABLE 0x92E5 +#define GL_SHADER_STORAGE_BLOCK 0x92E6 +#define GL_ATOMIC_COUNTER_BUFFER 0x92C0 +#define GL_TRANSFORM_FEEDBACK_VARYING 0x92F4 +#define GL_ACTIVE_RESOURCES 0x92F5 +#define GL_MAX_NAME_LENGTH 0x92F6 +#define GL_MAX_NUM_ACTIVE_VARIABLES 0x92F7 +#define GL_NAME_LENGTH 0x92F9 +#define GL_TYPE 0x92FA +#define GL_ARRAY_SIZE 0x92FB +#define GL_OFFSET 0x92FC +#define GL_BLOCK_INDEX 0x92FD +#define GL_ARRAY_STRIDE 0x92FE +#define GL_MATRIX_STRIDE 0x92FF +#define GL_IS_ROW_MAJOR 0x9300 +#define GL_ATOMIC_COUNTER_BUFFER_INDEX 0x9301 +#define GL_BUFFER_BINDING 0x9302 +#define GL_BUFFER_DATA_SIZE 0x9303 +#define GL_NUM_ACTIVE_VARIABLES 0x9304 +#define GL_ACTIVE_VARIABLES 0x9305 +#define GL_REFERENCED_BY_VERTEX_SHADER 0x9306 +#define GL_REFERENCED_BY_FRAGMENT_SHADER 0x930A +#define GL_REFERENCED_BY_COMPUTE_SHADER 0x930B +#define GL_TOP_LEVEL_ARRAY_SIZE 0x930C +#define GL_TOP_LEVEL_ARRAY_STRIDE 0x930D +#define GL_LOCATION 0x930E +#define GL_VERTEX_SHADER_BIT 0x00000001 +#define GL_FRAGMENT_SHADER_BIT 0x00000002 +#define GL_ALL_SHADER_BITS 0xFFFFFFFF +#define GL_PROGRAM_SEPARABLE 0x8258 +#define GL_ACTIVE_PROGRAM 0x8259 +#define GL_PROGRAM_PIPELINE_BINDING 0x825A +#define GL_ATOMIC_COUNTER_BUFFER_BINDING 0x92C1 +#define GL_ATOMIC_COUNTER_BUFFER_START 0x92C2 +#define GL_ATOMIC_COUNTER_BUFFER_SIZE 0x92C3 +#define GL_MAX_VERTEX_ATOMIC_COUNTER_BUFFERS 0x92CC +#define GL_MAX_FRAGMENT_ATOMIC_COUNTER_BUFFERS 0x92D0 +#define GL_MAX_COMBINED_ATOMIC_COUNTER_BUFFERS 0x92D1 +#define GL_MAX_VERTEX_ATOMIC_COUNTERS 0x92D2 +#define GL_MAX_FRAGMENT_ATOMIC_COUNTERS 0x92D6 +#define GL_MAX_COMBINED_ATOMIC_COUNTERS 0x92D7 +#define GL_MAX_ATOMIC_COUNTER_BUFFER_SIZE 0x92D8 +#define GL_MAX_ATOMIC_COUNTER_BUFFER_BINDINGS 0x92DC +#define GL_ACTIVE_ATOMIC_COUNTER_BUFFERS 0x92D9 
+#define GL_UNSIGNED_INT_ATOMIC_COUNTER 0x92DB +#define GL_MAX_IMAGE_UNITS 0x8F38 +#define GL_MAX_VERTEX_IMAGE_UNIFORMS 0x90CA +#define GL_MAX_FRAGMENT_IMAGE_UNIFORMS 0x90CE +#define GL_MAX_COMBINED_IMAGE_UNIFORMS 0x90CF +#define GL_IMAGE_BINDING_NAME 0x8F3A +#define GL_IMAGE_BINDING_LEVEL 0x8F3B +#define GL_IMAGE_BINDING_LAYERED 0x8F3C +#define GL_IMAGE_BINDING_LAYER 0x8F3D +#define GL_IMAGE_BINDING_ACCESS 0x8F3E +#define GL_IMAGE_BINDING_FORMAT 0x906E +#define GL_VERTEX_ATTRIB_ARRAY_BARRIER_BIT 0x00000001 +#define GL_ELEMENT_ARRAY_BARRIER_BIT 0x00000002 +#define GL_UNIFORM_BARRIER_BIT 0x00000004 +#define GL_TEXTURE_FETCH_BARRIER_BIT 0x00000008 +#define GL_SHADER_IMAGE_ACCESS_BARRIER_BIT 0x00000020 +#define GL_COMMAND_BARRIER_BIT 0x00000040 +#define GL_PIXEL_BUFFER_BARRIER_BIT 0x00000080 +#define GL_TEXTURE_UPDATE_BARRIER_BIT 0x00000100 +#define GL_BUFFER_UPDATE_BARRIER_BIT 0x00000200 +#define GL_FRAMEBUFFER_BARRIER_BIT 0x00000400 +#define GL_TRANSFORM_FEEDBACK_BARRIER_BIT 0x00000800 +#define GL_ATOMIC_COUNTER_BARRIER_BIT 0x00001000 +#define GL_ALL_BARRIER_BITS 0xFFFFFFFF +#define GL_IMAGE_2D 0x904D +#define GL_IMAGE_3D 0x904E +#define GL_IMAGE_CUBE 0x9050 +#define GL_IMAGE_2D_ARRAY 0x9053 +#define GL_INT_IMAGE_2D 0x9058 +#define GL_INT_IMAGE_3D 0x9059 +#define GL_INT_IMAGE_CUBE 0x905B +#define GL_INT_IMAGE_2D_ARRAY 0x905E +#define GL_UNSIGNED_INT_IMAGE_2D 0x9063 +#define GL_UNSIGNED_INT_IMAGE_3D 0x9064 +#define GL_UNSIGNED_INT_IMAGE_CUBE 0x9066 +#define GL_UNSIGNED_INT_IMAGE_2D_ARRAY 0x9069 +#define GL_IMAGE_FORMAT_COMPATIBILITY_TYPE 0x90C7 +#define GL_IMAGE_FORMAT_COMPATIBILITY_BY_SIZE 0x90C8 +#define GL_IMAGE_FORMAT_COMPATIBILITY_BY_CLASS 0x90C9 +#define GL_READ_ONLY 0x88B8 +#define GL_WRITE_ONLY 0x88B9 +#define GL_READ_WRITE 0x88BA +#define GL_SHADER_STORAGE_BUFFER 0x90D2 +#define GL_SHADER_STORAGE_BUFFER_BINDING 0x90D3 +#define GL_SHADER_STORAGE_BUFFER_START 0x90D4 +#define GL_SHADER_STORAGE_BUFFER_SIZE 0x90D5 +#define GL_MAX_VERTEX_SHADER_STORAGE_BLOCKS 0x90D6 +#define GL_MAX_FRAGMENT_SHADER_STORAGE_BLOCKS 0x90DA +#define GL_MAX_COMPUTE_SHADER_STORAGE_BLOCKS 0x90DB +#define GL_MAX_COMBINED_SHADER_STORAGE_BLOCKS 0x90DC +#define GL_MAX_SHADER_STORAGE_BUFFER_BINDINGS 0x90DD +#define GL_MAX_SHADER_STORAGE_BLOCK_SIZE 0x90DE +#define GL_SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT 0x90DF +#define GL_SHADER_STORAGE_BARRIER_BIT 0x00002000 +#define GL_MAX_COMBINED_SHADER_OUTPUT_RESOURCES 0x8F39 +#define GL_DEPTH_STENCIL_TEXTURE_MODE 0x90EA +#define GL_STENCIL_INDEX 0x1901 +#define GL_MIN_PROGRAM_TEXTURE_GATHER_OFFSET 0x8E5E +#define GL_MAX_PROGRAM_TEXTURE_GATHER_OFFSET 0x8E5F +#define GL_SAMPLE_POSITION 0x8E50 +#define GL_SAMPLE_MASK 0x8E51 +#define GL_SAMPLE_MASK_VALUE 0x8E52 +#define GL_TEXTURE_2D_MULTISAMPLE 0x9100 +#define GL_MAX_SAMPLE_MASK_WORDS 0x8E59 +#define GL_MAX_COLOR_TEXTURE_SAMPLES 0x910E +#define GL_MAX_DEPTH_TEXTURE_SAMPLES 0x910F +#define GL_MAX_INTEGER_SAMPLES 0x9110 +#define GL_TEXTURE_BINDING_2D_MULTISAMPLE 0x9104 +#define GL_TEXTURE_SAMPLES 0x9106 +#define GL_TEXTURE_FIXED_SAMPLE_LOCATIONS 0x9107 +#define GL_TEXTURE_WIDTH 0x1000 +#define GL_TEXTURE_HEIGHT 0x1001 +#define GL_TEXTURE_DEPTH 0x8071 +#define GL_TEXTURE_INTERNAL_FORMAT 0x1003 +#define GL_TEXTURE_RED_SIZE 0x805C +#define GL_TEXTURE_GREEN_SIZE 0x805D +#define GL_TEXTURE_BLUE_SIZE 0x805E +#define GL_TEXTURE_ALPHA_SIZE 0x805F +#define GL_TEXTURE_DEPTH_SIZE 0x884A +#define GL_TEXTURE_STENCIL_SIZE 0x88F1 +#define GL_TEXTURE_SHARED_SIZE 0x8C3F +#define GL_TEXTURE_RED_TYPE 0x8C10 +#define GL_TEXTURE_GREEN_TYPE 0x8C11 
+#define GL_TEXTURE_BLUE_TYPE 0x8C12 +#define GL_TEXTURE_ALPHA_TYPE 0x8C13 +#define GL_TEXTURE_DEPTH_TYPE 0x8C16 +#define GL_TEXTURE_COMPRESSED 0x86A1 +#define GL_SAMPLER_2D_MULTISAMPLE 0x9108 +#define GL_INT_SAMPLER_2D_MULTISAMPLE 0x9109 +#define GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE 0x910A +#define GL_VERTEX_ATTRIB_BINDING 0x82D4 +#define GL_VERTEX_ATTRIB_RELATIVE_OFFSET 0x82D5 +#define GL_VERTEX_BINDING_DIVISOR 0x82D6 +#define GL_VERTEX_BINDING_OFFSET 0x82D7 +#define GL_VERTEX_BINDING_STRIDE 0x82D8 +#define GL_VERTEX_BINDING_BUFFER 0x8F4F +#define GL_MAX_VERTEX_ATTRIB_RELATIVE_OFFSET 0x82D9 +#define GL_MAX_VERTEX_ATTRIB_BINDINGS 0x82DA +#define GL_MAX_VERTEX_ATTRIB_STRIDE 0x82E5 +typedef void (GL_APIENTRYP PFNGLDISPATCHCOMPUTEPROC) (GLuint num_groups_x, GLuint num_groups_y, GLuint num_groups_z); +typedef void (GL_APIENTRYP PFNGLDISPATCHCOMPUTEINDIRECTPROC) (GLintptr indirect); +typedef void (GL_APIENTRYP PFNGLDRAWARRAYSINDIRECTPROC) (GLenum mode, const void *indirect); +typedef void (GL_APIENTRYP PFNGLDRAWELEMENTSINDIRECTPROC) (GLenum mode, GLenum type, const void *indirect); +typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERPARAMETERIPROC) (GLenum target, GLenum pname, GLint param); +typedef void (GL_APIENTRYP PFNGLGETFRAMEBUFFERPARAMETERIVPROC) (GLenum target, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETPROGRAMINTERFACEIVPROC) (GLuint program, GLenum programInterface, GLenum pname, GLint *params); +typedef GLuint (GL_APIENTRYP PFNGLGETPROGRAMRESOURCEINDEXPROC) (GLuint program, GLenum programInterface, const GLchar *name); +typedef void (GL_APIENTRYP PFNGLGETPROGRAMRESOURCENAMEPROC) (GLuint program, GLenum programInterface, GLuint index, GLsizei bufSize, GLsizei *length, GLchar *name); +typedef void (GL_APIENTRYP PFNGLGETPROGRAMRESOURCEIVPROC) (GLuint program, GLenum programInterface, GLuint index, GLsizei propCount, const GLenum *props, GLsizei bufSize, GLsizei *length, GLint *params); +typedef GLint (GL_APIENTRYP PFNGLGETPROGRAMRESOURCELOCATIONPROC) (GLuint program, GLenum programInterface, const GLchar *name); +typedef void (GL_APIENTRYP PFNGLUSEPROGRAMSTAGESPROC) (GLuint pipeline, GLbitfield stages, GLuint program); +typedef void (GL_APIENTRYP PFNGLACTIVESHADERPROGRAMPROC) (GLuint pipeline, GLuint program); +typedef GLuint (GL_APIENTRYP PFNGLCREATESHADERPROGRAMVPROC) (GLenum type, GLsizei count, const GLchar *const*strings); +typedef void (GL_APIENTRYP PFNGLBINDPROGRAMPIPELINEPROC) (GLuint pipeline); +typedef void (GL_APIENTRYP PFNGLDELETEPROGRAMPIPELINESPROC) (GLsizei n, const GLuint *pipelines); +typedef void (GL_APIENTRYP PFNGLGENPROGRAMPIPELINESPROC) (GLsizei n, GLuint *pipelines); +typedef GLboolean (GL_APIENTRYP PFNGLISPROGRAMPIPELINEPROC) (GLuint pipeline); +typedef void (GL_APIENTRYP PFNGLGETPROGRAMPIPELINEIVPROC) (GLuint pipeline, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM1IPROC) (GLuint program, GLint location, GLint v0); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM2IPROC) (GLuint program, GLint location, GLint v0, GLint v1); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM3IPROC) (GLuint program, GLint location, GLint v0, GLint v1, GLint v2); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM4IPROC) (GLuint program, GLint location, GLint v0, GLint v1, GLint v2, GLint v3); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM1UIPROC) (GLuint program, GLint location, GLuint v0); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM2UIPROC) (GLuint program, GLint location, GLuint v0, GLuint v1); +typedef void (GL_APIENTRYP 
PFNGLPROGRAMUNIFORM3UIPROC) (GLuint program, GLint location, GLuint v0, GLuint v1, GLuint v2); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM4UIPROC) (GLuint program, GLint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM1FPROC) (GLuint program, GLint location, GLfloat v0); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM2FPROC) (GLuint program, GLint location, GLfloat v0, GLfloat v1); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM3FPROC) (GLuint program, GLint location, GLfloat v0, GLfloat v1, GLfloat v2); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM4FPROC) (GLuint program, GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM1IVPROC) (GLuint program, GLint location, GLsizei count, const GLint *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM2IVPROC) (GLuint program, GLint location, GLsizei count, const GLint *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM3IVPROC) (GLuint program, GLint location, GLsizei count, const GLint *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM4IVPROC) (GLuint program, GLint location, GLsizei count, const GLint *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM1UIVPROC) (GLuint program, GLint location, GLsizei count, const GLuint *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM2UIVPROC) (GLuint program, GLint location, GLsizei count, const GLuint *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM3UIVPROC) (GLuint program, GLint location, GLsizei count, const GLuint *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM4UIVPROC) (GLuint program, GLint location, GLsizei count, const GLuint *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM1FVPROC) (GLuint program, GLint location, GLsizei count, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM2FVPROC) (GLuint program, GLint location, GLsizei count, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM3FVPROC) (GLuint program, GLint location, GLsizei count, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM4FVPROC) (GLuint program, GLint location, GLsizei count, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX2FVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX3FVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX4FVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX2X3FVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX3X2FVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX2X4FVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX4X2FVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX3X4FVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX4X3FVPROC) (GLuint program, GLint 
location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLVALIDATEPROGRAMPIPELINEPROC) (GLuint pipeline); +typedef void (GL_APIENTRYP PFNGLGETPROGRAMPIPELINEINFOLOGPROC) (GLuint pipeline, GLsizei bufSize, GLsizei *length, GLchar *infoLog); +typedef void (GL_APIENTRYP PFNGLBINDIMAGETEXTUREPROC) (GLuint unit, GLuint texture, GLint level, GLboolean layered, GLint layer, GLenum access, GLenum format); +typedef void (GL_APIENTRYP PFNGLGETBOOLEANI_VPROC) (GLenum target, GLuint index, GLboolean *data); +typedef void (GL_APIENTRYP PFNGLMEMORYBARRIERPROC) (GLbitfield barriers); +typedef void (GL_APIENTRYP PFNGLMEMORYBARRIERBYREGIONPROC) (GLbitfield barriers); +typedef void (GL_APIENTRYP PFNGLTEXSTORAGE2DMULTISAMPLEPROC) (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLboolean fixedsamplelocations); +typedef void (GL_APIENTRYP PFNGLGETMULTISAMPLEFVPROC) (GLenum pname, GLuint index, GLfloat *val); +typedef void (GL_APIENTRYP PFNGLSAMPLEMASKIPROC) (GLuint maskNumber, GLbitfield mask); +typedef void (GL_APIENTRYP PFNGLGETTEXLEVELPARAMETERIVPROC) (GLenum target, GLint level, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETTEXLEVELPARAMETERFVPROC) (GLenum target, GLint level, GLenum pname, GLfloat *params); +typedef void (GL_APIENTRYP PFNGLBINDVERTEXBUFFERPROC) (GLuint bindingindex, GLuint buffer, GLintptr offset, GLsizei stride); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBFORMATPROC) (GLuint attribindex, GLint size, GLenum type, GLboolean normalized, GLuint relativeoffset); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBIFORMATPROC) (GLuint attribindex, GLint size, GLenum type, GLuint relativeoffset); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBBINDINGPROC) (GLuint attribindex, GLuint bindingindex); +typedef void (GL_APIENTRYP PFNGLVERTEXBINDINGDIVISORPROC) (GLuint bindingindex, GLuint divisor); +#if GL_GLES_PROTOTYPES +GL_APICALL void GL_APIENTRY glDispatchCompute (GLuint num_groups_x, GLuint num_groups_y, GLuint num_groups_z); +GL_APICALL void GL_APIENTRY glDispatchComputeIndirect (GLintptr indirect); +GL_APICALL void GL_APIENTRY glDrawArraysIndirect (GLenum mode, const void *indirect); +GL_APICALL void GL_APIENTRY glDrawElementsIndirect (GLenum mode, GLenum type, const void *indirect); +GL_APICALL void GL_APIENTRY glFramebufferParameteri (GLenum target, GLenum pname, GLint param); +GL_APICALL void GL_APIENTRY glGetFramebufferParameteriv (GLenum target, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetProgramInterfaceiv (GLuint program, GLenum programInterface, GLenum pname, GLint *params); +GL_APICALL GLuint GL_APIENTRY glGetProgramResourceIndex (GLuint program, GLenum programInterface, const GLchar *name); +GL_APICALL void GL_APIENTRY glGetProgramResourceName (GLuint program, GLenum programInterface, GLuint index, GLsizei bufSize, GLsizei *length, GLchar *name); +GL_APICALL void GL_APIENTRY glGetProgramResourceiv (GLuint program, GLenum programInterface, GLuint index, GLsizei propCount, const GLenum *props, GLsizei bufSize, GLsizei *length, GLint *params); +GL_APICALL GLint GL_APIENTRY glGetProgramResourceLocation (GLuint program, GLenum programInterface, const GLchar *name); +GL_APICALL void GL_APIENTRY glUseProgramStages (GLuint pipeline, GLbitfield stages, GLuint program); +GL_APICALL void GL_APIENTRY glActiveShaderProgram (GLuint pipeline, GLuint program); +GL_APICALL GLuint GL_APIENTRY glCreateShaderProgramv (GLenum type, GLsizei count, const GLchar *const*strings); +GL_APICALL 
void GL_APIENTRY glBindProgramPipeline (GLuint pipeline); +GL_APICALL void GL_APIENTRY glDeleteProgramPipelines (GLsizei n, const GLuint *pipelines); +GL_APICALL void GL_APIENTRY glGenProgramPipelines (GLsizei n, GLuint *pipelines); +GL_APICALL GLboolean GL_APIENTRY glIsProgramPipeline (GLuint pipeline); +GL_APICALL void GL_APIENTRY glGetProgramPipelineiv (GLuint pipeline, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glProgramUniform1i (GLuint program, GLint location, GLint v0); +GL_APICALL void GL_APIENTRY glProgramUniform2i (GLuint program, GLint location, GLint v0, GLint v1); +GL_APICALL void GL_APIENTRY glProgramUniform3i (GLuint program, GLint location, GLint v0, GLint v1, GLint v2); +GL_APICALL void GL_APIENTRY glProgramUniform4i (GLuint program, GLint location, GLint v0, GLint v1, GLint v2, GLint v3); +GL_APICALL void GL_APIENTRY glProgramUniform1ui (GLuint program, GLint location, GLuint v0); +GL_APICALL void GL_APIENTRY glProgramUniform2ui (GLuint program, GLint location, GLuint v0, GLuint v1); +GL_APICALL void GL_APIENTRY glProgramUniform3ui (GLuint program, GLint location, GLuint v0, GLuint v1, GLuint v2); +GL_APICALL void GL_APIENTRY glProgramUniform4ui (GLuint program, GLint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3); +GL_APICALL void GL_APIENTRY glProgramUniform1f (GLuint program, GLint location, GLfloat v0); +GL_APICALL void GL_APIENTRY glProgramUniform2f (GLuint program, GLint location, GLfloat v0, GLfloat v1); +GL_APICALL void GL_APIENTRY glProgramUniform3f (GLuint program, GLint location, GLfloat v0, GLfloat v1, GLfloat v2); +GL_APICALL void GL_APIENTRY glProgramUniform4f (GLuint program, GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3); +GL_APICALL void GL_APIENTRY glProgramUniform1iv (GLuint program, GLint location, GLsizei count, const GLint *value); +GL_APICALL void GL_APIENTRY glProgramUniform2iv (GLuint program, GLint location, GLsizei count, const GLint *value); +GL_APICALL void GL_APIENTRY glProgramUniform3iv (GLuint program, GLint location, GLsizei count, const GLint *value); +GL_APICALL void GL_APIENTRY glProgramUniform4iv (GLuint program, GLint location, GLsizei count, const GLint *value); +GL_APICALL void GL_APIENTRY glProgramUniform1uiv (GLuint program, GLint location, GLsizei count, const GLuint *value); +GL_APICALL void GL_APIENTRY glProgramUniform2uiv (GLuint program, GLint location, GLsizei count, const GLuint *value); +GL_APICALL void GL_APIENTRY glProgramUniform3uiv (GLuint program, GLint location, GLsizei count, const GLuint *value); +GL_APICALL void GL_APIENTRY glProgramUniform4uiv (GLuint program, GLint location, GLsizei count, const GLuint *value); +GL_APICALL void GL_APIENTRY glProgramUniform1fv (GLuint program, GLint location, GLsizei count, const GLfloat *value); +GL_APICALL void GL_APIENTRY glProgramUniform2fv (GLuint program, GLint location, GLsizei count, const GLfloat *value); +GL_APICALL void GL_APIENTRY glProgramUniform3fv (GLuint program, GLint location, GLsizei count, const GLfloat *value); +GL_APICALL void GL_APIENTRY glProgramUniform4fv (GLuint program, GLint location, GLsizei count, const GLfloat *value); +GL_APICALL void GL_APIENTRY glProgramUniformMatrix2fv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glProgramUniformMatrix3fv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glProgramUniformMatrix4fv (GLuint program, GLint location, GLsizei count, 
GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glProgramUniformMatrix2x3fv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glProgramUniformMatrix3x2fv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glProgramUniformMatrix2x4fv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glProgramUniformMatrix4x2fv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glProgramUniformMatrix3x4fv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glProgramUniformMatrix4x3fv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glValidateProgramPipeline (GLuint pipeline); +GL_APICALL void GL_APIENTRY glGetProgramPipelineInfoLog (GLuint pipeline, GLsizei bufSize, GLsizei *length, GLchar *infoLog); +GL_APICALL void GL_APIENTRY glBindImageTexture (GLuint unit, GLuint texture, GLint level, GLboolean layered, GLint layer, GLenum access, GLenum format); +GL_APICALL void GL_APIENTRY glGetBooleani_v (GLenum target, GLuint index, GLboolean *data); +GL_APICALL void GL_APIENTRY glMemoryBarrier (GLbitfield barriers); +GL_APICALL void GL_APIENTRY glMemoryBarrierByRegion (GLbitfield barriers); +GL_APICALL void GL_APIENTRY glTexStorage2DMultisample (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLboolean fixedsamplelocations); +GL_APICALL void GL_APIENTRY glGetMultisamplefv (GLenum pname, GLuint index, GLfloat *val); +GL_APICALL void GL_APIENTRY glSampleMaski (GLuint maskNumber, GLbitfield mask); +GL_APICALL void GL_APIENTRY glGetTexLevelParameteriv (GLenum target, GLint level, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetTexLevelParameterfv (GLenum target, GLint level, GLenum pname, GLfloat *params); +GL_APICALL void GL_APIENTRY glBindVertexBuffer (GLuint bindingindex, GLuint buffer, GLintptr offset, GLsizei stride); +GL_APICALL void GL_APIENTRY glVertexAttribFormat (GLuint attribindex, GLint size, GLenum type, GLboolean normalized, GLuint relativeoffset); +GL_APICALL void GL_APIENTRY glVertexAttribIFormat (GLuint attribindex, GLint size, GLenum type, GLuint relativeoffset); +GL_APICALL void GL_APIENTRY glVertexAttribBinding (GLuint attribindex, GLuint bindingindex); +GL_APICALL void GL_APIENTRY glVertexBindingDivisor (GLuint bindingindex, GLuint divisor); +#endif +#endif /* GL_ES_VERSION_3_1 */ + +#ifndef GL_ES_VERSION_3_2 +#define GL_ES_VERSION_3_2 1 +typedef void (GL_APIENTRY *GLDEBUGPROC)(GLenum source,GLenum type,GLuint id,GLenum severity,GLsizei length,const GLchar *message,const void *userParam); +#define GL_MULTISAMPLE_LINE_WIDTH_RANGE 0x9381 +#define GL_MULTISAMPLE_LINE_WIDTH_GRANULARITY 0x9382 +#define GL_MULTIPLY 0x9294 +#define GL_SCREEN 0x9295 +#define GL_OVERLAY 0x9296 +#define GL_DARKEN 0x9297 +#define GL_LIGHTEN 0x9298 +#define GL_COLORDODGE 0x9299 +#define GL_COLORBURN 0x929A +#define GL_HARDLIGHT 0x929B +#define GL_SOFTLIGHT 0x929C +#define GL_DIFFERENCE 0x929E +#define GL_EXCLUSION 0x92A0 +#define GL_HSL_HUE 0x92AD +#define GL_HSL_SATURATION 0x92AE +#define GL_HSL_COLOR 0x92AF +#define GL_HSL_LUMINOSITY 0x92B0 +#define GL_DEBUG_OUTPUT_SYNCHRONOUS 0x8242 +#define GL_DEBUG_NEXT_LOGGED_MESSAGE_LENGTH 
0x8243 +#define GL_DEBUG_CALLBACK_FUNCTION 0x8244 +#define GL_DEBUG_CALLBACK_USER_PARAM 0x8245 +#define GL_DEBUG_SOURCE_API 0x8246 +#define GL_DEBUG_SOURCE_WINDOW_SYSTEM 0x8247 +#define GL_DEBUG_SOURCE_SHADER_COMPILER 0x8248 +#define GL_DEBUG_SOURCE_THIRD_PARTY 0x8249 +#define GL_DEBUG_SOURCE_APPLICATION 0x824A +#define GL_DEBUG_SOURCE_OTHER 0x824B +#define GL_DEBUG_TYPE_ERROR 0x824C +#define GL_DEBUG_TYPE_DEPRECATED_BEHAVIOR 0x824D +#define GL_DEBUG_TYPE_UNDEFINED_BEHAVIOR 0x824E +#define GL_DEBUG_TYPE_PORTABILITY 0x824F +#define GL_DEBUG_TYPE_PERFORMANCE 0x8250 +#define GL_DEBUG_TYPE_OTHER 0x8251 +#define GL_DEBUG_TYPE_MARKER 0x8268 +#define GL_DEBUG_TYPE_PUSH_GROUP 0x8269 +#define GL_DEBUG_TYPE_POP_GROUP 0x826A +#define GL_DEBUG_SEVERITY_NOTIFICATION 0x826B +#define GL_MAX_DEBUG_GROUP_STACK_DEPTH 0x826C +#define GL_DEBUG_GROUP_STACK_DEPTH 0x826D +#define GL_BUFFER 0x82E0 +#define GL_SHADER 0x82E1 +#define GL_PROGRAM 0x82E2 +#define GL_VERTEX_ARRAY 0x8074 +#define GL_QUERY 0x82E3 +#define GL_PROGRAM_PIPELINE 0x82E4 +#define GL_SAMPLER 0x82E6 +#define GL_MAX_LABEL_LENGTH 0x82E8 +#define GL_MAX_DEBUG_MESSAGE_LENGTH 0x9143 +#define GL_MAX_DEBUG_LOGGED_MESSAGES 0x9144 +#define GL_DEBUG_LOGGED_MESSAGES 0x9145 +#define GL_DEBUG_SEVERITY_HIGH 0x9146 +#define GL_DEBUG_SEVERITY_MEDIUM 0x9147 +#define GL_DEBUG_SEVERITY_LOW 0x9148 +#define GL_DEBUG_OUTPUT 0x92E0 +#define GL_CONTEXT_FLAG_DEBUG_BIT 0x00000002 +#define GL_STACK_OVERFLOW 0x0503 +#define GL_STACK_UNDERFLOW 0x0504 +#define GL_GEOMETRY_SHADER 0x8DD9 +#define GL_GEOMETRY_SHADER_BIT 0x00000004 +#define GL_GEOMETRY_VERTICES_OUT 0x8916 +#define GL_GEOMETRY_INPUT_TYPE 0x8917 +#define GL_GEOMETRY_OUTPUT_TYPE 0x8918 +#define GL_GEOMETRY_SHADER_INVOCATIONS 0x887F +#define GL_LAYER_PROVOKING_VERTEX 0x825E +#define GL_LINES_ADJACENCY 0x000A +#define GL_LINE_STRIP_ADJACENCY 0x000B +#define GL_TRIANGLES_ADJACENCY 0x000C +#define GL_TRIANGLE_STRIP_ADJACENCY 0x000D +#define GL_MAX_GEOMETRY_UNIFORM_COMPONENTS 0x8DDF +#define GL_MAX_GEOMETRY_UNIFORM_BLOCKS 0x8A2C +#define GL_MAX_COMBINED_GEOMETRY_UNIFORM_COMPONENTS 0x8A32 +#define GL_MAX_GEOMETRY_INPUT_COMPONENTS 0x9123 +#define GL_MAX_GEOMETRY_OUTPUT_COMPONENTS 0x9124 +#define GL_MAX_GEOMETRY_OUTPUT_VERTICES 0x8DE0 +#define GL_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS 0x8DE1 +#define GL_MAX_GEOMETRY_SHADER_INVOCATIONS 0x8E5A +#define GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS 0x8C29 +#define GL_MAX_GEOMETRY_ATOMIC_COUNTER_BUFFERS 0x92CF +#define GL_MAX_GEOMETRY_ATOMIC_COUNTERS 0x92D5 +#define GL_MAX_GEOMETRY_IMAGE_UNIFORMS 0x90CD +#define GL_MAX_GEOMETRY_SHADER_STORAGE_BLOCKS 0x90D7 +#define GL_FIRST_VERTEX_CONVENTION 0x8E4D +#define GL_LAST_VERTEX_CONVENTION 0x8E4E +#define GL_UNDEFINED_VERTEX 0x8260 +#define GL_PRIMITIVES_GENERATED 0x8C87 +#define GL_FRAMEBUFFER_DEFAULT_LAYERS 0x9312 +#define GL_MAX_FRAMEBUFFER_LAYERS 0x9317 +#define GL_FRAMEBUFFER_INCOMPLETE_LAYER_TARGETS 0x8DA8 +#define GL_FRAMEBUFFER_ATTACHMENT_LAYERED 0x8DA7 +#define GL_REFERENCED_BY_GEOMETRY_SHADER 0x9309 +#define GL_PRIMITIVE_BOUNDING_BOX 0x92BE +#define GL_CONTEXT_FLAG_ROBUST_ACCESS_BIT 0x00000004 +#define GL_CONTEXT_FLAGS 0x821E +#define GL_LOSE_CONTEXT_ON_RESET 0x8252 +#define GL_GUILTY_CONTEXT_RESET 0x8253 +#define GL_INNOCENT_CONTEXT_RESET 0x8254 +#define GL_UNKNOWN_CONTEXT_RESET 0x8255 +#define GL_RESET_NOTIFICATION_STRATEGY 0x8256 +#define GL_NO_RESET_NOTIFICATION 0x8261 +#define GL_CONTEXT_LOST 0x0507 +#define GL_SAMPLE_SHADING 0x8C36 +#define GL_MIN_SAMPLE_SHADING_VALUE 0x8C37 +#define GL_MIN_FRAGMENT_INTERPOLATION_OFFSET 0x8E5B 
+#define GL_MAX_FRAGMENT_INTERPOLATION_OFFSET 0x8E5C +#define GL_FRAGMENT_INTERPOLATION_OFFSET_BITS 0x8E5D +#define GL_PATCHES 0x000E +#define GL_PATCH_VERTICES 0x8E72 +#define GL_TESS_CONTROL_OUTPUT_VERTICES 0x8E75 +#define GL_TESS_GEN_MODE 0x8E76 +#define GL_TESS_GEN_SPACING 0x8E77 +#define GL_TESS_GEN_VERTEX_ORDER 0x8E78 +#define GL_TESS_GEN_POINT_MODE 0x8E79 +#define GL_ISOLINES 0x8E7A +#define GL_QUADS 0x0007 +#define GL_FRACTIONAL_ODD 0x8E7B +#define GL_FRACTIONAL_EVEN 0x8E7C +#define GL_MAX_PATCH_VERTICES 0x8E7D +#define GL_MAX_TESS_GEN_LEVEL 0x8E7E +#define GL_MAX_TESS_CONTROL_UNIFORM_COMPONENTS 0x8E7F +#define GL_MAX_TESS_EVALUATION_UNIFORM_COMPONENTS 0x8E80 +#define GL_MAX_TESS_CONTROL_TEXTURE_IMAGE_UNITS 0x8E81 +#define GL_MAX_TESS_EVALUATION_TEXTURE_IMAGE_UNITS 0x8E82 +#define GL_MAX_TESS_CONTROL_OUTPUT_COMPONENTS 0x8E83 +#define GL_MAX_TESS_PATCH_COMPONENTS 0x8E84 +#define GL_MAX_TESS_CONTROL_TOTAL_OUTPUT_COMPONENTS 0x8E85 +#define GL_MAX_TESS_EVALUATION_OUTPUT_COMPONENTS 0x8E86 +#define GL_MAX_TESS_CONTROL_UNIFORM_BLOCKS 0x8E89 +#define GL_MAX_TESS_EVALUATION_UNIFORM_BLOCKS 0x8E8A +#define GL_MAX_TESS_CONTROL_INPUT_COMPONENTS 0x886C +#define GL_MAX_TESS_EVALUATION_INPUT_COMPONENTS 0x886D +#define GL_MAX_COMBINED_TESS_CONTROL_UNIFORM_COMPONENTS 0x8E1E +#define GL_MAX_COMBINED_TESS_EVALUATION_UNIFORM_COMPONENTS 0x8E1F +#define GL_MAX_TESS_CONTROL_ATOMIC_COUNTER_BUFFERS 0x92CD +#define GL_MAX_TESS_EVALUATION_ATOMIC_COUNTER_BUFFERS 0x92CE +#define GL_MAX_TESS_CONTROL_ATOMIC_COUNTERS 0x92D3 +#define GL_MAX_TESS_EVALUATION_ATOMIC_COUNTERS 0x92D4 +#define GL_MAX_TESS_CONTROL_IMAGE_UNIFORMS 0x90CB +#define GL_MAX_TESS_EVALUATION_IMAGE_UNIFORMS 0x90CC +#define GL_MAX_TESS_CONTROL_SHADER_STORAGE_BLOCKS 0x90D8 +#define GL_MAX_TESS_EVALUATION_SHADER_STORAGE_BLOCKS 0x90D9 +#define GL_PRIMITIVE_RESTART_FOR_PATCHES_SUPPORTED 0x8221 +#define GL_IS_PER_PATCH 0x92E7 +#define GL_REFERENCED_BY_TESS_CONTROL_SHADER 0x9307 +#define GL_REFERENCED_BY_TESS_EVALUATION_SHADER 0x9308 +#define GL_TESS_CONTROL_SHADER 0x8E88 +#define GL_TESS_EVALUATION_SHADER 0x8E87 +#define GL_TESS_CONTROL_SHADER_BIT 0x00000008 +#define GL_TESS_EVALUATION_SHADER_BIT 0x00000010 +#define GL_TEXTURE_BORDER_COLOR 0x1004 +#define GL_CLAMP_TO_BORDER 0x812D +#define GL_TEXTURE_BUFFER 0x8C2A +#define GL_TEXTURE_BUFFER_BINDING 0x8C2A +#define GL_MAX_TEXTURE_BUFFER_SIZE 0x8C2B +#define GL_TEXTURE_BINDING_BUFFER 0x8C2C +#define GL_TEXTURE_BUFFER_DATA_STORE_BINDING 0x8C2D +#define GL_TEXTURE_BUFFER_OFFSET_ALIGNMENT 0x919F +#define GL_SAMPLER_BUFFER 0x8DC2 +#define GL_INT_SAMPLER_BUFFER 0x8DD0 +#define GL_UNSIGNED_INT_SAMPLER_BUFFER 0x8DD8 +#define GL_IMAGE_BUFFER 0x9051 +#define GL_INT_IMAGE_BUFFER 0x905C +#define GL_UNSIGNED_INT_IMAGE_BUFFER 0x9067 +#define GL_TEXTURE_BUFFER_OFFSET 0x919D +#define GL_TEXTURE_BUFFER_SIZE 0x919E +#define GL_COMPRESSED_RGBA_ASTC_4x4 0x93B0 +#define GL_COMPRESSED_RGBA_ASTC_5x4 0x93B1 +#define GL_COMPRESSED_RGBA_ASTC_5x5 0x93B2 +#define GL_COMPRESSED_RGBA_ASTC_6x5 0x93B3 +#define GL_COMPRESSED_RGBA_ASTC_6x6 0x93B4 +#define GL_COMPRESSED_RGBA_ASTC_8x5 0x93B5 +#define GL_COMPRESSED_RGBA_ASTC_8x6 0x93B6 +#define GL_COMPRESSED_RGBA_ASTC_8x8 0x93B7 +#define GL_COMPRESSED_RGBA_ASTC_10x5 0x93B8 +#define GL_COMPRESSED_RGBA_ASTC_10x6 0x93B9 +#define GL_COMPRESSED_RGBA_ASTC_10x8 0x93BA +#define GL_COMPRESSED_RGBA_ASTC_10x10 0x93BB +#define GL_COMPRESSED_RGBA_ASTC_12x10 0x93BC +#define GL_COMPRESSED_RGBA_ASTC_12x12 0x93BD +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4 0x93D0 +#define 
GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x4 0x93D1 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5 0x93D2 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x5 0x93D3 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6 0x93D4 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x5 0x93D5 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x6 0x93D6 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x8 0x93D7 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x5 0x93D8 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x6 0x93D9 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x8 0x93DA +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x10 0x93DB +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x10 0x93DC +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x12 0x93DD +#define GL_TEXTURE_CUBE_MAP_ARRAY 0x9009 +#define GL_TEXTURE_BINDING_CUBE_MAP_ARRAY 0x900A +#define GL_SAMPLER_CUBE_MAP_ARRAY 0x900C +#define GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW 0x900D +#define GL_INT_SAMPLER_CUBE_MAP_ARRAY 0x900E +#define GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY 0x900F +#define GL_IMAGE_CUBE_MAP_ARRAY 0x9054 +#define GL_INT_IMAGE_CUBE_MAP_ARRAY 0x905F +#define GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY 0x906A +#define GL_TEXTURE_2D_MULTISAMPLE_ARRAY 0x9102 +#define GL_TEXTURE_BINDING_2D_MULTISAMPLE_ARRAY 0x9105 +#define GL_SAMPLER_2D_MULTISAMPLE_ARRAY 0x910B +#define GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY 0x910C +#define GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY 0x910D +typedef void (GL_APIENTRYP PFNGLBLENDBARRIERPROC) (void); +typedef void (GL_APIENTRYP PFNGLCOPYIMAGESUBDATAPROC) (GLuint srcName, GLenum srcTarget, GLint srcLevel, GLint srcX, GLint srcY, GLint srcZ, GLuint dstName, GLenum dstTarget, GLint dstLevel, GLint dstX, GLint dstY, GLint dstZ, GLsizei srcWidth, GLsizei srcHeight, GLsizei srcDepth); +typedef void (GL_APIENTRYP PFNGLDEBUGMESSAGECONTROLPROC) (GLenum source, GLenum type, GLenum severity, GLsizei count, const GLuint *ids, GLboolean enabled); +typedef void (GL_APIENTRYP PFNGLDEBUGMESSAGEINSERTPROC) (GLenum source, GLenum type, GLuint id, GLenum severity, GLsizei length, const GLchar *buf); +typedef void (GL_APIENTRYP PFNGLDEBUGMESSAGECALLBACKPROC) (GLDEBUGPROC callback, const void *userParam); +typedef GLuint (GL_APIENTRYP PFNGLGETDEBUGMESSAGELOGPROC) (GLuint count, GLsizei bufSize, GLenum *sources, GLenum *types, GLuint *ids, GLenum *severities, GLsizei *lengths, GLchar *messageLog); +typedef void (GL_APIENTRYP PFNGLPUSHDEBUGGROUPPROC) (GLenum source, GLuint id, GLsizei length, const GLchar *message); +typedef void (GL_APIENTRYP PFNGLPOPDEBUGGROUPPROC) (void); +typedef void (GL_APIENTRYP PFNGLOBJECTLABELPROC) (GLenum identifier, GLuint name, GLsizei length, const GLchar *label); +typedef void (GL_APIENTRYP PFNGLGETOBJECTLABELPROC) (GLenum identifier, GLuint name, GLsizei bufSize, GLsizei *length, GLchar *label); +typedef void (GL_APIENTRYP PFNGLOBJECTPTRLABELPROC) (const void *ptr, GLsizei length, const GLchar *label); +typedef void (GL_APIENTRYP PFNGLGETOBJECTPTRLABELPROC) (const void *ptr, GLsizei bufSize, GLsizei *length, GLchar *label); +typedef void (GL_APIENTRYP PFNGLGETPOINTERVPROC) (GLenum pname, void **params); +typedef void (GL_APIENTRYP PFNGLENABLEIPROC) (GLenum target, GLuint index); +typedef void (GL_APIENTRYP PFNGLDISABLEIPROC) (GLenum target, GLuint index); +typedef void (GL_APIENTRYP PFNGLBLENDEQUATIONIPROC) (GLuint buf, GLenum mode); +typedef void (GL_APIENTRYP PFNGLBLENDEQUATIONSEPARATEIPROC) (GLuint buf, GLenum modeRGB, GLenum modeAlpha); +typedef void (GL_APIENTRYP PFNGLBLENDFUNCIPROC) (GLuint buf, GLenum src, GLenum dst); +typedef void (GL_APIENTRYP PFNGLBLENDFUNCSEPARATEIPROC) 
(GLuint buf, GLenum srcRGB, GLenum dstRGB, GLenum srcAlpha, GLenum dstAlpha); +typedef void (GL_APIENTRYP PFNGLCOLORMASKIPROC) (GLuint index, GLboolean r, GLboolean g, GLboolean b, GLboolean a); +typedef GLboolean (GL_APIENTRYP PFNGLISENABLEDIPROC) (GLenum target, GLuint index); +typedef void (GL_APIENTRYP PFNGLDRAWELEMENTSBASEVERTEXPROC) (GLenum mode, GLsizei count, GLenum type, const void *indices, GLint basevertex); +typedef void (GL_APIENTRYP PFNGLDRAWRANGEELEMENTSBASEVERTEXPROC) (GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum type, const void *indices, GLint basevertex); +typedef void (GL_APIENTRYP PFNGLDRAWELEMENTSINSTANCEDBASEVERTEXPROC) (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount, GLint basevertex); +typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERTEXTUREPROC) (GLenum target, GLenum attachment, GLuint texture, GLint level); +typedef void (GL_APIENTRYP PFNGLPRIMITIVEBOUNDINGBOXPROC) (GLfloat minX, GLfloat minY, GLfloat minZ, GLfloat minW, GLfloat maxX, GLfloat maxY, GLfloat maxZ, GLfloat maxW); +typedef GLenum (GL_APIENTRYP PFNGLGETGRAPHICSRESETSTATUSPROC) (void); +typedef void (GL_APIENTRYP PFNGLREADNPIXELSPROC) (GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, GLsizei bufSize, void *data); +typedef void (GL_APIENTRYP PFNGLGETNUNIFORMFVPROC) (GLuint program, GLint location, GLsizei bufSize, GLfloat *params); +typedef void (GL_APIENTRYP PFNGLGETNUNIFORMIVPROC) (GLuint program, GLint location, GLsizei bufSize, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETNUNIFORMUIVPROC) (GLuint program, GLint location, GLsizei bufSize, GLuint *params); +typedef void (GL_APIENTRYP PFNGLMINSAMPLESHADINGPROC) (GLfloat value); +typedef void (GL_APIENTRYP PFNGLPATCHPARAMETERIPROC) (GLenum pname, GLint value); +typedef void (GL_APIENTRYP PFNGLTEXPARAMETERIIVPROC) (GLenum target, GLenum pname, const GLint *params); +typedef void (GL_APIENTRYP PFNGLTEXPARAMETERIUIVPROC) (GLenum target, GLenum pname, const GLuint *params); +typedef void (GL_APIENTRYP PFNGLGETTEXPARAMETERIIVPROC) (GLenum target, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETTEXPARAMETERIUIVPROC) (GLenum target, GLenum pname, GLuint *params); +typedef void (GL_APIENTRYP PFNGLSAMPLERPARAMETERIIVPROC) (GLuint sampler, GLenum pname, const GLint *param); +typedef void (GL_APIENTRYP PFNGLSAMPLERPARAMETERIUIVPROC) (GLuint sampler, GLenum pname, const GLuint *param); +typedef void (GL_APIENTRYP PFNGLGETSAMPLERPARAMETERIIVPROC) (GLuint sampler, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETSAMPLERPARAMETERIUIVPROC) (GLuint sampler, GLenum pname, GLuint *params); +typedef void (GL_APIENTRYP PFNGLTEXBUFFERPROC) (GLenum target, GLenum internalformat, GLuint buffer); +typedef void (GL_APIENTRYP PFNGLTEXBUFFERRANGEPROC) (GLenum target, GLenum internalformat, GLuint buffer, GLintptr offset, GLsizeiptr size); +typedef void (GL_APIENTRYP PFNGLTEXSTORAGE3DMULTISAMPLEPROC) (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLboolean fixedsamplelocations); +#if GL_GLES_PROTOTYPES +GL_APICALL void GL_APIENTRY glBlendBarrier (void); +GL_APICALL void GL_APIENTRY glCopyImageSubData (GLuint srcName, GLenum srcTarget, GLint srcLevel, GLint srcX, GLint srcY, GLint srcZ, GLuint dstName, GLenum dstTarget, GLint dstLevel, GLint dstX, GLint dstY, GLint dstZ, GLsizei srcWidth, GLsizei srcHeight, GLsizei srcDepth); +GL_APICALL void GL_APIENTRY glDebugMessageControl (GLenum source, GLenum type, GLenum 
severity, GLsizei count, const GLuint *ids, GLboolean enabled); +GL_APICALL void GL_APIENTRY glDebugMessageInsert (GLenum source, GLenum type, GLuint id, GLenum severity, GLsizei length, const GLchar *buf); +GL_APICALL void GL_APIENTRY glDebugMessageCallback (GLDEBUGPROC callback, const void *userParam); +GL_APICALL GLuint GL_APIENTRY glGetDebugMessageLog (GLuint count, GLsizei bufSize, GLenum *sources, GLenum *types, GLuint *ids, GLenum *severities, GLsizei *lengths, GLchar *messageLog); +GL_APICALL void GL_APIENTRY glPushDebugGroup (GLenum source, GLuint id, GLsizei length, const GLchar *message); +GL_APICALL void GL_APIENTRY glPopDebugGroup (void); +GL_APICALL void GL_APIENTRY glObjectLabel (GLenum identifier, GLuint name, GLsizei length, const GLchar *label); +GL_APICALL void GL_APIENTRY glGetObjectLabel (GLenum identifier, GLuint name, GLsizei bufSize, GLsizei *length, GLchar *label); +GL_APICALL void GL_APIENTRY glObjectPtrLabel (const void *ptr, GLsizei length, const GLchar *label); +GL_APICALL void GL_APIENTRY glGetObjectPtrLabel (const void *ptr, GLsizei bufSize, GLsizei *length, GLchar *label); +GL_APICALL void GL_APIENTRY glGetPointerv (GLenum pname, void **params); +GL_APICALL void GL_APIENTRY glEnablei (GLenum target, GLuint index); +GL_APICALL void GL_APIENTRY glDisablei (GLenum target, GLuint index); +GL_APICALL void GL_APIENTRY glBlendEquationi (GLuint buf, GLenum mode); +GL_APICALL void GL_APIENTRY glBlendEquationSeparatei (GLuint buf, GLenum modeRGB, GLenum modeAlpha); +GL_APICALL void GL_APIENTRY glBlendFunci (GLuint buf, GLenum src, GLenum dst); +GL_APICALL void GL_APIENTRY glBlendFuncSeparatei (GLuint buf, GLenum srcRGB, GLenum dstRGB, GLenum srcAlpha, GLenum dstAlpha); +GL_APICALL void GL_APIENTRY glColorMaski (GLuint index, GLboolean r, GLboolean g, GLboolean b, GLboolean a); +GL_APICALL GLboolean GL_APIENTRY glIsEnabledi (GLenum target, GLuint index); +GL_APICALL void GL_APIENTRY glDrawElementsBaseVertex (GLenum mode, GLsizei count, GLenum type, const void *indices, GLint basevertex); +GL_APICALL void GL_APIENTRY glDrawRangeElementsBaseVertex (GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum type, const void *indices, GLint basevertex); +GL_APICALL void GL_APIENTRY glDrawElementsInstancedBaseVertex (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount, GLint basevertex); +GL_APICALL void GL_APIENTRY glFramebufferTexture (GLenum target, GLenum attachment, GLuint texture, GLint level); +GL_APICALL void GL_APIENTRY glPrimitiveBoundingBox (GLfloat minX, GLfloat minY, GLfloat minZ, GLfloat minW, GLfloat maxX, GLfloat maxY, GLfloat maxZ, GLfloat maxW); +GL_APICALL GLenum GL_APIENTRY glGetGraphicsResetStatus (void); +GL_APICALL void GL_APIENTRY glReadnPixels (GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, GLsizei bufSize, void *data); +GL_APICALL void GL_APIENTRY glGetnUniformfv (GLuint program, GLint location, GLsizei bufSize, GLfloat *params); +GL_APICALL void GL_APIENTRY glGetnUniformiv (GLuint program, GLint location, GLsizei bufSize, GLint *params); +GL_APICALL void GL_APIENTRY glGetnUniformuiv (GLuint program, GLint location, GLsizei bufSize, GLuint *params); +GL_APICALL void GL_APIENTRY glMinSampleShading (GLfloat value); +GL_APICALL void GL_APIENTRY glPatchParameteri (GLenum pname, GLint value); +GL_APICALL void GL_APIENTRY glTexParameterIiv (GLenum target, GLenum pname, const GLint *params); +GL_APICALL void GL_APIENTRY glTexParameterIuiv (GLenum target, GLenum pname, const GLuint *params); 
+GL_APICALL void GL_APIENTRY glGetTexParameterIiv (GLenum target, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetTexParameterIuiv (GLenum target, GLenum pname, GLuint *params); +GL_APICALL void GL_APIENTRY glSamplerParameterIiv (GLuint sampler, GLenum pname, const GLint *param); +GL_APICALL void GL_APIENTRY glSamplerParameterIuiv (GLuint sampler, GLenum pname, const GLuint *param); +GL_APICALL void GL_APIENTRY glGetSamplerParameterIiv (GLuint sampler, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetSamplerParameterIuiv (GLuint sampler, GLenum pname, GLuint *params); +GL_APICALL void GL_APIENTRY glTexBuffer (GLenum target, GLenum internalformat, GLuint buffer); +GL_APICALL void GL_APIENTRY glTexBufferRange (GLenum target, GLenum internalformat, GLuint buffer, GLintptr offset, GLsizeiptr size); +GL_APICALL void GL_APIENTRY glTexStorage3DMultisample (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLboolean fixedsamplelocations); +#endif +#endif /* GL_ES_VERSION_3_2 */ + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/sources/khronos/GLES3/gl3ext.h b/sources/khronos/GLES3/gl3ext.h new file mode 100755 index 00000000..4d4ea96c --- /dev/null +++ b/sources/khronos/GLES3/gl3ext.h @@ -0,0 +1,24 @@ +#ifndef __gl3ext_h_ +#define __gl3ext_h_ + +/* $Revision: 17809 $ on $Date:: 2012-05-14 08:03:36 -0700 #$ */ + +/* + * This document is licensed under the SGI Free Software B License Version + * 2.0. For details, see http://oss.sgi.com/projects/FreeB/ . + */ + +/* OpenGL ES 3 Extensions + * + * After an OES extension's interactions with OpenGL ES 3.0 have been documented, + * its tokens and function definitions should be added to this file in a manner + * that does not conflict with gl2ext.h or gl3.h. + * + * Tokens and function definitions for extensions that have become standard + * features in OpenGL ES 3.0 will not be added to this file. + * + * Applications using OpenGL-ES-2-only extensions should include gl2ext.h + */ + +#endif /* __gl3ext_h_ */ + diff --git a/sources/khronos/GLES3/gl3platform.h b/sources/khronos/GLES3/gl3platform.h new file mode 100755 index 00000000..2550c5a4 --- /dev/null +++ b/sources/khronos/GLES3/gl3platform.h @@ -0,0 +1,38 @@ +#ifndef __gl3platform_h_ +#define __gl3platform_h_ + +/* +** Copyright (c) 2017 The Khronos Group Inc. +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +/* Platform-specific types and definitions for OpenGL ES 3.X gl3.h + * + * Adopters may modify khrplatform.h and this file to suit their platform.
+ * Please contribute modifications back to Khronos as pull requests on the + * public github repository: + * https://github.com/KhronosGroup/OpenGL-Registry + */ + +#include <KHR/khrplatform.h> + +#ifndef GL_APICALL +#define GL_APICALL KHRONOS_APICALL +#endif + +#ifndef GL_APIENTRY +#define GL_APIENTRY KHRONOS_APIENTRY +#endif + +#endif /* __gl3platform_h_ */ diff --git a/sources/khronos/KHR/khrplatform.h b/sources/khronos/KHR/khrplatform.h new file mode 100755 index 00000000..153bbbd2 --- /dev/null +++ b/sources/khronos/KHR/khrplatform.h @@ -0,0 +1,271 @@ +#ifndef __khrplatform_h_ +#define __khrplatform_h_ + +/* +** Copyright (c) 2008-2009 The Khronos Group Inc. +** +** Permission is hereby granted, free of charge, to any person obtaining a +** copy of this software and/or associated documentation files (the +** "Materials"), to deal in the Materials without restriction, including +** without limitation the rights to use, copy, modify, merge, publish, +** distribute, sublicense, and/or sell copies of the Materials, and to +** permit persons to whom the Materials are furnished to do so, subject to +** the following conditions: +** +** The above copyright notice and this permission notice shall be included +** in all copies or substantial portions of the Materials. +** +** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +** MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS. +*/ + +/* Khronos platform-specific types and definitions. + * + * $Revision: 9356 $ on $Date: 2009-10-21 02:52:25 -0700 (Wed, 21 Oct 2009) $ + * + * Adopters may modify this file to suit their platform. Adopters are + * encouraged to submit platform specific modifications to the Khronos + * group so that they can be included in future versions of this file. + * Please submit changes by sending them to the public Khronos Bugzilla + * (http://khronos.org/bugzilla) by filing a bug against product + * "Khronos (general)" component "Registry". + * + * A predefined template which fills in some of the bug fields can be + * reached using http://tinyurl.com/khrplatform-h-bugreport, but you + * must create a Bugzilla login first. + * + * + * See the Implementer's Guidelines for information about where this file + * should be located on your system and for more details of its use: + * http://www.khronos.org/registry/implementers_guide.pdf + * + * This file should be included as + * #include <KHR/khrplatform.h> + * by Khronos client API header files that use its types and defines. + * + * The types in khrplatform.h should only be used to define API-specific types.
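+ * + * For example (an editorial illustration, not upstream text): a client API + * header such as gl3.h layers its public types on these definitions, + * e.g. "typedef khronos_int8_t GLbyte;" and "typedef khronos_float_t GLfloat;".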
+ * + * Types defined in khrplatform.h: + * khronos_int8_t signed 8 bit + * khronos_uint8_t unsigned 8 bit + * khronos_int16_t signed 16 bit + * khronos_uint16_t unsigned 16 bit + * khronos_int32_t signed 32 bit + * khronos_uint32_t unsigned 32 bit + * khronos_int64_t signed 64 bit + * khronos_uint64_t unsigned 64 bit + * khronos_intptr_t signed same number of bits as a pointer + * khronos_uintptr_t unsigned same number of bits as a pointer + * khronos_ssize_t signed size + * khronos_usize_t unsigned size + * khronos_float_t signed 32 bit floating point + * khronos_time_ns_t unsigned 64 bit time in nanoseconds + * khronos_utime_nanoseconds_t unsigned time interval or absolute time in + * nanoseconds + * khronos_stime_nanoseconds_t signed time interval in nanoseconds + * khronos_boolean_enum_t enumerated boolean type. This should + * only be used as a base type when a client API's boolean type is + * an enum. Client APIs which use an integer or other type for + * booleans cannot use this as the base type for their boolean. + * + * Tokens defined in khrplatform.h: + * + * KHRONOS_FALSE, KHRONOS_TRUE Enumerated boolean false/true values. + * + * KHRONOS_SUPPORT_INT64 is 1 if 64 bit integers are supported; otherwise 0. + * KHRONOS_SUPPORT_FLOAT is 1 if floats are supported; otherwise 0. + * + * Calling convention macros defined in this file: + * KHRONOS_APICALL + * KHRONOS_APIENTRY + * KHRONOS_APIATTRIBUTES + * + * These may be used in function prototypes as: + * + * KHRONOS_APICALL void KHRONOS_APIENTRY funcname( + * int arg1, + * int arg2) KHRONOS_APIATTRIBUTES; + */ + +/*------------------------------------------------------------------------- + * Definition of KHRONOS_APICALL + *------------------------------------------------------------------------- + * This precedes the return type of the function in the function prototype. + */ +#if defined(_WIN32) && !defined(__SCITECH_SNAP__) +# define KHRONOS_APICALL __declspec(dllimport) +#elif defined (__SYMBIAN32__) +# define KHRONOS_APICALL IMPORT_C +#elif defined(ANDROID) +# define KHRONOS_APICALL __attribute__((visibility("default"))) +#else +# define KHRONOS_APICALL +#endif + +/*------------------------------------------------------------------------- + * Definition of KHRONOS_APIENTRY + *------------------------------------------------------------------------- + * This follows the return type of the function and precedes the function + * name in the function prototype. + */ +#if defined(_WIN32) && !defined(_WIN32_WCE) && !defined(__SCITECH_SNAP__) + /* Win32 but not WinCE */ +# define KHRONOS_APIENTRY __stdcall +#else +# define KHRONOS_APIENTRY +#endif + +/*------------------------------------------------------------------------- + * Definition of KHRONOS_APIATTRIBUTES + *------------------------------------------------------------------------- + * This follows the closing parenthesis of the function prototype arguments. 
+ */ +#if defined (__ARMCC_2__) +#define KHRONOS_APIATTRIBUTES __softfp +#else +#define KHRONOS_APIATTRIBUTES +#endif + +/*------------------------------------------------------------------------- + * basic type definitions + *-----------------------------------------------------------------------*/ +#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || defined(__GNUC__) || defined(__SCO__) || defined(__USLC__) + + +/* + * Using <stdint.h> + */ +#include <stdint.h> +typedef int32_t khronos_int32_t; +typedef uint32_t khronos_uint32_t; +typedef int64_t khronos_int64_t; +typedef uint64_t khronos_uint64_t; +#define KHRONOS_SUPPORT_INT64 1 +#define KHRONOS_SUPPORT_FLOAT 1 + +#elif defined(__VMS ) || defined(__sgi) + +/* + * Using <inttypes.h> + */ +#include <inttypes.h> +typedef int32_t khronos_int32_t; +typedef uint32_t khronos_uint32_t; +typedef int64_t khronos_int64_t; +typedef uint64_t khronos_uint64_t; +#define KHRONOS_SUPPORT_INT64 1 +#define KHRONOS_SUPPORT_FLOAT 1 + +#elif defined(_WIN32) && !defined(__SCITECH_SNAP__) + +/* + * Win32 + */ +typedef __int32 khronos_int32_t; +typedef unsigned __int32 khronos_uint32_t; +typedef __int64 khronos_int64_t; +typedef unsigned __int64 khronos_uint64_t; +#define KHRONOS_SUPPORT_INT64 1 +#define KHRONOS_SUPPORT_FLOAT 1 + +#elif defined(__sun__) || defined(__digital__) + +/* + * Sun or Digital + */ +typedef int khronos_int32_t; +typedef unsigned int khronos_uint32_t; +#if defined(__arch64__) || defined(_LP64) +typedef long int khronos_int64_t; +typedef unsigned long int khronos_uint64_t; +#else +typedef long long int khronos_int64_t; +typedef unsigned long long int khronos_uint64_t; +#endif /* __arch64__ */ +#define KHRONOS_SUPPORT_INT64 1 +#define KHRONOS_SUPPORT_FLOAT 1 + +#elif 0 + +/* + * Hypothetical platform with no float or int64 support + */ +typedef int khronos_int32_t; +typedef unsigned int khronos_uint32_t; +#define KHRONOS_SUPPORT_INT64 0 +#define KHRONOS_SUPPORT_FLOAT 0 + +#else + +/* + * Generic fallback + */ +#include <stdint.h> +typedef int32_t khronos_int32_t; +typedef uint32_t khronos_uint32_t; +typedef int64_t khronos_int64_t; +typedef uint64_t khronos_uint64_t; +#define KHRONOS_SUPPORT_INT64 1 +#define KHRONOS_SUPPORT_FLOAT 1 + +#endif + + +/* + * Types that are (so far) the same on all platforms + */ +typedef signed char khronos_int8_t; +typedef unsigned char khronos_uint8_t; +typedef signed short int khronos_int16_t; +typedef unsigned short int khronos_uint16_t; +typedef signed long int khronos_intptr_t; +typedef unsigned long int khronos_uintptr_t; +typedef signed long int khronos_ssize_t; +typedef unsigned long int khronos_usize_t; + +#if KHRONOS_SUPPORT_FLOAT +/* + * Float type + */ +typedef float khronos_float_t; +#endif + +#if KHRONOS_SUPPORT_INT64 +/* Time types + * + * These types can be used to represent a time interval in nanoseconds or + * an absolute Unadjusted System Time. Unadjusted System Time is the number + * of nanoseconds since some arbitrary system event (e.g. since the last + * time the system booted). The Unadjusted System Time is an unsigned + * 64 bit value that wraps back to 0 every 584 years. Time intervals + * may be either signed or unsigned. + */ +typedef khronos_uint64_t khronos_utime_nanoseconds_t; +typedef khronos_int64_t khronos_stime_nanoseconds_t; +#endif + +/* + * Dummy value used to pad enum types to 32 bits. + */ +#ifndef KHRONOS_MAX_ENUM +#define KHRONOS_MAX_ENUM 0x7FFFFFFF +#endif + +/* + * Enumerated boolean type + * + * Values other than zero should be considered to be true.
Therefore + * comparisons should not be made against KHRONOS_TRUE. + */ +typedef enum { + KHRONOS_FALSE = 0, + KHRONOS_TRUE = 1, + KHRONOS_BOOLEAN_ENUM_FORCE_SIZE = KHRONOS_MAX_ENUM +} khronos_boolean_enum_t; + +#endif /* __khrplatform_h_ */ diff --git a/sources/khronos/vulkan/vk_platform.h b/sources/khronos/vulkan/vk_platform.h new file mode 100755 index 00000000..6dc5eb58 --- /dev/null +++ b/sources/khronos/vulkan/vk_platform.h @@ -0,0 +1,120 @@ +// +// File: vk_platform.h +// +/* +** Copyright (c) 2014-2017 The Khronos Group Inc. +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + + +#ifndef VK_PLATFORM_H_ +#define VK_PLATFORM_H_ + +#ifdef __cplusplus +extern "C" +{ +#endif // __cplusplus + +/* +*************************************************************************************************** +* Platform-specific directives and type declarations +*************************************************************************************************** +*/ + +/* Platform-specific calling convention macros. + * + * Platforms should define these so that Vulkan clients call Vulkan commands + * with the same calling conventions that the Vulkan implementation expects. + * + * VKAPI_ATTR - Placed before the return type in function declarations. + * Useful for C++11 and GCC/Clang-style function attribute syntax. + * VKAPI_CALL - Placed after the return type in function declarations. + * Useful for MSVC-style calling convention syntax. + * VKAPI_PTR - Placed between the '(' and '*' in function pointer types. + * + * Function declaration: VKAPI_ATTR void VKAPI_CALL vkCommand(void); + * Function pointer type: typedef void (VKAPI_PTR *PFN_vkCommand)(void); + */ +#if defined(_WIN32) + // On Windows, Vulkan commands use the stdcall convention + #define VKAPI_ATTR + #define VKAPI_CALL __stdcall + #define VKAPI_PTR VKAPI_CALL +#elif defined(__ANDROID__) && defined(__ARM_ARCH) && __ARM_ARCH < 7 + #error "Vulkan isn't supported for the 'armeabi' NDK ABI" +#elif defined(__ANDROID__) && defined(__ARM_ARCH) && __ARM_ARCH >= 7 && defined(__ARM_32BIT_STATE) + // On Android 32-bit ARM targets, Vulkan functions use the "hardfloat" + // calling convention, i.e. float parameters are passed in registers. This + // is true even if the rest of the application passes floats on the stack, + // as it does by default when compiling for the armeabi-v7a NDK ABI. 
+ #define VKAPI_ATTR __attribute__((pcs("aapcs-vfp"))) + #define VKAPI_CALL + #define VKAPI_PTR VKAPI_ATTR +#else + // On other platforms, use the default calling convention + #define VKAPI_ATTR + #define VKAPI_CALL + #define VKAPI_PTR +#endif + +#include <stddef.h> + +#if !defined(VK_NO_STDINT_H) + #if defined(_MSC_VER) && (_MSC_VER < 1600) + typedef signed __int8 int8_t; + typedef unsigned __int8 uint8_t; + typedef signed __int16 int16_t; + typedef unsigned __int16 uint16_t; + typedef signed __int32 int32_t; + typedef unsigned __int32 uint32_t; + typedef signed __int64 int64_t; + typedef unsigned __int64 uint64_t; + #else + #include <stdint.h> + #endif +#endif // !defined(VK_NO_STDINT_H) + +#ifdef __cplusplus +} // extern "C" +#endif // __cplusplus + +// Platform-specific headers required by platform window system extensions. +// These are enabled prior to #including "vulkan.h". The same enable then +// controls inclusion of the extension interfaces in vulkan.h. + +#ifdef VK_USE_PLATFORM_ANDROID_KHR +struct ANativeWindow; +#endif + +#ifdef VK_USE_PLATFORM_MIR_KHR +#include <mir_toolkit/client_types.h> +#endif + +#ifdef VK_USE_PLATFORM_WAYLAND_KHR +#include <wayland-client.h> +#endif + +#ifdef VK_USE_PLATFORM_WIN32_KHR +#include <windows.h> +#endif + +#ifdef VK_USE_PLATFORM_XLIB_KHR +#include <X11/Xlib.h> +#endif + +#ifdef VK_USE_PLATFORM_XCB_KHR +#include <xcb/xcb.h> +#endif + +#endif diff --git a/sources/khronos/vulkan/vulkan.h b/sources/khronos/vulkan/vulkan.h new file mode 100755 index 00000000..7813e4b3 --- /dev/null +++ b/sources/khronos/vulkan/vulkan.h @@ -0,0 +1,6860 @@ +#ifndef VULKAN_H_ +#define VULKAN_H_ 1 + +#ifdef __cplusplus +extern "C" { +#endif + +/* +** Copyright (c) 2015-2017 The Khronos Group Inc. +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. +** +*/ + + +#define VK_VERSION_1_0 1 +#include "vk_platform.h" + +#define VK_MAKE_VERSION(major, minor, patch) \ + (((major) << 22) | ((minor) << 12) | (patch)) + +// DEPRECATED: This define has been removed. Specific version defines (e.g. VK_API_VERSION_1_0), or the VK_MAKE_VERSION macro, should be used instead.
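+// Editorial worked example (not part of the upstream header): with the packing above, VK_MAKE_VERSION(1, 0, 61) evaluates to (1 << 22) | (0 << 12) | 61 == 0x40003D, and the VK_VERSION_MAJOR/VK_VERSION_MINOR/VK_VERSION_PATCH macros defined below recover 1, 0, and 61 respectively.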
+//#define VK_API_VERSION VK_MAKE_VERSION(1, 0, 0) // Patch version should always be set to 0 + +// Vulkan 1.0 version number +#define VK_API_VERSION_1_0 VK_MAKE_VERSION(1, 0, 0)// Patch version should always be set to 0 + +#define VK_VERSION_MAJOR(version) ((uint32_t)(version) >> 22) +#define VK_VERSION_MINOR(version) (((uint32_t)(version) >> 12) & 0x3ff) +#define VK_VERSION_PATCH(version) ((uint32_t)(version) & 0xfff) +// Version of this file +#define VK_HEADER_VERSION 61 + + +#define VK_NULL_HANDLE 0 + + + +#define VK_DEFINE_HANDLE(object) typedef struct object##_T* object; + + +#if !defined(VK_DEFINE_NON_DISPATCHABLE_HANDLE) +#if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__) + #define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) typedef struct object##_T *object; +#else + #define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) typedef uint64_t object; +#endif +#endif + + + +typedef uint32_t VkFlags; +typedef uint32_t VkBool32; +typedef uint64_t VkDeviceSize; +typedef uint32_t VkSampleMask; + +VK_DEFINE_HANDLE(VkInstance) +VK_DEFINE_HANDLE(VkPhysicalDevice) +VK_DEFINE_HANDLE(VkDevice) +VK_DEFINE_HANDLE(VkQueue) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSemaphore) +VK_DEFINE_HANDLE(VkCommandBuffer) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkFence) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDeviceMemory) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkBuffer) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkImage) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkEvent) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkQueryPool) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkBufferView) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkImageView) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkShaderModule) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPipelineCache) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPipelineLayout) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkRenderPass) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPipeline) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorSetLayout) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSampler) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorPool) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorSet) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkFramebuffer) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkCommandPool) + +#define VK_LOD_CLAMP_NONE 1000.0f +#define VK_REMAINING_MIP_LEVELS (~0U) +#define VK_REMAINING_ARRAY_LAYERS (~0U) +#define VK_WHOLE_SIZE (~0ULL) +#define VK_ATTACHMENT_UNUSED (~0U) +#define VK_TRUE 1 +#define VK_FALSE 0 +#define VK_QUEUE_FAMILY_IGNORED (~0U) +#define VK_SUBPASS_EXTERNAL (~0U) +#define VK_MAX_PHYSICAL_DEVICE_NAME_SIZE 256 +#define VK_UUID_SIZE 16 +#define VK_MAX_MEMORY_TYPES 32 +#define VK_MAX_MEMORY_HEAPS 16 +#define VK_MAX_EXTENSION_NAME_SIZE 256 +#define VK_MAX_DESCRIPTION_SIZE 256 + + +typedef enum VkPipelineCacheHeaderVersion { + VK_PIPELINE_CACHE_HEADER_VERSION_ONE = 1, + VK_PIPELINE_CACHE_HEADER_VERSION_BEGIN_RANGE = VK_PIPELINE_CACHE_HEADER_VERSION_ONE, + VK_PIPELINE_CACHE_HEADER_VERSION_END_RANGE = VK_PIPELINE_CACHE_HEADER_VERSION_ONE, + VK_PIPELINE_CACHE_HEADER_VERSION_RANGE_SIZE = (VK_PIPELINE_CACHE_HEADER_VERSION_ONE - VK_PIPELINE_CACHE_HEADER_VERSION_ONE + 1), + VK_PIPELINE_CACHE_HEADER_VERSION_MAX_ENUM = 0x7FFFFFFF +} VkPipelineCacheHeaderVersion; + +typedef enum VkResult { + VK_SUCCESS = 0, + VK_NOT_READY = 1, + VK_TIMEOUT = 2, + VK_EVENT_SET = 3, + VK_EVENT_RESET = 4, + VK_INCOMPLETE = 5, + VK_ERROR_OUT_OF_HOST_MEMORY = -1, + VK_ERROR_OUT_OF_DEVICE_MEMORY = -2, + VK_ERROR_INITIALIZATION_FAILED = -3, + 
VK_ERROR_DEVICE_LOST = -4, + VK_ERROR_MEMORY_MAP_FAILED = -5, + VK_ERROR_LAYER_NOT_PRESENT = -6, + VK_ERROR_EXTENSION_NOT_PRESENT = -7, + VK_ERROR_FEATURE_NOT_PRESENT = -8, + VK_ERROR_INCOMPATIBLE_DRIVER = -9, + VK_ERROR_TOO_MANY_OBJECTS = -10, + VK_ERROR_FORMAT_NOT_SUPPORTED = -11, + VK_ERROR_FRAGMENTED_POOL = -12, + VK_ERROR_SURFACE_LOST_KHR = -1000000000, + VK_ERROR_NATIVE_WINDOW_IN_USE_KHR = -1000000001, + VK_SUBOPTIMAL_KHR = 1000001003, + VK_ERROR_OUT_OF_DATE_KHR = -1000001004, + VK_ERROR_INCOMPATIBLE_DISPLAY_KHR = -1000003001, + VK_ERROR_VALIDATION_FAILED_EXT = -1000011001, + VK_ERROR_INVALID_SHADER_NV = -1000012000, + VK_ERROR_OUT_OF_POOL_MEMORY_KHR = -1000069000, + VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR = -1000072003, + VK_RESULT_BEGIN_RANGE = VK_ERROR_FRAGMENTED_POOL, + VK_RESULT_END_RANGE = VK_INCOMPLETE, + VK_RESULT_RANGE_SIZE = (VK_INCOMPLETE - VK_ERROR_FRAGMENTED_POOL + 1), + VK_RESULT_MAX_ENUM = 0x7FFFFFFF +} VkResult; + +typedef enum VkStructureType { + VK_STRUCTURE_TYPE_APPLICATION_INFO = 0, + VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO = 1, + VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO = 2, + VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO = 3, + VK_STRUCTURE_TYPE_SUBMIT_INFO = 4, + VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO = 5, + VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE = 6, + VK_STRUCTURE_TYPE_BIND_SPARSE_INFO = 7, + VK_STRUCTURE_TYPE_FENCE_CREATE_INFO = 8, + VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO = 9, + VK_STRUCTURE_TYPE_EVENT_CREATE_INFO = 10, + VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO = 11, + VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO = 12, + VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO = 13, + VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO = 14, + VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO = 15, + VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO = 16, + VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO = 17, + VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO = 18, + VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO = 19, + VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO = 20, + VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO = 21, + VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO = 22, + VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO = 23, + VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO = 24, + VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO = 25, + VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO = 26, + VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO = 27, + VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO = 28, + VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO = 29, + VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO = 30, + VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO = 31, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO = 32, + VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO = 33, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO = 34, + VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET = 35, + VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET = 36, + VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO = 37, + VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO = 38, + VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO = 39, + VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO = 40, + VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO = 41, + VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO = 42, + VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO = 43, + VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER = 44, + VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER = 45, + VK_STRUCTURE_TYPE_MEMORY_BARRIER = 46, + VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO = 47, + VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO = 48, + 
VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR = 1000001000, + VK_STRUCTURE_TYPE_PRESENT_INFO_KHR = 1000001001, + VK_STRUCTURE_TYPE_DISPLAY_MODE_CREATE_INFO_KHR = 1000002000, + VK_STRUCTURE_TYPE_DISPLAY_SURFACE_CREATE_INFO_KHR = 1000002001, + VK_STRUCTURE_TYPE_DISPLAY_PRESENT_INFO_KHR = 1000003000, + VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR = 1000004000, + VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR = 1000005000, + VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR = 1000006000, + VK_STRUCTURE_TYPE_MIR_SURFACE_CREATE_INFO_KHR = 1000007000, + VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR = 1000008000, + VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR = 1000009000, + VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT = 1000011000, + VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_RASTERIZATION_ORDER_AMD = 1000018000, + VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_NAME_INFO_EXT = 1000022000, + VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_TAG_INFO_EXT = 1000022001, + VK_STRUCTURE_TYPE_DEBUG_MARKER_MARKER_INFO_EXT = 1000022002, + VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_IMAGE_CREATE_INFO_NV = 1000026000, + VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_BUFFER_CREATE_INFO_NV = 1000026001, + VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_MEMORY_ALLOCATE_INFO_NV = 1000026002, + VK_STRUCTURE_TYPE_TEXTURE_LOD_GATHER_FORMAT_PROPERTIES_AMD = 1000041000, + VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO_KHX = 1000053000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES_KHX = 1000053001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES_KHX = 1000053002, + VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_NV = 1000056000, + VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_NV = 1000056001, + VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_NV = 1000057000, + VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_NV = 1000057001, + VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_NV = 1000058000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2_KHR = 1000059000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2_KHR = 1000059001, + VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2_KHR = 1000059002, + VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2_KHR = 1000059003, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2_KHR = 1000059004, + VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2_KHR = 1000059005, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR = 1000059006, + VK_STRUCTURE_TYPE_SPARSE_IMAGE_FORMAT_PROPERTIES_2_KHR = 1000059007, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2_KHR = 1000059008, + VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHX = 1000060000, + VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO_KHX = 1000060003, + VK_STRUCTURE_TYPE_DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO_KHX = 1000060004, + VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO_KHX = 1000060005, + VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO_KHX = 1000060006, + VK_STRUCTURE_TYPE_ACQUIRE_NEXT_IMAGE_INFO_KHX = 1000060010, + VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO_KHX = 1000060013, + VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO_KHX = 1000060014, + VK_STRUCTURE_TYPE_DEVICE_GROUP_PRESENT_CAPABILITIES_KHX = 1000060007, + VK_STRUCTURE_TYPE_IMAGE_SWAPCHAIN_CREATE_INFO_KHX = 1000060008, + VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_SWAPCHAIN_INFO_KHX = 1000060009, + VK_STRUCTURE_TYPE_DEVICE_GROUP_PRESENT_INFO_KHX = 1000060011, + VK_STRUCTURE_TYPE_DEVICE_GROUP_SWAPCHAIN_CREATE_INFO_KHX = 1000060012, + VK_STRUCTURE_TYPE_VALIDATION_FLAGS_EXT = 1000061000, + VK_STRUCTURE_TYPE_VI_SURFACE_CREATE_INFO_NN = 1000062000, + 
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES_KHX = 1000070000, + VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO_KHX = 1000070001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO_KHR = 1000071000, + VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES_KHR = 1000071001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO_KHR = 1000071002, + VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES_KHR = 1000071003, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES_KHR = 1000071004, + VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO_KHR = 1000072000, + VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_KHR = 1000072001, + VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR = 1000072002, + VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_KHR = 1000073000, + VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_KHR = 1000073001, + VK_STRUCTURE_TYPE_MEMORY_WIN32_HANDLE_PROPERTIES_KHR = 1000073002, + VK_STRUCTURE_TYPE_MEMORY_GET_WIN32_HANDLE_INFO_KHR = 1000073003, + VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR = 1000074000, + VK_STRUCTURE_TYPE_MEMORY_FD_PROPERTIES_KHR = 1000074001, + VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR = 1000074002, + VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_KHR = 1000075000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO_KHR = 1000076000, + VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES_KHR = 1000076001, + VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO_KHR = 1000077000, + VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR = 1000078000, + VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR = 1000078001, + VK_STRUCTURE_TYPE_D3D12_FENCE_SUBMIT_INFO_KHR = 1000078002, + VK_STRUCTURE_TYPE_SEMAPHORE_GET_WIN32_HANDLE_INFO_KHR = 1000078003, + VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR = 1000079000, + VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR = 1000079001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR = 1000080000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES_KHR = 1000083000, + VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR = 1000084000, + VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR = 1000085000, + VK_STRUCTURE_TYPE_OBJECT_TABLE_CREATE_INFO_NVX = 1000086000, + VK_STRUCTURE_TYPE_INDIRECT_COMMANDS_LAYOUT_CREATE_INFO_NVX = 1000086001, + VK_STRUCTURE_TYPE_CMD_PROCESS_COMMANDS_INFO_NVX = 1000086002, + VK_STRUCTURE_TYPE_CMD_RESERVE_SPACE_FOR_COMMANDS_INFO_NVX = 1000086003, + VK_STRUCTURE_TYPE_DEVICE_GENERATED_COMMANDS_LIMITS_NVX = 1000086004, + VK_STRUCTURE_TYPE_DEVICE_GENERATED_COMMANDS_FEATURES_NVX = 1000086005, + VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_W_SCALING_STATE_CREATE_INFO_NV = 1000087000, + VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_EXT = 1000090000, + VK_STRUCTURE_TYPE_DISPLAY_POWER_INFO_EXT = 1000091000, + VK_STRUCTURE_TYPE_DEVICE_EVENT_INFO_EXT = 1000091001, + VK_STRUCTURE_TYPE_DISPLAY_EVENT_INFO_EXT = 1000091002, + VK_STRUCTURE_TYPE_SWAPCHAIN_COUNTER_CREATE_INFO_EXT = 1000091003, + VK_STRUCTURE_TYPE_PRESENT_TIMES_INFO_GOOGLE = 1000092000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PER_VIEW_ATTRIBUTES_PROPERTIES_NVX = 1000097000, + VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SWIZZLE_STATE_CREATE_INFO_NV = 1000098000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DISCARD_RECTANGLE_PROPERTIES_EXT = 1000099000, + VK_STRUCTURE_TYPE_PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT = 1000099001, + VK_STRUCTURE_TYPE_HDR_METADATA_EXT = 1000105000, + VK_STRUCTURE_TYPE_SHARED_PRESENT_SURFACE_CAPABILITIES_KHR = 1000111000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO_KHR = 1000112000, + 
VK_STRUCTURE_TYPE_EXTERNAL_FENCE_PROPERTIES_KHR = 1000112001, + VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO_KHR = 1000113000, + VK_STRUCTURE_TYPE_IMPORT_FENCE_WIN32_HANDLE_INFO_KHR = 1000114000, + VK_STRUCTURE_TYPE_EXPORT_FENCE_WIN32_HANDLE_INFO_KHR = 1000114001, + VK_STRUCTURE_TYPE_FENCE_GET_WIN32_HANDLE_INFO_KHR = 1000114002, + VK_STRUCTURE_TYPE_IMPORT_FENCE_FD_INFO_KHR = 1000115000, + VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR = 1000115001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES_KHR = 1000117000, + VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO_KHR = 1000117001, + VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO_KHR = 1000117002, + VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO_KHR = 1000117003, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SURFACE_INFO_2_KHR = 1000119000, + VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR = 1000119001, + VK_STRUCTURE_TYPE_SURFACE_FORMAT_2_KHR = 1000119002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES_KHR = 1000120000, + VK_STRUCTURE_TYPE_IOS_SURFACE_CREATE_INFO_MVK = 1000122000, + VK_STRUCTURE_TYPE_MACOS_SURFACE_CREATE_INFO_MVK = 1000123000, + VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR = 1000127000, + VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR = 1000127001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES_EXT = 1000130000, + VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO_EXT = 1000130001, + VK_STRUCTURE_TYPE_SAMPLE_LOCATIONS_INFO_EXT = 1000143000, + VK_STRUCTURE_TYPE_RENDER_PASS_SAMPLE_LOCATIONS_BEGIN_INFO_EXT = 1000143001, + VK_STRUCTURE_TYPE_PIPELINE_SAMPLE_LOCATIONS_STATE_CREATE_INFO_EXT = 1000143002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLE_LOCATIONS_PROPERTIES_EXT = 1000143003, + VK_STRUCTURE_TYPE_MULTISAMPLE_PROPERTIES_EXT = 1000143004, + VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR = 1000146000, + VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR = 1000146001, + VK_STRUCTURE_TYPE_IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2_KHR = 1000146002, + VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR = 1000146003, + VK_STRUCTURE_TYPE_SPARSE_IMAGE_MEMORY_REQUIREMENTS_2_KHR = 1000146004, + VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO_KHR = 1000147000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT = 1000148000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_PROPERTIES_EXT = 1000148001, + VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_ADVANCED_STATE_CREATE_INFO_EXT = 1000148002, + VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_TO_COLOR_STATE_CREATE_INFO_NV = 1000149000, + VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_MODULATION_STATE_CREATE_INFO_NV = 1000152000, + VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO_KHR = 1000156000, + VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO_KHR = 1000156001, + VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO_KHR = 1000156002, + VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO_KHR = 1000156003, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES_KHR = 1000156004, + VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES_KHR = 1000156005, + VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR = 1000157000, + VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR = 1000157001, + VK_STRUCTURE_TYPE_VALIDATION_CACHE_CREATE_INFO_EXT = 1000160000, + VK_STRUCTURE_TYPE_SHADER_MODULE_VALIDATION_CACHE_CREATE_INFO_EXT = 1000160001, + VK_STRUCTURE_TYPE_BEGIN_RANGE = VK_STRUCTURE_TYPE_APPLICATION_INFO, + VK_STRUCTURE_TYPE_END_RANGE = VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO, + 
VK_STRUCTURE_TYPE_RANGE_SIZE = (VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO - VK_STRUCTURE_TYPE_APPLICATION_INFO + 1), + VK_STRUCTURE_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkStructureType; + +typedef enum VkSystemAllocationScope { + VK_SYSTEM_ALLOCATION_SCOPE_COMMAND = 0, + VK_SYSTEM_ALLOCATION_SCOPE_OBJECT = 1, + VK_SYSTEM_ALLOCATION_SCOPE_CACHE = 2, + VK_SYSTEM_ALLOCATION_SCOPE_DEVICE = 3, + VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE = 4, + VK_SYSTEM_ALLOCATION_SCOPE_BEGIN_RANGE = VK_SYSTEM_ALLOCATION_SCOPE_COMMAND, + VK_SYSTEM_ALLOCATION_SCOPE_END_RANGE = VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE, + VK_SYSTEM_ALLOCATION_SCOPE_RANGE_SIZE = (VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE - VK_SYSTEM_ALLOCATION_SCOPE_COMMAND + 1), + VK_SYSTEM_ALLOCATION_SCOPE_MAX_ENUM = 0x7FFFFFFF +} VkSystemAllocationScope; + +typedef enum VkInternalAllocationType { + VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE = 0, + VK_INTERNAL_ALLOCATION_TYPE_BEGIN_RANGE = VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE, + VK_INTERNAL_ALLOCATION_TYPE_END_RANGE = VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE, + VK_INTERNAL_ALLOCATION_TYPE_RANGE_SIZE = (VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE - VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE + 1), + VK_INTERNAL_ALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkInternalAllocationType; + +typedef enum VkFormat { + VK_FORMAT_UNDEFINED = 0, + VK_FORMAT_R4G4_UNORM_PACK8 = 1, + VK_FORMAT_R4G4B4A4_UNORM_PACK16 = 2, + VK_FORMAT_B4G4R4A4_UNORM_PACK16 = 3, + VK_FORMAT_R5G6B5_UNORM_PACK16 = 4, + VK_FORMAT_B5G6R5_UNORM_PACK16 = 5, + VK_FORMAT_R5G5B5A1_UNORM_PACK16 = 6, + VK_FORMAT_B5G5R5A1_UNORM_PACK16 = 7, + VK_FORMAT_A1R5G5B5_UNORM_PACK16 = 8, + VK_FORMAT_R8_UNORM = 9, + VK_FORMAT_R8_SNORM = 10, + VK_FORMAT_R8_USCALED = 11, + VK_FORMAT_R8_SSCALED = 12, + VK_FORMAT_R8_UINT = 13, + VK_FORMAT_R8_SINT = 14, + VK_FORMAT_R8_SRGB = 15, + VK_FORMAT_R8G8_UNORM = 16, + VK_FORMAT_R8G8_SNORM = 17, + VK_FORMAT_R8G8_USCALED = 18, + VK_FORMAT_R8G8_SSCALED = 19, + VK_FORMAT_R8G8_UINT = 20, + VK_FORMAT_R8G8_SINT = 21, + VK_FORMAT_R8G8_SRGB = 22, + VK_FORMAT_R8G8B8_UNORM = 23, + VK_FORMAT_R8G8B8_SNORM = 24, + VK_FORMAT_R8G8B8_USCALED = 25, + VK_FORMAT_R8G8B8_SSCALED = 26, + VK_FORMAT_R8G8B8_UINT = 27, + VK_FORMAT_R8G8B8_SINT = 28, + VK_FORMAT_R8G8B8_SRGB = 29, + VK_FORMAT_B8G8R8_UNORM = 30, + VK_FORMAT_B8G8R8_SNORM = 31, + VK_FORMAT_B8G8R8_USCALED = 32, + VK_FORMAT_B8G8R8_SSCALED = 33, + VK_FORMAT_B8G8R8_UINT = 34, + VK_FORMAT_B8G8R8_SINT = 35, + VK_FORMAT_B8G8R8_SRGB = 36, + VK_FORMAT_R8G8B8A8_UNORM = 37, + VK_FORMAT_R8G8B8A8_SNORM = 38, + VK_FORMAT_R8G8B8A8_USCALED = 39, + VK_FORMAT_R8G8B8A8_SSCALED = 40, + VK_FORMAT_R8G8B8A8_UINT = 41, + VK_FORMAT_R8G8B8A8_SINT = 42, + VK_FORMAT_R8G8B8A8_SRGB = 43, + VK_FORMAT_B8G8R8A8_UNORM = 44, + VK_FORMAT_B8G8R8A8_SNORM = 45, + VK_FORMAT_B8G8R8A8_USCALED = 46, + VK_FORMAT_B8G8R8A8_SSCALED = 47, + VK_FORMAT_B8G8R8A8_UINT = 48, + VK_FORMAT_B8G8R8A8_SINT = 49, + VK_FORMAT_B8G8R8A8_SRGB = 50, + VK_FORMAT_A8B8G8R8_UNORM_PACK32 = 51, + VK_FORMAT_A8B8G8R8_SNORM_PACK32 = 52, + VK_FORMAT_A8B8G8R8_USCALED_PACK32 = 53, + VK_FORMAT_A8B8G8R8_SSCALED_PACK32 = 54, + VK_FORMAT_A8B8G8R8_UINT_PACK32 = 55, + VK_FORMAT_A8B8G8R8_SINT_PACK32 = 56, + VK_FORMAT_A8B8G8R8_SRGB_PACK32 = 57, + VK_FORMAT_A2R10G10B10_UNORM_PACK32 = 58, + VK_FORMAT_A2R10G10B10_SNORM_PACK32 = 59, + VK_FORMAT_A2R10G10B10_USCALED_PACK32 = 60, + VK_FORMAT_A2R10G10B10_SSCALED_PACK32 = 61, + VK_FORMAT_A2R10G10B10_UINT_PACK32 = 62, + VK_FORMAT_A2R10G10B10_SINT_PACK32 = 63, + VK_FORMAT_A2B10G10R10_UNORM_PACK32 = 64, + VK_FORMAT_A2B10G10R10_SNORM_PACK32 = 65, + 
VK_FORMAT_A2B10G10R10_USCALED_PACK32 = 66, + VK_FORMAT_A2B10G10R10_SSCALED_PACK32 = 67, + VK_FORMAT_A2B10G10R10_UINT_PACK32 = 68, + VK_FORMAT_A2B10G10R10_SINT_PACK32 = 69, + VK_FORMAT_R16_UNORM = 70, + VK_FORMAT_R16_SNORM = 71, + VK_FORMAT_R16_USCALED = 72, + VK_FORMAT_R16_SSCALED = 73, + VK_FORMAT_R16_UINT = 74, + VK_FORMAT_R16_SINT = 75, + VK_FORMAT_R16_SFLOAT = 76, + VK_FORMAT_R16G16_UNORM = 77, + VK_FORMAT_R16G16_SNORM = 78, + VK_FORMAT_R16G16_USCALED = 79, + VK_FORMAT_R16G16_SSCALED = 80, + VK_FORMAT_R16G16_UINT = 81, + VK_FORMAT_R16G16_SINT = 82, + VK_FORMAT_R16G16_SFLOAT = 83, + VK_FORMAT_R16G16B16_UNORM = 84, + VK_FORMAT_R16G16B16_SNORM = 85, + VK_FORMAT_R16G16B16_USCALED = 86, + VK_FORMAT_R16G16B16_SSCALED = 87, + VK_FORMAT_R16G16B16_UINT = 88, + VK_FORMAT_R16G16B16_SINT = 89, + VK_FORMAT_R16G16B16_SFLOAT = 90, + VK_FORMAT_R16G16B16A16_UNORM = 91, + VK_FORMAT_R16G16B16A16_SNORM = 92, + VK_FORMAT_R16G16B16A16_USCALED = 93, + VK_FORMAT_R16G16B16A16_SSCALED = 94, + VK_FORMAT_R16G16B16A16_UINT = 95, + VK_FORMAT_R16G16B16A16_SINT = 96, + VK_FORMAT_R16G16B16A16_SFLOAT = 97, + VK_FORMAT_R32_UINT = 98, + VK_FORMAT_R32_SINT = 99, + VK_FORMAT_R32_SFLOAT = 100, + VK_FORMAT_R32G32_UINT = 101, + VK_FORMAT_R32G32_SINT = 102, + VK_FORMAT_R32G32_SFLOAT = 103, + VK_FORMAT_R32G32B32_UINT = 104, + VK_FORMAT_R32G32B32_SINT = 105, + VK_FORMAT_R32G32B32_SFLOAT = 106, + VK_FORMAT_R32G32B32A32_UINT = 107, + VK_FORMAT_R32G32B32A32_SINT = 108, + VK_FORMAT_R32G32B32A32_SFLOAT = 109, + VK_FORMAT_R64_UINT = 110, + VK_FORMAT_R64_SINT = 111, + VK_FORMAT_R64_SFLOAT = 112, + VK_FORMAT_R64G64_UINT = 113, + VK_FORMAT_R64G64_SINT = 114, + VK_FORMAT_R64G64_SFLOAT = 115, + VK_FORMAT_R64G64B64_UINT = 116, + VK_FORMAT_R64G64B64_SINT = 117, + VK_FORMAT_R64G64B64_SFLOAT = 118, + VK_FORMAT_R64G64B64A64_UINT = 119, + VK_FORMAT_R64G64B64A64_SINT = 120, + VK_FORMAT_R64G64B64A64_SFLOAT = 121, + VK_FORMAT_B10G11R11_UFLOAT_PACK32 = 122, + VK_FORMAT_E5B9G9R9_UFLOAT_PACK32 = 123, + VK_FORMAT_D16_UNORM = 124, + VK_FORMAT_X8_D24_UNORM_PACK32 = 125, + VK_FORMAT_D32_SFLOAT = 126, + VK_FORMAT_S8_UINT = 127, + VK_FORMAT_D16_UNORM_S8_UINT = 128, + VK_FORMAT_D24_UNORM_S8_UINT = 129, + VK_FORMAT_D32_SFLOAT_S8_UINT = 130, + VK_FORMAT_BC1_RGB_UNORM_BLOCK = 131, + VK_FORMAT_BC1_RGB_SRGB_BLOCK = 132, + VK_FORMAT_BC1_RGBA_UNORM_BLOCK = 133, + VK_FORMAT_BC1_RGBA_SRGB_BLOCK = 134, + VK_FORMAT_BC2_UNORM_BLOCK = 135, + VK_FORMAT_BC2_SRGB_BLOCK = 136, + VK_FORMAT_BC3_UNORM_BLOCK = 137, + VK_FORMAT_BC3_SRGB_BLOCK = 138, + VK_FORMAT_BC4_UNORM_BLOCK = 139, + VK_FORMAT_BC4_SNORM_BLOCK = 140, + VK_FORMAT_BC5_UNORM_BLOCK = 141, + VK_FORMAT_BC5_SNORM_BLOCK = 142, + VK_FORMAT_BC6H_UFLOAT_BLOCK = 143, + VK_FORMAT_BC6H_SFLOAT_BLOCK = 144, + VK_FORMAT_BC7_UNORM_BLOCK = 145, + VK_FORMAT_BC7_SRGB_BLOCK = 146, + VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK = 147, + VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK = 148, + VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK = 149, + VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK = 150, + VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK = 151, + VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK = 152, + VK_FORMAT_EAC_R11_UNORM_BLOCK = 153, + VK_FORMAT_EAC_R11_SNORM_BLOCK = 154, + VK_FORMAT_EAC_R11G11_UNORM_BLOCK = 155, + VK_FORMAT_EAC_R11G11_SNORM_BLOCK = 156, + VK_FORMAT_ASTC_4x4_UNORM_BLOCK = 157, + VK_FORMAT_ASTC_4x4_SRGB_BLOCK = 158, + VK_FORMAT_ASTC_5x4_UNORM_BLOCK = 159, + VK_FORMAT_ASTC_5x4_SRGB_BLOCK = 160, + VK_FORMAT_ASTC_5x5_UNORM_BLOCK = 161, + VK_FORMAT_ASTC_5x5_SRGB_BLOCK = 162, + VK_FORMAT_ASTC_6x5_UNORM_BLOCK = 163, + VK_FORMAT_ASTC_6x5_SRGB_BLOCK = 164, + 
VK_FORMAT_ASTC_6x6_UNORM_BLOCK = 165, + VK_FORMAT_ASTC_6x6_SRGB_BLOCK = 166, + VK_FORMAT_ASTC_8x5_UNORM_BLOCK = 167, + VK_FORMAT_ASTC_8x5_SRGB_BLOCK = 168, + VK_FORMAT_ASTC_8x6_UNORM_BLOCK = 169, + VK_FORMAT_ASTC_8x6_SRGB_BLOCK = 170, + VK_FORMAT_ASTC_8x8_UNORM_BLOCK = 171, + VK_FORMAT_ASTC_8x8_SRGB_BLOCK = 172, + VK_FORMAT_ASTC_10x5_UNORM_BLOCK = 173, + VK_FORMAT_ASTC_10x5_SRGB_BLOCK = 174, + VK_FORMAT_ASTC_10x6_UNORM_BLOCK = 175, + VK_FORMAT_ASTC_10x6_SRGB_BLOCK = 176, + VK_FORMAT_ASTC_10x8_UNORM_BLOCK = 177, + VK_FORMAT_ASTC_10x8_SRGB_BLOCK = 178, + VK_FORMAT_ASTC_10x10_UNORM_BLOCK = 179, + VK_FORMAT_ASTC_10x10_SRGB_BLOCK = 180, + VK_FORMAT_ASTC_12x10_UNORM_BLOCK = 181, + VK_FORMAT_ASTC_12x10_SRGB_BLOCK = 182, + VK_FORMAT_ASTC_12x12_UNORM_BLOCK = 183, + VK_FORMAT_ASTC_12x12_SRGB_BLOCK = 184, + VK_FORMAT_PVRTC1_2BPP_UNORM_BLOCK_IMG = 1000054000, + VK_FORMAT_PVRTC1_4BPP_UNORM_BLOCK_IMG = 1000054001, + VK_FORMAT_PVRTC2_2BPP_UNORM_BLOCK_IMG = 1000054002, + VK_FORMAT_PVRTC2_4BPP_UNORM_BLOCK_IMG = 1000054003, + VK_FORMAT_PVRTC1_2BPP_SRGB_BLOCK_IMG = 1000054004, + VK_FORMAT_PVRTC1_4BPP_SRGB_BLOCK_IMG = 1000054005, + VK_FORMAT_PVRTC2_2BPP_SRGB_BLOCK_IMG = 1000054006, + VK_FORMAT_PVRTC2_4BPP_SRGB_BLOCK_IMG = 1000054007, + VK_FORMAT_G8B8G8R8_422_UNORM_KHR = 1000156000, + VK_FORMAT_B8G8R8G8_422_UNORM_KHR = 1000156001, + VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM_KHR = 1000156002, + VK_FORMAT_G8_B8R8_2PLANE_420_UNORM_KHR = 1000156003, + VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM_KHR = 1000156004, + VK_FORMAT_G8_B8R8_2PLANE_422_UNORM_KHR = 1000156005, + VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM_KHR = 1000156006, + VK_FORMAT_R10X6_UNORM_PACK16_KHR = 1000156007, + VK_FORMAT_R10X6G10X6_UNORM_2PACK16_KHR = 1000156008, + VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16_KHR = 1000156009, + VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16_KHR = 1000156010, + VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16_KHR = 1000156011, + VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16_KHR = 1000156012, + VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16_KHR = 1000156013, + VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16_KHR = 1000156014, + VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16_KHR = 1000156015, + VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16_KHR = 1000156016, + VK_FORMAT_R12X4_UNORM_PACK16_KHR = 1000156017, + VK_FORMAT_R12X4G12X4_UNORM_2PACK16_KHR = 1000156018, + VK_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16_KHR = 1000156019, + VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16_KHR = 1000156020, + VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16_KHR = 1000156021, + VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16_KHR = 1000156022, + VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16_KHR = 1000156023, + VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16_KHR = 1000156024, + VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16_KHR = 1000156025, + VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16_KHR = 1000156026, + VK_FORMAT_G16B16G16R16_422_UNORM_KHR = 1000156027, + VK_FORMAT_B16G16R16G16_422_UNORM_KHR = 1000156028, + VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM_KHR = 1000156029, + VK_FORMAT_G16_B16R16_2PLANE_420_UNORM_KHR = 1000156030, + VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM_KHR = 1000156031, + VK_FORMAT_G16_B16R16_2PLANE_422_UNORM_KHR = 1000156032, + VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM_KHR = 1000156033, + VK_FORMAT_BEGIN_RANGE = VK_FORMAT_UNDEFINED, + VK_FORMAT_END_RANGE = VK_FORMAT_ASTC_12x12_SRGB_BLOCK, + VK_FORMAT_RANGE_SIZE = (VK_FORMAT_ASTC_12x12_SRGB_BLOCK - VK_FORMAT_UNDEFINED + 1), + 
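/*
 * The *_BEGIN_RANGE / *_END_RANGE / *_RANGE_SIZE aliases here (and in the other
 * enums of this header) span only the core, consecutively numbered values; they
 * deliberately exclude extension enumerants such as the PVRTC and KHR ycbcr
 * formats above. Extension values follow the registry numbering rule:
 *
 *   value = 1000000000 + (extension_number - 1) * 1000 + offset
 *
 * e.g. VK_FORMAT_PVRTC1_2BPP_UNORM_BLOCK_IMG = 1000000000 + (55 - 1) * 1000 + 0
 *                                            = 1000054000   (extension #55)
 */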
VK_FORMAT_MAX_ENUM = 0x7FFFFFFF +} VkFormat; + +typedef enum VkImageType { + VK_IMAGE_TYPE_1D = 0, + VK_IMAGE_TYPE_2D = 1, + VK_IMAGE_TYPE_3D = 2, + VK_IMAGE_TYPE_BEGIN_RANGE = VK_IMAGE_TYPE_1D, + VK_IMAGE_TYPE_END_RANGE = VK_IMAGE_TYPE_3D, + VK_IMAGE_TYPE_RANGE_SIZE = (VK_IMAGE_TYPE_3D - VK_IMAGE_TYPE_1D + 1), + VK_IMAGE_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkImageType; + +typedef enum VkImageTiling { + VK_IMAGE_TILING_OPTIMAL = 0, + VK_IMAGE_TILING_LINEAR = 1, + VK_IMAGE_TILING_BEGIN_RANGE = VK_IMAGE_TILING_OPTIMAL, + VK_IMAGE_TILING_END_RANGE = VK_IMAGE_TILING_LINEAR, + VK_IMAGE_TILING_RANGE_SIZE = (VK_IMAGE_TILING_LINEAR - VK_IMAGE_TILING_OPTIMAL + 1), + VK_IMAGE_TILING_MAX_ENUM = 0x7FFFFFFF +} VkImageTiling; + +typedef enum VkPhysicalDeviceType { + VK_PHYSICAL_DEVICE_TYPE_OTHER = 0, + VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU = 1, + VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU = 2, + VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU = 3, + VK_PHYSICAL_DEVICE_TYPE_CPU = 4, + VK_PHYSICAL_DEVICE_TYPE_BEGIN_RANGE = VK_PHYSICAL_DEVICE_TYPE_OTHER, + VK_PHYSICAL_DEVICE_TYPE_END_RANGE = VK_PHYSICAL_DEVICE_TYPE_CPU, + VK_PHYSICAL_DEVICE_TYPE_RANGE_SIZE = (VK_PHYSICAL_DEVICE_TYPE_CPU - VK_PHYSICAL_DEVICE_TYPE_OTHER + 1), + VK_PHYSICAL_DEVICE_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkPhysicalDeviceType; + +typedef enum VkQueryType { + VK_QUERY_TYPE_OCCLUSION = 0, + VK_QUERY_TYPE_PIPELINE_STATISTICS = 1, + VK_QUERY_TYPE_TIMESTAMP = 2, + VK_QUERY_TYPE_BEGIN_RANGE = VK_QUERY_TYPE_OCCLUSION, + VK_QUERY_TYPE_END_RANGE = VK_QUERY_TYPE_TIMESTAMP, + VK_QUERY_TYPE_RANGE_SIZE = (VK_QUERY_TYPE_TIMESTAMP - VK_QUERY_TYPE_OCCLUSION + 1), + VK_QUERY_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkQueryType; + +typedef enum VkSharingMode { + VK_SHARING_MODE_EXCLUSIVE = 0, + VK_SHARING_MODE_CONCURRENT = 1, + VK_SHARING_MODE_BEGIN_RANGE = VK_SHARING_MODE_EXCLUSIVE, + VK_SHARING_MODE_END_RANGE = VK_SHARING_MODE_CONCURRENT, + VK_SHARING_MODE_RANGE_SIZE = (VK_SHARING_MODE_CONCURRENT - VK_SHARING_MODE_EXCLUSIVE + 1), + VK_SHARING_MODE_MAX_ENUM = 0x7FFFFFFF +} VkSharingMode; + +typedef enum VkImageLayout { + VK_IMAGE_LAYOUT_UNDEFINED = 0, + VK_IMAGE_LAYOUT_GENERAL = 1, + VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL = 2, + VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL = 3, + VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL = 4, + VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL = 5, + VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL = 6, + VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL = 7, + VK_IMAGE_LAYOUT_PREINITIALIZED = 8, + VK_IMAGE_LAYOUT_PRESENT_SRC_KHR = 1000001002, + VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR = 1000111000, + VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL_KHR = 1000117000, + VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL_KHR = 1000117001, + VK_IMAGE_LAYOUT_BEGIN_RANGE = VK_IMAGE_LAYOUT_UNDEFINED, + VK_IMAGE_LAYOUT_END_RANGE = VK_IMAGE_LAYOUT_PREINITIALIZED, + VK_IMAGE_LAYOUT_RANGE_SIZE = (VK_IMAGE_LAYOUT_PREINITIALIZED - VK_IMAGE_LAYOUT_UNDEFINED + 1), + VK_IMAGE_LAYOUT_MAX_ENUM = 0x7FFFFFFF +} VkImageLayout; + +typedef enum VkImageViewType { + VK_IMAGE_VIEW_TYPE_1D = 0, + VK_IMAGE_VIEW_TYPE_2D = 1, + VK_IMAGE_VIEW_TYPE_3D = 2, + VK_IMAGE_VIEW_TYPE_CUBE = 3, + VK_IMAGE_VIEW_TYPE_1D_ARRAY = 4, + VK_IMAGE_VIEW_TYPE_2D_ARRAY = 5, + VK_IMAGE_VIEW_TYPE_CUBE_ARRAY = 6, + VK_IMAGE_VIEW_TYPE_BEGIN_RANGE = VK_IMAGE_VIEW_TYPE_1D, + VK_IMAGE_VIEW_TYPE_END_RANGE = VK_IMAGE_VIEW_TYPE_CUBE_ARRAY, + VK_IMAGE_VIEW_TYPE_RANGE_SIZE = (VK_IMAGE_VIEW_TYPE_CUBE_ARRAY - VK_IMAGE_VIEW_TYPE_1D + 1), + VK_IMAGE_VIEW_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkImageViewType; + +typedef enum 
VkComponentSwizzle { + VK_COMPONENT_SWIZZLE_IDENTITY = 0, + VK_COMPONENT_SWIZZLE_ZERO = 1, + VK_COMPONENT_SWIZZLE_ONE = 2, + VK_COMPONENT_SWIZZLE_R = 3, + VK_COMPONENT_SWIZZLE_G = 4, + VK_COMPONENT_SWIZZLE_B = 5, + VK_COMPONENT_SWIZZLE_A = 6, + VK_COMPONENT_SWIZZLE_BEGIN_RANGE = VK_COMPONENT_SWIZZLE_IDENTITY, + VK_COMPONENT_SWIZZLE_END_RANGE = VK_COMPONENT_SWIZZLE_A, + VK_COMPONENT_SWIZZLE_RANGE_SIZE = (VK_COMPONENT_SWIZZLE_A - VK_COMPONENT_SWIZZLE_IDENTITY + 1), + VK_COMPONENT_SWIZZLE_MAX_ENUM = 0x7FFFFFFF +} VkComponentSwizzle; + +typedef enum VkVertexInputRate { + VK_VERTEX_INPUT_RATE_VERTEX = 0, + VK_VERTEX_INPUT_RATE_INSTANCE = 1, + VK_VERTEX_INPUT_RATE_BEGIN_RANGE = VK_VERTEX_INPUT_RATE_VERTEX, + VK_VERTEX_INPUT_RATE_END_RANGE = VK_VERTEX_INPUT_RATE_INSTANCE, + VK_VERTEX_INPUT_RATE_RANGE_SIZE = (VK_VERTEX_INPUT_RATE_INSTANCE - VK_VERTEX_INPUT_RATE_VERTEX + 1), + VK_VERTEX_INPUT_RATE_MAX_ENUM = 0x7FFFFFFF +} VkVertexInputRate; + +typedef enum VkPrimitiveTopology { + VK_PRIMITIVE_TOPOLOGY_POINT_LIST = 0, + VK_PRIMITIVE_TOPOLOGY_LINE_LIST = 1, + VK_PRIMITIVE_TOPOLOGY_LINE_STRIP = 2, + VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST = 3, + VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP = 4, + VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN = 5, + VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY = 6, + VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY = 7, + VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY = 8, + VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY = 9, + VK_PRIMITIVE_TOPOLOGY_PATCH_LIST = 10, + VK_PRIMITIVE_TOPOLOGY_BEGIN_RANGE = VK_PRIMITIVE_TOPOLOGY_POINT_LIST, + VK_PRIMITIVE_TOPOLOGY_END_RANGE = VK_PRIMITIVE_TOPOLOGY_PATCH_LIST, + VK_PRIMITIVE_TOPOLOGY_RANGE_SIZE = (VK_PRIMITIVE_TOPOLOGY_PATCH_LIST - VK_PRIMITIVE_TOPOLOGY_POINT_LIST + 1), + VK_PRIMITIVE_TOPOLOGY_MAX_ENUM = 0x7FFFFFFF +} VkPrimitiveTopology; + +typedef enum VkPolygonMode { + VK_POLYGON_MODE_FILL = 0, + VK_POLYGON_MODE_LINE = 1, + VK_POLYGON_MODE_POINT = 2, + VK_POLYGON_MODE_FILL_RECTANGLE_NV = 1000153000, + VK_POLYGON_MODE_BEGIN_RANGE = VK_POLYGON_MODE_FILL, + VK_POLYGON_MODE_END_RANGE = VK_POLYGON_MODE_POINT, + VK_POLYGON_MODE_RANGE_SIZE = (VK_POLYGON_MODE_POINT - VK_POLYGON_MODE_FILL + 1), + VK_POLYGON_MODE_MAX_ENUM = 0x7FFFFFFF +} VkPolygonMode; + +typedef enum VkFrontFace { + VK_FRONT_FACE_COUNTER_CLOCKWISE = 0, + VK_FRONT_FACE_CLOCKWISE = 1, + VK_FRONT_FACE_BEGIN_RANGE = VK_FRONT_FACE_COUNTER_CLOCKWISE, + VK_FRONT_FACE_END_RANGE = VK_FRONT_FACE_CLOCKWISE, + VK_FRONT_FACE_RANGE_SIZE = (VK_FRONT_FACE_CLOCKWISE - VK_FRONT_FACE_COUNTER_CLOCKWISE + 1), + VK_FRONT_FACE_MAX_ENUM = 0x7FFFFFFF +} VkFrontFace; + +typedef enum VkCompareOp { + VK_COMPARE_OP_NEVER = 0, + VK_COMPARE_OP_LESS = 1, + VK_COMPARE_OP_EQUAL = 2, + VK_COMPARE_OP_LESS_OR_EQUAL = 3, + VK_COMPARE_OP_GREATER = 4, + VK_COMPARE_OP_NOT_EQUAL = 5, + VK_COMPARE_OP_GREATER_OR_EQUAL = 6, + VK_COMPARE_OP_ALWAYS = 7, + VK_COMPARE_OP_BEGIN_RANGE = VK_COMPARE_OP_NEVER, + VK_COMPARE_OP_END_RANGE = VK_COMPARE_OP_ALWAYS, + VK_COMPARE_OP_RANGE_SIZE = (VK_COMPARE_OP_ALWAYS - VK_COMPARE_OP_NEVER + 1), + VK_COMPARE_OP_MAX_ENUM = 0x7FFFFFFF +} VkCompareOp; + +typedef enum VkStencilOp { + VK_STENCIL_OP_KEEP = 0, + VK_STENCIL_OP_ZERO = 1, + VK_STENCIL_OP_REPLACE = 2, + VK_STENCIL_OP_INCREMENT_AND_CLAMP = 3, + VK_STENCIL_OP_DECREMENT_AND_CLAMP = 4, + VK_STENCIL_OP_INVERT = 5, + VK_STENCIL_OP_INCREMENT_AND_WRAP = 6, + VK_STENCIL_OP_DECREMENT_AND_WRAP = 7, + VK_STENCIL_OP_BEGIN_RANGE = VK_STENCIL_OP_KEEP, + VK_STENCIL_OP_END_RANGE = VK_STENCIL_OP_DECREMENT_AND_WRAP, + VK_STENCIL_OP_RANGE_SIZE = 
(VK_STENCIL_OP_DECREMENT_AND_WRAP - VK_STENCIL_OP_KEEP + 1), + VK_STENCIL_OP_MAX_ENUM = 0x7FFFFFFF +} VkStencilOp; + +typedef enum VkLogicOp { + VK_LOGIC_OP_CLEAR = 0, + VK_LOGIC_OP_AND = 1, + VK_LOGIC_OP_AND_REVERSE = 2, + VK_LOGIC_OP_COPY = 3, + VK_LOGIC_OP_AND_INVERTED = 4, + VK_LOGIC_OP_NO_OP = 5, + VK_LOGIC_OP_XOR = 6, + VK_LOGIC_OP_OR = 7, + VK_LOGIC_OP_NOR = 8, + VK_LOGIC_OP_EQUIVALENT = 9, + VK_LOGIC_OP_INVERT = 10, + VK_LOGIC_OP_OR_REVERSE = 11, + VK_LOGIC_OP_COPY_INVERTED = 12, + VK_LOGIC_OP_OR_INVERTED = 13, + VK_LOGIC_OP_NAND = 14, + VK_LOGIC_OP_SET = 15, + VK_LOGIC_OP_BEGIN_RANGE = VK_LOGIC_OP_CLEAR, + VK_LOGIC_OP_END_RANGE = VK_LOGIC_OP_SET, + VK_LOGIC_OP_RANGE_SIZE = (VK_LOGIC_OP_SET - VK_LOGIC_OP_CLEAR + 1), + VK_LOGIC_OP_MAX_ENUM = 0x7FFFFFFF +} VkLogicOp; + +typedef enum VkBlendFactor { + VK_BLEND_FACTOR_ZERO = 0, + VK_BLEND_FACTOR_ONE = 1, + VK_BLEND_FACTOR_SRC_COLOR = 2, + VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR = 3, + VK_BLEND_FACTOR_DST_COLOR = 4, + VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR = 5, + VK_BLEND_FACTOR_SRC_ALPHA = 6, + VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA = 7, + VK_BLEND_FACTOR_DST_ALPHA = 8, + VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA = 9, + VK_BLEND_FACTOR_CONSTANT_COLOR = 10, + VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR = 11, + VK_BLEND_FACTOR_CONSTANT_ALPHA = 12, + VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA = 13, + VK_BLEND_FACTOR_SRC_ALPHA_SATURATE = 14, + VK_BLEND_FACTOR_SRC1_COLOR = 15, + VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR = 16, + VK_BLEND_FACTOR_SRC1_ALPHA = 17, + VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA = 18, + VK_BLEND_FACTOR_BEGIN_RANGE = VK_BLEND_FACTOR_ZERO, + VK_BLEND_FACTOR_END_RANGE = VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA, + VK_BLEND_FACTOR_RANGE_SIZE = (VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA - VK_BLEND_FACTOR_ZERO + 1), + VK_BLEND_FACTOR_MAX_ENUM = 0x7FFFFFFF +} VkBlendFactor; + +typedef enum VkBlendOp { + VK_BLEND_OP_ADD = 0, + VK_BLEND_OP_SUBTRACT = 1, + VK_BLEND_OP_REVERSE_SUBTRACT = 2, + VK_BLEND_OP_MIN = 3, + VK_BLEND_OP_MAX = 4, + VK_BLEND_OP_ZERO_EXT = 1000148000, + VK_BLEND_OP_SRC_EXT = 1000148001, + VK_BLEND_OP_DST_EXT = 1000148002, + VK_BLEND_OP_SRC_OVER_EXT = 1000148003, + VK_BLEND_OP_DST_OVER_EXT = 1000148004, + VK_BLEND_OP_SRC_IN_EXT = 1000148005, + VK_BLEND_OP_DST_IN_EXT = 1000148006, + VK_BLEND_OP_SRC_OUT_EXT = 1000148007, + VK_BLEND_OP_DST_OUT_EXT = 1000148008, + VK_BLEND_OP_SRC_ATOP_EXT = 1000148009, + VK_BLEND_OP_DST_ATOP_EXT = 1000148010, + VK_BLEND_OP_XOR_EXT = 1000148011, + VK_BLEND_OP_MULTIPLY_EXT = 1000148012, + VK_BLEND_OP_SCREEN_EXT = 1000148013, + VK_BLEND_OP_OVERLAY_EXT = 1000148014, + VK_BLEND_OP_DARKEN_EXT = 1000148015, + VK_BLEND_OP_LIGHTEN_EXT = 1000148016, + VK_BLEND_OP_COLORDODGE_EXT = 1000148017, + VK_BLEND_OP_COLORBURN_EXT = 1000148018, + VK_BLEND_OP_HARDLIGHT_EXT = 1000148019, + VK_BLEND_OP_SOFTLIGHT_EXT = 1000148020, + VK_BLEND_OP_DIFFERENCE_EXT = 1000148021, + VK_BLEND_OP_EXCLUSION_EXT = 1000148022, + VK_BLEND_OP_INVERT_EXT = 1000148023, + VK_BLEND_OP_INVERT_RGB_EXT = 1000148024, + VK_BLEND_OP_LINEARDODGE_EXT = 1000148025, + VK_BLEND_OP_LINEARBURN_EXT = 1000148026, + VK_BLEND_OP_VIVIDLIGHT_EXT = 1000148027, + VK_BLEND_OP_LINEARLIGHT_EXT = 1000148028, + VK_BLEND_OP_PINLIGHT_EXT = 1000148029, + VK_BLEND_OP_HARDMIX_EXT = 1000148030, + VK_BLEND_OP_HSL_HUE_EXT = 1000148031, + VK_BLEND_OP_HSL_SATURATION_EXT = 1000148032, + VK_BLEND_OP_HSL_COLOR_EXT = 1000148033, + VK_BLEND_OP_HSL_LUMINOSITY_EXT = 1000148034, + VK_BLEND_OP_PLUS_EXT = 1000148035, + VK_BLEND_OP_PLUS_CLAMPED_EXT = 1000148036, + VK_BLEND_OP_PLUS_CLAMPED_ALPHA_EXT = 
1000148037, + VK_BLEND_OP_PLUS_DARKER_EXT = 1000148038, + VK_BLEND_OP_MINUS_EXT = 1000148039, + VK_BLEND_OP_MINUS_CLAMPED_EXT = 1000148040, + VK_BLEND_OP_CONTRAST_EXT = 1000148041, + VK_BLEND_OP_INVERT_OVG_EXT = 1000148042, + VK_BLEND_OP_RED_EXT = 1000148043, + VK_BLEND_OP_GREEN_EXT = 1000148044, + VK_BLEND_OP_BLUE_EXT = 1000148045, + VK_BLEND_OP_BEGIN_RANGE = VK_BLEND_OP_ADD, + VK_BLEND_OP_END_RANGE = VK_BLEND_OP_MAX, + VK_BLEND_OP_RANGE_SIZE = (VK_BLEND_OP_MAX - VK_BLEND_OP_ADD + 1), + VK_BLEND_OP_MAX_ENUM = 0x7FFFFFFF +} VkBlendOp; + +typedef enum VkDynamicState { + VK_DYNAMIC_STATE_VIEWPORT = 0, + VK_DYNAMIC_STATE_SCISSOR = 1, + VK_DYNAMIC_STATE_LINE_WIDTH = 2, + VK_DYNAMIC_STATE_DEPTH_BIAS = 3, + VK_DYNAMIC_STATE_BLEND_CONSTANTS = 4, + VK_DYNAMIC_STATE_DEPTH_BOUNDS = 5, + VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK = 6, + VK_DYNAMIC_STATE_STENCIL_WRITE_MASK = 7, + VK_DYNAMIC_STATE_STENCIL_REFERENCE = 8, + VK_DYNAMIC_STATE_VIEWPORT_W_SCALING_NV = 1000087000, + VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT = 1000099000, + VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT = 1000143000, + VK_DYNAMIC_STATE_BEGIN_RANGE = VK_DYNAMIC_STATE_VIEWPORT, + VK_DYNAMIC_STATE_END_RANGE = VK_DYNAMIC_STATE_STENCIL_REFERENCE, + VK_DYNAMIC_STATE_RANGE_SIZE = (VK_DYNAMIC_STATE_STENCIL_REFERENCE - VK_DYNAMIC_STATE_VIEWPORT + 1), + VK_DYNAMIC_STATE_MAX_ENUM = 0x7FFFFFFF +} VkDynamicState; + +typedef enum VkFilter { + VK_FILTER_NEAREST = 0, + VK_FILTER_LINEAR = 1, + VK_FILTER_CUBIC_IMG = 1000015000, + VK_FILTER_BEGIN_RANGE = VK_FILTER_NEAREST, + VK_FILTER_END_RANGE = VK_FILTER_LINEAR, + VK_FILTER_RANGE_SIZE = (VK_FILTER_LINEAR - VK_FILTER_NEAREST + 1), + VK_FILTER_MAX_ENUM = 0x7FFFFFFF +} VkFilter; + +typedef enum VkSamplerMipmapMode { + VK_SAMPLER_MIPMAP_MODE_NEAREST = 0, + VK_SAMPLER_MIPMAP_MODE_LINEAR = 1, + VK_SAMPLER_MIPMAP_MODE_BEGIN_RANGE = VK_SAMPLER_MIPMAP_MODE_NEAREST, + VK_SAMPLER_MIPMAP_MODE_END_RANGE = VK_SAMPLER_MIPMAP_MODE_LINEAR, + VK_SAMPLER_MIPMAP_MODE_RANGE_SIZE = (VK_SAMPLER_MIPMAP_MODE_LINEAR - VK_SAMPLER_MIPMAP_MODE_NEAREST + 1), + VK_SAMPLER_MIPMAP_MODE_MAX_ENUM = 0x7FFFFFFF +} VkSamplerMipmapMode; + +typedef enum VkSamplerAddressMode { + VK_SAMPLER_ADDRESS_MODE_REPEAT = 0, + VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT = 1, + VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE = 2, + VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER = 3, + VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE = 4, + VK_SAMPLER_ADDRESS_MODE_BEGIN_RANGE = VK_SAMPLER_ADDRESS_MODE_REPEAT, + VK_SAMPLER_ADDRESS_MODE_END_RANGE = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER, + VK_SAMPLER_ADDRESS_MODE_RANGE_SIZE = (VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER - VK_SAMPLER_ADDRESS_MODE_REPEAT + 1), + VK_SAMPLER_ADDRESS_MODE_MAX_ENUM = 0x7FFFFFFF +} VkSamplerAddressMode; + +typedef enum VkBorderColor { + VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK = 0, + VK_BORDER_COLOR_INT_TRANSPARENT_BLACK = 1, + VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK = 2, + VK_BORDER_COLOR_INT_OPAQUE_BLACK = 3, + VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE = 4, + VK_BORDER_COLOR_INT_OPAQUE_WHITE = 5, + VK_BORDER_COLOR_BEGIN_RANGE = VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK, + VK_BORDER_COLOR_END_RANGE = VK_BORDER_COLOR_INT_OPAQUE_WHITE, + VK_BORDER_COLOR_RANGE_SIZE = (VK_BORDER_COLOR_INT_OPAQUE_WHITE - VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK + 1), + VK_BORDER_COLOR_MAX_ENUM = 0x7FFFFFFF +} VkBorderColor; + +typedef enum VkDescriptorType { + VK_DESCRIPTOR_TYPE_SAMPLER = 0, + VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER = 1, + VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE = 2, + VK_DESCRIPTOR_TYPE_STORAGE_IMAGE = 3, + 
VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER = 4, + VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER = 5, + VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER = 6, + VK_DESCRIPTOR_TYPE_STORAGE_BUFFER = 7, + VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC = 8, + VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC = 9, + VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT = 10, + VK_DESCRIPTOR_TYPE_BEGIN_RANGE = VK_DESCRIPTOR_TYPE_SAMPLER, + VK_DESCRIPTOR_TYPE_END_RANGE = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, + VK_DESCRIPTOR_TYPE_RANGE_SIZE = (VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT - VK_DESCRIPTOR_TYPE_SAMPLER + 1), + VK_DESCRIPTOR_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkDescriptorType; + +typedef enum VkAttachmentLoadOp { + VK_ATTACHMENT_LOAD_OP_LOAD = 0, + VK_ATTACHMENT_LOAD_OP_CLEAR = 1, + VK_ATTACHMENT_LOAD_OP_DONT_CARE = 2, + VK_ATTACHMENT_LOAD_OP_BEGIN_RANGE = VK_ATTACHMENT_LOAD_OP_LOAD, + VK_ATTACHMENT_LOAD_OP_END_RANGE = VK_ATTACHMENT_LOAD_OP_DONT_CARE, + VK_ATTACHMENT_LOAD_OP_RANGE_SIZE = (VK_ATTACHMENT_LOAD_OP_DONT_CARE - VK_ATTACHMENT_LOAD_OP_LOAD + 1), + VK_ATTACHMENT_LOAD_OP_MAX_ENUM = 0x7FFFFFFF +} VkAttachmentLoadOp; + +typedef enum VkAttachmentStoreOp { + VK_ATTACHMENT_STORE_OP_STORE = 0, + VK_ATTACHMENT_STORE_OP_DONT_CARE = 1, + VK_ATTACHMENT_STORE_OP_BEGIN_RANGE = VK_ATTACHMENT_STORE_OP_STORE, + VK_ATTACHMENT_STORE_OP_END_RANGE = VK_ATTACHMENT_STORE_OP_DONT_CARE, + VK_ATTACHMENT_STORE_OP_RANGE_SIZE = (VK_ATTACHMENT_STORE_OP_DONT_CARE - VK_ATTACHMENT_STORE_OP_STORE + 1), + VK_ATTACHMENT_STORE_OP_MAX_ENUM = 0x7FFFFFFF +} VkAttachmentStoreOp; + +typedef enum VkPipelineBindPoint { + VK_PIPELINE_BIND_POINT_GRAPHICS = 0, + VK_PIPELINE_BIND_POINT_COMPUTE = 1, + VK_PIPELINE_BIND_POINT_BEGIN_RANGE = VK_PIPELINE_BIND_POINT_GRAPHICS, + VK_PIPELINE_BIND_POINT_END_RANGE = VK_PIPELINE_BIND_POINT_COMPUTE, + VK_PIPELINE_BIND_POINT_RANGE_SIZE = (VK_PIPELINE_BIND_POINT_COMPUTE - VK_PIPELINE_BIND_POINT_GRAPHICS + 1), + VK_PIPELINE_BIND_POINT_MAX_ENUM = 0x7FFFFFFF +} VkPipelineBindPoint; + +typedef enum VkCommandBufferLevel { + VK_COMMAND_BUFFER_LEVEL_PRIMARY = 0, + VK_COMMAND_BUFFER_LEVEL_SECONDARY = 1, + VK_COMMAND_BUFFER_LEVEL_BEGIN_RANGE = VK_COMMAND_BUFFER_LEVEL_PRIMARY, + VK_COMMAND_BUFFER_LEVEL_END_RANGE = VK_COMMAND_BUFFER_LEVEL_SECONDARY, + VK_COMMAND_BUFFER_LEVEL_RANGE_SIZE = (VK_COMMAND_BUFFER_LEVEL_SECONDARY - VK_COMMAND_BUFFER_LEVEL_PRIMARY + 1), + VK_COMMAND_BUFFER_LEVEL_MAX_ENUM = 0x7FFFFFFF +} VkCommandBufferLevel; + +typedef enum VkIndexType { + VK_INDEX_TYPE_UINT16 = 0, + VK_INDEX_TYPE_UINT32 = 1, + VK_INDEX_TYPE_BEGIN_RANGE = VK_INDEX_TYPE_UINT16, + VK_INDEX_TYPE_END_RANGE = VK_INDEX_TYPE_UINT32, + VK_INDEX_TYPE_RANGE_SIZE = (VK_INDEX_TYPE_UINT32 - VK_INDEX_TYPE_UINT16 + 1), + VK_INDEX_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkIndexType; + +typedef enum VkSubpassContents { + VK_SUBPASS_CONTENTS_INLINE = 0, + VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS = 1, + VK_SUBPASS_CONTENTS_BEGIN_RANGE = VK_SUBPASS_CONTENTS_INLINE, + VK_SUBPASS_CONTENTS_END_RANGE = VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS, + VK_SUBPASS_CONTENTS_RANGE_SIZE = (VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS - VK_SUBPASS_CONTENTS_INLINE + 1), + VK_SUBPASS_CONTENTS_MAX_ENUM = 0x7FFFFFFF +} VkSubpassContents; + +typedef enum VkObjectType { + VK_OBJECT_TYPE_UNKNOWN = 0, + VK_OBJECT_TYPE_INSTANCE = 1, + VK_OBJECT_TYPE_PHYSICAL_DEVICE = 2, + VK_OBJECT_TYPE_DEVICE = 3, + VK_OBJECT_TYPE_QUEUE = 4, + VK_OBJECT_TYPE_SEMAPHORE = 5, + VK_OBJECT_TYPE_COMMAND_BUFFER = 6, + VK_OBJECT_TYPE_FENCE = 7, + VK_OBJECT_TYPE_DEVICE_MEMORY = 8, + VK_OBJECT_TYPE_BUFFER = 9, + VK_OBJECT_TYPE_IMAGE = 10, + 
VK_OBJECT_TYPE_EVENT = 11, + VK_OBJECT_TYPE_QUERY_POOL = 12, + VK_OBJECT_TYPE_BUFFER_VIEW = 13, + VK_OBJECT_TYPE_IMAGE_VIEW = 14, + VK_OBJECT_TYPE_SHADER_MODULE = 15, + VK_OBJECT_TYPE_PIPELINE_CACHE = 16, + VK_OBJECT_TYPE_PIPELINE_LAYOUT = 17, + VK_OBJECT_TYPE_RENDER_PASS = 18, + VK_OBJECT_TYPE_PIPELINE = 19, + VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT = 20, + VK_OBJECT_TYPE_SAMPLER = 21, + VK_OBJECT_TYPE_DESCRIPTOR_POOL = 22, + VK_OBJECT_TYPE_DESCRIPTOR_SET = 23, + VK_OBJECT_TYPE_FRAMEBUFFER = 24, + VK_OBJECT_TYPE_COMMAND_POOL = 25, + VK_OBJECT_TYPE_SURFACE_KHR = 1000000000, + VK_OBJECT_TYPE_SWAPCHAIN_KHR = 1000001000, + VK_OBJECT_TYPE_DISPLAY_KHR = 1000002000, + VK_OBJECT_TYPE_DISPLAY_MODE_KHR = 1000002001, + VK_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT = 1000011000, + VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_KHR = 1000085000, + VK_OBJECT_TYPE_OBJECT_TABLE_NVX = 1000086000, + VK_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NVX = 1000086001, + VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_KHR = 1000156000, + VK_OBJECT_TYPE_VALIDATION_CACHE_EXT = 1000160000, + VK_OBJECT_TYPE_BEGIN_RANGE = VK_OBJECT_TYPE_UNKNOWN, + VK_OBJECT_TYPE_END_RANGE = VK_OBJECT_TYPE_COMMAND_POOL, + VK_OBJECT_TYPE_RANGE_SIZE = (VK_OBJECT_TYPE_COMMAND_POOL - VK_OBJECT_TYPE_UNKNOWN + 1), + VK_OBJECT_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkObjectType; + +typedef VkFlags VkInstanceCreateFlags; + +typedef enum VkFormatFeatureFlagBits { + VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT = 0x00000001, + VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT = 0x00000002, + VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT = 0x00000004, + VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT = 0x00000008, + VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT = 0x00000010, + VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT = 0x00000020, + VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT = 0x00000040, + VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT = 0x00000080, + VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT = 0x00000100, + VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT = 0x00000200, + VK_FORMAT_FEATURE_BLIT_SRC_BIT = 0x00000400, + VK_FORMAT_FEATURE_BLIT_DST_BIT = 0x00000800, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT = 0x00001000, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG = 0x00002000, + VK_FORMAT_FEATURE_TRANSFER_SRC_BIT_KHR = 0x00004000, + VK_FORMAT_FEATURE_TRANSFER_DST_BIT_KHR = 0x00008000, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_MINMAX_BIT_EXT = 0x00010000, + VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT_KHR = 0x00020000, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT_KHR = 0x00040000, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT_KHR = 0x00080000, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT_KHR = 0x00100000, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT_KHR = 0x00200000, + VK_FORMAT_FEATURE_DISJOINT_BIT_KHR = 0x00400000, + VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT_KHR = 0x00800000, + VK_FORMAT_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkFormatFeatureFlagBits; +typedef VkFlags VkFormatFeatureFlags; + +typedef enum VkImageUsageFlagBits { + VK_IMAGE_USAGE_TRANSFER_SRC_BIT = 0x00000001, + VK_IMAGE_USAGE_TRANSFER_DST_BIT = 0x00000002, + VK_IMAGE_USAGE_SAMPLED_BIT = 0x00000004, + VK_IMAGE_USAGE_STORAGE_BIT = 0x00000008, + VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT = 0x00000010, + VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT = 0x00000020, + VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT = 0x00000040, + VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT = 0x00000080, + 
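/*
 * Convention used throughout this header: each VkXxxFlagBits enum defines the
 * individual bits, and the matching "typedef VkFlags VkXxxFlags" alias carries
 * OR-combined masks of them. For example:
 *
 *   VkImageUsageFlags usage =
 *       VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
 */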
VK_IMAGE_USAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkImageUsageFlagBits; +typedef VkFlags VkImageUsageFlags; + +typedef enum VkImageCreateFlagBits { + VK_IMAGE_CREATE_SPARSE_BINDING_BIT = 0x00000001, + VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT = 0x00000002, + VK_IMAGE_CREATE_SPARSE_ALIASED_BIT = 0x00000004, + VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT = 0x00000008, + VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT = 0x00000010, + VK_IMAGE_CREATE_BIND_SFR_BIT_KHX = 0x00000040, + VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR = 0x00000020, + VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT_KHR = 0x00000080, + VK_IMAGE_CREATE_EXTENDED_USAGE_BIT_KHR = 0x00000100, + VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT = 0x00001000, + VK_IMAGE_CREATE_DISJOINT_BIT_KHR = 0x00000200, + VK_IMAGE_CREATE_ALIAS_BIT_KHR = 0x00000400, + VK_IMAGE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkImageCreateFlagBits; +typedef VkFlags VkImageCreateFlags; + +typedef enum VkSampleCountFlagBits { + VK_SAMPLE_COUNT_1_BIT = 0x00000001, + VK_SAMPLE_COUNT_2_BIT = 0x00000002, + VK_SAMPLE_COUNT_4_BIT = 0x00000004, + VK_SAMPLE_COUNT_8_BIT = 0x00000008, + VK_SAMPLE_COUNT_16_BIT = 0x00000010, + VK_SAMPLE_COUNT_32_BIT = 0x00000020, + VK_SAMPLE_COUNT_64_BIT = 0x00000040, + VK_SAMPLE_COUNT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkSampleCountFlagBits; +typedef VkFlags VkSampleCountFlags; + +typedef enum VkQueueFlagBits { + VK_QUEUE_GRAPHICS_BIT = 0x00000001, + VK_QUEUE_COMPUTE_BIT = 0x00000002, + VK_QUEUE_TRANSFER_BIT = 0x00000004, + VK_QUEUE_SPARSE_BINDING_BIT = 0x00000008, + VK_QUEUE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkQueueFlagBits; +typedef VkFlags VkQueueFlags; + +typedef enum VkMemoryPropertyFlagBits { + VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT = 0x00000001, + VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT = 0x00000002, + VK_MEMORY_PROPERTY_HOST_COHERENT_BIT = 0x00000004, + VK_MEMORY_PROPERTY_HOST_CACHED_BIT = 0x00000008, + VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT = 0x00000010, + VK_MEMORY_PROPERTY_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkMemoryPropertyFlagBits; +typedef VkFlags VkMemoryPropertyFlags; + +typedef enum VkMemoryHeapFlagBits { + VK_MEMORY_HEAP_DEVICE_LOCAL_BIT = 0x00000001, + VK_MEMORY_HEAP_MULTI_INSTANCE_BIT_KHX = 0x00000002, + VK_MEMORY_HEAP_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkMemoryHeapFlagBits; +typedef VkFlags VkMemoryHeapFlags; +typedef VkFlags VkDeviceCreateFlags; +typedef VkFlags VkDeviceQueueCreateFlags; + +typedef enum VkPipelineStageFlagBits { + VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT = 0x00000001, + VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT = 0x00000002, + VK_PIPELINE_STAGE_VERTEX_INPUT_BIT = 0x00000004, + VK_PIPELINE_STAGE_VERTEX_SHADER_BIT = 0x00000008, + VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT = 0x00000010, + VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT = 0x00000020, + VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT = 0x00000040, + VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT = 0x00000080, + VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT = 0x00000100, + VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT = 0x00000200, + VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT = 0x00000400, + VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT = 0x00000800, + VK_PIPELINE_STAGE_TRANSFER_BIT = 0x00001000, + VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT = 0x00002000, + VK_PIPELINE_STAGE_HOST_BIT = 0x00004000, + VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT = 0x00008000, + VK_PIPELINE_STAGE_ALL_COMMANDS_BIT = 0x00010000, + VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX = 0x00020000, + VK_PIPELINE_STAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkPipelineStageFlagBits; +typedef VkFlags VkPipelineStageFlags; +typedef VkFlags 
VkMemoryMapFlags; + +typedef enum VkImageAspectFlagBits { + VK_IMAGE_ASPECT_COLOR_BIT = 0x00000001, + VK_IMAGE_ASPECT_DEPTH_BIT = 0x00000002, + VK_IMAGE_ASPECT_STENCIL_BIT = 0x00000004, + VK_IMAGE_ASPECT_METADATA_BIT = 0x00000008, + VK_IMAGE_ASPECT_PLANE_0_BIT_KHR = 0x00000010, + VK_IMAGE_ASPECT_PLANE_1_BIT_KHR = 0x00000020, + VK_IMAGE_ASPECT_PLANE_2_BIT_KHR = 0x00000040, + VK_IMAGE_ASPECT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkImageAspectFlagBits; +typedef VkFlags VkImageAspectFlags; + +typedef enum VkSparseImageFormatFlagBits { + VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT = 0x00000001, + VK_SPARSE_IMAGE_FORMAT_ALIGNED_MIP_SIZE_BIT = 0x00000002, + VK_SPARSE_IMAGE_FORMAT_NONSTANDARD_BLOCK_SIZE_BIT = 0x00000004, + VK_SPARSE_IMAGE_FORMAT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkSparseImageFormatFlagBits; +typedef VkFlags VkSparseImageFormatFlags; + +typedef enum VkSparseMemoryBindFlagBits { + VK_SPARSE_MEMORY_BIND_METADATA_BIT = 0x00000001, + VK_SPARSE_MEMORY_BIND_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkSparseMemoryBindFlagBits; +typedef VkFlags VkSparseMemoryBindFlags; + +typedef enum VkFenceCreateFlagBits { + VK_FENCE_CREATE_SIGNALED_BIT = 0x00000001, + VK_FENCE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkFenceCreateFlagBits; +typedef VkFlags VkFenceCreateFlags; +typedef VkFlags VkSemaphoreCreateFlags; +typedef VkFlags VkEventCreateFlags; +typedef VkFlags VkQueryPoolCreateFlags; + +typedef enum VkQueryPipelineStatisticFlagBits { + VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_VERTICES_BIT = 0x00000001, + VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_PRIMITIVES_BIT = 0x00000002, + VK_QUERY_PIPELINE_STATISTIC_VERTEX_SHADER_INVOCATIONS_BIT = 0x00000004, + VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_INVOCATIONS_BIT = 0x00000008, + VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_PRIMITIVES_BIT = 0x00000010, + VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT = 0x00000020, + VK_QUERY_PIPELINE_STATISTIC_CLIPPING_PRIMITIVES_BIT = 0x00000040, + VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT = 0x00000080, + VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_CONTROL_SHADER_PATCHES_BIT = 0x00000100, + VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_EVALUATION_SHADER_INVOCATIONS_BIT = 0x00000200, + VK_QUERY_PIPELINE_STATISTIC_COMPUTE_SHADER_INVOCATIONS_BIT = 0x00000400, + VK_QUERY_PIPELINE_STATISTIC_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkQueryPipelineStatisticFlagBits; +typedef VkFlags VkQueryPipelineStatisticFlags; + +typedef enum VkQueryResultFlagBits { + VK_QUERY_RESULT_64_BIT = 0x00000001, + VK_QUERY_RESULT_WAIT_BIT = 0x00000002, + VK_QUERY_RESULT_WITH_AVAILABILITY_BIT = 0x00000004, + VK_QUERY_RESULT_PARTIAL_BIT = 0x00000008, + VK_QUERY_RESULT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkQueryResultFlagBits; +typedef VkFlags VkQueryResultFlags; + +typedef enum VkBufferCreateFlagBits { + VK_BUFFER_CREATE_SPARSE_BINDING_BIT = 0x00000001, + VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT = 0x00000002, + VK_BUFFER_CREATE_SPARSE_ALIASED_BIT = 0x00000004, + VK_BUFFER_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkBufferCreateFlagBits; +typedef VkFlags VkBufferCreateFlags; + +typedef enum VkBufferUsageFlagBits { + VK_BUFFER_USAGE_TRANSFER_SRC_BIT = 0x00000001, + VK_BUFFER_USAGE_TRANSFER_DST_BIT = 0x00000002, + VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT = 0x00000004, + VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT = 0x00000008, + VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT = 0x00000010, + VK_BUFFER_USAGE_STORAGE_BUFFER_BIT = 0x00000020, + VK_BUFFER_USAGE_INDEX_BUFFER_BIT = 0x00000040, + VK_BUFFER_USAGE_VERTEX_BUFFER_BIT = 0x00000080, + 
VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT = 0x00000100, + VK_BUFFER_USAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkBufferUsageFlagBits; +typedef VkFlags VkBufferUsageFlags; +typedef VkFlags VkBufferViewCreateFlags; +typedef VkFlags VkImageViewCreateFlags; +typedef VkFlags VkShaderModuleCreateFlags; +typedef VkFlags VkPipelineCacheCreateFlags; + +typedef enum VkPipelineCreateFlagBits { + VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT = 0x00000001, + VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT = 0x00000002, + VK_PIPELINE_CREATE_DERIVATIVE_BIT = 0x00000004, + VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT_KHX = 0x00000008, + VK_PIPELINE_CREATE_DISPATCH_BASE_KHX = 0x00000010, + VK_PIPELINE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkPipelineCreateFlagBits; +typedef VkFlags VkPipelineCreateFlags; +typedef VkFlags VkPipelineShaderStageCreateFlags; + +typedef enum VkShaderStageFlagBits { + VK_SHADER_STAGE_VERTEX_BIT = 0x00000001, + VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT = 0x00000002, + VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT = 0x00000004, + VK_SHADER_STAGE_GEOMETRY_BIT = 0x00000008, + VK_SHADER_STAGE_FRAGMENT_BIT = 0x00000010, + VK_SHADER_STAGE_COMPUTE_BIT = 0x00000020, + VK_SHADER_STAGE_ALL_GRAPHICS = 0x0000001F, + VK_SHADER_STAGE_ALL = 0x7FFFFFFF, + VK_SHADER_STAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkShaderStageFlagBits; +typedef VkFlags VkPipelineVertexInputStateCreateFlags; +typedef VkFlags VkPipelineInputAssemblyStateCreateFlags; +typedef VkFlags VkPipelineTessellationStateCreateFlags; +typedef VkFlags VkPipelineViewportStateCreateFlags; +typedef VkFlags VkPipelineRasterizationStateCreateFlags; + +typedef enum VkCullModeFlagBits { + VK_CULL_MODE_NONE = 0, + VK_CULL_MODE_FRONT_BIT = 0x00000001, + VK_CULL_MODE_BACK_BIT = 0x00000002, + VK_CULL_MODE_FRONT_AND_BACK = 0x00000003, + VK_CULL_MODE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkCullModeFlagBits; +typedef VkFlags VkCullModeFlags; +typedef VkFlags VkPipelineMultisampleStateCreateFlags; +typedef VkFlags VkPipelineDepthStencilStateCreateFlags; +typedef VkFlags VkPipelineColorBlendStateCreateFlags; + +typedef enum VkColorComponentFlagBits { + VK_COLOR_COMPONENT_R_BIT = 0x00000001, + VK_COLOR_COMPONENT_G_BIT = 0x00000002, + VK_COLOR_COMPONENT_B_BIT = 0x00000004, + VK_COLOR_COMPONENT_A_BIT = 0x00000008, + VK_COLOR_COMPONENT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkColorComponentFlagBits; +typedef VkFlags VkColorComponentFlags; +typedef VkFlags VkPipelineDynamicStateCreateFlags; +typedef VkFlags VkPipelineLayoutCreateFlags; +typedef VkFlags VkShaderStageFlags; +typedef VkFlags VkSamplerCreateFlags; + +typedef enum VkDescriptorSetLayoutCreateFlagBits { + VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR = 0x00000001, + VK_DESCRIPTOR_SET_LAYOUT_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkDescriptorSetLayoutCreateFlagBits; +typedef VkFlags VkDescriptorSetLayoutCreateFlags; + +typedef enum VkDescriptorPoolCreateFlagBits { + VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT = 0x00000001, + VK_DESCRIPTOR_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkDescriptorPoolCreateFlagBits; +typedef VkFlags VkDescriptorPoolCreateFlags; +typedef VkFlags VkDescriptorPoolResetFlags; +typedef VkFlags VkFramebufferCreateFlags; +typedef VkFlags VkRenderPassCreateFlags; + +typedef enum VkAttachmentDescriptionFlagBits { + VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT = 0x00000001, + VK_ATTACHMENT_DESCRIPTION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkAttachmentDescriptionFlagBits; +typedef VkFlags VkAttachmentDescriptionFlags; + +typedef enum VkSubpassDescriptionFlagBits { + 
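/*
 * For reference, VK_SHADER_STAGE_ALL_GRAPHICS above is simply the OR of the
 * five graphics stages: 0x01 | 0x02 | 0x04 | 0x08 | 0x10 = 0x0000001F.
 */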
VK_SUBPASS_DESCRIPTION_PER_VIEW_ATTRIBUTES_BIT_NVX = 0x00000001, + VK_SUBPASS_DESCRIPTION_PER_VIEW_POSITION_X_ONLY_BIT_NVX = 0x00000002, + VK_SUBPASS_DESCRIPTION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkSubpassDescriptionFlagBits; +typedef VkFlags VkSubpassDescriptionFlags; + +typedef enum VkAccessFlagBits { + VK_ACCESS_INDIRECT_COMMAND_READ_BIT = 0x00000001, + VK_ACCESS_INDEX_READ_BIT = 0x00000002, + VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT = 0x00000004, + VK_ACCESS_UNIFORM_READ_BIT = 0x00000008, + VK_ACCESS_INPUT_ATTACHMENT_READ_BIT = 0x00000010, + VK_ACCESS_SHADER_READ_BIT = 0x00000020, + VK_ACCESS_SHADER_WRITE_BIT = 0x00000040, + VK_ACCESS_COLOR_ATTACHMENT_READ_BIT = 0x00000080, + VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT = 0x00000100, + VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT = 0x00000200, + VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT = 0x00000400, + VK_ACCESS_TRANSFER_READ_BIT = 0x00000800, + VK_ACCESS_TRANSFER_WRITE_BIT = 0x00001000, + VK_ACCESS_HOST_READ_BIT = 0x00002000, + VK_ACCESS_HOST_WRITE_BIT = 0x00004000, + VK_ACCESS_MEMORY_READ_BIT = 0x00008000, + VK_ACCESS_MEMORY_WRITE_BIT = 0x00010000, + VK_ACCESS_COMMAND_PROCESS_READ_BIT_NVX = 0x00020000, + VK_ACCESS_COMMAND_PROCESS_WRITE_BIT_NVX = 0x00040000, + VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT = 0x00080000, + VK_ACCESS_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkAccessFlagBits; +typedef VkFlags VkAccessFlags; + +typedef enum VkDependencyFlagBits { + VK_DEPENDENCY_BY_REGION_BIT = 0x00000001, + VK_DEPENDENCY_VIEW_LOCAL_BIT_KHX = 0x00000002, + VK_DEPENDENCY_DEVICE_GROUP_BIT_KHX = 0x00000004, + VK_DEPENDENCY_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkDependencyFlagBits; +typedef VkFlags VkDependencyFlags; + +typedef enum VkCommandPoolCreateFlagBits { + VK_COMMAND_POOL_CREATE_TRANSIENT_BIT = 0x00000001, + VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT = 0x00000002, + VK_COMMAND_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkCommandPoolCreateFlagBits; +typedef VkFlags VkCommandPoolCreateFlags; + +typedef enum VkCommandPoolResetFlagBits { + VK_COMMAND_POOL_RESET_RELEASE_RESOURCES_BIT = 0x00000001, + VK_COMMAND_POOL_RESET_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkCommandPoolResetFlagBits; +typedef VkFlags VkCommandPoolResetFlags; + +typedef enum VkCommandBufferUsageFlagBits { + VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT = 0x00000001, + VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT = 0x00000002, + VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT = 0x00000004, + VK_COMMAND_BUFFER_USAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkCommandBufferUsageFlagBits; +typedef VkFlags VkCommandBufferUsageFlags; + +typedef enum VkQueryControlFlagBits { + VK_QUERY_CONTROL_PRECISE_BIT = 0x00000001, + VK_QUERY_CONTROL_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkQueryControlFlagBits; +typedef VkFlags VkQueryControlFlags; + +typedef enum VkCommandBufferResetFlagBits { + VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT = 0x00000001, + VK_COMMAND_BUFFER_RESET_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkCommandBufferResetFlagBits; +typedef VkFlags VkCommandBufferResetFlags; + +typedef enum VkStencilFaceFlagBits { + VK_STENCIL_FACE_FRONT_BIT = 0x00000001, + VK_STENCIL_FACE_BACK_BIT = 0x00000002, + VK_STENCIL_FRONT_AND_BACK = 0x00000003, + VK_STENCIL_FACE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkStencilFaceFlagBits; +typedef VkFlags VkStencilFaceFlags; + +typedef struct VkApplicationInfo { + VkStructureType sType; + const void* pNext; + const char* pApplicationName; + uint32_t applicationVersion; + const char* pEngineName; + uint32_t engineVersion; + uint32_t apiVersion; +} VkApplicationInfo; + 
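/*
 * A minimal sketch of filling the struct above, assuming the VK_MAKE_VERSION
 * and VK_API_VERSION_1_0 macros defined near the top of this header:
 *
 *   VkApplicationInfo app;
 *   app.sType              = VK_STRUCTURE_TYPE_APPLICATION_INFO;
 *   app.pNext              = NULL;
 *   app.pApplicationName   = "demo";
 *   app.applicationVersion = VK_MAKE_VERSION(1, 0, 0);
 *   app.pEngineName        = "none";
 *   app.engineVersion      = VK_MAKE_VERSION(1, 0, 0);
 *   app.apiVersion         = VK_API_VERSION_1_0;
 *
 * A pointer to this struct then goes into VkInstanceCreateInfo::pApplicationInfo,
 * declared next.
 */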
+typedef struct VkInstanceCreateInfo { + VkStructureType sType; + const void* pNext; + VkInstanceCreateFlags flags; + const VkApplicationInfo* pApplicationInfo; + uint32_t enabledLayerCount; + const char* const* ppEnabledLayerNames; + uint32_t enabledExtensionCount; + const char* const* ppEnabledExtensionNames; +} VkInstanceCreateInfo; + +typedef void* (VKAPI_PTR *PFN_vkAllocationFunction)( + void* pUserData, + size_t size, + size_t alignment, + VkSystemAllocationScope allocationScope); + +typedef void* (VKAPI_PTR *PFN_vkReallocationFunction)( + void* pUserData, + void* pOriginal, + size_t size, + size_t alignment, + VkSystemAllocationScope allocationScope); + +typedef void (VKAPI_PTR *PFN_vkFreeFunction)( + void* pUserData, + void* pMemory); + +typedef void (VKAPI_PTR *PFN_vkInternalAllocationNotification)( + void* pUserData, + size_t size, + VkInternalAllocationType allocationType, + VkSystemAllocationScope allocationScope); + +typedef void (VKAPI_PTR *PFN_vkInternalFreeNotification)( + void* pUserData, + size_t size, + VkInternalAllocationType allocationType, + VkSystemAllocationScope allocationScope); + +typedef struct VkAllocationCallbacks { + void* pUserData; + PFN_vkAllocationFunction pfnAllocation; + PFN_vkReallocationFunction pfnReallocation; + PFN_vkFreeFunction pfnFree; + PFN_vkInternalAllocationNotification pfnInternalAllocation; + PFN_vkInternalFreeNotification pfnInternalFree; +} VkAllocationCallbacks; + +typedef struct VkPhysicalDeviceFeatures { + VkBool32 robustBufferAccess; + VkBool32 fullDrawIndexUint32; + VkBool32 imageCubeArray; + VkBool32 independentBlend; + VkBool32 geometryShader; + VkBool32 tessellationShader; + VkBool32 sampleRateShading; + VkBool32 dualSrcBlend; + VkBool32 logicOp; + VkBool32 multiDrawIndirect; + VkBool32 drawIndirectFirstInstance; + VkBool32 depthClamp; + VkBool32 depthBiasClamp; + VkBool32 fillModeNonSolid; + VkBool32 depthBounds; + VkBool32 wideLines; + VkBool32 largePoints; + VkBool32 alphaToOne; + VkBool32 multiViewport; + VkBool32 samplerAnisotropy; + VkBool32 textureCompressionETC2; + VkBool32 textureCompressionASTC_LDR; + VkBool32 textureCompressionBC; + VkBool32 occlusionQueryPrecise; + VkBool32 pipelineStatisticsQuery; + VkBool32 vertexPipelineStoresAndAtomics; + VkBool32 fragmentStoresAndAtomics; + VkBool32 shaderTessellationAndGeometryPointSize; + VkBool32 shaderImageGatherExtended; + VkBool32 shaderStorageImageExtendedFormats; + VkBool32 shaderStorageImageMultisample; + VkBool32 shaderStorageImageReadWithoutFormat; + VkBool32 shaderStorageImageWriteWithoutFormat; + VkBool32 shaderUniformBufferArrayDynamicIndexing; + VkBool32 shaderSampledImageArrayDynamicIndexing; + VkBool32 shaderStorageBufferArrayDynamicIndexing; + VkBool32 shaderStorageImageArrayDynamicIndexing; + VkBool32 shaderClipDistance; + VkBool32 shaderCullDistance; + VkBool32 shaderFloat64; + VkBool32 shaderInt64; + VkBool32 shaderInt16; + VkBool32 shaderResourceResidency; + VkBool32 shaderResourceMinLod; + VkBool32 sparseBinding; + VkBool32 sparseResidencyBuffer; + VkBool32 sparseResidencyImage2D; + VkBool32 sparseResidencyImage3D; + VkBool32 sparseResidency2Samples; + VkBool32 sparseResidency4Samples; + VkBool32 sparseResidency8Samples; + VkBool32 sparseResidency16Samples; + VkBool32 sparseResidencyAliased; + VkBool32 variableMultisampleRate; + VkBool32 inheritedQueries; +} VkPhysicalDeviceFeatures; + +typedef struct VkFormatProperties { + VkFormatFeatureFlags linearTilingFeatures; + VkFormatFeatureFlags optimalTilingFeatures; + VkFormatFeatureFlags bufferFeatures; +} 
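/*
 * Sketch of hooking the VkAllocationCallbacks declared above into the C runtime
 * (illustrative only; a complete implementation must also supply pfnReallocation,
 * and may supply the two internal-allocation notification callbacks). Vulkan
 * guarantees power-of-two alignments; aligned_alloc is C11 and requires the size
 * to be a multiple of the alignment, hence the round-up:
 *
 *   #include <stdlib.h>
 *
 *   static void* VKAPI_PTR demo_alloc(void* pUserData, size_t size,
 *                                     size_t alignment,
 *                                     VkSystemAllocationScope scope) {
 *       (void)pUserData; (void)scope;
 *       return aligned_alloc(alignment,
 *                            (size + alignment - 1) & ~(alignment - 1));
 *   }
 *
 *   static void VKAPI_PTR demo_free(void* pUserData, void* pMemory) {
 *       (void)pUserData;
 *       free(pMemory);
 *   }
 */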
VkFormatProperties; + +typedef struct VkExtent3D { + uint32_t width; + uint32_t height; + uint32_t depth; +} VkExtent3D; + +typedef struct VkImageFormatProperties { + VkExtent3D maxExtent; + uint32_t maxMipLevels; + uint32_t maxArrayLayers; + VkSampleCountFlags sampleCounts; + VkDeviceSize maxResourceSize; +} VkImageFormatProperties; + +typedef struct VkPhysicalDeviceLimits { + uint32_t maxImageDimension1D; + uint32_t maxImageDimension2D; + uint32_t maxImageDimension3D; + uint32_t maxImageDimensionCube; + uint32_t maxImageArrayLayers; + uint32_t maxTexelBufferElements; + uint32_t maxUniformBufferRange; + uint32_t maxStorageBufferRange; + uint32_t maxPushConstantsSize; + uint32_t maxMemoryAllocationCount; + uint32_t maxSamplerAllocationCount; + VkDeviceSize bufferImageGranularity; + VkDeviceSize sparseAddressSpaceSize; + uint32_t maxBoundDescriptorSets; + uint32_t maxPerStageDescriptorSamplers; + uint32_t maxPerStageDescriptorUniformBuffers; + uint32_t maxPerStageDescriptorStorageBuffers; + uint32_t maxPerStageDescriptorSampledImages; + uint32_t maxPerStageDescriptorStorageImages; + uint32_t maxPerStageDescriptorInputAttachments; + uint32_t maxPerStageResources; + uint32_t maxDescriptorSetSamplers; + uint32_t maxDescriptorSetUniformBuffers; + uint32_t maxDescriptorSetUniformBuffersDynamic; + uint32_t maxDescriptorSetStorageBuffers; + uint32_t maxDescriptorSetStorageBuffersDynamic; + uint32_t maxDescriptorSetSampledImages; + uint32_t maxDescriptorSetStorageImages; + uint32_t maxDescriptorSetInputAttachments; + uint32_t maxVertexInputAttributes; + uint32_t maxVertexInputBindings; + uint32_t maxVertexInputAttributeOffset; + uint32_t maxVertexInputBindingStride; + uint32_t maxVertexOutputComponents; + uint32_t maxTessellationGenerationLevel; + uint32_t maxTessellationPatchSize; + uint32_t maxTessellationControlPerVertexInputComponents; + uint32_t maxTessellationControlPerVertexOutputComponents; + uint32_t maxTessellationControlPerPatchOutputComponents; + uint32_t maxTessellationControlTotalOutputComponents; + uint32_t maxTessellationEvaluationInputComponents; + uint32_t maxTessellationEvaluationOutputComponents; + uint32_t maxGeometryShaderInvocations; + uint32_t maxGeometryInputComponents; + uint32_t maxGeometryOutputComponents; + uint32_t maxGeometryOutputVertices; + uint32_t maxGeometryTotalOutputComponents; + uint32_t maxFragmentInputComponents; + uint32_t maxFragmentOutputAttachments; + uint32_t maxFragmentDualSrcAttachments; + uint32_t maxFragmentCombinedOutputResources; + uint32_t maxComputeSharedMemorySize; + uint32_t maxComputeWorkGroupCount[3]; + uint32_t maxComputeWorkGroupInvocations; + uint32_t maxComputeWorkGroupSize[3]; + uint32_t subPixelPrecisionBits; + uint32_t subTexelPrecisionBits; + uint32_t mipmapPrecisionBits; + uint32_t maxDrawIndexedIndexValue; + uint32_t maxDrawIndirectCount; + float maxSamplerLodBias; + float maxSamplerAnisotropy; + uint32_t maxViewports; + uint32_t maxViewportDimensions[2]; + float viewportBoundsRange[2]; + uint32_t viewportSubPixelBits; + size_t minMemoryMapAlignment; + VkDeviceSize minTexelBufferOffsetAlignment; + VkDeviceSize minUniformBufferOffsetAlignment; + VkDeviceSize minStorageBufferOffsetAlignment; + int32_t minTexelOffset; + uint32_t maxTexelOffset; + int32_t minTexelGatherOffset; + uint32_t maxTexelGatherOffset; + float minInterpolationOffset; + float maxInterpolationOffset; + uint32_t subPixelInterpolationOffsetBits; + uint32_t maxFramebufferWidth; + uint32_t maxFramebufferHeight; + uint32_t maxFramebufferLayers; + VkSampleCountFlags 
framebufferColorSampleCounts; + VkSampleCountFlags framebufferDepthSampleCounts; + VkSampleCountFlags framebufferStencilSampleCounts; + VkSampleCountFlags framebufferNoAttachmentsSampleCounts; + uint32_t maxColorAttachments; + VkSampleCountFlags sampledImageColorSampleCounts; + VkSampleCountFlags sampledImageIntegerSampleCounts; + VkSampleCountFlags sampledImageDepthSampleCounts; + VkSampleCountFlags sampledImageStencilSampleCounts; + VkSampleCountFlags storageImageSampleCounts; + uint32_t maxSampleMaskWords; + VkBool32 timestampComputeAndGraphics; + float timestampPeriod; + uint32_t maxClipDistances; + uint32_t maxCullDistances; + uint32_t maxCombinedClipAndCullDistances; + uint32_t discreteQueuePriorities; + float pointSizeRange[2]; + float lineWidthRange[2]; + float pointSizeGranularity; + float lineWidthGranularity; + VkBool32 strictLines; + VkBool32 standardSampleLocations; + VkDeviceSize optimalBufferCopyOffsetAlignment; + VkDeviceSize optimalBufferCopyRowPitchAlignment; + VkDeviceSize nonCoherentAtomSize; +} VkPhysicalDeviceLimits; + +typedef struct VkPhysicalDeviceSparseProperties { + VkBool32 residencyStandard2DBlockShape; + VkBool32 residencyStandard2DMultisampleBlockShape; + VkBool32 residencyStandard3DBlockShape; + VkBool32 residencyAlignedMipSize; + VkBool32 residencyNonResidentStrict; +} VkPhysicalDeviceSparseProperties; + +typedef struct VkPhysicalDeviceProperties { + uint32_t apiVersion; + uint32_t driverVersion; + uint32_t vendorID; + uint32_t deviceID; + VkPhysicalDeviceType deviceType; + char deviceName[VK_MAX_PHYSICAL_DEVICE_NAME_SIZE]; + uint8_t pipelineCacheUUID[VK_UUID_SIZE]; + VkPhysicalDeviceLimits limits; + VkPhysicalDeviceSparseProperties sparseProperties; +} VkPhysicalDeviceProperties; + +typedef struct VkQueueFamilyProperties { + VkQueueFlags queueFlags; + uint32_t queueCount; + uint32_t timestampValidBits; + VkExtent3D minImageTransferGranularity; +} VkQueueFamilyProperties; + +typedef struct VkMemoryType { + VkMemoryPropertyFlags propertyFlags; + uint32_t heapIndex; +} VkMemoryType; + +typedef struct VkMemoryHeap { + VkDeviceSize size; + VkMemoryHeapFlags flags; +} VkMemoryHeap; + +typedef struct VkPhysicalDeviceMemoryProperties { + uint32_t memoryTypeCount; + VkMemoryType memoryTypes[VK_MAX_MEMORY_TYPES]; + uint32_t memoryHeapCount; + VkMemoryHeap memoryHeaps[VK_MAX_MEMORY_HEAPS]; +} VkPhysicalDeviceMemoryProperties; + +typedef void (VKAPI_PTR *PFN_vkVoidFunction)(void); +typedef struct VkDeviceQueueCreateInfo { + VkStructureType sType; + const void* pNext; + VkDeviceQueueCreateFlags flags; + uint32_t queueFamilyIndex; + uint32_t queueCount; + const float* pQueuePriorities; +} VkDeviceQueueCreateInfo; + +typedef struct VkDeviceCreateInfo { + VkStructureType sType; + const void* pNext; + VkDeviceCreateFlags flags; + uint32_t queueCreateInfoCount; + const VkDeviceQueueCreateInfo* pQueueCreateInfos; + uint32_t enabledLayerCount; + const char* const* ppEnabledLayerNames; + uint32_t enabledExtensionCount; + const char* const* ppEnabledExtensionNames; + const VkPhysicalDeviceFeatures* pEnabledFeatures; +} VkDeviceCreateInfo; + +typedef struct VkExtensionProperties { + char extensionName[VK_MAX_EXTENSION_NAME_SIZE]; + uint32_t specVersion; +} VkExtensionProperties; + +typedef struct VkLayerProperties { + char layerName[VK_MAX_EXTENSION_NAME_SIZE]; + uint32_t specVersion; + uint32_t implementationVersion; + char description[VK_MAX_DESCRIPTION_SIZE]; +} VkLayerProperties; + +typedef struct VkSubmitInfo { + VkStructureType sType; + const void* pNext; + uint32_t 
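/*
 * Typical use of VkPhysicalDeviceMemoryProperties above: pick the first memory
 * type permitted by an object's memoryTypeBits mask (see VkMemoryRequirements
 * below) that also has all required property flags. A hypothetical helper:
 *
 *   static uint32_t find_memory_type(const VkPhysicalDeviceMemoryProperties* mp,
 *                                    uint32_t typeBits,
 *                                    VkMemoryPropertyFlags required) {
 *       uint32_t i;
 *       for (i = 0; i < mp->memoryTypeCount; ++i) {
 *           if ((typeBits & (1u << i)) &&
 *               (mp->memoryTypes[i].propertyFlags & required) == required)
 *               return i;   // suitable for VkMemoryAllocateInfo::memoryTypeIndex
 *       }
 *       return UINT32_MAX;  // no match
 *   }
 */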
waitSemaphoreCount; + const VkSemaphore* pWaitSemaphores; + const VkPipelineStageFlags* pWaitDstStageMask; + uint32_t commandBufferCount; + const VkCommandBuffer* pCommandBuffers; + uint32_t signalSemaphoreCount; + const VkSemaphore* pSignalSemaphores; +} VkSubmitInfo; + +typedef struct VkMemoryAllocateInfo { + VkStructureType sType; + const void* pNext; + VkDeviceSize allocationSize; + uint32_t memoryTypeIndex; +} VkMemoryAllocateInfo; + +typedef struct VkMappedMemoryRange { + VkStructureType sType; + const void* pNext; + VkDeviceMemory memory; + VkDeviceSize offset; + VkDeviceSize size; +} VkMappedMemoryRange; + +typedef struct VkMemoryRequirements { + VkDeviceSize size; + VkDeviceSize alignment; + uint32_t memoryTypeBits; +} VkMemoryRequirements; + +typedef struct VkSparseImageFormatProperties { + VkImageAspectFlags aspectMask; + VkExtent3D imageGranularity; + VkSparseImageFormatFlags flags; +} VkSparseImageFormatProperties; + +typedef struct VkSparseImageMemoryRequirements { + VkSparseImageFormatProperties formatProperties; + uint32_t imageMipTailFirstLod; + VkDeviceSize imageMipTailSize; + VkDeviceSize imageMipTailOffset; + VkDeviceSize imageMipTailStride; +} VkSparseImageMemoryRequirements; + +typedef struct VkSparseMemoryBind { + VkDeviceSize resourceOffset; + VkDeviceSize size; + VkDeviceMemory memory; + VkDeviceSize memoryOffset; + VkSparseMemoryBindFlags flags; +} VkSparseMemoryBind; + +typedef struct VkSparseBufferMemoryBindInfo { + VkBuffer buffer; + uint32_t bindCount; + const VkSparseMemoryBind* pBinds; +} VkSparseBufferMemoryBindInfo; + +typedef struct VkSparseImageOpaqueMemoryBindInfo { + VkImage image; + uint32_t bindCount; + const VkSparseMemoryBind* pBinds; +} VkSparseImageOpaqueMemoryBindInfo; + +typedef struct VkImageSubresource { + VkImageAspectFlags aspectMask; + uint32_t mipLevel; + uint32_t arrayLayer; +} VkImageSubresource; + +typedef struct VkOffset3D { + int32_t x; + int32_t y; + int32_t z; +} VkOffset3D; + +typedef struct VkSparseImageMemoryBind { + VkImageSubresource subresource; + VkOffset3D offset; + VkExtent3D extent; + VkDeviceMemory memory; + VkDeviceSize memoryOffset; + VkSparseMemoryBindFlags flags; +} VkSparseImageMemoryBind; + +typedef struct VkSparseImageMemoryBindInfo { + VkImage image; + uint32_t bindCount; + const VkSparseImageMemoryBind* pBinds; +} VkSparseImageMemoryBindInfo; + +typedef struct VkBindSparseInfo { + VkStructureType sType; + const void* pNext; + uint32_t waitSemaphoreCount; + const VkSemaphore* pWaitSemaphores; + uint32_t bufferBindCount; + const VkSparseBufferMemoryBindInfo* pBufferBinds; + uint32_t imageOpaqueBindCount; + const VkSparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds; + uint32_t imageBindCount; + const VkSparseImageMemoryBindInfo* pImageBinds; + uint32_t signalSemaphoreCount; + const VkSemaphore* pSignalSemaphores; +} VkBindSparseInfo; + +typedef struct VkFenceCreateInfo { + VkStructureType sType; + const void* pNext; + VkFenceCreateFlags flags; +} VkFenceCreateInfo; + +typedef struct VkSemaphoreCreateInfo { + VkStructureType sType; + const void* pNext; + VkSemaphoreCreateFlags flags; +} VkSemaphoreCreateInfo; + +typedef struct VkEventCreateInfo { + VkStructureType sType; + const void* pNext; + VkEventCreateFlags flags; +} VkEventCreateInfo; + +typedef struct VkQueryPoolCreateInfo { + VkStructureType sType; + const void* pNext; + VkQueryPoolCreateFlags flags; + VkQueryType queryType; + uint32_t queryCount; + VkQueryPipelineStatisticFlags pipelineStatistics; +} VkQueryPoolCreateInfo; + +typedef struct 
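+// Usage sketch: allocating and mapping host-visible memory with
+// VkMemoryAllocateInfo and VkMappedMemoryRange above. `gpu`, `device` and
+// `req` (a VkMemoryRequirements filled by one of the query functions in
+// this header) are assumed to be in scope.
+#if 0
+    VkPhysicalDeviceMemoryProperties props;
+    vkGetPhysicalDeviceMemoryProperties(gpu, &props);
+    uint32_t index = UINT32_MAX;
+    for (uint32_t i = 0; i < props.memoryTypeCount; ++i) {
+        // the type must be allowed by the resource and host-visible
+        if ((req.memoryTypeBits & (1u << i)) &&
+            (props.memoryTypes[i].propertyFlags &
+             VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)) {
+            index = i;
+            break;
+        }
+    }
+
+    VkMemoryAllocateInfo ai = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
+    ai.allocationSize  = req.size;
+    ai.memoryTypeIndex = index;
+    VkDeviceMemory memory;
+    vkAllocateMemory(device, &ai, NULL, &memory);
+
+    void* data;
+    vkMapMemory(device, memory, 0, VK_WHOLE_SIZE, 0, &data);
+    // ... write through `data` ...
+    VkMappedMemoryRange range = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
+    range.memory = memory;
+    range.size   = VK_WHOLE_SIZE;
+    vkFlushMappedMemoryRanges(device, 1, &range); // only needed without HOST_COHERENT
+    vkUnmapMemory(device, memory);
+#endif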
VkBufferCreateInfo { + VkStructureType sType; + const void* pNext; + VkBufferCreateFlags flags; + VkDeviceSize size; + VkBufferUsageFlags usage; + VkSharingMode sharingMode; + uint32_t queueFamilyIndexCount; + const uint32_t* pQueueFamilyIndices; +} VkBufferCreateInfo; + +typedef struct VkBufferViewCreateInfo { + VkStructureType sType; + const void* pNext; + VkBufferViewCreateFlags flags; + VkBuffer buffer; + VkFormat format; + VkDeviceSize offset; + VkDeviceSize range; +} VkBufferViewCreateInfo; + +typedef struct VkImageCreateInfo { + VkStructureType sType; + const void* pNext; + VkImageCreateFlags flags; + VkImageType imageType; + VkFormat format; + VkExtent3D extent; + uint32_t mipLevels; + uint32_t arrayLayers; + VkSampleCountFlagBits samples; + VkImageTiling tiling; + VkImageUsageFlags usage; + VkSharingMode sharingMode; + uint32_t queueFamilyIndexCount; + const uint32_t* pQueueFamilyIndices; + VkImageLayout initialLayout; +} VkImageCreateInfo; + +typedef struct VkSubresourceLayout { + VkDeviceSize offset; + VkDeviceSize size; + VkDeviceSize rowPitch; + VkDeviceSize arrayPitch; + VkDeviceSize depthPitch; +} VkSubresourceLayout; + +typedef struct VkComponentMapping { + VkComponentSwizzle r; + VkComponentSwizzle g; + VkComponentSwizzle b; + VkComponentSwizzle a; +} VkComponentMapping; + +typedef struct VkImageSubresourceRange { + VkImageAspectFlags aspectMask; + uint32_t baseMipLevel; + uint32_t levelCount; + uint32_t baseArrayLayer; + uint32_t layerCount; +} VkImageSubresourceRange; + +typedef struct VkImageViewCreateInfo { + VkStructureType sType; + const void* pNext; + VkImageViewCreateFlags flags; + VkImage image; + VkImageViewType viewType; + VkFormat format; + VkComponentMapping components; + VkImageSubresourceRange subresourceRange; +} VkImageViewCreateInfo; + +typedef struct VkShaderModuleCreateInfo { + VkStructureType sType; + const void* pNext; + VkShaderModuleCreateFlags flags; + size_t codeSize; + const uint32_t* pCode; +} VkShaderModuleCreateInfo; + +typedef struct VkPipelineCacheCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineCacheCreateFlags flags; + size_t initialDataSize; + const void* pInitialData; +} VkPipelineCacheCreateInfo; + +typedef struct VkSpecializationMapEntry { + uint32_t constantID; + uint32_t offset; + size_t size; +} VkSpecializationMapEntry; + +typedef struct VkSpecializationInfo { + uint32_t mapEntryCount; + const VkSpecializationMapEntry* pMapEntries; + size_t dataSize; + const void* pData; +} VkSpecializationInfo; + +typedef struct VkPipelineShaderStageCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineShaderStageCreateFlags flags; + VkShaderStageFlagBits stage; + VkShaderModule module; + const char* pName; + const VkSpecializationInfo* pSpecializationInfo; +} VkPipelineShaderStageCreateInfo; + +typedef struct VkVertexInputBindingDescription { + uint32_t binding; + uint32_t stride; + VkVertexInputRate inputRate; +} VkVertexInputBindingDescription; + +typedef struct VkVertexInputAttributeDescription { + uint32_t location; + uint32_t binding; + VkFormat format; + uint32_t offset; +} VkVertexInputAttributeDescription; + +typedef struct VkPipelineVertexInputStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineVertexInputStateCreateFlags flags; + uint32_t vertexBindingDescriptionCount; + const VkVertexInputBindingDescription* pVertexBindingDescriptions; + uint32_t vertexAttributeDescriptionCount; + const VkVertexInputAttributeDescription* pVertexAttributeDescriptions; +} 
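+// Usage sketch: a 2D RGBA texture plus a color view over it, using
+// VkImageCreateInfo and VkImageViewCreateInfo above; `device` is assumed
+// and memory binding is omitted for brevity.
+#if 0
+    VkImageCreateInfo ici = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
+    ici.imageType     = VK_IMAGE_TYPE_2D;
+    ici.format        = VK_FORMAT_R8G8B8A8_UNORM;
+    ici.extent.width  = 256;
+    ici.extent.height = 256;
+    ici.extent.depth  = 1;
+    ici.mipLevels     = 1;
+    ici.arrayLayers   = 1;
+    ici.samples       = VK_SAMPLE_COUNT_1_BIT;
+    ici.tiling        = VK_IMAGE_TILING_OPTIMAL;
+    ici.usage         = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+    ici.sharingMode   = VK_SHARING_MODE_EXCLUSIVE;
+    ici.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+    VkImage image;
+    vkCreateImage(device, &ici, NULL, &image);
+
+    VkImageViewCreateInfo vci = { VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO };
+    vci.image    = image;
+    vci.viewType = VK_IMAGE_VIEW_TYPE_2D;
+    vci.format   = ici.format;            // zeroed components = IDENTITY swizzle
+    vci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+    vci.subresourceRange.levelCount = 1;
+    vci.subresourceRange.layerCount = 1;
+    VkImageView view;
+    vkCreateImageView(device, &vci, NULL, &view);
+#endif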
VkPipelineVertexInputStateCreateInfo; + +typedef struct VkPipelineInputAssemblyStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineInputAssemblyStateCreateFlags flags; + VkPrimitiveTopology topology; + VkBool32 primitiveRestartEnable; +} VkPipelineInputAssemblyStateCreateInfo; + +typedef struct VkPipelineTessellationStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineTessellationStateCreateFlags flags; + uint32_t patchControlPoints; +} VkPipelineTessellationStateCreateInfo; + +typedef struct VkViewport { + float x; + float y; + float width; + float height; + float minDepth; + float maxDepth; +} VkViewport; + +typedef struct VkOffset2D { + int32_t x; + int32_t y; +} VkOffset2D; + +typedef struct VkExtent2D { + uint32_t width; + uint32_t height; +} VkExtent2D; + +typedef struct VkRect2D { + VkOffset2D offset; + VkExtent2D extent; +} VkRect2D; + +typedef struct VkPipelineViewportStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineViewportStateCreateFlags flags; + uint32_t viewportCount; + const VkViewport* pViewports; + uint32_t scissorCount; + const VkRect2D* pScissors; +} VkPipelineViewportStateCreateInfo; + +typedef struct VkPipelineRasterizationStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineRasterizationStateCreateFlags flags; + VkBool32 depthClampEnable; + VkBool32 rasterizerDiscardEnable; + VkPolygonMode polygonMode; + VkCullModeFlags cullMode; + VkFrontFace frontFace; + VkBool32 depthBiasEnable; + float depthBiasConstantFactor; + float depthBiasClamp; + float depthBiasSlopeFactor; + float lineWidth; +} VkPipelineRasterizationStateCreateInfo; + +typedef struct VkPipelineMultisampleStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineMultisampleStateCreateFlags flags; + VkSampleCountFlagBits rasterizationSamples; + VkBool32 sampleShadingEnable; + float minSampleShading; + const VkSampleMask* pSampleMask; + VkBool32 alphaToCoverageEnable; + VkBool32 alphaToOneEnable; +} VkPipelineMultisampleStateCreateInfo; + +typedef struct VkStencilOpState { + VkStencilOp failOp; + VkStencilOp passOp; + VkStencilOp depthFailOp; + VkCompareOp compareOp; + uint32_t compareMask; + uint32_t writeMask; + uint32_t reference; +} VkStencilOpState; + +typedef struct VkPipelineDepthStencilStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineDepthStencilStateCreateFlags flags; + VkBool32 depthTestEnable; + VkBool32 depthWriteEnable; + VkCompareOp depthCompareOp; + VkBool32 depthBoundsTestEnable; + VkBool32 stencilTestEnable; + VkStencilOpState front; + VkStencilOpState back; + float minDepthBounds; + float maxDepthBounds; +} VkPipelineDepthStencilStateCreateInfo; + +typedef struct VkPipelineColorBlendAttachmentState { + VkBool32 blendEnable; + VkBlendFactor srcColorBlendFactor; + VkBlendFactor dstColorBlendFactor; + VkBlendOp colorBlendOp; + VkBlendFactor srcAlphaBlendFactor; + VkBlendFactor dstAlphaBlendFactor; + VkBlendOp alphaBlendOp; + VkColorComponentFlags colorWriteMask; +} VkPipelineColorBlendAttachmentState; + +typedef struct VkPipelineColorBlendStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineColorBlendStateCreateFlags flags; + VkBool32 logicOpEnable; + VkLogicOp logicOp; + uint32_t attachmentCount; + const VkPipelineColorBlendAttachmentState* pAttachments; + float blendConstants[4]; +} VkPipelineColorBlendStateCreateInfo; + +typedef struct VkPipelineDynamicStateCreateInfo { + VkStructureType sType; + const void* pNext; + 
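+// Usage sketch: two of the fixed-function blocks above, filled the way a
+// VkGraphicsPipelineCreateInfo (declared below) would reference them; the
+// 800x600 extent is an arbitrary example value.
+#if 0
+    VkPipelineInputAssemblyStateCreateInfo ia =
+        { VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO };
+    ia.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
+
+    VkViewport vp      = { 0.0f, 0.0f, 800.0f, 600.0f, 0.0f, 1.0f };
+    VkRect2D   scissor = { { 0, 0 }, { 800, 600 } };
+    VkPipelineViewportStateCreateInfo vps =
+        { VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO };
+    vps.viewportCount = 1;
+    vps.pViewports    = &vp;
+    vps.scissorCount  = 1;
+    vps.pScissors     = &scissor;
+#endif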
VkPipelineDynamicStateCreateFlags flags; + uint32_t dynamicStateCount; + const VkDynamicState* pDynamicStates; +} VkPipelineDynamicStateCreateInfo; + +typedef struct VkGraphicsPipelineCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineCreateFlags flags; + uint32_t stageCount; + const VkPipelineShaderStageCreateInfo* pStages; + const VkPipelineVertexInputStateCreateInfo* pVertexInputState; + const VkPipelineInputAssemblyStateCreateInfo* pInputAssemblyState; + const VkPipelineTessellationStateCreateInfo* pTessellationState; + const VkPipelineViewportStateCreateInfo* pViewportState; + const VkPipelineRasterizationStateCreateInfo* pRasterizationState; + const VkPipelineMultisampleStateCreateInfo* pMultisampleState; + const VkPipelineDepthStencilStateCreateInfo* pDepthStencilState; + const VkPipelineColorBlendStateCreateInfo* pColorBlendState; + const VkPipelineDynamicStateCreateInfo* pDynamicState; + VkPipelineLayout layout; + VkRenderPass renderPass; + uint32_t subpass; + VkPipeline basePipelineHandle; + int32_t basePipelineIndex; +} VkGraphicsPipelineCreateInfo; + +typedef struct VkComputePipelineCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineCreateFlags flags; + VkPipelineShaderStageCreateInfo stage; + VkPipelineLayout layout; + VkPipeline basePipelineHandle; + int32_t basePipelineIndex; +} VkComputePipelineCreateInfo; + +typedef struct VkPushConstantRange { + VkShaderStageFlags stageFlags; + uint32_t offset; + uint32_t size; +} VkPushConstantRange; + +typedef struct VkPipelineLayoutCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineLayoutCreateFlags flags; + uint32_t setLayoutCount; + const VkDescriptorSetLayout* pSetLayouts; + uint32_t pushConstantRangeCount; + const VkPushConstantRange* pPushConstantRanges; +} VkPipelineLayoutCreateInfo; + +typedef struct VkSamplerCreateInfo { + VkStructureType sType; + const void* pNext; + VkSamplerCreateFlags flags; + VkFilter magFilter; + VkFilter minFilter; + VkSamplerMipmapMode mipmapMode; + VkSamplerAddressMode addressModeU; + VkSamplerAddressMode addressModeV; + VkSamplerAddressMode addressModeW; + float mipLodBias; + VkBool32 anisotropyEnable; + float maxAnisotropy; + VkBool32 compareEnable; + VkCompareOp compareOp; + float minLod; + float maxLod; + VkBorderColor borderColor; + VkBool32 unnormalizedCoordinates; +} VkSamplerCreateInfo; + +typedef struct VkDescriptorSetLayoutBinding { + uint32_t binding; + VkDescriptorType descriptorType; + uint32_t descriptorCount; + VkShaderStageFlags stageFlags; + const VkSampler* pImmutableSamplers; +} VkDescriptorSetLayoutBinding; + +typedef struct VkDescriptorSetLayoutCreateInfo { + VkStructureType sType; + const void* pNext; + VkDescriptorSetLayoutCreateFlags flags; + uint32_t bindingCount; + const VkDescriptorSetLayoutBinding* pBindings; +} VkDescriptorSetLayoutCreateInfo; + +typedef struct VkDescriptorPoolSize { + VkDescriptorType type; + uint32_t descriptorCount; +} VkDescriptorPoolSize; + +typedef struct VkDescriptorPoolCreateInfo { + VkStructureType sType; + const void* pNext; + VkDescriptorPoolCreateFlags flags; + uint32_t maxSets; + uint32_t poolSizeCount; + const VkDescriptorPoolSize* pPoolSizes; +} VkDescriptorPoolCreateInfo; + +typedef struct VkDescriptorSetAllocateInfo { + VkStructureType sType; + const void* pNext; + VkDescriptorPool descriptorPool; + uint32_t descriptorSetCount; + const VkDescriptorSetLayout* pSetLayouts; +} VkDescriptorSetAllocateInfo; + +typedef struct VkDescriptorImageInfo { + VkSampler sampler; + VkImageView 
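+// Usage sketch: one uniform-buffer binding wired through a descriptor set
+// layout into a pipeline layout, using the create-info structures above;
+// `device` is an assumed handle.
+#if 0
+    VkDescriptorSetLayoutBinding binding = { 0 };
+    binding.binding         = 0;
+    binding.descriptorType  = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+    binding.descriptorCount = 1;
+    binding.stageFlags      = VK_SHADER_STAGE_VERTEX_BIT;
+
+    VkDescriptorSetLayoutCreateInfo sli =
+        { VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO };
+    sli.bindingCount = 1;
+    sli.pBindings    = &binding;
+    VkDescriptorSetLayout setLayout;
+    vkCreateDescriptorSetLayout(device, &sli, NULL, &setLayout);
+
+    VkPipelineLayoutCreateInfo pli =
+        { VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO };
+    pli.setLayoutCount = 1;
+    pli.pSetLayouts    = &setLayout;
+    VkPipelineLayout pipelineLayout;
+    vkCreatePipelineLayout(device, &pli, NULL, &pipelineLayout);
+#endif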
imageView; + VkImageLayout imageLayout; +} VkDescriptorImageInfo; + +typedef struct VkDescriptorBufferInfo { + VkBuffer buffer; + VkDeviceSize offset; + VkDeviceSize range; +} VkDescriptorBufferInfo; + +typedef struct VkWriteDescriptorSet { + VkStructureType sType; + const void* pNext; + VkDescriptorSet dstSet; + uint32_t dstBinding; + uint32_t dstArrayElement; + uint32_t descriptorCount; + VkDescriptorType descriptorType; + const VkDescriptorImageInfo* pImageInfo; + const VkDescriptorBufferInfo* pBufferInfo; + const VkBufferView* pTexelBufferView; +} VkWriteDescriptorSet; + +typedef struct VkCopyDescriptorSet { + VkStructureType sType; + const void* pNext; + VkDescriptorSet srcSet; + uint32_t srcBinding; + uint32_t srcArrayElement; + VkDescriptorSet dstSet; + uint32_t dstBinding; + uint32_t dstArrayElement; + uint32_t descriptorCount; +} VkCopyDescriptorSet; + +typedef struct VkFramebufferCreateInfo { + VkStructureType sType; + const void* pNext; + VkFramebufferCreateFlags flags; + VkRenderPass renderPass; + uint32_t attachmentCount; + const VkImageView* pAttachments; + uint32_t width; + uint32_t height; + uint32_t layers; +} VkFramebufferCreateInfo; + +typedef struct VkAttachmentDescription { + VkAttachmentDescriptionFlags flags; + VkFormat format; + VkSampleCountFlagBits samples; + VkAttachmentLoadOp loadOp; + VkAttachmentStoreOp storeOp; + VkAttachmentLoadOp stencilLoadOp; + VkAttachmentStoreOp stencilStoreOp; + VkImageLayout initialLayout; + VkImageLayout finalLayout; +} VkAttachmentDescription; + +typedef struct VkAttachmentReference { + uint32_t attachment; + VkImageLayout layout; +} VkAttachmentReference; + +typedef struct VkSubpassDescription { + VkSubpassDescriptionFlags flags; + VkPipelineBindPoint pipelineBindPoint; + uint32_t inputAttachmentCount; + const VkAttachmentReference* pInputAttachments; + uint32_t colorAttachmentCount; + const VkAttachmentReference* pColorAttachments; + const VkAttachmentReference* pResolveAttachments; + const VkAttachmentReference* pDepthStencilAttachment; + uint32_t preserveAttachmentCount; + const uint32_t* pPreserveAttachments; +} VkSubpassDescription; + +typedef struct VkSubpassDependency { + uint32_t srcSubpass; + uint32_t dstSubpass; + VkPipelineStageFlags srcStageMask; + VkPipelineStageFlags dstStageMask; + VkAccessFlags srcAccessMask; + VkAccessFlags dstAccessMask; + VkDependencyFlags dependencyFlags; +} VkSubpassDependency; + +typedef struct VkRenderPassCreateInfo { + VkStructureType sType; + const void* pNext; + VkRenderPassCreateFlags flags; + uint32_t attachmentCount; + const VkAttachmentDescription* pAttachments; + uint32_t subpassCount; + const VkSubpassDescription* pSubpasses; + uint32_t dependencyCount; + const VkSubpassDependency* pDependencies; +} VkRenderPassCreateInfo; + +typedef struct VkCommandPoolCreateInfo { + VkStructureType sType; + const void* pNext; + VkCommandPoolCreateFlags flags; + uint32_t queueFamilyIndex; +} VkCommandPoolCreateInfo; + +typedef struct VkCommandBufferAllocateInfo { + VkStructureType sType; + const void* pNext; + VkCommandPool commandPool; + VkCommandBufferLevel level; + uint32_t commandBufferCount; +} VkCommandBufferAllocateInfo; + +typedef struct VkCommandBufferInheritanceInfo { + VkStructureType sType; + const void* pNext; + VkRenderPass renderPass; + uint32_t subpass; + VkFramebuffer framebuffer; + VkBool32 occlusionQueryEnable; + VkQueryControlFlags queryFlags; + VkQueryPipelineStatisticFlags pipelineStatistics; +} VkCommandBufferInheritanceInfo; + +typedef struct VkCommandBufferBeginInfo { + 
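+// Usage sketch: a single-subpass render pass with one color attachment,
+// built from the attachment/subpass structures above. `device` is assumed;
+// the B8G8R8A8 format and the PRESENT_SRC_KHR final layout (provided by
+// VK_KHR_swapchain) are example choices.
+#if 0
+    VkAttachmentDescription color = { 0 };
+    color.format         = VK_FORMAT_B8G8R8A8_UNORM;
+    color.samples        = VK_SAMPLE_COUNT_1_BIT;
+    color.loadOp         = VK_ATTACHMENT_LOAD_OP_CLEAR;
+    color.storeOp        = VK_ATTACHMENT_STORE_OP_STORE;
+    color.stencilLoadOp  = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
+    color.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+    color.initialLayout  = VK_IMAGE_LAYOUT_UNDEFINED;
+    color.finalLayout    = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
+
+    VkAttachmentReference ref = { 0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL };
+    VkSubpassDescription sub = { 0 };
+    sub.pipelineBindPoint    = VK_PIPELINE_BIND_POINT_GRAPHICS;
+    sub.colorAttachmentCount = 1;
+    sub.pColorAttachments    = &ref;
+
+    VkRenderPassCreateInfo rpi = { VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO };
+    rpi.attachmentCount = 1;
+    rpi.pAttachments    = &color;
+    rpi.subpassCount    = 1;
+    rpi.pSubpasses      = &sub;
+    VkRenderPass renderPass;
+    vkCreateRenderPass(device, &rpi, NULL, &renderPass);
+#endif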
VkStructureType sType; + const void* pNext; + VkCommandBufferUsageFlags flags; + const VkCommandBufferInheritanceInfo* pInheritanceInfo; +} VkCommandBufferBeginInfo; + +typedef struct VkBufferCopy { + VkDeviceSize srcOffset; + VkDeviceSize dstOffset; + VkDeviceSize size; +} VkBufferCopy; + +typedef struct VkImageSubresourceLayers { + VkImageAspectFlags aspectMask; + uint32_t mipLevel; + uint32_t baseArrayLayer; + uint32_t layerCount; +} VkImageSubresourceLayers; + +typedef struct VkImageCopy { + VkImageSubresourceLayers srcSubresource; + VkOffset3D srcOffset; + VkImageSubresourceLayers dstSubresource; + VkOffset3D dstOffset; + VkExtent3D extent; +} VkImageCopy; + +typedef struct VkImageBlit { + VkImageSubresourceLayers srcSubresource; + VkOffset3D srcOffsets[2]; + VkImageSubresourceLayers dstSubresource; + VkOffset3D dstOffsets[2]; +} VkImageBlit; + +typedef struct VkBufferImageCopy { + VkDeviceSize bufferOffset; + uint32_t bufferRowLength; + uint32_t bufferImageHeight; + VkImageSubresourceLayers imageSubresource; + VkOffset3D imageOffset; + VkExtent3D imageExtent; +} VkBufferImageCopy; + +typedef union VkClearColorValue { + float float32[4]; + int32_t int32[4]; + uint32_t uint32[4]; +} VkClearColorValue; + +typedef struct VkClearDepthStencilValue { + float depth; + uint32_t stencil; +} VkClearDepthStencilValue; + +typedef union VkClearValue { + VkClearColorValue color; + VkClearDepthStencilValue depthStencil; +} VkClearValue; + +typedef struct VkClearAttachment { + VkImageAspectFlags aspectMask; + uint32_t colorAttachment; + VkClearValue clearValue; +} VkClearAttachment; + +typedef struct VkClearRect { + VkRect2D rect; + uint32_t baseArrayLayer; + uint32_t layerCount; +} VkClearRect; + +typedef struct VkImageResolve { + VkImageSubresourceLayers srcSubresource; + VkOffset3D srcOffset; + VkImageSubresourceLayers dstSubresource; + VkOffset3D dstOffset; + VkExtent3D extent; +} VkImageResolve; + +typedef struct VkMemoryBarrier { + VkStructureType sType; + const void* pNext; + VkAccessFlags srcAccessMask; + VkAccessFlags dstAccessMask; +} VkMemoryBarrier; + +typedef struct VkBufferMemoryBarrier { + VkStructureType sType; + const void* pNext; + VkAccessFlags srcAccessMask; + VkAccessFlags dstAccessMask; + uint32_t srcQueueFamilyIndex; + uint32_t dstQueueFamilyIndex; + VkBuffer buffer; + VkDeviceSize offset; + VkDeviceSize size; +} VkBufferMemoryBarrier; + +typedef struct VkImageMemoryBarrier { + VkStructureType sType; + const void* pNext; + VkAccessFlags srcAccessMask; + VkAccessFlags dstAccessMask; + VkImageLayout oldLayout; + VkImageLayout newLayout; + uint32_t srcQueueFamilyIndex; + uint32_t dstQueueFamilyIndex; + VkImage image; + VkImageSubresourceRange subresourceRange; +} VkImageMemoryBarrier; + +typedef struct VkRenderPassBeginInfo { + VkStructureType sType; + const void* pNext; + VkRenderPass renderPass; + VkFramebuffer framebuffer; + VkRect2D renderArea; + uint32_t clearValueCount; + const VkClearValue* pClearValues; +} VkRenderPassBeginInfo; + +typedef struct VkDispatchIndirectCommand { + uint32_t x; + uint32_t y; + uint32_t z; +} VkDispatchIndirectCommand; + +typedef struct VkDrawIndexedIndirectCommand { + uint32_t indexCount; + uint32_t instanceCount; + uint32_t firstIndex; + int32_t vertexOffset; + uint32_t firstInstance; +} VkDrawIndexedIndirectCommand; + +typedef struct VkDrawIndirectCommand { + uint32_t vertexCount; + uint32_t instanceCount; + uint32_t firstVertex; + uint32_t firstInstance; +} VkDrawIndirectCommand; + + +typedef VkResult (VKAPI_PTR *PFN_vkCreateInstance)(const 
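+// Usage sketch: an UNDEFINED -> TRANSFER_DST_OPTIMAL layout transition
+// recorded with VkImageMemoryBarrier (declared above) and
+// vkCmdPipelineBarrier; `cmd` and `image` are assumed handles.
+#if 0
+    VkImageMemoryBarrier barrier = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER };
+    barrier.srcAccessMask       = 0;
+    barrier.dstAccessMask       = VK_ACCESS_TRANSFER_WRITE_BIT;
+    barrier.oldLayout           = VK_IMAGE_LAYOUT_UNDEFINED;
+    barrier.newLayout           = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
+    barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+    barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+    barrier.image               = image;
+    barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+    barrier.subresourceRange.levelCount = 1;
+    barrier.subresourceRange.layerCount = 1;
+    vkCmdPipelineBarrier(cmd,
+                         VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
+                         VK_PIPELINE_STAGE_TRANSFER_BIT,
+                         0,            // dependency flags
+                         0, NULL,      // global memory barriers
+                         0, NULL,      // buffer memory barriers
+                         1, &barrier); // image memory barriers
+#endif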
VkInstanceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkInstance* pInstance); +typedef void (VKAPI_PTR *PFN_vkDestroyInstance)(VkInstance instance, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkEnumeratePhysicalDevices)(VkInstance instance, uint32_t* pPhysicalDeviceCount, VkPhysicalDevice* pPhysicalDevices); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFeatures)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures* pFeatures); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFormatProperties)(VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties* pFormatProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceImageFormatProperties)(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkImageTiling tiling, VkImageUsageFlags usage, VkImageCreateFlags flags, VkImageFormatProperties* pImageFormatProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceProperties)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties* pProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceQueueFamilyProperties)(VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties* pQueueFamilyProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceMemoryProperties)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties* pMemoryProperties); +typedef PFN_vkVoidFunction (VKAPI_PTR *PFN_vkGetInstanceProcAddr)(VkInstance instance, const char* pName); +typedef PFN_vkVoidFunction (VKAPI_PTR *PFN_vkGetDeviceProcAddr)(VkDevice device, const char* pName); +typedef VkResult (VKAPI_PTR *PFN_vkCreateDevice)(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDevice* pDevice); +typedef void (VKAPI_PTR *PFN_vkDestroyDevice)(VkDevice device, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkEnumerateInstanceExtensionProperties)(const char* pLayerName, uint32_t* pPropertyCount, VkExtensionProperties* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkEnumerateDeviceExtensionProperties)(VkPhysicalDevice physicalDevice, const char* pLayerName, uint32_t* pPropertyCount, VkExtensionProperties* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkEnumerateInstanceLayerProperties)(uint32_t* pPropertyCount, VkLayerProperties* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkEnumerateDeviceLayerProperties)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkLayerProperties* pProperties); +typedef void (VKAPI_PTR *PFN_vkGetDeviceQueue)(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue* pQueue); +typedef VkResult (VKAPI_PTR *PFN_vkQueueSubmit)(VkQueue queue, uint32_t submitCount, const VkSubmitInfo* pSubmits, VkFence fence); +typedef VkResult (VKAPI_PTR *PFN_vkQueueWaitIdle)(VkQueue queue); +typedef VkResult (VKAPI_PTR *PFN_vkDeviceWaitIdle)(VkDevice device); +typedef VkResult (VKAPI_PTR *PFN_vkAllocateMemory)(VkDevice device, const VkMemoryAllocateInfo* pAllocateInfo, const VkAllocationCallbacks* pAllocator, VkDeviceMemory* pMemory); +typedef void (VKAPI_PTR *PFN_vkFreeMemory)(VkDevice device, VkDeviceMemory memory, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkMapMemory)(VkDevice device, VkDeviceMemory memory, VkDeviceSize offset, VkDeviceSize size, VkMemoryMapFlags flags, void** ppData); +typedef void (VKAPI_PTR *PFN_vkUnmapMemory)(VkDevice device, VkDeviceMemory memory); +typedef VkResult (VKAPI_PTR 
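+// Usage sketch: the PFN_* typedefs exist so entry points can be fetched at
+// run time; device-level functions resolved via vkGetDeviceProcAddr bypass
+// the loader's per-call dispatch. `instance` and `device` are assumed.
+#if 0
+    PFN_vkEnumeratePhysicalDevices pfnEnumerate =
+        (PFN_vkEnumeratePhysicalDevices)
+            vkGetInstanceProcAddr(instance, "vkEnumeratePhysicalDevices");
+    PFN_vkQueueSubmit pfnQueueSubmit =
+        (PFN_vkQueueSubmit)vkGetDeviceProcAddr(device, "vkQueueSubmit");
+    if (pfnEnumerate == NULL || pfnQueueSubmit == NULL) {
+        // entry point unavailable in this implementation
+    }
+#endif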
*PFN_vkFlushMappedMemoryRanges)(VkDevice device, uint32_t memoryRangeCount, const VkMappedMemoryRange* pMemoryRanges); +typedef VkResult (VKAPI_PTR *PFN_vkInvalidateMappedMemoryRanges)(VkDevice device, uint32_t memoryRangeCount, const VkMappedMemoryRange* pMemoryRanges); +typedef void (VKAPI_PTR *PFN_vkGetDeviceMemoryCommitment)(VkDevice device, VkDeviceMemory memory, VkDeviceSize* pCommittedMemoryInBytes); +typedef VkResult (VKAPI_PTR *PFN_vkBindBufferMemory)(VkDevice device, VkBuffer buffer, VkDeviceMemory memory, VkDeviceSize memoryOffset); +typedef VkResult (VKAPI_PTR *PFN_vkBindImageMemory)(VkDevice device, VkImage image, VkDeviceMemory memory, VkDeviceSize memoryOffset); +typedef void (VKAPI_PTR *PFN_vkGetBufferMemoryRequirements)(VkDevice device, VkBuffer buffer, VkMemoryRequirements* pMemoryRequirements); +typedef void (VKAPI_PTR *PFN_vkGetImageMemoryRequirements)(VkDevice device, VkImage image, VkMemoryRequirements* pMemoryRequirements); +typedef void (VKAPI_PTR *PFN_vkGetImageSparseMemoryRequirements)(VkDevice device, VkImage image, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements* pSparseMemoryRequirements); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceSparseImageFormatProperties)(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkSampleCountFlagBits samples, VkImageUsageFlags usage, VkImageTiling tiling, uint32_t* pPropertyCount, VkSparseImageFormatProperties* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkQueueBindSparse)(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo* pBindInfo, VkFence fence); +typedef VkResult (VKAPI_PTR *PFN_vkCreateFence)(VkDevice device, const VkFenceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence); +typedef void (VKAPI_PTR *PFN_vkDestroyFence)(VkDevice device, VkFence fence, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkResetFences)(VkDevice device, uint32_t fenceCount, const VkFence* pFences); +typedef VkResult (VKAPI_PTR *PFN_vkGetFenceStatus)(VkDevice device, VkFence fence); +typedef VkResult (VKAPI_PTR *PFN_vkWaitForFences)(VkDevice device, uint32_t fenceCount, const VkFence* pFences, VkBool32 waitAll, uint64_t timeout); +typedef VkResult (VKAPI_PTR *PFN_vkCreateSemaphore)(VkDevice device, const VkSemaphoreCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSemaphore* pSemaphore); +typedef void (VKAPI_PTR *PFN_vkDestroySemaphore)(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateEvent)(VkDevice device, const VkEventCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkEvent* pEvent); +typedef void (VKAPI_PTR *PFN_vkDestroyEvent)(VkDevice device, VkEvent event, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkGetEventStatus)(VkDevice device, VkEvent event); +typedef VkResult (VKAPI_PTR *PFN_vkSetEvent)(VkDevice device, VkEvent event); +typedef VkResult (VKAPI_PTR *PFN_vkResetEvent)(VkDevice device, VkEvent event); +typedef VkResult (VKAPI_PTR *PFN_vkCreateQueryPool)(VkDevice device, const VkQueryPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkQueryPool* pQueryPool); +typedef void (VKAPI_PTR *PFN_vkDestroyQueryPool)(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkGetQueryPoolResults)(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, size_t dataSize, void* pData, 
VkDeviceSize stride, VkQueryResultFlags flags); +typedef VkResult (VKAPI_PTR *PFN_vkCreateBuffer)(VkDevice device, const VkBufferCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBuffer* pBuffer); +typedef void (VKAPI_PTR *PFN_vkDestroyBuffer)(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateBufferView)(VkDevice device, const VkBufferViewCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBufferView* pView); +typedef void (VKAPI_PTR *PFN_vkDestroyBufferView)(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateImage)(VkDevice device, const VkImageCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkImage* pImage); +typedef void (VKAPI_PTR *PFN_vkDestroyImage)(VkDevice device, VkImage image, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkGetImageSubresourceLayout)(VkDevice device, VkImage image, const VkImageSubresource* pSubresource, VkSubresourceLayout* pLayout); +typedef VkResult (VKAPI_PTR *PFN_vkCreateImageView)(VkDevice device, const VkImageViewCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkImageView* pView); +typedef void (VKAPI_PTR *PFN_vkDestroyImageView)(VkDevice device, VkImageView imageView, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateShaderModule)(VkDevice device, const VkShaderModuleCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkShaderModule* pShaderModule); +typedef void (VKAPI_PTR *PFN_vkDestroyShaderModule)(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreatePipelineCache)(VkDevice device, const VkPipelineCacheCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineCache* pPipelineCache); +typedef void (VKAPI_PTR *PFN_vkDestroyPipelineCache)(VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkGetPipelineCacheData)(VkDevice device, VkPipelineCache pipelineCache, size_t* pDataSize, void* pData); +typedef VkResult (VKAPI_PTR *PFN_vkMergePipelineCaches)(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache* pSrcCaches); +typedef VkResult (VKAPI_PTR *PFN_vkCreateGraphicsPipelines)(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkGraphicsPipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines); +typedef VkResult (VKAPI_PTR *PFN_vkCreateComputePipelines)(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkComputePipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines); +typedef void (VKAPI_PTR *PFN_vkDestroyPipeline)(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreatePipelineLayout)(VkDevice device, const VkPipelineLayoutCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineLayout* pPipelineLayout); +typedef void (VKAPI_PTR *PFN_vkDestroyPipelineLayout)(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateSampler)(VkDevice device, const VkSamplerCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSampler* pSampler); +typedef void (VKAPI_PTR 
*PFN_vkDestroySampler)(VkDevice device, VkSampler sampler, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateDescriptorSetLayout)(VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorSetLayout* pSetLayout); +typedef void (VKAPI_PTR *PFN_vkDestroyDescriptorSetLayout)(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateDescriptorPool)(VkDevice device, const VkDescriptorPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorPool* pDescriptorPool); +typedef void (VKAPI_PTR *PFN_vkDestroyDescriptorPool)(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkResetDescriptorPool)(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags); +typedef VkResult (VKAPI_PTR *PFN_vkAllocateDescriptorSets)(VkDevice device, const VkDescriptorSetAllocateInfo* pAllocateInfo, VkDescriptorSet* pDescriptorSets); +typedef VkResult (VKAPI_PTR *PFN_vkFreeDescriptorSets)(VkDevice device, VkDescriptorPool descriptorPool, uint32_t descriptorSetCount, const VkDescriptorSet* pDescriptorSets); +typedef void (VKAPI_PTR *PFN_vkUpdateDescriptorSets)(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet* pDescriptorWrites, uint32_t descriptorCopyCount, const VkCopyDescriptorSet* pDescriptorCopies); +typedef VkResult (VKAPI_PTR *PFN_vkCreateFramebuffer)(VkDevice device, const VkFramebufferCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkFramebuffer* pFramebuffer); +typedef void (VKAPI_PTR *PFN_vkDestroyFramebuffer)(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateRenderPass)(VkDevice device, const VkRenderPassCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass); +typedef void (VKAPI_PTR *PFN_vkDestroyRenderPass)(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkGetRenderAreaGranularity)(VkDevice device, VkRenderPass renderPass, VkExtent2D* pGranularity); +typedef VkResult (VKAPI_PTR *PFN_vkCreateCommandPool)(VkDevice device, const VkCommandPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkCommandPool* pCommandPool); +typedef void (VKAPI_PTR *PFN_vkDestroyCommandPool)(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkResetCommandPool)(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags); +typedef VkResult (VKAPI_PTR *PFN_vkAllocateCommandBuffers)(VkDevice device, const VkCommandBufferAllocateInfo* pAllocateInfo, VkCommandBuffer* pCommandBuffers); +typedef void (VKAPI_PTR *PFN_vkFreeCommandBuffers)(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer* pCommandBuffers); +typedef VkResult (VKAPI_PTR *PFN_vkBeginCommandBuffer)(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo* pBeginInfo); +typedef VkResult (VKAPI_PTR *PFN_vkEndCommandBuffer)(VkCommandBuffer commandBuffer); +typedef VkResult (VKAPI_PTR *PFN_vkResetCommandBuffer)(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags); +typedef void (VKAPI_PTR *PFN_vkCmdBindPipeline)(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline 
pipeline); +typedef void (VKAPI_PTR *PFN_vkCmdSetViewport)(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport* pViewports); +typedef void (VKAPI_PTR *PFN_vkCmdSetScissor)(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D* pScissors); +typedef void (VKAPI_PTR *PFN_vkCmdSetLineWidth)(VkCommandBuffer commandBuffer, float lineWidth); +typedef void (VKAPI_PTR *PFN_vkCmdSetDepthBias)(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor); +typedef void (VKAPI_PTR *PFN_vkCmdSetBlendConstants)(VkCommandBuffer commandBuffer, const float blendConstants[4]); +typedef void (VKAPI_PTR *PFN_vkCmdSetDepthBounds)(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds); +typedef void (VKAPI_PTR *PFN_vkCmdSetStencilCompareMask)(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask); +typedef void (VKAPI_PTR *PFN_vkCmdSetStencilWriteMask)(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask); +typedef void (VKAPI_PTR *PFN_vkCmdSetStencilReference)(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference); +typedef void (VKAPI_PTR *PFN_vkCmdBindDescriptorSets)(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t firstSet, uint32_t descriptorSetCount, const VkDescriptorSet* pDescriptorSets, uint32_t dynamicOffsetCount, const uint32_t* pDynamicOffsets); +typedef void (VKAPI_PTR *PFN_vkCmdBindIndexBuffer)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType); +typedef void (VKAPI_PTR *PFN_vkCmdBindVertexBuffers)(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer* pBuffers, const VkDeviceSize* pOffsets); +typedef void (VKAPI_PTR *PFN_vkCmdDraw)(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance); +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndexed)(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance); +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndirect)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride); +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndexedIndirect)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride); +typedef void (VKAPI_PTR *PFN_vkCmdDispatch)(VkCommandBuffer commandBuffer, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ); +typedef void (VKAPI_PTR *PFN_vkCmdDispatchIndirect)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset); +typedef void (VKAPI_PTR *PFN_vkCmdCopyBuffer)(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferCopy* pRegions); +typedef void (VKAPI_PTR *PFN_vkCmdCopyImage)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy* pRegions); +typedef void (VKAPI_PTR *PFN_vkCmdBlitImage)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit* pRegions, VkFilter filter); +typedef void (VKAPI_PTR *PFN_vkCmdCopyBufferToImage)(VkCommandBuffer commandBuffer, VkBuffer 
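+// Usage sketch: recording a tightly packed staging-buffer upload with
+// vkCmdCopyBufferToImage; `cmd`, `staging` and `image` are assumed, and the
+// image must already be in TRANSFER_DST_OPTIMAL (see the barrier sketch above).
+#if 0
+    VkBufferImageCopy region = { 0 };   // bufferRowLength 0 = tightly packed
+    region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+    region.imageSubresource.layerCount = 1;
+    region.imageExtent.width  = 256;
+    region.imageExtent.height = 256;
+    region.imageExtent.depth  = 1;
+    vkCmdCopyBufferToImage(cmd, staging, image,
+                           VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
+#endif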
srcBuffer, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkBufferImageCopy* pRegions); +typedef void (VKAPI_PTR *PFN_vkCmdCopyImageToBuffer)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy* pRegions); +typedef void (VKAPI_PTR *PFN_vkCmdUpdateBuffer)(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize dataSize, const void* pData); +typedef void (VKAPI_PTR *PFN_vkCmdFillBuffer)(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data); +typedef void (VKAPI_PTR *PFN_vkCmdClearColorImage)(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout, const VkClearColorValue* pColor, uint32_t rangeCount, const VkImageSubresourceRange* pRanges); +typedef void (VKAPI_PTR *PFN_vkCmdClearDepthStencilImage)(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout, const VkClearDepthStencilValue* pDepthStencil, uint32_t rangeCount, const VkImageSubresourceRange* pRanges); +typedef void (VKAPI_PTR *PFN_vkCmdClearAttachments)(VkCommandBuffer commandBuffer, uint32_t attachmentCount, const VkClearAttachment* pAttachments, uint32_t rectCount, const VkClearRect* pRects); +typedef void (VKAPI_PTR *PFN_vkCmdResolveImage)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve* pRegions); +typedef void (VKAPI_PTR *PFN_vkCmdSetEvent)(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask); +typedef void (VKAPI_PTR *PFN_vkCmdResetEvent)(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask); +typedef void (VKAPI_PTR *PFN_vkCmdWaitEvents)(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent* pEvents, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers); +typedef void (VKAPI_PTR *PFN_vkCmdPipelineBarrier)(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers); +typedef void (VKAPI_PTR *PFN_vkCmdBeginQuery)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query, VkQueryControlFlags flags); +typedef void (VKAPI_PTR *PFN_vkCmdEndQuery)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query); +typedef void (VKAPI_PTR *PFN_vkCmdResetQueryPool)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount); +typedef void (VKAPI_PTR *PFN_vkCmdWriteTimestamp)(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t query); +typedef void (VKAPI_PTR *PFN_vkCmdCopyQueryPoolResults)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags); +typedef void (VKAPI_PTR *PFN_vkCmdPushConstants)(VkCommandBuffer commandBuffer, 
VkPipelineLayout layout, VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size, const void* pValues); +typedef void (VKAPI_PTR *PFN_vkCmdBeginRenderPass)(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, VkSubpassContents contents); +typedef void (VKAPI_PTR *PFN_vkCmdNextSubpass)(VkCommandBuffer commandBuffer, VkSubpassContents contents); +typedef void (VKAPI_PTR *PFN_vkCmdEndRenderPass)(VkCommandBuffer commandBuffer); +typedef void (VKAPI_PTR *PFN_vkCmdExecuteCommands)(VkCommandBuffer commandBuffer, uint32_t commandBufferCount, const VkCommandBuffer* pCommandBuffers); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateInstance( + const VkInstanceCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkInstance* pInstance); + +VKAPI_ATTR void VKAPI_CALL vkDestroyInstance( + VkInstance instance, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDevices( + VkInstance instance, + uint32_t* pPhysicalDeviceCount, + VkPhysicalDevice* pPhysicalDevices); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFeatures( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceFeatures* pFeatures); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFormatProperties( + VkPhysicalDevice physicalDevice, + VkFormat format, + VkFormatProperties* pFormatProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceImageFormatProperties( + VkPhysicalDevice physicalDevice, + VkFormat format, + VkImageType type, + VkImageTiling tiling, + VkImageUsageFlags usage, + VkImageCreateFlags flags, + VkImageFormatProperties* pImageFormatProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceProperties( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceProperties* pProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceQueueFamilyProperties( + VkPhysicalDevice physicalDevice, + uint32_t* pQueueFamilyPropertyCount, + VkQueueFamilyProperties* pQueueFamilyProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMemoryProperties( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceMemoryProperties* pMemoryProperties); + +VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr( + VkInstance instance, + const char* pName); + +VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr( + VkDevice device, + const char* pName); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice( + VkPhysicalDevice physicalDevice, + const VkDeviceCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDevice* pDevice); + +VKAPI_ATTR void VKAPI_CALL vkDestroyDevice( + VkDevice device, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties( + const char* pLayerName, + uint32_t* pPropertyCount, + VkExtensionProperties* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties( + VkPhysicalDevice physicalDevice, + const char* pLayerName, + uint32_t* pPropertyCount, + VkExtensionProperties* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties( + uint32_t* pPropertyCount, + VkLayerProperties* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties( + VkPhysicalDevice physicalDevice, + uint32_t* pPropertyCount, + VkLayerProperties* pProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetDeviceQueue( + VkDevice device, + uint32_t queueFamilyIndex, + uint32_t queueIndex, + VkQueue* pQueue); + +VKAPI_ATTR VkResult VKAPI_CALL vkQueueSubmit( + VkQueue queue, + uint32_t 
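+// Usage sketch: submitting one command buffer that waits on an acquire
+// semaphore and signals a render-finished semaphore, then blocking on a
+// fence; `queue`, `cmd`, `imageAvailable`, `renderFinished`, `fence` and
+// `device` are assumed handles.
+#if 0
+    VkPipelineStageFlags waitStage = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
+    VkSubmitInfo si = { VK_STRUCTURE_TYPE_SUBMIT_INFO };
+    si.waitSemaphoreCount   = 1;
+    si.pWaitSemaphores      = &imageAvailable;
+    si.pWaitDstStageMask    = &waitStage;
+    si.commandBufferCount   = 1;
+    si.pCommandBuffers      = &cmd;
+    si.signalSemaphoreCount = 1;
+    si.pSignalSemaphores    = &renderFinished;
+    vkQueueSubmit(queue, 1, &si, fence);
+    vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX);
+#endif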
submitCount, + const VkSubmitInfo* pSubmits, + VkFence fence); + +VKAPI_ATTR VkResult VKAPI_CALL vkQueueWaitIdle( + VkQueue queue); + +VKAPI_ATTR VkResult VKAPI_CALL vkDeviceWaitIdle( + VkDevice device); + +VKAPI_ATTR VkResult VKAPI_CALL vkAllocateMemory( + VkDevice device, + const VkMemoryAllocateInfo* pAllocateInfo, + const VkAllocationCallbacks* pAllocator, + VkDeviceMemory* pMemory); + +VKAPI_ATTR void VKAPI_CALL vkFreeMemory( + VkDevice device, + VkDeviceMemory memory, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkMapMemory( + VkDevice device, + VkDeviceMemory memory, + VkDeviceSize offset, + VkDeviceSize size, + VkMemoryMapFlags flags, + void** ppData); + +VKAPI_ATTR void VKAPI_CALL vkUnmapMemory( + VkDevice device, + VkDeviceMemory memory); + +VKAPI_ATTR VkResult VKAPI_CALL vkFlushMappedMemoryRanges( + VkDevice device, + uint32_t memoryRangeCount, + const VkMappedMemoryRange* pMemoryRanges); + +VKAPI_ATTR VkResult VKAPI_CALL vkInvalidateMappedMemoryRanges( + VkDevice device, + uint32_t memoryRangeCount, + const VkMappedMemoryRange* pMemoryRanges); + +VKAPI_ATTR void VKAPI_CALL vkGetDeviceMemoryCommitment( + VkDevice device, + VkDeviceMemory memory, + VkDeviceSize* pCommittedMemoryInBytes); + +VKAPI_ATTR VkResult VKAPI_CALL vkBindBufferMemory( + VkDevice device, + VkBuffer buffer, + VkDeviceMemory memory, + VkDeviceSize memoryOffset); + +VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory( + VkDevice device, + VkImage image, + VkDeviceMemory memory, + VkDeviceSize memoryOffset); + +VKAPI_ATTR void VKAPI_CALL vkGetBufferMemoryRequirements( + VkDevice device, + VkBuffer buffer, + VkMemoryRequirements* pMemoryRequirements); + +VKAPI_ATTR void VKAPI_CALL vkGetImageMemoryRequirements( + VkDevice device, + VkImage image, + VkMemoryRequirements* pMemoryRequirements); + +VKAPI_ATTR void VKAPI_CALL vkGetImageSparseMemoryRequirements( + VkDevice device, + VkImage image, + uint32_t* pSparseMemoryRequirementCount, + VkSparseImageMemoryRequirements* pSparseMemoryRequirements); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceSparseImageFormatProperties( + VkPhysicalDevice physicalDevice, + VkFormat format, + VkImageType type, + VkSampleCountFlagBits samples, + VkImageUsageFlags usage, + VkImageTiling tiling, + uint32_t* pPropertyCount, + VkSparseImageFormatProperties* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkQueueBindSparse( + VkQueue queue, + uint32_t bindInfoCount, + const VkBindSparseInfo* pBindInfo, + VkFence fence); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateFence( + VkDevice device, + const VkFenceCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkFence* pFence); + +VKAPI_ATTR void VKAPI_CALL vkDestroyFence( + VkDevice device, + VkFence fence, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkResetFences( + VkDevice device, + uint32_t fenceCount, + const VkFence* pFences); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceStatus( + VkDevice device, + VkFence fence); + +VKAPI_ATTR VkResult VKAPI_CALL vkWaitForFences( + VkDevice device, + uint32_t fenceCount, + const VkFence* pFences, + VkBool32 waitAll, + uint64_t timeout); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateSemaphore( + VkDevice device, + const VkSemaphoreCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSemaphore* pSemaphore); + +VKAPI_ATTR void VKAPI_CALL vkDestroySemaphore( + VkDevice device, + VkSemaphore semaphore, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateEvent( + VkDevice device, 
+ const VkEventCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkEvent* pEvent); + +VKAPI_ATTR void VKAPI_CALL vkDestroyEvent( + VkDevice device, + VkEvent event, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetEventStatus( + VkDevice device, + VkEvent event); + +VKAPI_ATTR VkResult VKAPI_CALL vkSetEvent( + VkDevice device, + VkEvent event); + +VKAPI_ATTR VkResult VKAPI_CALL vkResetEvent( + VkDevice device, + VkEvent event); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateQueryPool( + VkDevice device, + const VkQueryPoolCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkQueryPool* pQueryPool); + +VKAPI_ATTR void VKAPI_CALL vkDestroyQueryPool( + VkDevice device, + VkQueryPool queryPool, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetQueryPoolResults( + VkDevice device, + VkQueryPool queryPool, + uint32_t firstQuery, + uint32_t queryCount, + size_t dataSize, + void* pData, + VkDeviceSize stride, + VkQueryResultFlags flags); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateBuffer( + VkDevice device, + const VkBufferCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkBuffer* pBuffer); + +VKAPI_ATTR void VKAPI_CALL vkDestroyBuffer( + VkDevice device, + VkBuffer buffer, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateBufferView( + VkDevice device, + const VkBufferViewCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkBufferView* pView); + +VKAPI_ATTR void VKAPI_CALL vkDestroyBufferView( + VkDevice device, + VkBufferView bufferView, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateImage( + VkDevice device, + const VkImageCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkImage* pImage); + +VKAPI_ATTR void VKAPI_CALL vkDestroyImage( + VkDevice device, + VkImage image, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR void VKAPI_CALL vkGetImageSubresourceLayout( + VkDevice device, + VkImage image, + const VkImageSubresource* pSubresource, + VkSubresourceLayout* pLayout); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateImageView( + VkDevice device, + const VkImageViewCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkImageView* pView); + +VKAPI_ATTR void VKAPI_CALL vkDestroyImageView( + VkDevice device, + VkImageView imageView, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateShaderModule( + VkDevice device, + const VkShaderModuleCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkShaderModule* pShaderModule); + +VKAPI_ATTR void VKAPI_CALL vkDestroyShaderModule( + VkDevice device, + VkShaderModule shaderModule, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineCache( + VkDevice device, + const VkPipelineCacheCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkPipelineCache* pPipelineCache); + +VKAPI_ATTR void VKAPI_CALL vkDestroyPipelineCache( + VkDevice device, + VkPipelineCache pipelineCache, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPipelineCacheData( + VkDevice device, + VkPipelineCache pipelineCache, + size_t* pDataSize, + void* pData); + +VKAPI_ATTR VkResult VKAPI_CALL vkMergePipelineCaches( + VkDevice device, + VkPipelineCache dstCache, + uint32_t srcCacheCount, + const VkPipelineCache* pSrcCaches); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateGraphicsPipelines( + VkDevice device, 
+ VkPipelineCache pipelineCache, + uint32_t createInfoCount, + const VkGraphicsPipelineCreateInfo* pCreateInfos, + const VkAllocationCallbacks* pAllocator, + VkPipeline* pPipelines); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateComputePipelines( + VkDevice device, + VkPipelineCache pipelineCache, + uint32_t createInfoCount, + const VkComputePipelineCreateInfo* pCreateInfos, + const VkAllocationCallbacks* pAllocator, + VkPipeline* pPipelines); + +VKAPI_ATTR void VKAPI_CALL vkDestroyPipeline( + VkDevice device, + VkPipeline pipeline, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineLayout( + VkDevice device, + const VkPipelineLayoutCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkPipelineLayout* pPipelineLayout); + +VKAPI_ATTR void VKAPI_CALL vkDestroyPipelineLayout( + VkDevice device, + VkPipelineLayout pipelineLayout, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateSampler( + VkDevice device, + const VkSamplerCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSampler* pSampler); + +VKAPI_ATTR void VKAPI_CALL vkDestroySampler( + VkDevice device, + VkSampler sampler, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorSetLayout( + VkDevice device, + const VkDescriptorSetLayoutCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDescriptorSetLayout* pSetLayout); + +VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorSetLayout( + VkDevice device, + VkDescriptorSetLayout descriptorSetLayout, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorPool( + VkDevice device, + const VkDescriptorPoolCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDescriptorPool* pDescriptorPool); + +VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorPool( + VkDevice device, + VkDescriptorPool descriptorPool, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkResetDescriptorPool( + VkDevice device, + VkDescriptorPool descriptorPool, + VkDescriptorPoolResetFlags flags); + +VKAPI_ATTR VkResult VKAPI_CALL vkAllocateDescriptorSets( + VkDevice device, + const VkDescriptorSetAllocateInfo* pAllocateInfo, + VkDescriptorSet* pDescriptorSets); + +VKAPI_ATTR VkResult VKAPI_CALL vkFreeDescriptorSets( + VkDevice device, + VkDescriptorPool descriptorPool, + uint32_t descriptorSetCount, + const VkDescriptorSet* pDescriptorSets); + +VKAPI_ATTR void VKAPI_CALL vkUpdateDescriptorSets( + VkDevice device, + uint32_t descriptorWriteCount, + const VkWriteDescriptorSet* pDescriptorWrites, + uint32_t descriptorCopyCount, + const VkCopyDescriptorSet* pDescriptorCopies); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateFramebuffer( + VkDevice device, + const VkFramebufferCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkFramebuffer* pFramebuffer); + +VKAPI_ATTR void VKAPI_CALL vkDestroyFramebuffer( + VkDevice device, + VkFramebuffer framebuffer, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass( + VkDevice device, + const VkRenderPassCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkRenderPass* pRenderPass); + +VKAPI_ATTR void VKAPI_CALL vkDestroyRenderPass( + VkDevice device, + VkRenderPass renderPass, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR void VKAPI_CALL vkGetRenderAreaGranularity( + VkDevice device, + VkRenderPass renderPass, + VkExtent2D* pGranularity); + +VKAPI_ATTR 
VkResult VKAPI_CALL vkCreateCommandPool( + VkDevice device, + const VkCommandPoolCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkCommandPool* pCommandPool); + +VKAPI_ATTR void VKAPI_CALL vkDestroyCommandPool( + VkDevice device, + VkCommandPool commandPool, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkResetCommandPool( + VkDevice device, + VkCommandPool commandPool, + VkCommandPoolResetFlags flags); + +VKAPI_ATTR VkResult VKAPI_CALL vkAllocateCommandBuffers( + VkDevice device, + const VkCommandBufferAllocateInfo* pAllocateInfo, + VkCommandBuffer* pCommandBuffers); + +VKAPI_ATTR void VKAPI_CALL vkFreeCommandBuffers( + VkDevice device, + VkCommandPool commandPool, + uint32_t commandBufferCount, + const VkCommandBuffer* pCommandBuffers); + +VKAPI_ATTR VkResult VKAPI_CALL vkBeginCommandBuffer( + VkCommandBuffer commandBuffer, + const VkCommandBufferBeginInfo* pBeginInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkEndCommandBuffer( + VkCommandBuffer commandBuffer); + +VKAPI_ATTR VkResult VKAPI_CALL vkResetCommandBuffer( + VkCommandBuffer commandBuffer, + VkCommandBufferResetFlags flags); + +VKAPI_ATTR void VKAPI_CALL vkCmdBindPipeline( + VkCommandBuffer commandBuffer, + VkPipelineBindPoint pipelineBindPoint, + VkPipeline pipeline); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetViewport( + VkCommandBuffer commandBuffer, + uint32_t firstViewport, + uint32_t viewportCount, + const VkViewport* pViewports); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetScissor( + VkCommandBuffer commandBuffer, + uint32_t firstScissor, + uint32_t scissorCount, + const VkRect2D* pScissors); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetLineWidth( + VkCommandBuffer commandBuffer, + float lineWidth); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthBias( + VkCommandBuffer commandBuffer, + float depthBiasConstantFactor, + float depthBiasClamp, + float depthBiasSlopeFactor); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetBlendConstants( + VkCommandBuffer commandBuffer, + const float blendConstants[4]); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthBounds( + VkCommandBuffer commandBuffer, + float minDepthBounds, + float maxDepthBounds); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilCompareMask( + VkCommandBuffer commandBuffer, + VkStencilFaceFlags faceMask, + uint32_t compareMask); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilWriteMask( + VkCommandBuffer commandBuffer, + VkStencilFaceFlags faceMask, + uint32_t writeMask); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilReference( + VkCommandBuffer commandBuffer, + VkStencilFaceFlags faceMask, + uint32_t reference); + +VKAPI_ATTR void VKAPI_CALL vkCmdBindDescriptorSets( + VkCommandBuffer commandBuffer, + VkPipelineBindPoint pipelineBindPoint, + VkPipelineLayout layout, + uint32_t firstSet, + uint32_t descriptorSetCount, + const VkDescriptorSet* pDescriptorSets, + uint32_t dynamicOffsetCount, + const uint32_t* pDynamicOffsets); + +VKAPI_ATTR void VKAPI_CALL vkCmdBindIndexBuffer( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + VkIndexType indexType); + +VKAPI_ATTR void VKAPI_CALL vkCmdBindVertexBuffers( + VkCommandBuffer commandBuffer, + uint32_t firstBinding, + uint32_t bindingCount, + const VkBuffer* pBuffers, + const VkDeviceSize* pOffsets); + +VKAPI_ATTR void VKAPI_CALL vkCmdDraw( + VkCommandBuffer commandBuffer, + uint32_t vertexCount, + uint32_t instanceCount, + uint32_t firstVertex, + uint32_t firstInstance); + +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexed( + VkCommandBuffer commandBuffer, + uint32_t indexCount, + uint32_t 
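+// Usage sketch: the canonical record loop for the vkCmd* entry points below;
+// `cmd`, `renderPass`, `framebuffer` and `pipeline` are assumed, and the
+// 800x600 render area is an example value.
+#if 0
+    VkCommandBufferBeginInfo begin = { VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO };
+    vkBeginCommandBuffer(cmd, &begin);
+
+    VkClearValue clear;
+    clear.color.float32[0] = 0.0f;
+    clear.color.float32[1] = 0.0f;
+    clear.color.float32[2] = 0.0f;
+    clear.color.float32[3] = 1.0f;
+
+    VkRenderPassBeginInfo rpb = { VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO };
+    rpb.renderPass               = renderPass;
+    rpb.framebuffer              = framebuffer;
+    rpb.renderArea.extent.width  = 800;
+    rpb.renderArea.extent.height = 600;
+    rpb.clearValueCount          = 1;
+    rpb.pClearValues             = &clear;
+
+    vkCmdBeginRenderPass(cmd, &rpb, VK_SUBPASS_CONTENTS_INLINE);
+    vkCmdBindPipeline(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
+    vkCmdDraw(cmd, 3, 1, 0, 0);   // one unindexed triangle
+    vkCmdEndRenderPass(cmd);
+    vkEndCommandBuffer(cmd);
+#endif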
instanceCount, + uint32_t firstIndex, + int32_t vertexOffset, + uint32_t firstInstance); + +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirect( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + uint32_t drawCount, + uint32_t stride); + +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexedIndirect( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + uint32_t drawCount, + uint32_t stride); + +VKAPI_ATTR void VKAPI_CALL vkCmdDispatch( + VkCommandBuffer commandBuffer, + uint32_t groupCountX, + uint32_t groupCountY, + uint32_t groupCountZ); + +VKAPI_ATTR void VKAPI_CALL vkCmdDispatchIndirect( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyBuffer( + VkCommandBuffer commandBuffer, + VkBuffer srcBuffer, + VkBuffer dstBuffer, + uint32_t regionCount, + const VkBufferCopy* pRegions); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyImage( + VkCommandBuffer commandBuffer, + VkImage srcImage, + VkImageLayout srcImageLayout, + VkImage dstImage, + VkImageLayout dstImageLayout, + uint32_t regionCount, + const VkImageCopy* pRegions); + +VKAPI_ATTR void VKAPI_CALL vkCmdBlitImage( + VkCommandBuffer commandBuffer, + VkImage srcImage, + VkImageLayout srcImageLayout, + VkImage dstImage, + VkImageLayout dstImageLayout, + uint32_t regionCount, + const VkImageBlit* pRegions, + VkFilter filter); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyBufferToImage( + VkCommandBuffer commandBuffer, + VkBuffer srcBuffer, + VkImage dstImage, + VkImageLayout dstImageLayout, + uint32_t regionCount, + const VkBufferImageCopy* pRegions); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyImageToBuffer( + VkCommandBuffer commandBuffer, + VkImage srcImage, + VkImageLayout srcImageLayout, + VkBuffer dstBuffer, + uint32_t regionCount, + const VkBufferImageCopy* pRegions); + +VKAPI_ATTR void VKAPI_CALL vkCmdUpdateBuffer( + VkCommandBuffer commandBuffer, + VkBuffer dstBuffer, + VkDeviceSize dstOffset, + VkDeviceSize dataSize, + const void* pData); + +VKAPI_ATTR void VKAPI_CALL vkCmdFillBuffer( + VkCommandBuffer commandBuffer, + VkBuffer dstBuffer, + VkDeviceSize dstOffset, + VkDeviceSize size, + uint32_t data); + +VKAPI_ATTR void VKAPI_CALL vkCmdClearColorImage( + VkCommandBuffer commandBuffer, + VkImage image, + VkImageLayout imageLayout, + const VkClearColorValue* pColor, + uint32_t rangeCount, + const VkImageSubresourceRange* pRanges); + +VKAPI_ATTR void VKAPI_CALL vkCmdClearDepthStencilImage( + VkCommandBuffer commandBuffer, + VkImage image, + VkImageLayout imageLayout, + const VkClearDepthStencilValue* pDepthStencil, + uint32_t rangeCount, + const VkImageSubresourceRange* pRanges); + +VKAPI_ATTR void VKAPI_CALL vkCmdClearAttachments( + VkCommandBuffer commandBuffer, + uint32_t attachmentCount, + const VkClearAttachment* pAttachments, + uint32_t rectCount, + const VkClearRect* pRects); + +VKAPI_ATTR void VKAPI_CALL vkCmdResolveImage( + VkCommandBuffer commandBuffer, + VkImage srcImage, + VkImageLayout srcImageLayout, + VkImage dstImage, + VkImageLayout dstImageLayout, + uint32_t regionCount, + const VkImageResolve* pRegions); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetEvent( + VkCommandBuffer commandBuffer, + VkEvent event, + VkPipelineStageFlags stageMask); + +VKAPI_ATTR void VKAPI_CALL vkCmdResetEvent( + VkCommandBuffer commandBuffer, + VkEvent event, + VkPipelineStageFlags stageMask); + +VKAPI_ATTR void VKAPI_CALL vkCmdWaitEvents( + VkCommandBuffer commandBuffer, + uint32_t eventCount, + const VkEvent* pEvents, + VkPipelineStageFlags srcStageMask, + 
VkPipelineStageFlags dstStageMask, + uint32_t memoryBarrierCount, + const VkMemoryBarrier* pMemoryBarriers, + uint32_t bufferMemoryBarrierCount, + const VkBufferMemoryBarrier* pBufferMemoryBarriers, + uint32_t imageMemoryBarrierCount, + const VkImageMemoryBarrier* pImageMemoryBarriers); + +VKAPI_ATTR void VKAPI_CALL vkCmdPipelineBarrier( + VkCommandBuffer commandBuffer, + VkPipelineStageFlags srcStageMask, + VkPipelineStageFlags dstStageMask, + VkDependencyFlags dependencyFlags, + uint32_t memoryBarrierCount, + const VkMemoryBarrier* pMemoryBarriers, + uint32_t bufferMemoryBarrierCount, + const VkBufferMemoryBarrier* pBufferMemoryBarriers, + uint32_t imageMemoryBarrierCount, + const VkImageMemoryBarrier* pImageMemoryBarriers); + +VKAPI_ATTR void VKAPI_CALL vkCmdBeginQuery( + VkCommandBuffer commandBuffer, + VkQueryPool queryPool, + uint32_t query, + VkQueryControlFlags flags); + +VKAPI_ATTR void VKAPI_CALL vkCmdEndQuery( + VkCommandBuffer commandBuffer, + VkQueryPool queryPool, + uint32_t query); + +VKAPI_ATTR void VKAPI_CALL vkCmdResetQueryPool( + VkCommandBuffer commandBuffer, + VkQueryPool queryPool, + uint32_t firstQuery, + uint32_t queryCount); + +VKAPI_ATTR void VKAPI_CALL vkCmdWriteTimestamp( + VkCommandBuffer commandBuffer, + VkPipelineStageFlagBits pipelineStage, + VkQueryPool queryPool, + uint32_t query); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyQueryPoolResults( + VkCommandBuffer commandBuffer, + VkQueryPool queryPool, + uint32_t firstQuery, + uint32_t queryCount, + VkBuffer dstBuffer, + VkDeviceSize dstOffset, + VkDeviceSize stride, + VkQueryResultFlags flags); + +VKAPI_ATTR void VKAPI_CALL vkCmdPushConstants( + VkCommandBuffer commandBuffer, + VkPipelineLayout layout, + VkShaderStageFlags stageFlags, + uint32_t offset, + uint32_t size, + const void* pValues); + +VKAPI_ATTR void VKAPI_CALL vkCmdBeginRenderPass( + VkCommandBuffer commandBuffer, + const VkRenderPassBeginInfo* pRenderPassBegin, + VkSubpassContents contents); + +VKAPI_ATTR void VKAPI_CALL vkCmdNextSubpass( + VkCommandBuffer commandBuffer, + VkSubpassContents contents); + +VKAPI_ATTR void VKAPI_CALL vkCmdEndRenderPass( + VkCommandBuffer commandBuffer); + +VKAPI_ATTR void VKAPI_CALL vkCmdExecuteCommands( + VkCommandBuffer commandBuffer, + uint32_t commandBufferCount, + const VkCommandBuffer* pCommandBuffers); +#endif + +#define VK_KHR_surface 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSurfaceKHR) + +#define VK_KHR_SURFACE_SPEC_VERSION 25 +#define VK_KHR_SURFACE_EXTENSION_NAME "VK_KHR_surface" +#define VK_COLORSPACE_SRGB_NONLINEAR_KHR VK_COLOR_SPACE_SRGB_NONLINEAR_KHR + + +typedef enum VkColorSpaceKHR { + VK_COLOR_SPACE_SRGB_NONLINEAR_KHR = 0, + VK_COLOR_SPACE_DISPLAY_P3_NONLINEAR_EXT = 1000104001, + VK_COLOR_SPACE_EXTENDED_SRGB_LINEAR_EXT = 1000104002, + VK_COLOR_SPACE_DCI_P3_LINEAR_EXT = 1000104003, + VK_COLOR_SPACE_DCI_P3_NONLINEAR_EXT = 1000104004, + VK_COLOR_SPACE_BT709_LINEAR_EXT = 1000104005, + VK_COLOR_SPACE_BT709_NONLINEAR_EXT = 1000104006, + VK_COLOR_SPACE_BT2020_LINEAR_EXT = 1000104007, + VK_COLOR_SPACE_HDR10_ST2084_EXT = 1000104008, + VK_COLOR_SPACE_DOLBYVISION_EXT = 1000104009, + VK_COLOR_SPACE_HDR10_HLG_EXT = 1000104010, + VK_COLOR_SPACE_ADOBERGB_LINEAR_EXT = 1000104011, + VK_COLOR_SPACE_ADOBERGB_NONLINEAR_EXT = 1000104012, + VK_COLOR_SPACE_PASS_THROUGH_EXT = 1000104013, + VK_COLOR_SPACE_EXTENDED_SRGB_NONLINEAR_EXT = 1000104014, + VK_COLOR_SPACE_BEGIN_RANGE_KHR = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR, + VK_COLOR_SPACE_END_RANGE_KHR = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR, + VK_COLOR_SPACE_RANGE_SIZE_KHR = 
(VK_COLOR_SPACE_SRGB_NONLINEAR_KHR - VK_COLOR_SPACE_SRGB_NONLINEAR_KHR + 1), + VK_COLOR_SPACE_MAX_ENUM_KHR = 0x7FFFFFFF +} VkColorSpaceKHR; + +typedef enum VkPresentModeKHR { + VK_PRESENT_MODE_IMMEDIATE_KHR = 0, + VK_PRESENT_MODE_MAILBOX_KHR = 1, + VK_PRESENT_MODE_FIFO_KHR = 2, + VK_PRESENT_MODE_FIFO_RELAXED_KHR = 3, + VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR = 1000111000, + VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR = 1000111001, + VK_PRESENT_MODE_BEGIN_RANGE_KHR = VK_PRESENT_MODE_IMMEDIATE_KHR, + VK_PRESENT_MODE_END_RANGE_KHR = VK_PRESENT_MODE_FIFO_RELAXED_KHR, + VK_PRESENT_MODE_RANGE_SIZE_KHR = (VK_PRESENT_MODE_FIFO_RELAXED_KHR - VK_PRESENT_MODE_IMMEDIATE_KHR + 1), + VK_PRESENT_MODE_MAX_ENUM_KHR = 0x7FFFFFFF +} VkPresentModeKHR; + + +typedef enum VkSurfaceTransformFlagBitsKHR { + VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR = 0x00000001, + VK_SURFACE_TRANSFORM_ROTATE_90_BIT_KHR = 0x00000002, + VK_SURFACE_TRANSFORM_ROTATE_180_BIT_KHR = 0x00000004, + VK_SURFACE_TRANSFORM_ROTATE_270_BIT_KHR = 0x00000008, + VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_BIT_KHR = 0x00000010, + VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_90_BIT_KHR = 0x00000020, + VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_180_BIT_KHR = 0x00000040, + VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_270_BIT_KHR = 0x00000080, + VK_SURFACE_TRANSFORM_INHERIT_BIT_KHR = 0x00000100, + VK_SURFACE_TRANSFORM_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkSurfaceTransformFlagBitsKHR; +typedef VkFlags VkSurfaceTransformFlagsKHR; + +typedef enum VkCompositeAlphaFlagBitsKHR { + VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR = 0x00000001, + VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR = 0x00000002, + VK_COMPOSITE_ALPHA_POST_MULTIPLIED_BIT_KHR = 0x00000004, + VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR = 0x00000008, + VK_COMPOSITE_ALPHA_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkCompositeAlphaFlagBitsKHR; +typedef VkFlags VkCompositeAlphaFlagsKHR; + +typedef struct VkSurfaceCapabilitiesKHR { + uint32_t minImageCount; + uint32_t maxImageCount; + VkExtent2D currentExtent; + VkExtent2D minImageExtent; + VkExtent2D maxImageExtent; + uint32_t maxImageArrayLayers; + VkSurfaceTransformFlagsKHR supportedTransforms; + VkSurfaceTransformFlagBitsKHR currentTransform; + VkCompositeAlphaFlagsKHR supportedCompositeAlpha; + VkImageUsageFlags supportedUsageFlags; +} VkSurfaceCapabilitiesKHR; + +typedef struct VkSurfaceFormatKHR { + VkFormat format; + VkColorSpaceKHR colorSpace; +} VkSurfaceFormatKHR; + + +typedef void (VKAPI_PTR *PFN_vkDestroySurfaceKHR)(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, VkSurfaceKHR surface, VkBool32* pSupported); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, VkSurfaceCapabilitiesKHR* pSurfaceCapabilities); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceFormatsKHR)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pSurfaceFormatCount, VkSurfaceFormatKHR* pSurfaceFormats); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfacePresentModesKHR)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pPresentModeCount, VkPresentModeKHR* pPresentModes); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkDestroySurfaceKHR( + VkInstance instance, + VkSurfaceKHR surface, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL 
vkGetPhysicalDeviceSurfaceSupportKHR( + VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex, + VkSurfaceKHR surface, + VkBool32* pSupported); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceCapabilitiesKHR( + VkPhysicalDevice physicalDevice, + VkSurfaceKHR surface, + VkSurfaceCapabilitiesKHR* pSurfaceCapabilities); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceFormatsKHR( + VkPhysicalDevice physicalDevice, + VkSurfaceKHR surface, + uint32_t* pSurfaceFormatCount, + VkSurfaceFormatKHR* pSurfaceFormats); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfacePresentModesKHR( + VkPhysicalDevice physicalDevice, + VkSurfaceKHR surface, + uint32_t* pPresentModeCount, + VkPresentModeKHR* pPresentModes); +#endif + +#define VK_KHR_swapchain 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSwapchainKHR) + +#define VK_KHR_SWAPCHAIN_SPEC_VERSION 68 +#define VK_KHR_SWAPCHAIN_EXTENSION_NAME "VK_KHR_swapchain" + + +typedef enum VkSwapchainCreateFlagBitsKHR { + VK_SWAPCHAIN_CREATE_BIND_SFR_BIT_KHX = 0x00000001, + VK_SWAPCHAIN_CREATE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkSwapchainCreateFlagBitsKHR; +typedef VkFlags VkSwapchainCreateFlagsKHR; + +typedef struct VkSwapchainCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkSwapchainCreateFlagsKHR flags; + VkSurfaceKHR surface; + uint32_t minImageCount; + VkFormat imageFormat; + VkColorSpaceKHR imageColorSpace; + VkExtent2D imageExtent; + uint32_t imageArrayLayers; + VkImageUsageFlags imageUsage; + VkSharingMode imageSharingMode; + uint32_t queueFamilyIndexCount; + const uint32_t* pQueueFamilyIndices; + VkSurfaceTransformFlagBitsKHR preTransform; + VkCompositeAlphaFlagBitsKHR compositeAlpha; + VkPresentModeKHR presentMode; + VkBool32 clipped; + VkSwapchainKHR oldSwapchain; +} VkSwapchainCreateInfoKHR; + +typedef struct VkPresentInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t waitSemaphoreCount; + const VkSemaphore* pWaitSemaphores; + uint32_t swapchainCount; + const VkSwapchainKHR* pSwapchains; + const uint32_t* pImageIndices; + VkResult* pResults; +} VkPresentInfoKHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkCreateSwapchainKHR)(VkDevice device, const VkSwapchainCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSwapchainKHR* pSwapchain); +typedef void (VKAPI_PTR *PFN_vkDestroySwapchainKHR)(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkGetSwapchainImagesKHR)(VkDevice device, VkSwapchainKHR swapchain, uint32_t* pSwapchainImageCount, VkImage* pSwapchainImages); +typedef VkResult (VKAPI_PTR *PFN_vkAcquireNextImageKHR)(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout, VkSemaphore semaphore, VkFence fence, uint32_t* pImageIndex); +typedef VkResult (VKAPI_PTR *PFN_vkQueuePresentKHR)(VkQueue queue, const VkPresentInfoKHR* pPresentInfo); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateSwapchainKHR( + VkDevice device, + const VkSwapchainCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSwapchainKHR* pSwapchain); + +VKAPI_ATTR void VKAPI_CALL vkDestroySwapchainKHR( + VkDevice device, + VkSwapchainKHR swapchain, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainImagesKHR( + VkDevice device, + VkSwapchainKHR swapchain, + uint32_t* pSwapchainImageCount, + VkImage* pSwapchainImages); + +VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImageKHR( + VkDevice device, + VkSwapchainKHR swapchain, + uint64_t timeout, + VkSemaphore 
semaphore, + VkFence fence, + uint32_t* pImageIndex); + +VKAPI_ATTR VkResult VKAPI_CALL vkQueuePresentKHR( + VkQueue queue, + const VkPresentInfoKHR* pPresentInfo); +#endif + +#define VK_KHR_display 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDisplayKHR) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDisplayModeKHR) + +#define VK_KHR_DISPLAY_SPEC_VERSION 21 +#define VK_KHR_DISPLAY_EXTENSION_NAME "VK_KHR_display" + + +typedef enum VkDisplayPlaneAlphaFlagBitsKHR { + VK_DISPLAY_PLANE_ALPHA_OPAQUE_BIT_KHR = 0x00000001, + VK_DISPLAY_PLANE_ALPHA_GLOBAL_BIT_KHR = 0x00000002, + VK_DISPLAY_PLANE_ALPHA_PER_PIXEL_BIT_KHR = 0x00000004, + VK_DISPLAY_PLANE_ALPHA_PER_PIXEL_PREMULTIPLIED_BIT_KHR = 0x00000008, + VK_DISPLAY_PLANE_ALPHA_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkDisplayPlaneAlphaFlagBitsKHR; +typedef VkFlags VkDisplayPlaneAlphaFlagsKHR; +typedef VkFlags VkDisplayModeCreateFlagsKHR; +typedef VkFlags VkDisplaySurfaceCreateFlagsKHR; + +typedef struct VkDisplayPropertiesKHR { + VkDisplayKHR display; + const char* displayName; + VkExtent2D physicalDimensions; + VkExtent2D physicalResolution; + VkSurfaceTransformFlagsKHR supportedTransforms; + VkBool32 planeReorderPossible; + VkBool32 persistentContent; +} VkDisplayPropertiesKHR; + +typedef struct VkDisplayModeParametersKHR { + VkExtent2D visibleRegion; + uint32_t refreshRate; +} VkDisplayModeParametersKHR; + +typedef struct VkDisplayModePropertiesKHR { + VkDisplayModeKHR displayMode; + VkDisplayModeParametersKHR parameters; +} VkDisplayModePropertiesKHR; + +typedef struct VkDisplayModeCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkDisplayModeCreateFlagsKHR flags; + VkDisplayModeParametersKHR parameters; +} VkDisplayModeCreateInfoKHR; + +typedef struct VkDisplayPlaneCapabilitiesKHR { + VkDisplayPlaneAlphaFlagsKHR supportedAlpha; + VkOffset2D minSrcPosition; + VkOffset2D maxSrcPosition; + VkExtent2D minSrcExtent; + VkExtent2D maxSrcExtent; + VkOffset2D minDstPosition; + VkOffset2D maxDstPosition; + VkExtent2D minDstExtent; + VkExtent2D maxDstExtent; +} VkDisplayPlaneCapabilitiesKHR; + +typedef struct VkDisplayPlanePropertiesKHR { + VkDisplayKHR currentDisplay; + uint32_t currentStackIndex; +} VkDisplayPlanePropertiesKHR; + +typedef struct VkDisplaySurfaceCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkDisplaySurfaceCreateFlagsKHR flags; + VkDisplayModeKHR displayMode; + uint32_t planeIndex; + uint32_t planeStackIndex; + VkSurfaceTransformFlagBitsKHR transform; + float globalAlpha; + VkDisplayPlaneAlphaFlagBitsKHR alphaMode; + VkExtent2D imageExtent; +} VkDisplaySurfaceCreateInfoKHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceDisplayPropertiesKHR)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayPropertiesKHR* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceDisplayPlanePropertiesKHR)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayPlanePropertiesKHR* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetDisplayPlaneSupportedDisplaysKHR)(VkPhysicalDevice physicalDevice, uint32_t planeIndex, uint32_t* pDisplayCount, VkDisplayKHR* pDisplays); +typedef VkResult (VKAPI_PTR *PFN_vkGetDisplayModePropertiesKHR)(VkPhysicalDevice physicalDevice, VkDisplayKHR display, uint32_t* pPropertyCount, VkDisplayModePropertiesKHR* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkCreateDisplayModeKHR)(VkPhysicalDevice physicalDevice, VkDisplayKHR display, const VkDisplayModeCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDisplayModeKHR* pMode); +typedef 
VkResult (VKAPI_PTR *PFN_vkGetDisplayPlaneCapabilitiesKHR)(VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode, uint32_t planeIndex, VkDisplayPlaneCapabilitiesKHR* pCapabilities); +typedef VkResult (VKAPI_PTR *PFN_vkCreateDisplayPlaneSurfaceKHR)(VkInstance instance, const VkDisplaySurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceDisplayPropertiesKHR( + VkPhysicalDevice physicalDevice, + uint32_t* pPropertyCount, + VkDisplayPropertiesKHR* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceDisplayPlanePropertiesKHR( + VkPhysicalDevice physicalDevice, + uint32_t* pPropertyCount, + VkDisplayPlanePropertiesKHR* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayPlaneSupportedDisplaysKHR( + VkPhysicalDevice physicalDevice, + uint32_t planeIndex, + uint32_t* pDisplayCount, + VkDisplayKHR* pDisplays); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayModePropertiesKHR( + VkPhysicalDevice physicalDevice, + VkDisplayKHR display, + uint32_t* pPropertyCount, + VkDisplayModePropertiesKHR* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDisplayModeKHR( + VkPhysicalDevice physicalDevice, + VkDisplayKHR display, + const VkDisplayModeCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDisplayModeKHR* pMode); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayPlaneCapabilitiesKHR( + VkPhysicalDevice physicalDevice, + VkDisplayModeKHR mode, + uint32_t planeIndex, + VkDisplayPlaneCapabilitiesKHR* pCapabilities); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDisplayPlaneSurfaceKHR( + VkInstance instance, + const VkDisplaySurfaceCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); +#endif + +#define VK_KHR_display_swapchain 1 +#define VK_KHR_DISPLAY_SWAPCHAIN_SPEC_VERSION 9 +#define VK_KHR_DISPLAY_SWAPCHAIN_EXTENSION_NAME "VK_KHR_display_swapchain" + +typedef struct VkDisplayPresentInfoKHR { + VkStructureType sType; + const void* pNext; + VkRect2D srcRect; + VkRect2D dstRect; + VkBool32 persistent; +} VkDisplayPresentInfoKHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkCreateSharedSwapchainsKHR)(VkDevice device, uint32_t swapchainCount, const VkSwapchainCreateInfoKHR* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkSwapchainKHR* pSwapchains); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateSharedSwapchainsKHR( + VkDevice device, + uint32_t swapchainCount, + const VkSwapchainCreateInfoKHR* pCreateInfos, + const VkAllocationCallbacks* pAllocator, + VkSwapchainKHR* pSwapchains); +#endif + +#ifdef VK_USE_PLATFORM_XLIB_KHR +#define VK_KHR_xlib_surface 1 +#include <X11/Xlib.h> + +#define VK_KHR_XLIB_SURFACE_SPEC_VERSION 6 +#define VK_KHR_XLIB_SURFACE_EXTENSION_NAME "VK_KHR_xlib_surface" + +typedef VkFlags VkXlibSurfaceCreateFlagsKHR; + +typedef struct VkXlibSurfaceCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkXlibSurfaceCreateFlagsKHR flags; + Display* dpy; + Window window; +} VkXlibSurfaceCreateInfoKHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkCreateXlibSurfaceKHR)(VkInstance instance, const VkXlibSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); +typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, Display* dpy, VisualID visualID); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateXlibSurfaceKHR( + VkInstance 
instance, + const VkXlibSurfaceCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); + +VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceXlibPresentationSupportKHR( + VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex, + Display* dpy, + VisualID visualID); +#endif +#endif /* VK_USE_PLATFORM_XLIB_KHR */ + +#ifdef VK_USE_PLATFORM_XCB_KHR +#define VK_KHR_xcb_surface 1 +#include <xcb/xcb.h> + +#define VK_KHR_XCB_SURFACE_SPEC_VERSION 6 +#define VK_KHR_XCB_SURFACE_EXTENSION_NAME "VK_KHR_xcb_surface" + +typedef VkFlags VkXcbSurfaceCreateFlagsKHR; + +typedef struct VkXcbSurfaceCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkXcbSurfaceCreateFlagsKHR flags; + xcb_connection_t* connection; + xcb_window_t window; +} VkXcbSurfaceCreateInfoKHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkCreateXcbSurfaceKHR)(VkInstance instance, const VkXcbSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); +typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, xcb_connection_t* connection, xcb_visualid_t visual_id); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateXcbSurfaceKHR( + VkInstance instance, + const VkXcbSurfaceCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); + +VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceXcbPresentationSupportKHR( + VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex, + xcb_connection_t* connection, + xcb_visualid_t visual_id); +#endif +#endif /* VK_USE_PLATFORM_XCB_KHR */ + +#ifdef VK_USE_PLATFORM_WAYLAND_KHR +#define VK_KHR_wayland_surface 1 +#include <wayland-client.h> + +#define VK_KHR_WAYLAND_SURFACE_SPEC_VERSION 6 +#define VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME "VK_KHR_wayland_surface" + +typedef VkFlags VkWaylandSurfaceCreateFlagsKHR; + +typedef struct VkWaylandSurfaceCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkWaylandSurfaceCreateFlagsKHR flags; + struct wl_display* display; + struct wl_surface* surface; +} VkWaylandSurfaceCreateInfoKHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkCreateWaylandSurfaceKHR)(VkInstance instance, const VkWaylandSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); +typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, struct wl_display* display); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateWaylandSurfaceKHR( + VkInstance instance, + const VkWaylandSurfaceCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); + +VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceWaylandPresentationSupportKHR( + VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex, + struct wl_display* display); +#endif +#endif /* VK_USE_PLATFORM_WAYLAND_KHR */ + +#ifdef VK_USE_PLATFORM_MIR_KHR +#define VK_KHR_mir_surface 1 +#include <mir_toolkit/client_types.h> + +#define VK_KHR_MIR_SURFACE_SPEC_VERSION 4 +#define VK_KHR_MIR_SURFACE_EXTENSION_NAME "VK_KHR_mir_surface" + +typedef VkFlags VkMirSurfaceCreateFlagsKHR; + +typedef struct VkMirSurfaceCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkMirSurfaceCreateFlagsKHR flags; + MirConnection* connection; + MirSurface* mirSurface; +} VkMirSurfaceCreateInfoKHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkCreateMirSurfaceKHR)(VkInstance instance, const VkMirSurfaceCreateInfoKHR* 
pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); +typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceMirPresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, MirConnection* connection); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateMirSurfaceKHR( + VkInstance instance, + const VkMirSurfaceCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); + +VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceMirPresentationSupportKHR( + VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex, + MirConnection* connection); +#endif +#endif /* VK_USE_PLATFORM_MIR_KHR */ + +#ifdef VK_USE_PLATFORM_ANDROID_KHR +#define VK_KHR_android_surface 1 + +#define VK_KHR_ANDROID_SURFACE_SPEC_VERSION 6 +#define VK_KHR_ANDROID_SURFACE_EXTENSION_NAME "VK_KHR_android_surface" + +typedef VkFlags VkAndroidSurfaceCreateFlagsKHR; + +typedef struct VkAndroidSurfaceCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkAndroidSurfaceCreateFlagsKHR flags; + struct ANativeWindow* window; +} VkAndroidSurfaceCreateInfoKHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkCreateAndroidSurfaceKHR)(VkInstance instance, const VkAndroidSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateAndroidSurfaceKHR( + VkInstance instance, + const VkAndroidSurfaceCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); +#endif +#endif /* VK_USE_PLATFORM_ANDROID_KHR */ + +#ifdef VK_USE_PLATFORM_WIN32_KHR +#define VK_KHR_win32_surface 1 +#include <windows.h> + +#define VK_KHR_WIN32_SURFACE_SPEC_VERSION 6 +#define VK_KHR_WIN32_SURFACE_EXTENSION_NAME "VK_KHR_win32_surface" + +typedef VkFlags VkWin32SurfaceCreateFlagsKHR; + +typedef struct VkWin32SurfaceCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkWin32SurfaceCreateFlagsKHR flags; + HINSTANCE hinstance; + HWND hwnd; +} VkWin32SurfaceCreateInfoKHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkCreateWin32SurfaceKHR)(VkInstance instance, const VkWin32SurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); +typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateWin32SurfaceKHR( + VkInstance instance, + const VkWin32SurfaceCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); + +VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceWin32PresentationSupportKHR( + VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex); +#endif +#endif /* VK_USE_PLATFORM_WIN32_KHR */ + +#define VK_KHR_sampler_mirror_clamp_to_edge 1 +#define VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_SPEC_VERSION 1 +#define VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_EXTENSION_NAME "VK_KHR_sampler_mirror_clamp_to_edge" + + +#define VK_KHR_get_physical_device_properties2 1 +#define VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_SPEC_VERSION 1 +#define VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME "VK_KHR_get_physical_device_properties2" + +typedef struct VkPhysicalDeviceFeatures2KHR { + VkStructureType sType; + void* pNext; + VkPhysicalDeviceFeatures features; +} VkPhysicalDeviceFeatures2KHR; + +typedef struct VkPhysicalDeviceProperties2KHR { + VkStructureType sType; + void* pNext; + VkPhysicalDeviceProperties properties; +} 
VkPhysicalDeviceProperties2KHR; + +typedef struct VkFormatProperties2KHR { + VkStructureType sType; + void* pNext; + VkFormatProperties formatProperties; +} VkFormatProperties2KHR; + +typedef struct VkImageFormatProperties2KHR { + VkStructureType sType; + void* pNext; + VkImageFormatProperties imageFormatProperties; +} VkImageFormatProperties2KHR; + +typedef struct VkPhysicalDeviceImageFormatInfo2KHR { + VkStructureType sType; + const void* pNext; + VkFormat format; + VkImageType type; + VkImageTiling tiling; + VkImageUsageFlags usage; + VkImageCreateFlags flags; +} VkPhysicalDeviceImageFormatInfo2KHR; + +typedef struct VkQueueFamilyProperties2KHR { + VkStructureType sType; + void* pNext; + VkQueueFamilyProperties queueFamilyProperties; +} VkQueueFamilyProperties2KHR; + +typedef struct VkPhysicalDeviceMemoryProperties2KHR { + VkStructureType sType; + void* pNext; + VkPhysicalDeviceMemoryProperties memoryProperties; +} VkPhysicalDeviceMemoryProperties2KHR; + +typedef struct VkSparseImageFormatProperties2KHR { + VkStructureType sType; + void* pNext; + VkSparseImageFormatProperties properties; +} VkSparseImageFormatProperties2KHR; + +typedef struct VkPhysicalDeviceSparseImageFormatInfo2KHR { + VkStructureType sType; + const void* pNext; + VkFormat format; + VkImageType type; + VkSampleCountFlagBits samples; + VkImageUsageFlags usage; + VkImageTiling tiling; +} VkPhysicalDeviceSparseImageFormatInfo2KHR; + + +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFeatures2KHR)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures2KHR* pFeatures); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceProperties2KHR)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties2KHR* pProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFormatProperties2KHR)(VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties2KHR* pFormatProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceImageFormatProperties2KHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceImageFormatInfo2KHR* pImageFormatInfo, VkImageFormatProperties2KHR* pImageFormatProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceQueueFamilyProperties2KHR)(VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties2KHR* pQueueFamilyProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceMemoryProperties2KHR)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties2KHR* pMemoryProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceSparseImageFormatProperties2KHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSparseImageFormatInfo2KHR* pFormatInfo, uint32_t* pPropertyCount, VkSparseImageFormatProperties2KHR* pProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFeatures2KHR( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceFeatures2KHR* pFeatures); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceProperties2KHR( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceProperties2KHR* pProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFormatProperties2KHR( + VkPhysicalDevice physicalDevice, + VkFormat format, + VkFormatProperties2KHR* pFormatProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceImageFormatProperties2KHR( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceImageFormatInfo2KHR* pImageFormatInfo, + VkImageFormatProperties2KHR* pImageFormatProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceQueueFamilyProperties2KHR( + VkPhysicalDevice physicalDevice, + 
uint32_t* pQueueFamilyPropertyCount, + VkQueueFamilyProperties2KHR* pQueueFamilyProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMemoryProperties2KHR( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceMemoryProperties2KHR* pMemoryProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceSparseImageFormatProperties2KHR( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceSparseImageFormatInfo2KHR* pFormatInfo, + uint32_t* pPropertyCount, + VkSparseImageFormatProperties2KHR* pProperties); +#endif + +#define VK_KHR_shader_draw_parameters 1 +#define VK_KHR_SHADER_DRAW_PARAMETERS_SPEC_VERSION 1 +#define VK_KHR_SHADER_DRAW_PARAMETERS_EXTENSION_NAME "VK_KHR_shader_draw_parameters" + + +#define VK_KHR_maintenance1 1 +#define VK_KHR_MAINTENANCE1_SPEC_VERSION 1 +#define VK_KHR_MAINTENANCE1_EXTENSION_NAME "VK_KHR_maintenance1" + +typedef VkFlags VkCommandPoolTrimFlagsKHR; + +typedef void (VKAPI_PTR *PFN_vkTrimCommandPoolKHR)(VkDevice device, VkCommandPool commandPool, VkCommandPoolTrimFlagsKHR flags); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkTrimCommandPoolKHR( + VkDevice device, + VkCommandPool commandPool, + VkCommandPoolTrimFlagsKHR flags); +#endif + +#define VK_KHR_external_memory_capabilities 1 +#define VK_LUID_SIZE_KHR 8 +#define VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME "VK_KHR_external_memory_capabilities" + + +typedef enum VkExternalMemoryHandleTypeFlagBitsKHR { + VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR = 0x00000001, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR = 0x00000002, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_KHR = 0x00000004, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_BIT_KHR = 0x00000008, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KMT_BIT_KHR = 0x00000010, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP_BIT_KHR = 0x00000020, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE_BIT_KHR = 0x00000040, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkExternalMemoryHandleTypeFlagBitsKHR; +typedef VkFlags VkExternalMemoryHandleTypeFlagsKHR; + +typedef enum VkExternalMemoryFeatureFlagBitsKHR { + VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT_KHR = 0x00000001, + VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_KHR = 0x00000002, + VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR = 0x00000004, + VK_EXTERNAL_MEMORY_FEATURE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkExternalMemoryFeatureFlagBitsKHR; +typedef VkFlags VkExternalMemoryFeatureFlagsKHR; + +typedef struct VkExternalMemoryPropertiesKHR { + VkExternalMemoryFeatureFlagsKHR externalMemoryFeatures; + VkExternalMemoryHandleTypeFlagsKHR exportFromImportedHandleTypes; + VkExternalMemoryHandleTypeFlagsKHR compatibleHandleTypes; +} VkExternalMemoryPropertiesKHR; + +typedef struct VkPhysicalDeviceExternalImageFormatInfoKHR { + VkStructureType sType; + const void* pNext; + VkExternalMemoryHandleTypeFlagBitsKHR handleType; +} VkPhysicalDeviceExternalImageFormatInfoKHR; + +typedef struct VkExternalImageFormatPropertiesKHR { + VkStructureType sType; + void* pNext; + VkExternalMemoryPropertiesKHR externalMemoryProperties; +} VkExternalImageFormatPropertiesKHR; + +typedef struct VkPhysicalDeviceExternalBufferInfoKHR { + VkStructureType sType; + const void* pNext; + VkBufferCreateFlags flags; + VkBufferUsageFlags usage; + VkExternalMemoryHandleTypeFlagBitsKHR handleType; +} VkPhysicalDeviceExternalBufferInfoKHR; + +typedef struct VkExternalBufferPropertiesKHR { + VkStructureType sType; + void* pNext; 
+ VkExternalMemoryPropertiesKHR externalMemoryProperties; +} VkExternalBufferPropertiesKHR; + +typedef struct VkPhysicalDeviceIDPropertiesKHR { + VkStructureType sType; + void* pNext; + uint8_t deviceUUID[VK_UUID_SIZE]; + uint8_t driverUUID[VK_UUID_SIZE]; + uint8_t deviceLUID[VK_LUID_SIZE_KHR]; + uint32_t deviceNodeMask; + VkBool32 deviceLUIDValid; +} VkPhysicalDeviceIDPropertiesKHR; + + +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalBufferPropertiesKHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalBufferInfoKHR* pExternalBufferInfo, VkExternalBufferPropertiesKHR* pExternalBufferProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalBufferPropertiesKHR( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceExternalBufferInfoKHR* pExternalBufferInfo, + VkExternalBufferPropertiesKHR* pExternalBufferProperties); +#endif + +#define VK_KHR_external_memory 1 +#define VK_KHR_EXTERNAL_MEMORY_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME "VK_KHR_external_memory" +#define VK_QUEUE_FAMILY_EXTERNAL_KHR (~0U-1) + +typedef struct VkExternalMemoryImageCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkExternalMemoryHandleTypeFlagsKHR handleTypes; +} VkExternalMemoryImageCreateInfoKHR; + +typedef struct VkExternalMemoryBufferCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkExternalMemoryHandleTypeFlagsKHR handleTypes; +} VkExternalMemoryBufferCreateInfoKHR; + +typedef struct VkExportMemoryAllocateInfoKHR { + VkStructureType sType; + const void* pNext; + VkExternalMemoryHandleTypeFlagsKHR handleTypes; +} VkExportMemoryAllocateInfoKHR; + + + +#ifdef VK_USE_PLATFORM_WIN32_KHR +#define VK_KHR_external_memory_win32 1 +#define VK_KHR_EXTERNAL_MEMORY_WIN32_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_MEMORY_WIN32_EXTENSION_NAME "VK_KHR_external_memory_win32" + +typedef struct VkImportMemoryWin32HandleInfoKHR { + VkStructureType sType; + const void* pNext; + VkExternalMemoryHandleTypeFlagBitsKHR handleType; + HANDLE handle; + LPCWSTR name; +} VkImportMemoryWin32HandleInfoKHR; + +typedef struct VkExportMemoryWin32HandleInfoKHR { + VkStructureType sType; + const void* pNext; + const SECURITY_ATTRIBUTES* pAttributes; + DWORD dwAccess; + LPCWSTR name; +} VkExportMemoryWin32HandleInfoKHR; + +typedef struct VkMemoryWin32HandlePropertiesKHR { + VkStructureType sType; + void* pNext; + uint32_t memoryTypeBits; +} VkMemoryWin32HandlePropertiesKHR; + +typedef struct VkMemoryGetWin32HandleInfoKHR { + VkStructureType sType; + const void* pNext; + VkDeviceMemory memory; + VkExternalMemoryHandleTypeFlagBitsKHR handleType; +} VkMemoryGetWin32HandleInfoKHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryWin32HandleKHR)(VkDevice device, const VkMemoryGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle); +typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryWin32HandlePropertiesKHR)(VkDevice device, VkExternalMemoryHandleTypeFlagBitsKHR handleType, HANDLE handle, VkMemoryWin32HandlePropertiesKHR* pMemoryWin32HandleProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryWin32HandleKHR( + VkDevice device, + const VkMemoryGetWin32HandleInfoKHR* pGetWin32HandleInfo, + HANDLE* pHandle); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryWin32HandlePropertiesKHR( + VkDevice device, + VkExternalMemoryHandleTypeFlagBitsKHR handleType, + HANDLE handle, + VkMemoryWin32HandlePropertiesKHR* pMemoryWin32HandleProperties); +#endif +#endif /* VK_USE_PLATFORM_WIN32_KHR */ + +#define VK_KHR_external_memory_fd 1 
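+/*
+ * Usage sketch (illustrative only, not part of the upstream header):
+ * exporting a POSIX file descriptor from a VkDeviceMemory allocation via
+ * this extension's vkGetMemoryFdKHR entry point. Assumes `device` is a
+ * valid VkDevice and `memory` was allocated from a memory type reported
+ * as exportable for VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR:
+ *
+ *     VkMemoryGetFdInfoKHR getFdInfo;
+ *     getFdInfo.sType      = VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR;
+ *     getFdInfo.pNext      = NULL;
+ *     getFdInfo.memory     = memory;
+ *     getFdInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
+ *
+ *     int fd = -1;
+ *     if (vkGetMemoryFdKHR(device, &getFdInfo, &fd) == VK_SUCCESS) {
+ *         // The caller owns fd and must close(2) it (or import it
+ *         // elsewhere) once it is no longer needed.
+ *     }
+ */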
+#define VK_KHR_EXTERNAL_MEMORY_FD_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME "VK_KHR_external_memory_fd" + +typedef struct VkImportMemoryFdInfoKHR { + VkStructureType sType; + const void* pNext; + VkExternalMemoryHandleTypeFlagBitsKHR handleType; + int fd; +} VkImportMemoryFdInfoKHR; + +typedef struct VkMemoryFdPropertiesKHR { + VkStructureType sType; + void* pNext; + uint32_t memoryTypeBits; +} VkMemoryFdPropertiesKHR; + +typedef struct VkMemoryGetFdInfoKHR { + VkStructureType sType; + const void* pNext; + VkDeviceMemory memory; + VkExternalMemoryHandleTypeFlagBitsKHR handleType; +} VkMemoryGetFdInfoKHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryFdKHR)(VkDevice device, const VkMemoryGetFdInfoKHR* pGetFdInfo, int* pFd); +typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryFdPropertiesKHR)(VkDevice device, VkExternalMemoryHandleTypeFlagBitsKHR handleType, int fd, VkMemoryFdPropertiesKHR* pMemoryFdProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryFdKHR( + VkDevice device, + const VkMemoryGetFdInfoKHR* pGetFdInfo, + int* pFd); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryFdPropertiesKHR( + VkDevice device, + VkExternalMemoryHandleTypeFlagBitsKHR handleType, + int fd, + VkMemoryFdPropertiesKHR* pMemoryFdProperties); +#endif + +#ifdef VK_USE_PLATFORM_WIN32_KHR +#define VK_KHR_win32_keyed_mutex 1 +#define VK_KHR_WIN32_KEYED_MUTEX_SPEC_VERSION 1 +#define VK_KHR_WIN32_KEYED_MUTEX_EXTENSION_NAME "VK_KHR_win32_keyed_mutex" + +typedef struct VkWin32KeyedMutexAcquireReleaseInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t acquireCount; + const VkDeviceMemory* pAcquireSyncs; + const uint64_t* pAcquireKeys; + const uint32_t* pAcquireTimeouts; + uint32_t releaseCount; + const VkDeviceMemory* pReleaseSyncs; + const uint64_t* pReleaseKeys; +} VkWin32KeyedMutexAcquireReleaseInfoKHR; + + +#endif /* VK_USE_PLATFORM_WIN32_KHR */ + +#define VK_KHR_external_semaphore_capabilities 1 +#define VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_EXTENSION_NAME "VK_KHR_external_semaphore_capabilities" + + +typedef enum VkExternalSemaphoreHandleTypeFlagBitsKHR { + VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR = 0x00000001, + VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR = 0x00000002, + VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_KHR = 0x00000004, + VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT_KHR = 0x00000008, + VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR = 0x00000010, + VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkExternalSemaphoreHandleTypeFlagBitsKHR; +typedef VkFlags VkExternalSemaphoreHandleTypeFlagsKHR; + +typedef enum VkExternalSemaphoreFeatureFlagBitsKHR { + VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR = 0x00000001, + VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR = 0x00000002, + VK_EXTERNAL_SEMAPHORE_FEATURE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkExternalSemaphoreFeatureFlagBitsKHR; +typedef VkFlags VkExternalSemaphoreFeatureFlagsKHR; + +typedef struct VkPhysicalDeviceExternalSemaphoreInfoKHR { + VkStructureType sType; + const void* pNext; + VkExternalSemaphoreHandleTypeFlagBitsKHR handleType; +} VkPhysicalDeviceExternalSemaphoreInfoKHR; + +typedef struct VkExternalSemaphorePropertiesKHR { + VkStructureType sType; + void* pNext; + VkExternalSemaphoreHandleTypeFlagsKHR exportFromImportedHandleTypes; + VkExternalSemaphoreHandleTypeFlagsKHR compatibleHandleTypes; + VkExternalSemaphoreFeatureFlagsKHR 
externalSemaphoreFeatures; +} VkExternalSemaphorePropertiesKHR; + + +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalSemaphoreInfoKHR* pExternalSemaphoreInfo, VkExternalSemaphorePropertiesKHR* pExternalSemaphoreProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalSemaphorePropertiesKHR( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceExternalSemaphoreInfoKHR* pExternalSemaphoreInfo, + VkExternalSemaphorePropertiesKHR* pExternalSemaphoreProperties); +#endif + +#define VK_KHR_external_semaphore 1 +#define VK_KHR_EXTERNAL_SEMAPHORE_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_SEMAPHORE_EXTENSION_NAME "VK_KHR_external_semaphore" + + +typedef enum VkSemaphoreImportFlagBitsKHR { + VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR = 0x00000001, + VK_SEMAPHORE_IMPORT_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkSemaphoreImportFlagBitsKHR; +typedef VkFlags VkSemaphoreImportFlagsKHR; + +typedef struct VkExportSemaphoreCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkExternalSemaphoreHandleTypeFlagsKHR handleTypes; +} VkExportSemaphoreCreateInfoKHR; + + + +#ifdef VK_USE_PLATFORM_WIN32_KHR +#define VK_KHR_external_semaphore_win32 1 +#define VK_KHR_EXTERNAL_SEMAPHORE_WIN32_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_SEMAPHORE_WIN32_EXTENSION_NAME "VK_KHR_external_semaphore_win32" + +typedef struct VkImportSemaphoreWin32HandleInfoKHR { + VkStructureType sType; + const void* pNext; + VkSemaphore semaphore; + VkSemaphoreImportFlagsKHR flags; + VkExternalSemaphoreHandleTypeFlagBitsKHR handleType; + HANDLE handle; + LPCWSTR name; +} VkImportSemaphoreWin32HandleInfoKHR; + +typedef struct VkExportSemaphoreWin32HandleInfoKHR { + VkStructureType sType; + const void* pNext; + const SECURITY_ATTRIBUTES* pAttributes; + DWORD dwAccess; + LPCWSTR name; +} VkExportSemaphoreWin32HandleInfoKHR; + +typedef struct VkD3D12FenceSubmitInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t waitSemaphoreValuesCount; + const uint64_t* pWaitSemaphoreValues; + uint32_t signalSemaphoreValuesCount; + const uint64_t* pSignalSemaphoreValues; +} VkD3D12FenceSubmitInfoKHR; + +typedef struct VkSemaphoreGetWin32HandleInfoKHR { + VkStructureType sType; + const void* pNext; + VkSemaphore semaphore; + VkExternalSemaphoreHandleTypeFlagBitsKHR handleType; +} VkSemaphoreGetWin32HandleInfoKHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkImportSemaphoreWin32HandleKHR)(VkDevice device, const VkImportSemaphoreWin32HandleInfoKHR* pImportSemaphoreWin32HandleInfo); +typedef VkResult (VKAPI_PTR *PFN_vkGetSemaphoreWin32HandleKHR)(VkDevice device, const VkSemaphoreGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkImportSemaphoreWin32HandleKHR( + VkDevice device, + const VkImportSemaphoreWin32HandleInfoKHR* pImportSemaphoreWin32HandleInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetSemaphoreWin32HandleKHR( + VkDevice device, + const VkSemaphoreGetWin32HandleInfoKHR* pGetWin32HandleInfo, + HANDLE* pHandle); +#endif +#endif /* VK_USE_PLATFORM_WIN32_KHR */ + +#define VK_KHR_external_semaphore_fd 1 +#define VK_KHR_EXTERNAL_SEMAPHORE_FD_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME "VK_KHR_external_semaphore_fd" + +typedef struct VkImportSemaphoreFdInfoKHR { + VkStructureType sType; + const void* pNext; + VkSemaphore semaphore; + VkSemaphoreImportFlagsKHR flags; + VkExternalSemaphoreHandleTypeFlagBitsKHR handleType; + 
int fd; +} VkImportSemaphoreFdInfoKHR; + +typedef struct VkSemaphoreGetFdInfoKHR { + VkStructureType sType; + const void* pNext; + VkSemaphore semaphore; + VkExternalSemaphoreHandleTypeFlagBitsKHR handleType; +} VkSemaphoreGetFdInfoKHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkImportSemaphoreFdKHR)(VkDevice device, const VkImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo); +typedef VkResult (VKAPI_PTR *PFN_vkGetSemaphoreFdKHR)(VkDevice device, const VkSemaphoreGetFdInfoKHR* pGetFdInfo, int* pFd); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkImportSemaphoreFdKHR( + VkDevice device, + const VkImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetSemaphoreFdKHR( + VkDevice device, + const VkSemaphoreGetFdInfoKHR* pGetFdInfo, + int* pFd); +#endif + +#define VK_KHR_push_descriptor 1 +#define VK_KHR_PUSH_DESCRIPTOR_SPEC_VERSION 1 +#define VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME "VK_KHR_push_descriptor" + +typedef struct VkPhysicalDevicePushDescriptorPropertiesKHR { + VkStructureType sType; + void* pNext; + uint32_t maxPushDescriptors; +} VkPhysicalDevicePushDescriptorPropertiesKHR; + + +typedef void (VKAPI_PTR *PFN_vkCmdPushDescriptorSetKHR)(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount, const VkWriteDescriptorSet* pDescriptorWrites); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdPushDescriptorSetKHR( + VkCommandBuffer commandBuffer, + VkPipelineBindPoint pipelineBindPoint, + VkPipelineLayout layout, + uint32_t set, + uint32_t descriptorWriteCount, + const VkWriteDescriptorSet* pDescriptorWrites); +#endif + +#define VK_KHR_16bit_storage 1 +#define VK_KHR_16BIT_STORAGE_SPEC_VERSION 1 +#define VK_KHR_16BIT_STORAGE_EXTENSION_NAME "VK_KHR_16bit_storage" + +typedef struct VkPhysicalDevice16BitStorageFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 storageBuffer16BitAccess; + VkBool32 uniformAndStorageBuffer16BitAccess; + VkBool32 storagePushConstant16; + VkBool32 storageInputOutput16; +} VkPhysicalDevice16BitStorageFeaturesKHR; + + + +#define VK_KHR_incremental_present 1 +#define VK_KHR_INCREMENTAL_PRESENT_SPEC_VERSION 1 +#define VK_KHR_INCREMENTAL_PRESENT_EXTENSION_NAME "VK_KHR_incremental_present" + +typedef struct VkRectLayerKHR { + VkOffset2D offset; + VkExtent2D extent; + uint32_t layer; +} VkRectLayerKHR; + +typedef struct VkPresentRegionKHR { + uint32_t rectangleCount; + const VkRectLayerKHR* pRectangles; +} VkPresentRegionKHR; + +typedef struct VkPresentRegionsKHR { + VkStructureType sType; + const void* pNext; + uint32_t swapchainCount; + const VkPresentRegionKHR* pRegions; +} VkPresentRegionsKHR; + + + +#define VK_KHR_descriptor_update_template 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorUpdateTemplateKHR) + +#define VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_SPEC_VERSION 1 +#define VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_EXTENSION_NAME "VK_KHR_descriptor_update_template" + + +typedef enum VkDescriptorUpdateTemplateTypeKHR { + VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR = 0, + VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR = 1, + VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_BEGIN_RANGE_KHR = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR, + VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_END_RANGE_KHR = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR, + VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_RANGE_SIZE_KHR = (VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR - VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR + 
1), + VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_MAX_ENUM_KHR = 0x7FFFFFFF +} VkDescriptorUpdateTemplateTypeKHR; + +typedef VkFlags VkDescriptorUpdateTemplateCreateFlagsKHR; + +typedef struct VkDescriptorUpdateTemplateEntryKHR { + uint32_t dstBinding; + uint32_t dstArrayElement; + uint32_t descriptorCount; + VkDescriptorType descriptorType; + size_t offset; + size_t stride; +} VkDescriptorUpdateTemplateEntryKHR; + +typedef struct VkDescriptorUpdateTemplateCreateInfoKHR { + VkStructureType sType; + void* pNext; + VkDescriptorUpdateTemplateCreateFlagsKHR flags; + uint32_t descriptorUpdateEntryCount; + const VkDescriptorUpdateTemplateEntryKHR* pDescriptorUpdateEntries; + VkDescriptorUpdateTemplateTypeKHR templateType; + VkDescriptorSetLayout descriptorSetLayout; + VkPipelineBindPoint pipelineBindPoint; + VkPipelineLayout pipelineLayout; + uint32_t set; +} VkDescriptorUpdateTemplateCreateInfoKHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkCreateDescriptorUpdateTemplateKHR)(VkDevice device, const VkDescriptorUpdateTemplateCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorUpdateTemplateKHR* pDescriptorUpdateTemplate); +typedef void (VKAPI_PTR *PFN_vkDestroyDescriptorUpdateTemplateKHR)(VkDevice device, VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkUpdateDescriptorSetWithTemplateKHR)(VkDevice device, VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, const void* pData); +typedef void (VKAPI_PTR *PFN_vkCmdPushDescriptorSetWithTemplateKHR)(VkCommandBuffer commandBuffer, VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, VkPipelineLayout layout, uint32_t set, const void* pData); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorUpdateTemplateKHR( + VkDevice device, + const VkDescriptorUpdateTemplateCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDescriptorUpdateTemplateKHR* pDescriptorUpdateTemplate); + +VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorUpdateTemplateKHR( + VkDevice device, + VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR void VKAPI_CALL vkUpdateDescriptorSetWithTemplateKHR( + VkDevice device, + VkDescriptorSet descriptorSet, + VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, + const void* pData); + +VKAPI_ATTR void VKAPI_CALL vkCmdPushDescriptorSetWithTemplateKHR( + VkCommandBuffer commandBuffer, + VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, + VkPipelineLayout layout, + uint32_t set, + const void* pData); +#endif + +#define VK_KHR_shared_presentable_image 1 +#define VK_KHR_SHARED_PRESENTABLE_IMAGE_SPEC_VERSION 1 +#define VK_KHR_SHARED_PRESENTABLE_IMAGE_EXTENSION_NAME "VK_KHR_shared_presentable_image" + +typedef struct VkSharedPresentSurfaceCapabilitiesKHR { + VkStructureType sType; + void* pNext; + VkImageUsageFlags sharedPresentSupportedUsageFlags; +} VkSharedPresentSurfaceCapabilitiesKHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkGetSwapchainStatusKHR)(VkDevice device, VkSwapchainKHR swapchain); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainStatusKHR( + VkDevice device, + VkSwapchainKHR swapchain); +#endif + +#define VK_KHR_external_fence_capabilities 1 +#define VK_KHR_EXTERNAL_FENCE_CAPABILITIES_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_FENCE_CAPABILITIES_EXTENSION_NAME "VK_KHR_external_fence_capabilities" + + +typedef enum VkExternalFenceHandleTypeFlagBitsKHR { + 
VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR = 0x00000001,
+    VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR = 0x00000002,
+    VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_KHR = 0x00000004,
+    VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR = 0x00000008,
+    VK_EXTERNAL_FENCE_HANDLE_TYPE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF
+} VkExternalFenceHandleTypeFlagBitsKHR;
+typedef VkFlags VkExternalFenceHandleTypeFlagsKHR;
+
+typedef enum VkExternalFenceFeatureFlagBitsKHR {
+    VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT_KHR = 0x00000001,
+    VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT_KHR = 0x00000002,
+    VK_EXTERNAL_FENCE_FEATURE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF
+} VkExternalFenceFeatureFlagBitsKHR;
+typedef VkFlags VkExternalFenceFeatureFlagsKHR;
+
+typedef struct VkPhysicalDeviceExternalFenceInfoKHR {
+    VkStructureType sType;
+    const void* pNext;
+    VkExternalFenceHandleTypeFlagBitsKHR handleType;
+} VkPhysicalDeviceExternalFenceInfoKHR;
+
+typedef struct VkExternalFencePropertiesKHR {
+    VkStructureType sType;
+    void* pNext;
+    VkExternalFenceHandleTypeFlagsKHR exportFromImportedHandleTypes;
+    VkExternalFenceHandleTypeFlagsKHR compatibleHandleTypes;
+    VkExternalFenceFeatureFlagsKHR externalFenceFeatures;
+} VkExternalFencePropertiesKHR;
+
+
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalFencePropertiesKHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalFenceInfoKHR* pExternalFenceInfo, VkExternalFencePropertiesKHR* pExternalFenceProperties);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalFencePropertiesKHR(
+    VkPhysicalDevice physicalDevice,
+    const VkPhysicalDeviceExternalFenceInfoKHR* pExternalFenceInfo,
+    VkExternalFencePropertiesKHR* pExternalFenceProperties);
+#endif
+
+#define VK_KHR_external_fence 1
+#define VK_KHR_EXTERNAL_FENCE_SPEC_VERSION 1
+#define VK_KHR_EXTERNAL_FENCE_EXTENSION_NAME "VK_KHR_external_fence"
+
+
+typedef enum VkFenceImportFlagBitsKHR {
+    VK_FENCE_IMPORT_TEMPORARY_BIT_KHR = 0x00000001,
+    VK_FENCE_IMPORT_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF
+} VkFenceImportFlagBitsKHR;
+typedef VkFlags VkFenceImportFlagsKHR;
+
+typedef struct VkExportFenceCreateInfoKHR {
+    VkStructureType sType;
+    const void* pNext;
+    VkExternalFenceHandleTypeFlagsKHR handleTypes;
+} VkExportFenceCreateInfoKHR;
+
+
+
+#ifdef VK_USE_PLATFORM_WIN32_KHR
+#define VK_KHR_external_fence_win32 1
+#define VK_KHR_EXTERNAL_FENCE_WIN32_SPEC_VERSION 1
+#define VK_KHR_EXTERNAL_FENCE_WIN32_EXTENSION_NAME "VK_KHR_external_fence_win32"
+
+typedef struct VkImportFenceWin32HandleInfoKHR {
+    VkStructureType sType;
+    const void* pNext;
+    VkFence fence;
+    VkFenceImportFlagsKHR flags;
+    VkExternalFenceHandleTypeFlagBitsKHR handleType;
+    HANDLE handle;
+    LPCWSTR name;
+} VkImportFenceWin32HandleInfoKHR;
+
+typedef struct VkExportFenceWin32HandleInfoKHR {
+    VkStructureType sType;
+    const void* pNext;
+    const SECURITY_ATTRIBUTES* pAttributes;
+    DWORD dwAccess;
+    LPCWSTR name;
+} VkExportFenceWin32HandleInfoKHR;
+
+typedef struct VkFenceGetWin32HandleInfoKHR {
+    VkStructureType sType;
+    const void* pNext;
+    VkFence fence;
+    VkExternalFenceHandleTypeFlagBitsKHR handleType;
+} VkFenceGetWin32HandleInfoKHR;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkImportFenceWin32HandleKHR)(VkDevice device, const VkImportFenceWin32HandleInfoKHR* pImportFenceWin32HandleInfo);
+typedef VkResult (VKAPI_PTR *PFN_vkGetFenceWin32HandleKHR)(VkDevice device, const VkFenceGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkImportFenceWin32HandleKHR(
+    VkDevice device,
+    const VkImportFenceWin32HandleInfoKHR* pImportFenceWin32HandleInfo);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceWin32HandleKHR(
+    VkDevice device,
+    const VkFenceGetWin32HandleInfoKHR* pGetWin32HandleInfo,
+    HANDLE* pHandle);
+#endif
+#endif /* VK_USE_PLATFORM_WIN32_KHR */
+
+#define VK_KHR_external_fence_fd 1
+#define VK_KHR_EXTERNAL_FENCE_FD_SPEC_VERSION 1
+#define VK_KHR_EXTERNAL_FENCE_FD_EXTENSION_NAME "VK_KHR_external_fence_fd"
+
+typedef struct VkImportFenceFdInfoKHR {
+    VkStructureType sType;
+    const void* pNext;
+    VkFence fence;
+    VkFenceImportFlagsKHR flags;
+    VkExternalFenceHandleTypeFlagBitsKHR handleType;
+    int fd;
+} VkImportFenceFdInfoKHR;
+
+typedef struct VkFenceGetFdInfoKHR {
+    VkStructureType sType;
+    const void* pNext;
+    VkFence fence;
+    VkExternalFenceHandleTypeFlagBitsKHR handleType;
+} VkFenceGetFdInfoKHR;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkImportFenceFdKHR)(VkDevice device, const VkImportFenceFdInfoKHR* pImportFenceFdInfo);
+typedef VkResult (VKAPI_PTR *PFN_vkGetFenceFdKHR)(VkDevice device, const VkFenceGetFdInfoKHR* pGetFdInfo, int* pFd);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkImportFenceFdKHR(
+    VkDevice device,
+    const VkImportFenceFdInfoKHR* pImportFenceFdInfo);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceFdKHR(
+    VkDevice device,
+    const VkFenceGetFdInfoKHR* pGetFdInfo,
+    int* pFd);
+#endif
+
+#define VK_KHR_maintenance2 1
+#define VK_KHR_MAINTENANCE2_SPEC_VERSION 1
+#define VK_KHR_MAINTENANCE2_EXTENSION_NAME "VK_KHR_maintenance2"
+
+
+typedef enum VkPointClippingBehaviorKHR {
+    VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES_KHR = 0,
+    VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY_KHR = 1,
+    VK_POINT_CLIPPING_BEHAVIOR_BEGIN_RANGE_KHR = VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES_KHR,
+    VK_POINT_CLIPPING_BEHAVIOR_END_RANGE_KHR = VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY_KHR,
+    VK_POINT_CLIPPING_BEHAVIOR_RANGE_SIZE_KHR = (VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY_KHR - VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES_KHR + 1),
+    VK_POINT_CLIPPING_BEHAVIOR_MAX_ENUM_KHR = 0x7FFFFFFF
+} VkPointClippingBehaviorKHR;
+
+typedef enum VkTessellationDomainOriginKHR {
+    VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT_KHR = 0,
+    VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT_KHR = 1,
+    VK_TESSELLATION_DOMAIN_ORIGIN_BEGIN_RANGE_KHR = VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT_KHR,
+    VK_TESSELLATION_DOMAIN_ORIGIN_END_RANGE_KHR = VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT_KHR,
+    VK_TESSELLATION_DOMAIN_ORIGIN_RANGE_SIZE_KHR = (VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT_KHR - VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT_KHR + 1),
+    VK_TESSELLATION_DOMAIN_ORIGIN_MAX_ENUM_KHR = 0x7FFFFFFF
+} VkTessellationDomainOriginKHR;
+
+typedef struct VkPhysicalDevicePointClippingPropertiesKHR {
+    VkStructureType sType;
+    void* pNext;
+    VkPointClippingBehaviorKHR pointClippingBehavior;
+} VkPhysicalDevicePointClippingPropertiesKHR;
+
+typedef struct VkInputAttachmentAspectReferenceKHR {
+    uint32_t subpass;
+    uint32_t inputAttachmentIndex;
+    VkImageAspectFlags aspectMask;
+} VkInputAttachmentAspectReferenceKHR;
+
+typedef struct VkRenderPassInputAttachmentAspectCreateInfoKHR {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t aspectReferenceCount;
+    const VkInputAttachmentAspectReferenceKHR* pAspectReferences;
+} VkRenderPassInputAttachmentAspectCreateInfoKHR;
+
+typedef struct VkImageViewUsageCreateInfoKHR {
+    VkStructureType sType;
+    const void* pNext;
+    VkImageUsageFlags usage;
+} VkImageViewUsageCreateInfoKHR;
+
+typedef struct VkPipelineTessellationDomainOriginStateCreateInfoKHR {
+    VkStructureType sType;
+    const void* pNext;
+    VkTessellationDomainOriginKHR domainOrigin;
+} VkPipelineTessellationDomainOriginStateCreateInfoKHR;
+
+
+
+#define VK_KHR_get_surface_capabilities2 1
+#define VK_KHR_GET_SURFACE_CAPABILITIES_2_SPEC_VERSION 1
+#define VK_KHR_GET_SURFACE_CAPABILITIES_2_EXTENSION_NAME "VK_KHR_get_surface_capabilities2"
+
+typedef struct VkPhysicalDeviceSurfaceInfo2KHR {
+    VkStructureType sType;
+    const void* pNext;
+    VkSurfaceKHR surface;
+} VkPhysicalDeviceSurfaceInfo2KHR;
+
+typedef struct VkSurfaceCapabilities2KHR {
+    VkStructureType sType;
+    void* pNext;
+    VkSurfaceCapabilitiesKHR surfaceCapabilities;
+} VkSurfaceCapabilities2KHR;
+
+typedef struct VkSurfaceFormat2KHR {
+    VkStructureType sType;
+    void* pNext;
+    VkSurfaceFormatKHR surfaceFormat;
+} VkSurfaceFormat2KHR;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceCapabilities2KHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, VkSurfaceCapabilities2KHR* pSurfaceCapabilities);
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceFormats2KHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, uint32_t* pSurfaceFormatCount, VkSurfaceFormat2KHR* pSurfaceFormats);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceCapabilities2KHR(
+    VkPhysicalDevice physicalDevice,
+    const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo,
+    VkSurfaceCapabilities2KHR* pSurfaceCapabilities);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceFormats2KHR(
+    VkPhysicalDevice physicalDevice,
+    const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo,
+    uint32_t* pSurfaceFormatCount,
+    VkSurfaceFormat2KHR* pSurfaceFormats);
+#endif
+
+#define VK_KHR_variable_pointers 1
+#define VK_KHR_VARIABLE_POINTERS_SPEC_VERSION 1
+#define VK_KHR_VARIABLE_POINTERS_EXTENSION_NAME "VK_KHR_variable_pointers"
+
+typedef struct VkPhysicalDeviceVariablePointerFeaturesKHR {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 variablePointersStorageBuffer;
+    VkBool32 variablePointers;
+} VkPhysicalDeviceVariablePointerFeaturesKHR;
+
+
+
+#define VK_KHR_dedicated_allocation 1
+#define VK_KHR_DEDICATED_ALLOCATION_SPEC_VERSION 3
+#define VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME "VK_KHR_dedicated_allocation"
+
+typedef struct VkMemoryDedicatedRequirementsKHR {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 prefersDedicatedAllocation;
+    VkBool32 requiresDedicatedAllocation;
+} VkMemoryDedicatedRequirementsKHR;
+
+typedef struct VkMemoryDedicatedAllocateInfoKHR {
+    VkStructureType sType;
+    const void* pNext;
+    VkImage image;
+    VkBuffer buffer;
+} VkMemoryDedicatedAllocateInfoKHR;
+
+
+
+#define VK_KHR_storage_buffer_storage_class 1
+#define VK_KHR_STORAGE_BUFFER_STORAGE_CLASS_SPEC_VERSION 1
+#define VK_KHR_STORAGE_BUFFER_STORAGE_CLASS_EXTENSION_NAME "VK_KHR_storage_buffer_storage_class"
+
+
+#define VK_KHR_relaxed_block_layout 1
+#define VK_KHR_RELAXED_BLOCK_LAYOUT_SPEC_VERSION 1
+#define VK_KHR_RELAXED_BLOCK_LAYOUT_EXTENSION_NAME "VK_KHR_relaxed_block_layout"
+
+
+#define VK_KHR_get_memory_requirements2 1
+#define VK_KHR_GET_MEMORY_REQUIREMENTS_2_SPEC_VERSION 1
+#define VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME "VK_KHR_get_memory_requirements2"
+
+typedef struct VkBufferMemoryRequirementsInfo2KHR {
+    VkStructureType sType;
+    const void* pNext;
+    VkBuffer buffer;
+} VkBufferMemoryRequirementsInfo2KHR;
+
+typedef struct VkImageMemoryRequirementsInfo2KHR {
+    VkStructureType sType;
+    const void* pNext;
+    VkImage image;
+} VkImageMemoryRequirementsInfo2KHR;
+
+typedef struct VkImageSparseMemoryRequirementsInfo2KHR {
+    VkStructureType sType;
+    const void* pNext;
+    VkImage image;
+} VkImageSparseMemoryRequirementsInfo2KHR;
+
+typedef struct VkMemoryRequirements2KHR {
+    VkStructureType sType;
+    void* pNext;
+    VkMemoryRequirements memoryRequirements;
+} VkMemoryRequirements2KHR;
+
+typedef struct VkSparseImageMemoryRequirements2KHR {
+    VkStructureType sType;
+    void* pNext;
+    VkSparseImageMemoryRequirements memoryRequirements;
+} VkSparseImageMemoryRequirements2KHR;
+
+
+typedef void (VKAPI_PTR *PFN_vkGetImageMemoryRequirements2KHR)(VkDevice device, const VkImageMemoryRequirementsInfo2KHR* pInfo, VkMemoryRequirements2KHR* pMemoryRequirements);
+typedef void (VKAPI_PTR *PFN_vkGetBufferMemoryRequirements2KHR)(VkDevice device, const VkBufferMemoryRequirementsInfo2KHR* pInfo, VkMemoryRequirements2KHR* pMemoryRequirements);
+typedef void (VKAPI_PTR *PFN_vkGetImageSparseMemoryRequirements2KHR)(VkDevice device, const VkImageSparseMemoryRequirementsInfo2KHR* pInfo, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements2KHR* pSparseMemoryRequirements);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkGetImageMemoryRequirements2KHR(
+    VkDevice device,
+    const VkImageMemoryRequirementsInfo2KHR* pInfo,
+    VkMemoryRequirements2KHR* pMemoryRequirements);
+
+VKAPI_ATTR void VKAPI_CALL vkGetBufferMemoryRequirements2KHR(
+    VkDevice device,
+    const VkBufferMemoryRequirementsInfo2KHR* pInfo,
+    VkMemoryRequirements2KHR* pMemoryRequirements);
+
+VKAPI_ATTR void VKAPI_CALL vkGetImageSparseMemoryRequirements2KHR(
+    VkDevice device,
+    const VkImageSparseMemoryRequirementsInfo2KHR* pInfo,
+    uint32_t* pSparseMemoryRequirementCount,
+    VkSparseImageMemoryRequirements2KHR* pSparseMemoryRequirements);
+#endif
+
+#define VK_KHR_image_format_list 1
+#define VK_KHR_IMAGE_FORMAT_LIST_SPEC_VERSION 1
+#define VK_KHR_IMAGE_FORMAT_LIST_EXTENSION_NAME "VK_KHR_image_format_list"
+
+typedef struct VkImageFormatListCreateInfoKHR {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t viewFormatCount;
+    const VkFormat* pViewFormats;
+} VkImageFormatListCreateInfoKHR;
+
+
+
+#define VK_KHR_sampler_ycbcr_conversion 1
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSamplerYcbcrConversionKHR)
+
+#define VK_KHR_SAMPLER_YCBCR_CONVERSION_SPEC_VERSION 1
+#define VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME "VK_KHR_sampler_ycbcr_conversion"
+
+
+typedef enum VkSamplerYcbcrModelConversionKHR {
+    VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY_KHR = 0,
+    VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY_KHR = 1,
+    VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709_KHR = 2,
+    VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601_KHR = 3,
+    VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020_KHR = 4,
+    VK_SAMPLER_YCBCR_MODEL_CONVERSION_BEGIN_RANGE_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY_KHR,
+    VK_SAMPLER_YCBCR_MODEL_CONVERSION_END_RANGE_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020_KHR,
+    VK_SAMPLER_YCBCR_MODEL_CONVERSION_RANGE_SIZE_KHR = (VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020_KHR - VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY_KHR + 1),
+    VK_SAMPLER_YCBCR_MODEL_CONVERSION_MAX_ENUM_KHR = 0x7FFFFFFF
+} VkSamplerYcbcrModelConversionKHR;
+
+typedef enum VkSamplerYcbcrRangeKHR {
+    VK_SAMPLER_YCBCR_RANGE_ITU_FULL_KHR = 0,
+    VK_SAMPLER_YCBCR_RANGE_ITU_NARROW_KHR = 1,
+    VK_SAMPLER_YCBCR_RANGE_BEGIN_RANGE_KHR = VK_SAMPLER_YCBCR_RANGE_ITU_FULL_KHR,
+    VK_SAMPLER_YCBCR_RANGE_END_RANGE_KHR = VK_SAMPLER_YCBCR_RANGE_ITU_NARROW_KHR,
+    VK_SAMPLER_YCBCR_RANGE_RANGE_SIZE_KHR = (VK_SAMPLER_YCBCR_RANGE_ITU_NARROW_KHR - VK_SAMPLER_YCBCR_RANGE_ITU_FULL_KHR + 1),
+    VK_SAMPLER_YCBCR_RANGE_MAX_ENUM_KHR = 0x7FFFFFFF
+} VkSamplerYcbcrRangeKHR;
+
+typedef enum VkChromaLocationKHR {
+    VK_CHROMA_LOCATION_COSITED_EVEN_KHR = 0,
+    VK_CHROMA_LOCATION_MIDPOINT_KHR = 1,
+    VK_CHROMA_LOCATION_BEGIN_RANGE_KHR = VK_CHROMA_LOCATION_COSITED_EVEN_KHR,
+    VK_CHROMA_LOCATION_END_RANGE_KHR = VK_CHROMA_LOCATION_MIDPOINT_KHR,
+    VK_CHROMA_LOCATION_RANGE_SIZE_KHR = (VK_CHROMA_LOCATION_MIDPOINT_KHR - VK_CHROMA_LOCATION_COSITED_EVEN_KHR + 1),
+    VK_CHROMA_LOCATION_MAX_ENUM_KHR = 0x7FFFFFFF
+} VkChromaLocationKHR;
+
+typedef struct VkSamplerYcbcrConversionCreateInfoKHR {
+    VkStructureType sType;
+    const void* pNext;
+    VkFormat format;
+    VkSamplerYcbcrModelConversionKHR ycbcrModel;
+    VkSamplerYcbcrRangeKHR ycbcrRange;
+    VkComponentMapping components;
+    VkChromaLocationKHR xChromaOffset;
+    VkChromaLocationKHR yChromaOffset;
+    VkFilter chromaFilter;
+    VkBool32 forceExplicitReconstruction;
+} VkSamplerYcbcrConversionCreateInfoKHR;
+
+typedef struct VkSamplerYcbcrConversionInfoKHR {
+    VkStructureType sType;
+    const void* pNext;
+    VkSamplerYcbcrConversionKHR conversion;
+} VkSamplerYcbcrConversionInfoKHR;
+
+typedef struct VkBindImagePlaneMemoryInfoKHR {
+    VkStructureType sType;
+    const void* pNext;
+    VkImageAspectFlagBits planeAspect;
+} VkBindImagePlaneMemoryInfoKHR;
+
+typedef struct VkImagePlaneMemoryRequirementsInfoKHR {
+    VkStructureType sType;
+    const void* pNext;
+    VkImageAspectFlagBits planeAspect;
+} VkImagePlaneMemoryRequirementsInfoKHR;
+
+typedef struct VkPhysicalDeviceSamplerYcbcrConversionFeaturesKHR {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 samplerYcbcrConversion;
+} VkPhysicalDeviceSamplerYcbcrConversionFeaturesKHR;
+
+typedef struct VkSamplerYcbcrConversionImageFormatPropertiesKHR {
+    VkStructureType sType;
+    void* pNext;
+    uint32_t combinedImageSamplerDescriptorCount;
+} VkSamplerYcbcrConversionImageFormatPropertiesKHR;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateSamplerYcbcrConversionKHR)(VkDevice device, const VkSamplerYcbcrConversionCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSamplerYcbcrConversionKHR* pYcbcrConversion);
+typedef void (VKAPI_PTR *PFN_vkDestroySamplerYcbcrConversionKHR)(VkDevice device, VkSamplerYcbcrConversionKHR ycbcrConversion, const VkAllocationCallbacks* pAllocator);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateSamplerYcbcrConversionKHR(
+    VkDevice device,
+    const VkSamplerYcbcrConversionCreateInfoKHR* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkSamplerYcbcrConversionKHR* pYcbcrConversion);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroySamplerYcbcrConversionKHR(
+    VkDevice device,
+    VkSamplerYcbcrConversionKHR ycbcrConversion,
+    const VkAllocationCallbacks* pAllocator);
+#endif
+
+#define VK_KHR_bind_memory2 1
+#define VK_KHR_BIND_MEMORY_2_SPEC_VERSION 1
+#define VK_KHR_BIND_MEMORY_2_EXTENSION_NAME "VK_KHR_bind_memory2"
+
+typedef struct VkBindBufferMemoryInfoKHR {
+    VkStructureType sType;
+    const void* pNext;
+    VkBuffer buffer;
+    VkDeviceMemory memory;
+    VkDeviceSize memoryOffset;
+} VkBindBufferMemoryInfoKHR;
+
+typedef struct VkBindImageMemoryInfoKHR {
+    VkStructureType sType;
+    const void* pNext;
+    VkImage image;
+    VkDeviceMemory memory;
+    VkDeviceSize memoryOffset;
+} VkBindImageMemoryInfoKHR;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkBindBufferMemory2KHR)(VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfoKHR* pBindInfos);
+typedef VkResult (VKAPI_PTR *PFN_vkBindImageMemory2KHR)(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfoKHR* pBindInfos);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkBindBufferMemory2KHR(
+    VkDevice device,
+    uint32_t bindInfoCount,
+    const VkBindBufferMemoryInfoKHR* pBindInfos);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory2KHR(
+    VkDevice device,
+    uint32_t bindInfoCount,
+    const VkBindImageMemoryInfoKHR* pBindInfos);
+#endif
+
+#define VK_EXT_debug_report 1
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDebugReportCallbackEXT)
+
+#define VK_EXT_DEBUG_REPORT_SPEC_VERSION 8
+#define VK_EXT_DEBUG_REPORT_EXTENSION_NAME "VK_EXT_debug_report"
+#define VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT
+#define VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT_EXT
+
+
+typedef enum VkDebugReportObjectTypeEXT {
+    VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT = 0,
+    VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT = 1,
+    VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT = 2,
+    VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT = 3,
+    VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT = 4,
+    VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT = 5,
+    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT = 6,
+    VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT = 7,
+    VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT = 8,
+    VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT = 9,
+    VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT = 10,
+    VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT = 11,
+    VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT = 12,
+    VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT = 13,
+    VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT = 14,
+    VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT = 15,
+    VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT = 16,
+    VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT = 17,
+    VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT = 18,
+    VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT = 19,
+    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT = 20,
+    VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT = 21,
+    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT = 22,
+    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT = 23,
+    VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT = 24,
+    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT = 25,
+    VK_DEBUG_REPORT_OBJECT_TYPE_SURFACE_KHR_EXT = 26,
+    VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT = 27,
+    VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT_EXT = 28,
+    VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_KHR_EXT = 29,
+    VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_MODE_KHR_EXT = 30,
+    VK_DEBUG_REPORT_OBJECT_TYPE_OBJECT_TABLE_NVX_EXT = 31,
+    VK_DEBUG_REPORT_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NVX_EXT = 32,
+    VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT = 33,
+    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_KHR_EXT = 1000085000,
+    VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_KHR_EXT = 1000156000,
+    VK_DEBUG_REPORT_OBJECT_TYPE_BEGIN_RANGE_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
+    VK_DEBUG_REPORT_OBJECT_TYPE_END_RANGE_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT,
+    VK_DEBUG_REPORT_OBJECT_TYPE_RANGE_SIZE_EXT = (VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT - VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT + 1),
+    VK_DEBUG_REPORT_OBJECT_TYPE_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkDebugReportObjectTypeEXT;
+
+
+typedef enum VkDebugReportFlagBitsEXT {
+    VK_DEBUG_REPORT_INFORMATION_BIT_EXT = 0x00000001,
+    VK_DEBUG_REPORT_WARNING_BIT_EXT = 0x00000002,
+    VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT = 0x00000004,
+    VK_DEBUG_REPORT_ERROR_BIT_EXT = 0x00000008,
+    VK_DEBUG_REPORT_DEBUG_BIT_EXT = 0x00000010,
+    VK_DEBUG_REPORT_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkDebugReportFlagBitsEXT;
+typedef VkFlags VkDebugReportFlagsEXT;
+
+typedef VkBool32 (VKAPI_PTR *PFN_vkDebugReportCallbackEXT)(
+    VkDebugReportFlagsEXT flags,
+    VkDebugReportObjectTypeEXT objectType,
+    uint64_t object,
+    size_t location,
+    int32_t messageCode,
+    const char* pLayerPrefix,
+    const char* pMessage,
+    void* pUserData);
+
+
+typedef struct VkDebugReportCallbackCreateInfoEXT {
+    VkStructureType sType;
+    const void* pNext;
+    VkDebugReportFlagsEXT flags;
+    PFN_vkDebugReportCallbackEXT pfnCallback;
+    void* pUserData;
+} VkDebugReportCallbackCreateInfoEXT;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateDebugReportCallbackEXT)(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDebugReportCallbackEXT* pCallback);
+typedef void (VKAPI_PTR *PFN_vkDestroyDebugReportCallbackEXT)(VkInstance instance, VkDebugReportCallbackEXT callback, const VkAllocationCallbacks* pAllocator);
+typedef void (VKAPI_PTR *PFN_vkDebugReportMessageEXT)(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objectType, uint64_t object, size_t location, int32_t messageCode, const char* pLayerPrefix, const char* pMessage);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateDebugReportCallbackEXT(
+    VkInstance instance,
+    const VkDebugReportCallbackCreateInfoEXT* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkDebugReportCallbackEXT* pCallback);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyDebugReportCallbackEXT(
+    VkInstance instance,
+    VkDebugReportCallbackEXT callback,
+    const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR void VKAPI_CALL vkDebugReportMessageEXT(
+    VkInstance instance,
+    VkDebugReportFlagsEXT flags,
+    VkDebugReportObjectTypeEXT objectType,
+    uint64_t object,
+    size_t location,
+    int32_t messageCode,
+    const char* pLayerPrefix,
+    const char* pMessage);
+#endif
+
+#define VK_NV_glsl_shader 1
+#define VK_NV_GLSL_SHADER_SPEC_VERSION 1
+#define VK_NV_GLSL_SHADER_EXTENSION_NAME "VK_NV_glsl_shader"
+
+
+#define VK_EXT_depth_range_unrestricted 1
+#define VK_EXT_DEPTH_RANGE_UNRESTRICTED_SPEC_VERSION 1
+#define VK_EXT_DEPTH_RANGE_UNRESTRICTED_EXTENSION_NAME "VK_EXT_depth_range_unrestricted"
+
+
+#define VK_IMG_filter_cubic 1
+#define VK_IMG_FILTER_CUBIC_SPEC_VERSION 1
+#define VK_IMG_FILTER_CUBIC_EXTENSION_NAME "VK_IMG_filter_cubic"
+
+
+#define VK_AMD_rasterization_order 1
+#define VK_AMD_RASTERIZATION_ORDER_SPEC_VERSION 1
+#define VK_AMD_RASTERIZATION_ORDER_EXTENSION_NAME "VK_AMD_rasterization_order"
+
+
+typedef enum VkRasterizationOrderAMD {
+    VK_RASTERIZATION_ORDER_STRICT_AMD = 0,
+    VK_RASTERIZATION_ORDER_RELAXED_AMD = 1,
+    VK_RASTERIZATION_ORDER_BEGIN_RANGE_AMD = VK_RASTERIZATION_ORDER_STRICT_AMD,
+    VK_RASTERIZATION_ORDER_END_RANGE_AMD = VK_RASTERIZATION_ORDER_RELAXED_AMD,
+    VK_RASTERIZATION_ORDER_RANGE_SIZE_AMD = (VK_RASTERIZATION_ORDER_RELAXED_AMD - VK_RASTERIZATION_ORDER_STRICT_AMD + 1),
+    VK_RASTERIZATION_ORDER_MAX_ENUM_AMD = 0x7FFFFFFF
+} VkRasterizationOrderAMD;
+
+typedef struct VkPipelineRasterizationStateRasterizationOrderAMD {
+    VkStructureType sType;
+    const void* pNext;
+    VkRasterizationOrderAMD rasterizationOrder;
+} VkPipelineRasterizationStateRasterizationOrderAMD;
+
+
+
+#define VK_AMD_shader_trinary_minmax 1
+#define VK_AMD_SHADER_TRINARY_MINMAX_SPEC_VERSION 1
+#define VK_AMD_SHADER_TRINARY_MINMAX_EXTENSION_NAME "VK_AMD_shader_trinary_minmax"
+
+
+#define VK_AMD_shader_explicit_vertex_parameter 1
+#define VK_AMD_SHADER_EXPLICIT_VERTEX_PARAMETER_SPEC_VERSION 1
+#define VK_AMD_SHADER_EXPLICIT_VERTEX_PARAMETER_EXTENSION_NAME "VK_AMD_shader_explicit_vertex_parameter"
+
+
+#define VK_EXT_debug_marker 1
+#define VK_EXT_DEBUG_MARKER_SPEC_VERSION 4
+#define VK_EXT_DEBUG_MARKER_EXTENSION_NAME "VK_EXT_debug_marker"
+
+typedef struct VkDebugMarkerObjectNameInfoEXT {
+    VkStructureType sType;
+    const void* pNext;
+    VkDebugReportObjectTypeEXT objectType;
+    uint64_t object;
+    const char* pObjectName;
+} VkDebugMarkerObjectNameInfoEXT;
+
+typedef struct VkDebugMarkerObjectTagInfoEXT {
+    VkStructureType sType;
+    const void* pNext;
+    VkDebugReportObjectTypeEXT objectType;
+    uint64_t object;
+    uint64_t tagName;
+    size_t tagSize;
+    const void* pTag;
+} VkDebugMarkerObjectTagInfoEXT;
+
+typedef struct VkDebugMarkerMarkerInfoEXT {
+    VkStructureType sType;
+    const void* pNext;
+    const char* pMarkerName;
+    float color[4];
+} VkDebugMarkerMarkerInfoEXT;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkDebugMarkerSetObjectTagEXT)(VkDevice device, const VkDebugMarkerObjectTagInfoEXT* pTagInfo);
+typedef VkResult (VKAPI_PTR *PFN_vkDebugMarkerSetObjectNameEXT)(VkDevice device, const VkDebugMarkerObjectNameInfoEXT* pNameInfo);
+typedef void (VKAPI_PTR *PFN_vkCmdDebugMarkerBeginEXT)(VkCommandBuffer commandBuffer, const VkDebugMarkerMarkerInfoEXT* pMarkerInfo);
+typedef void (VKAPI_PTR *PFN_vkCmdDebugMarkerEndEXT)(VkCommandBuffer commandBuffer);
+typedef void (VKAPI_PTR *PFN_vkCmdDebugMarkerInsertEXT)(VkCommandBuffer commandBuffer, const VkDebugMarkerMarkerInfoEXT* pMarkerInfo);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkDebugMarkerSetObjectTagEXT(
+    VkDevice device,
+    const VkDebugMarkerObjectTagInfoEXT* pTagInfo);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkDebugMarkerSetObjectNameEXT(
+    VkDevice device,
+    const VkDebugMarkerObjectNameInfoEXT* pNameInfo);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdDebugMarkerBeginEXT(
+    VkCommandBuffer commandBuffer,
+    const VkDebugMarkerMarkerInfoEXT* pMarkerInfo);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdDebugMarkerEndEXT(
+    VkCommandBuffer commandBuffer);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdDebugMarkerInsertEXT(
+    VkCommandBuffer commandBuffer,
+    const VkDebugMarkerMarkerInfoEXT* pMarkerInfo);
+#endif
+
+#define VK_AMD_gcn_shader 1
+#define VK_AMD_GCN_SHADER_SPEC_VERSION 1
+#define VK_AMD_GCN_SHADER_EXTENSION_NAME "VK_AMD_gcn_shader"
+
+
+#define VK_NV_dedicated_allocation 1
+#define VK_NV_DEDICATED_ALLOCATION_SPEC_VERSION 1
+#define VK_NV_DEDICATED_ALLOCATION_EXTENSION_NAME "VK_NV_dedicated_allocation"
+
+typedef struct VkDedicatedAllocationImageCreateInfoNV {
+    VkStructureType sType;
+    const void* pNext;
+    VkBool32 dedicatedAllocation;
+} VkDedicatedAllocationImageCreateInfoNV;
+
+typedef struct VkDedicatedAllocationBufferCreateInfoNV {
+    VkStructureType sType;
+    const void* pNext;
+    VkBool32 dedicatedAllocation;
+} VkDedicatedAllocationBufferCreateInfoNV;
+
+typedef struct VkDedicatedAllocationMemoryAllocateInfoNV {
+    VkStructureType sType;
+    const void* pNext;
+    VkImage image;
+    VkBuffer buffer;
+} VkDedicatedAllocationMemoryAllocateInfoNV;
+
+
+
+#define VK_AMD_draw_indirect_count 1
+#define VK_AMD_DRAW_INDIRECT_COUNT_SPEC_VERSION 1
+#define VK_AMD_DRAW_INDIRECT_COUNT_EXTENSION_NAME "VK_AMD_draw_indirect_count"
+
+typedef void (VKAPI_PTR *PFN_vkCmdDrawIndirectCountAMD)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride);
+typedef void (VKAPI_PTR *PFN_vkCmdDrawIndexedIndirectCountAMD)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirectCountAMD(
+    VkCommandBuffer commandBuffer,
+    VkBuffer buffer,
+    VkDeviceSize offset,
+    VkBuffer countBuffer,
+    VkDeviceSize countBufferOffset,
+    uint32_t maxDrawCount,
+    uint32_t stride);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexedIndirectCountAMD(
+    VkCommandBuffer commandBuffer,
+    VkBuffer buffer,
+    VkDeviceSize offset,
+    VkBuffer countBuffer,
+    VkDeviceSize countBufferOffset,
+    uint32_t maxDrawCount,
+    uint32_t stride);
+#endif
+
+#define VK_AMD_negative_viewport_height 1
+#define VK_AMD_NEGATIVE_VIEWPORT_HEIGHT_SPEC_VERSION 1
+#define VK_AMD_NEGATIVE_VIEWPORT_HEIGHT_EXTENSION_NAME "VK_AMD_negative_viewport_height"
+
+
+#define VK_AMD_gpu_shader_half_float 1
+#define VK_AMD_GPU_SHADER_HALF_FLOAT_SPEC_VERSION 1
+#define VK_AMD_GPU_SHADER_HALF_FLOAT_EXTENSION_NAME "VK_AMD_gpu_shader_half_float"
+
+
+#define VK_AMD_shader_ballot 1
+#define VK_AMD_SHADER_BALLOT_SPEC_VERSION 1
+#define VK_AMD_SHADER_BALLOT_EXTENSION_NAME "VK_AMD_shader_ballot"
+
+
+#define VK_AMD_texture_gather_bias_lod 1
+#define VK_AMD_TEXTURE_GATHER_BIAS_LOD_SPEC_VERSION 1
+#define VK_AMD_TEXTURE_GATHER_BIAS_LOD_EXTENSION_NAME "VK_AMD_texture_gather_bias_lod"
+
+typedef struct VkTextureLODGatherFormatPropertiesAMD {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 supportsTextureGatherLODBiasAMD;
+} VkTextureLODGatherFormatPropertiesAMD;
+
+
+
+#define VK_KHX_multiview 1
+#define VK_KHX_MULTIVIEW_SPEC_VERSION 1
+#define VK_KHX_MULTIVIEW_EXTENSION_NAME "VK_KHX_multiview"
+
+typedef struct VkRenderPassMultiviewCreateInfoKHX {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t subpassCount;
+    const uint32_t* pViewMasks;
+    uint32_t dependencyCount;
+    const int32_t* pViewOffsets;
+    uint32_t correlationMaskCount;
+    const uint32_t* pCorrelationMasks;
+} VkRenderPassMultiviewCreateInfoKHX;
+
+typedef struct VkPhysicalDeviceMultiviewFeaturesKHX {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 multiview;
+    VkBool32 multiviewGeometryShader;
+    VkBool32 multiviewTessellationShader;
+} VkPhysicalDeviceMultiviewFeaturesKHX;
+
+typedef struct VkPhysicalDeviceMultiviewPropertiesKHX {
+    VkStructureType sType;
+    void* pNext;
+    uint32_t maxMultiviewViewCount;
+    uint32_t maxMultiviewInstanceIndex;
+} VkPhysicalDeviceMultiviewPropertiesKHX;
+
+
+
+#define VK_IMG_format_pvrtc 1
+#define VK_IMG_FORMAT_PVRTC_SPEC_VERSION 1
+#define VK_IMG_FORMAT_PVRTC_EXTENSION_NAME "VK_IMG_format_pvrtc"
+
+
+#define VK_NV_external_memory_capabilities 1
+#define VK_NV_EXTERNAL_MEMORY_CAPABILITIES_SPEC_VERSION 1
+#define VK_NV_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME "VK_NV_external_memory_capabilities"
+
+
+typedef enum VkExternalMemoryHandleTypeFlagBitsNV {
+    VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT_NV = 0x00000001,
+    VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_NV = 0x00000002,
+    VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_IMAGE_BIT_NV = 0x00000004,
+    VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_IMAGE_KMT_BIT_NV = 0x00000008,
+    VK_EXTERNAL_MEMORY_HANDLE_TYPE_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF
+} VkExternalMemoryHandleTypeFlagBitsNV;
+typedef VkFlags VkExternalMemoryHandleTypeFlagsNV;
+
+typedef enum VkExternalMemoryFeatureFlagBitsNV {
+    VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT_NV = 0x00000001,
+    VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_NV = 0x00000002,
+    VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_NV = 0x00000004,
+    VK_EXTERNAL_MEMORY_FEATURE_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF
+} VkExternalMemoryFeatureFlagBitsNV;
+typedef VkFlags VkExternalMemoryFeatureFlagsNV;
+
+typedef struct VkExternalImageFormatPropertiesNV {
+    VkImageFormatProperties imageFormatProperties;
+    VkExternalMemoryFeatureFlagsNV externalMemoryFeatures;
+    VkExternalMemoryHandleTypeFlagsNV exportFromImportedHandleTypes;
+    VkExternalMemoryHandleTypeFlagsNV compatibleHandleTypes;
+} VkExternalImageFormatPropertiesNV;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalImageFormatPropertiesNV)(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkImageTiling tiling, VkImageUsageFlags usage, VkImageCreateFlags flags, VkExternalMemoryHandleTypeFlagsNV externalHandleType, VkExternalImageFormatPropertiesNV* pExternalImageFormatProperties);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceExternalImageFormatPropertiesNV(
+    VkPhysicalDevice physicalDevice,
+    VkFormat format,
+    VkImageType type,
+    VkImageTiling tiling,
+    VkImageUsageFlags usage,
+    VkImageCreateFlags flags,
+    VkExternalMemoryHandleTypeFlagsNV externalHandleType,
+    VkExternalImageFormatPropertiesNV* pExternalImageFormatProperties);
+#endif
+
+#define VK_NV_external_memory 1
+#define VK_NV_EXTERNAL_MEMORY_SPEC_VERSION 1
+#define VK_NV_EXTERNAL_MEMORY_EXTENSION_NAME "VK_NV_external_memory"
+
+typedef struct VkExternalMemoryImageCreateInfoNV {
+    VkStructureType sType;
+    const void* pNext;
+    VkExternalMemoryHandleTypeFlagsNV handleTypes;
+} VkExternalMemoryImageCreateInfoNV;
+
+typedef struct VkExportMemoryAllocateInfoNV {
+    VkStructureType sType;
+    const void* pNext;
+    VkExternalMemoryHandleTypeFlagsNV handleTypes;
+} VkExportMemoryAllocateInfoNV;
+
+
+
+#ifdef VK_USE_PLATFORM_WIN32_KHR
+#define VK_NV_external_memory_win32 1
+#define VK_NV_EXTERNAL_MEMORY_WIN32_SPEC_VERSION 1
+#define VK_NV_EXTERNAL_MEMORY_WIN32_EXTENSION_NAME "VK_NV_external_memory_win32"
+
+typedef struct VkImportMemoryWin32HandleInfoNV {
+    VkStructureType sType;
+    const void* pNext;
+    VkExternalMemoryHandleTypeFlagsNV handleType;
+    HANDLE handle;
+} VkImportMemoryWin32HandleInfoNV;
+
+typedef struct VkExportMemoryWin32HandleInfoNV {
+    VkStructureType sType;
+    const void* pNext;
+    const SECURITY_ATTRIBUTES* pAttributes;
+    DWORD dwAccess;
+} VkExportMemoryWin32HandleInfoNV;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryWin32HandleNV)(VkDevice device, VkDeviceMemory memory, VkExternalMemoryHandleTypeFlagsNV handleType, HANDLE* pHandle);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryWin32HandleNV(
+    VkDevice device,
+    VkDeviceMemory memory,
+    VkExternalMemoryHandleTypeFlagsNV handleType,
+    HANDLE* pHandle);
+#endif
+#endif /* VK_USE_PLATFORM_WIN32_KHR */
+
+#ifdef VK_USE_PLATFORM_WIN32_KHR
+#define VK_NV_win32_keyed_mutex 1
+#define VK_NV_WIN32_KEYED_MUTEX_SPEC_VERSION 1
+#define VK_NV_WIN32_KEYED_MUTEX_EXTENSION_NAME "VK_NV_win32_keyed_mutex"
+
+typedef struct VkWin32KeyedMutexAcquireReleaseInfoNV {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t acquireCount;
+    const VkDeviceMemory* pAcquireSyncs;
+    const uint64_t* pAcquireKeys;
+    const uint32_t* pAcquireTimeoutMilliseconds;
+    uint32_t releaseCount;
+    const VkDeviceMemory* pReleaseSyncs;
+    const uint64_t* pReleaseKeys;
+} VkWin32KeyedMutexAcquireReleaseInfoNV;
+
+
+#endif /* VK_USE_PLATFORM_WIN32_KHR */
+
+#define VK_KHX_device_group 1
+#define VK_KHX_DEVICE_GROUP_SPEC_VERSION 2
+#define VK_KHX_DEVICE_GROUP_EXTENSION_NAME "VK_KHX_device_group"
+#define VK_MAX_DEVICE_GROUP_SIZE_KHX 32
+
+
+typedef enum VkPeerMemoryFeatureFlagBitsKHX {
+    VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT_KHX = 0x00000001,
+    VK_PEER_MEMORY_FEATURE_COPY_DST_BIT_KHX = 0x00000002,
+    VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT_KHX = 0x00000004,
+    VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT_KHX = 0x00000008,
+    VK_PEER_MEMORY_FEATURE_FLAG_BITS_MAX_ENUM_KHX = 0x7FFFFFFF
+} VkPeerMemoryFeatureFlagBitsKHX;
+typedef VkFlags VkPeerMemoryFeatureFlagsKHX;
+
+typedef enum VkMemoryAllocateFlagBitsKHX {
+    VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT_KHX = 0x00000001,
+    VK_MEMORY_ALLOCATE_FLAG_BITS_MAX_ENUM_KHX = 0x7FFFFFFF
+} VkMemoryAllocateFlagBitsKHX;
+typedef VkFlags VkMemoryAllocateFlagsKHX;
+
+typedef enum VkDeviceGroupPresentModeFlagBitsKHX {
+    VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_BIT_KHX = 0x00000001,
+    VK_DEVICE_GROUP_PRESENT_MODE_REMOTE_BIT_KHX = 0x00000002,
+    VK_DEVICE_GROUP_PRESENT_MODE_SUM_BIT_KHX = 0x00000004,
+    VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_MULTI_DEVICE_BIT_KHX = 0x00000008,
+    VK_DEVICE_GROUP_PRESENT_MODE_FLAG_BITS_MAX_ENUM_KHX = 0x7FFFFFFF
+} VkDeviceGroupPresentModeFlagBitsKHX;
+typedef VkFlags VkDeviceGroupPresentModeFlagsKHX;
+
+typedef struct VkMemoryAllocateFlagsInfoKHX {
+    VkStructureType sType;
+    const void* pNext;
+    VkMemoryAllocateFlagsKHX flags;
+    uint32_t deviceMask;
+} VkMemoryAllocateFlagsInfoKHX;
+
+typedef struct VkDeviceGroupRenderPassBeginInfoKHX {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t deviceMask;
+    uint32_t deviceRenderAreaCount;
+    const VkRect2D* pDeviceRenderAreas;
+} VkDeviceGroupRenderPassBeginInfoKHX;
+
+typedef struct VkDeviceGroupCommandBufferBeginInfoKHX {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t deviceMask;
+} VkDeviceGroupCommandBufferBeginInfoKHX;
+
+typedef struct VkDeviceGroupSubmitInfoKHX {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t waitSemaphoreCount;
+    const uint32_t* pWaitSemaphoreDeviceIndices;
+    uint32_t commandBufferCount;
+    const uint32_t* pCommandBufferDeviceMasks;
+    uint32_t signalSemaphoreCount;
+    const uint32_t* pSignalSemaphoreDeviceIndices;
+} VkDeviceGroupSubmitInfoKHX;
+
+typedef struct VkDeviceGroupBindSparseInfoKHX {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t resourceDeviceIndex;
+    uint32_t memoryDeviceIndex;
+} VkDeviceGroupBindSparseInfoKHX;
+
+typedef struct VkBindBufferMemoryDeviceGroupInfoKHX {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t deviceIndexCount;
+    const uint32_t* pDeviceIndices;
+} VkBindBufferMemoryDeviceGroupInfoKHX;
+
+typedef struct VkBindImageMemoryDeviceGroupInfoKHX {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t deviceIndexCount;
+    const uint32_t* pDeviceIndices;
+    uint32_t SFRRectCount;
+    const VkRect2D* pSFRRects;
+} VkBindImageMemoryDeviceGroupInfoKHX;
+
+typedef struct VkDeviceGroupPresentCapabilitiesKHX {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t presentMask[VK_MAX_DEVICE_GROUP_SIZE_KHX];
+    VkDeviceGroupPresentModeFlagsKHX modes;
+} VkDeviceGroupPresentCapabilitiesKHX;
+
+typedef struct VkImageSwapchainCreateInfoKHX {
+    VkStructureType sType;
+    const void* pNext;
+    VkSwapchainKHR swapchain;
+} VkImageSwapchainCreateInfoKHX;
+
+typedef struct VkBindImageMemorySwapchainInfoKHX {
+    VkStructureType sType;
+    const void* pNext;
+    VkSwapchainKHR swapchain;
+    uint32_t imageIndex;
+} VkBindImageMemorySwapchainInfoKHX;
+
+typedef struct VkAcquireNextImageInfoKHX {
+    VkStructureType sType;
+    const void* pNext;
+    VkSwapchainKHR swapchain;
+    uint64_t timeout;
+    VkSemaphore semaphore;
+    VkFence fence;
+    uint32_t deviceMask;
+} VkAcquireNextImageInfoKHX;
+
+typedef struct VkDeviceGroupPresentInfoKHX {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t swapchainCount;
+    const uint32_t* pDeviceMasks;
+    VkDeviceGroupPresentModeFlagBitsKHX mode;
+} VkDeviceGroupPresentInfoKHX;
+
+typedef struct VkDeviceGroupSwapchainCreateInfoKHX {
+    VkStructureType sType;
+    const void* pNext;
+    VkDeviceGroupPresentModeFlagsKHX modes;
+} VkDeviceGroupSwapchainCreateInfoKHX;
+
+
+typedef void (VKAPI_PTR *PFN_vkGetDeviceGroupPeerMemoryFeaturesKHX)(VkDevice device, uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, VkPeerMemoryFeatureFlagsKHX* pPeerMemoryFeatures);
+typedef void (VKAPI_PTR *PFN_vkCmdSetDeviceMaskKHX)(VkCommandBuffer commandBuffer, uint32_t deviceMask);
+typedef void (VKAPI_PTR *PFN_vkCmdDispatchBaseKHX)(VkCommandBuffer commandBuffer, uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ);
+typedef VkResult (VKAPI_PTR *PFN_vkGetDeviceGroupPresentCapabilitiesKHX)(VkDevice device, VkDeviceGroupPresentCapabilitiesKHX* pDeviceGroupPresentCapabilities);
+typedef VkResult (VKAPI_PTR *PFN_vkGetDeviceGroupSurfacePresentModesKHX)(VkDevice device, VkSurfaceKHR surface, VkDeviceGroupPresentModeFlagsKHX* pModes);
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDevicePresentRectanglesKHX)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pRectCount, VkRect2D* pRects);
+typedef VkResult (VKAPI_PTR *PFN_vkAcquireNextImage2KHX)(VkDevice device, const VkAcquireNextImageInfoKHX* pAcquireInfo, uint32_t* pImageIndex);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkGetDeviceGroupPeerMemoryFeaturesKHX(
+    VkDevice device,
+    uint32_t heapIndex,
+    uint32_t localDeviceIndex,
+    uint32_t remoteDeviceIndex,
+    VkPeerMemoryFeatureFlagsKHX* pPeerMemoryFeatures);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetDeviceMaskKHX(
+    VkCommandBuffer commandBuffer,
+    uint32_t deviceMask);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdDispatchBaseKHX(
+    VkCommandBuffer commandBuffer,
+    uint32_t baseGroupX,
+    uint32_t baseGroupY,
+    uint32_t baseGroupZ,
+    uint32_t groupCountX,
+    uint32_t groupCountY,
+    uint32_t groupCountZ);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetDeviceGroupPresentCapabilitiesKHX(
+    VkDevice device,
+    VkDeviceGroupPresentCapabilitiesKHX* pDeviceGroupPresentCapabilities);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetDeviceGroupSurfacePresentModesKHX(
+    VkDevice device,
+    VkSurfaceKHR surface,
+    VkDeviceGroupPresentModeFlagsKHX* pModes);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDevicePresentRectanglesKHX(
+    VkPhysicalDevice physicalDevice,
+    VkSurfaceKHR surface,
+    uint32_t* pRectCount,
+    VkRect2D* pRects);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImage2KHX(
+    VkDevice device,
+    const VkAcquireNextImageInfoKHX* pAcquireInfo,
+    uint32_t* pImageIndex);
+#endif
+
+#define VK_EXT_validation_flags 1
+#define VK_EXT_VALIDATION_FLAGS_SPEC_VERSION 1
+#define VK_EXT_VALIDATION_FLAGS_EXTENSION_NAME "VK_EXT_validation_flags"
+
+
+typedef enum VkValidationCheckEXT {
+    VK_VALIDATION_CHECK_ALL_EXT = 0,
+    VK_VALIDATION_CHECK_SHADERS_EXT = 1,
+    VK_VALIDATION_CHECK_BEGIN_RANGE_EXT = VK_VALIDATION_CHECK_ALL_EXT,
+    VK_VALIDATION_CHECK_END_RANGE_EXT = VK_VALIDATION_CHECK_SHADERS_EXT,
+    VK_VALIDATION_CHECK_RANGE_SIZE_EXT = (VK_VALIDATION_CHECK_SHADERS_EXT - VK_VALIDATION_CHECK_ALL_EXT + 1),
+    VK_VALIDATION_CHECK_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkValidationCheckEXT;
+
+typedef struct VkValidationFlagsEXT {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t disabledValidationCheckCount;
+    VkValidationCheckEXT* pDisabledValidationChecks;
+} VkValidationFlagsEXT;
+
+
+
+#ifdef VK_USE_PLATFORM_VI_NN
+#define VK_NN_vi_surface 1
+#define VK_NN_VI_SURFACE_SPEC_VERSION 1
+#define VK_NN_VI_SURFACE_EXTENSION_NAME "VK_NN_vi_surface"
+
+typedef VkFlags VkViSurfaceCreateFlagsNN;
+
+typedef struct VkViSurfaceCreateInfoNN {
+    VkStructureType sType;
+    const void* pNext;
+    VkViSurfaceCreateFlagsNN flags;
+    void* window;
+} VkViSurfaceCreateInfoNN;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateViSurfaceNN)(VkInstance instance, const VkViSurfaceCreateInfoNN* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateViSurfaceNN(
+    VkInstance instance,
+    const VkViSurfaceCreateInfoNN* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkSurfaceKHR* pSurface);
+#endif
+#endif /* VK_USE_PLATFORM_VI_NN */
+
+#define VK_EXT_shader_subgroup_ballot 1
+#define VK_EXT_SHADER_SUBGROUP_BALLOT_SPEC_VERSION 1
+#define VK_EXT_SHADER_SUBGROUP_BALLOT_EXTENSION_NAME "VK_EXT_shader_subgroup_ballot"
+
+
+#define VK_EXT_shader_subgroup_vote 1
+#define VK_EXT_SHADER_SUBGROUP_VOTE_SPEC_VERSION 1
+#define VK_EXT_SHADER_SUBGROUP_VOTE_EXTENSION_NAME "VK_EXT_shader_subgroup_vote"
+
+
+#define VK_KHX_device_group_creation 1
+#define VK_KHX_DEVICE_GROUP_CREATION_SPEC_VERSION 1
+#define VK_KHX_DEVICE_GROUP_CREATION_EXTENSION_NAME "VK_KHX_device_group_creation"
+
+typedef struct VkPhysicalDeviceGroupPropertiesKHX {
+    VkStructureType sType;
+    void* pNext;
+    uint32_t physicalDeviceCount;
+    VkPhysicalDevice physicalDevices[VK_MAX_DEVICE_GROUP_SIZE_KHX];
+    VkBool32 subsetAllocation;
+} VkPhysicalDeviceGroupPropertiesKHX;
+
+typedef struct VkDeviceGroupDeviceCreateInfoKHX {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t physicalDeviceCount;
+    const VkPhysicalDevice* pPhysicalDevices;
+} VkDeviceGroupDeviceCreateInfoKHX;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkEnumeratePhysicalDeviceGroupsKHX)(VkInstance instance, uint32_t* pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupPropertiesKHX* pPhysicalDeviceGroupProperties);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDeviceGroupsKHX(
+    VkInstance instance,
+    uint32_t* pPhysicalDeviceGroupCount,
+    VkPhysicalDeviceGroupPropertiesKHX* pPhysicalDeviceGroupProperties);
+#endif
+
+#define VK_NVX_device_generated_commands 1
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkObjectTableNVX)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkIndirectCommandsLayoutNVX)
+
+#define VK_NVX_DEVICE_GENERATED_COMMANDS_SPEC_VERSION 3
+#define VK_NVX_DEVICE_GENERATED_COMMANDS_EXTENSION_NAME "VK_NVX_device_generated_commands"
+
+
+typedef enum VkIndirectCommandsTokenTypeNVX {
+    VK_INDIRECT_COMMANDS_TOKEN_TYPE_PIPELINE_NVX = 0,
+    VK_INDIRECT_COMMANDS_TOKEN_TYPE_DESCRIPTOR_SET_NVX = 1,
+    VK_INDIRECT_COMMANDS_TOKEN_TYPE_INDEX_BUFFER_NVX = 2,
+    VK_INDIRECT_COMMANDS_TOKEN_TYPE_VERTEX_BUFFER_NVX = 3,
+    VK_INDIRECT_COMMANDS_TOKEN_TYPE_PUSH_CONSTANT_NVX = 4,
+    VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_INDEXED_NVX = 5,
+    VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_NVX = 6,
+    VK_INDIRECT_COMMANDS_TOKEN_TYPE_DISPATCH_NVX = 7,
+    VK_INDIRECT_COMMANDS_TOKEN_TYPE_BEGIN_RANGE_NVX = VK_INDIRECT_COMMANDS_TOKEN_TYPE_PIPELINE_NVX,
+    VK_INDIRECT_COMMANDS_TOKEN_TYPE_END_RANGE_NVX = VK_INDIRECT_COMMANDS_TOKEN_TYPE_DISPATCH_NVX,
+    VK_INDIRECT_COMMANDS_TOKEN_TYPE_RANGE_SIZE_NVX = (VK_INDIRECT_COMMANDS_TOKEN_TYPE_DISPATCH_NVX - VK_INDIRECT_COMMANDS_TOKEN_TYPE_PIPELINE_NVX + 1),
+    VK_INDIRECT_COMMANDS_TOKEN_TYPE_MAX_ENUM_NVX = 0x7FFFFFFF
+} VkIndirectCommandsTokenTypeNVX;
+
+typedef enum VkObjectEntryTypeNVX {
+    VK_OBJECT_ENTRY_TYPE_DESCRIPTOR_SET_NVX = 0,
+    VK_OBJECT_ENTRY_TYPE_PIPELINE_NVX = 1,
+    VK_OBJECT_ENTRY_TYPE_INDEX_BUFFER_NVX = 2,
+    VK_OBJECT_ENTRY_TYPE_VERTEX_BUFFER_NVX = 3,
+    VK_OBJECT_ENTRY_TYPE_PUSH_CONSTANT_NVX = 4,
+    VK_OBJECT_ENTRY_TYPE_BEGIN_RANGE_NVX = VK_OBJECT_ENTRY_TYPE_DESCRIPTOR_SET_NVX,
+    VK_OBJECT_ENTRY_TYPE_END_RANGE_NVX = VK_OBJECT_ENTRY_TYPE_PUSH_CONSTANT_NVX,
+    VK_OBJECT_ENTRY_TYPE_RANGE_SIZE_NVX = (VK_OBJECT_ENTRY_TYPE_PUSH_CONSTANT_NVX - VK_OBJECT_ENTRY_TYPE_DESCRIPTOR_SET_NVX + 1),
+    VK_OBJECT_ENTRY_TYPE_MAX_ENUM_NVX = 0x7FFFFFFF
+} VkObjectEntryTypeNVX;
+
+
+typedef enum VkIndirectCommandsLayoutUsageFlagBitsNVX {
+    VK_INDIRECT_COMMANDS_LAYOUT_USAGE_UNORDERED_SEQUENCES_BIT_NVX = 0x00000001,
+    VK_INDIRECT_COMMANDS_LAYOUT_USAGE_SPARSE_SEQUENCES_BIT_NVX = 0x00000002,
+    VK_INDIRECT_COMMANDS_LAYOUT_USAGE_EMPTY_EXECUTIONS_BIT_NVX = 0x00000004,
+    VK_INDIRECT_COMMANDS_LAYOUT_USAGE_INDEXED_SEQUENCES_BIT_NVX = 0x00000008,
+    VK_INDIRECT_COMMANDS_LAYOUT_USAGE_FLAG_BITS_MAX_ENUM_NVX = 0x7FFFFFFF
+} VkIndirectCommandsLayoutUsageFlagBitsNVX;
+typedef VkFlags VkIndirectCommandsLayoutUsageFlagsNVX;
+
+typedef enum VkObjectEntryUsageFlagBitsNVX {
+    VK_OBJECT_ENTRY_USAGE_GRAPHICS_BIT_NVX = 0x00000001,
+    VK_OBJECT_ENTRY_USAGE_COMPUTE_BIT_NVX = 0x00000002,
+    VK_OBJECT_ENTRY_USAGE_FLAG_BITS_MAX_ENUM_NVX = 0x7FFFFFFF
+} VkObjectEntryUsageFlagBitsNVX;
+typedef VkFlags VkObjectEntryUsageFlagsNVX;
+
+typedef struct VkDeviceGeneratedCommandsFeaturesNVX {
+    VkStructureType sType;
+    const void* pNext;
+    VkBool32 computeBindingPointSupport;
+} VkDeviceGeneratedCommandsFeaturesNVX;
+
+typedef struct VkDeviceGeneratedCommandsLimitsNVX {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t maxIndirectCommandsLayoutTokenCount;
+    uint32_t maxObjectEntryCounts;
+    uint32_t minSequenceCountBufferOffsetAlignment;
+    uint32_t minSequenceIndexBufferOffsetAlignment;
+    uint32_t minCommandsTokenBufferOffsetAlignment;
+} VkDeviceGeneratedCommandsLimitsNVX;
+
+typedef struct VkIndirectCommandsTokenNVX {
+    VkIndirectCommandsTokenTypeNVX tokenType;
+    VkBuffer buffer;
+    VkDeviceSize offset;
+} VkIndirectCommandsTokenNVX;
+
+typedef struct VkIndirectCommandsLayoutTokenNVX {
+    VkIndirectCommandsTokenTypeNVX tokenType;
+    uint32_t bindingUnit;
+    uint32_t dynamicCount;
+    uint32_t divisor;
+} VkIndirectCommandsLayoutTokenNVX;
+
+typedef struct VkIndirectCommandsLayoutCreateInfoNVX {
+    VkStructureType sType;
+    const void* pNext;
+    VkPipelineBindPoint pipelineBindPoint;
+    VkIndirectCommandsLayoutUsageFlagsNVX flags;
+    uint32_t tokenCount;
+    const VkIndirectCommandsLayoutTokenNVX* pTokens;
+} VkIndirectCommandsLayoutCreateInfoNVX;
+
+typedef struct VkCmdProcessCommandsInfoNVX {
+    VkStructureType sType;
+    const void* pNext;
+    VkObjectTableNVX objectTable;
+    VkIndirectCommandsLayoutNVX indirectCommandsLayout;
+    uint32_t indirectCommandsTokenCount;
+    const VkIndirectCommandsTokenNVX* pIndirectCommandsTokens;
+    uint32_t maxSequencesCount;
+    VkCommandBuffer targetCommandBuffer;
+    VkBuffer sequencesCountBuffer;
+    VkDeviceSize sequencesCountOffset;
+    VkBuffer sequencesIndexBuffer;
+    VkDeviceSize sequencesIndexOffset;
+} VkCmdProcessCommandsInfoNVX;
+
+typedef struct VkCmdReserveSpaceForCommandsInfoNVX {
+    VkStructureType sType;
+    const void* pNext;
+    VkObjectTableNVX objectTable;
+    VkIndirectCommandsLayoutNVX indirectCommandsLayout;
+    uint32_t maxSequencesCount;
+} VkCmdReserveSpaceForCommandsInfoNVX;
+
+typedef struct VkObjectTableCreateInfoNVX {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t objectCount;
+    const VkObjectEntryTypeNVX* pObjectEntryTypes;
+    const uint32_t* pObjectEntryCounts;
+    const VkObjectEntryUsageFlagsNVX* pObjectEntryUsageFlags;
+    uint32_t maxUniformBuffersPerDescriptor;
+    uint32_t maxStorageBuffersPerDescriptor;
+    uint32_t maxStorageImagesPerDescriptor;
+    uint32_t maxSampledImagesPerDescriptor;
+    uint32_t maxPipelineLayouts;
+} VkObjectTableCreateInfoNVX;
+
+typedef struct VkObjectTableEntryNVX {
+    VkObjectEntryTypeNVX type;
+    VkObjectEntryUsageFlagsNVX flags;
+} VkObjectTableEntryNVX;
+
+typedef struct VkObjectTablePipelineEntryNVX {
+    VkObjectEntryTypeNVX type;
+    VkObjectEntryUsageFlagsNVX flags;
+    VkPipeline pipeline;
+} VkObjectTablePipelineEntryNVX;
+
+typedef struct VkObjectTableDescriptorSetEntryNVX {
+    VkObjectEntryTypeNVX type;
+    VkObjectEntryUsageFlagsNVX flags;
+    VkPipelineLayout pipelineLayout;
+    VkDescriptorSet descriptorSet;
+} VkObjectTableDescriptorSetEntryNVX;
+
+typedef struct VkObjectTableVertexBufferEntryNVX {
+    VkObjectEntryTypeNVX type;
+    VkObjectEntryUsageFlagsNVX flags;
+    VkBuffer buffer;
+} VkObjectTableVertexBufferEntryNVX;
+
+typedef struct VkObjectTableIndexBufferEntryNVX {
+    VkObjectEntryTypeNVX type;
+    VkObjectEntryUsageFlagsNVX flags;
+    VkBuffer buffer;
+    VkIndexType indexType;
+} VkObjectTableIndexBufferEntryNVX;
+
+typedef struct VkObjectTablePushConstantEntryNVX {
+    VkObjectEntryTypeNVX type;
+    VkObjectEntryUsageFlagsNVX flags;
+    VkPipelineLayout pipelineLayout;
+    VkShaderStageFlags stageFlags;
+} VkObjectTablePushConstantEntryNVX;
+
+
+typedef void (VKAPI_PTR *PFN_vkCmdProcessCommandsNVX)(VkCommandBuffer commandBuffer, const VkCmdProcessCommandsInfoNVX* pProcessCommandsInfo);
+typedef void (VKAPI_PTR *PFN_vkCmdReserveSpaceForCommandsNVX)(VkCommandBuffer commandBuffer, const VkCmdReserveSpaceForCommandsInfoNVX* pReserveSpaceInfo);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateIndirectCommandsLayoutNVX)(VkDevice device, const VkIndirectCommandsLayoutCreateInfoNVX* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkIndirectCommandsLayoutNVX* pIndirectCommandsLayout);
+typedef void (VKAPI_PTR *PFN_vkDestroyIndirectCommandsLayoutNVX)(VkDevice device, VkIndirectCommandsLayoutNVX indirectCommandsLayout, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateObjectTableNVX)(VkDevice device, const VkObjectTableCreateInfoNVX* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkObjectTableNVX* pObjectTable);
+typedef void (VKAPI_PTR *PFN_vkDestroyObjectTableNVX)(VkDevice device, VkObjectTableNVX objectTable, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkRegisterObjectsNVX)(VkDevice device, VkObjectTableNVX objectTable, uint32_t objectCount, const VkObjectTableEntryNVX* const* ppObjectTableEntries, const uint32_t* pObjectIndices);
+typedef VkResult (VKAPI_PTR *PFN_vkUnregisterObjectsNVX)(VkDevice device, VkObjectTableNVX objectTable, uint32_t objectCount, const VkObjectEntryTypeNVX* pObjectEntryTypes, const uint32_t* pObjectIndices);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX)(VkPhysicalDevice physicalDevice, VkDeviceGeneratedCommandsFeaturesNVX* pFeatures, VkDeviceGeneratedCommandsLimitsNVX* pLimits);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdProcessCommandsNVX(
+    VkCommandBuffer commandBuffer,
+    const VkCmdProcessCommandsInfoNVX* pProcessCommandsInfo);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdReserveSpaceForCommandsNVX(
+    VkCommandBuffer commandBuffer,
+    const VkCmdReserveSpaceForCommandsInfoNVX* pReserveSpaceInfo);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateIndirectCommandsLayoutNVX(
+    VkDevice device,
+    const VkIndirectCommandsLayoutCreateInfoNVX* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkIndirectCommandsLayoutNVX* pIndirectCommandsLayout);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyIndirectCommandsLayoutNVX(
+    VkDevice device,
+    VkIndirectCommandsLayoutNVX indirectCommandsLayout,
+    const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateObjectTableNVX(
+    VkDevice device,
+    const VkObjectTableCreateInfoNVX* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkObjectTableNVX* pObjectTable);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyObjectTableNVX(
+    VkDevice device,
+    VkObjectTableNVX objectTable,
+    const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkRegisterObjectsNVX(
+    VkDevice device,
+    VkObjectTableNVX objectTable,
+    uint32_t objectCount,
+    const VkObjectTableEntryNVX* const* ppObjectTableEntries,
+    const uint32_t* pObjectIndices);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkUnregisterObjectsNVX(
+    VkDevice device,
+    VkObjectTableNVX objectTable,
+    uint32_t objectCount,
+    const VkObjectEntryTypeNVX* pObjectEntryTypes,
+    const uint32_t* pObjectIndices);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX(
+    VkPhysicalDevice physicalDevice,
+    VkDeviceGeneratedCommandsFeaturesNVX* pFeatures,
+    VkDeviceGeneratedCommandsLimitsNVX* pLimits);
+#endif
+
+#define VK_NV_clip_space_w_scaling 1
+#define VK_NV_CLIP_SPACE_W_SCALING_SPEC_VERSION 1
+#define VK_NV_CLIP_SPACE_W_SCALING_EXTENSION_NAME "VK_NV_clip_space_w_scaling"
+
+typedef struct VkViewportWScalingNV {
+    float xcoeff;
+    float ycoeff;
+} VkViewportWScalingNV;
+
+typedef struct VkPipelineViewportWScalingStateCreateInfoNV {
+    VkStructureType sType;
+    const void* pNext;
+    VkBool32 viewportWScalingEnable;
+    uint32_t viewportCount;
+    const VkViewportWScalingNV* pViewportWScalings;
+} VkPipelineViewportWScalingStateCreateInfoNV;
+
+
+typedef void (VKAPI_PTR *PFN_vkCmdSetViewportWScalingNV)(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewportWScalingNV* pViewportWScalings);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdSetViewportWScalingNV(
+    VkCommandBuffer commandBuffer,
+    uint32_t firstViewport,
+    uint32_t viewportCount,
+    const VkViewportWScalingNV* pViewportWScalings);
+#endif
+
+#define VK_EXT_direct_mode_display 1
+#define VK_EXT_DIRECT_MODE_DISPLAY_SPEC_VERSION 1
+#define VK_EXT_DIRECT_MODE_DISPLAY_EXTENSION_NAME "VK_EXT_direct_mode_display"
+
+typedef VkResult (VKAPI_PTR *PFN_vkReleaseDisplayEXT)(VkPhysicalDevice physicalDevice, VkDisplayKHR display);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkReleaseDisplayEXT(
+    VkPhysicalDevice physicalDevice,
+    VkDisplayKHR display);
+#endif
+
+#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT
+#define VK_EXT_acquire_xlib_display 1
+#include <X11/extensions/Xrandr.h>
+
+#define VK_EXT_ACQUIRE_XLIB_DISPLAY_SPEC_VERSION 1
+#define VK_EXT_ACQUIRE_XLIB_DISPLAY_EXTENSION_NAME "VK_EXT_acquire_xlib_display"
+
+typedef VkResult (VKAPI_PTR *PFN_vkAcquireXlibDisplayEXT)(VkPhysicalDevice physicalDevice, Display* dpy, VkDisplayKHR display);
+typedef VkResult (VKAPI_PTR *PFN_vkGetRandROutputDisplayEXT)(VkPhysicalDevice physicalDevice, Display* dpy, RROutput rrOutput, VkDisplayKHR* pDisplay);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkAcquireXlibDisplayEXT(
+    VkPhysicalDevice physicalDevice,
+    Display* dpy,
+    VkDisplayKHR display);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetRandROutputDisplayEXT(
+    VkPhysicalDevice physicalDevice,
+    Display* dpy,
+    RROutput rrOutput,
+    VkDisplayKHR* pDisplay);
+#endif
+#endif /* VK_USE_PLATFORM_XLIB_XRANDR_EXT */
+
+#define VK_EXT_display_surface_counter 1
+#define VK_EXT_DISPLAY_SURFACE_COUNTER_SPEC_VERSION 1
+#define VK_EXT_DISPLAY_SURFACE_COUNTER_EXTENSION_NAME "VK_EXT_display_surface_counter"
+#define VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES2_EXT VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_EXT
+
+
+typedef enum VkSurfaceCounterFlagBitsEXT {
+    VK_SURFACE_COUNTER_VBLANK_EXT = 0x00000001,
+    VK_SURFACE_COUNTER_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkSurfaceCounterFlagBitsEXT;
+typedef VkFlags VkSurfaceCounterFlagsEXT;
+
+typedef struct VkSurfaceCapabilities2EXT {
+    VkStructureType sType;
+    void* pNext;
+    uint32_t minImageCount;
+    uint32_t maxImageCount;
+    VkExtent2D currentExtent;
+    VkExtent2D minImageExtent;
+    VkExtent2D maxImageExtent;
+    uint32_t maxImageArrayLayers;
+    VkSurfaceTransformFlagsKHR supportedTransforms;
+    VkSurfaceTransformFlagBitsKHR currentTransform;
+    VkCompositeAlphaFlagsKHR supportedCompositeAlpha;
+    VkImageUsageFlags supportedUsageFlags;
+    VkSurfaceCounterFlagsEXT supportedSurfaceCounters;
+} VkSurfaceCapabilities2EXT;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceCapabilities2EXT)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, VkSurfaceCapabilities2EXT* pSurfaceCapabilities);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceCapabilities2EXT(
+    VkPhysicalDevice physicalDevice,
+    VkSurfaceKHR surface,
+    VkSurfaceCapabilities2EXT* pSurfaceCapabilities);
+#endif
+
+#define VK_EXT_display_control 1
+#define VK_EXT_DISPLAY_CONTROL_SPEC_VERSION 1
+#define VK_EXT_DISPLAY_CONTROL_EXTENSION_NAME "VK_EXT_display_control"
+
+
+typedef enum VkDisplayPowerStateEXT {
+    VK_DISPLAY_POWER_STATE_OFF_EXT = 0,
+    VK_DISPLAY_POWER_STATE_SUSPEND_EXT = 1,
+    VK_DISPLAY_POWER_STATE_ON_EXT = 2,
+    VK_DISPLAY_POWER_STATE_BEGIN_RANGE_EXT = VK_DISPLAY_POWER_STATE_OFF_EXT,
+    VK_DISPLAY_POWER_STATE_END_RANGE_EXT = VK_DISPLAY_POWER_STATE_ON_EXT,
+    VK_DISPLAY_POWER_STATE_RANGE_SIZE_EXT = (VK_DISPLAY_POWER_STATE_ON_EXT - VK_DISPLAY_POWER_STATE_OFF_EXT + 1),
+    VK_DISPLAY_POWER_STATE_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkDisplayPowerStateEXT;
+
+typedef enum VkDeviceEventTypeEXT {
+    VK_DEVICE_EVENT_TYPE_DISPLAY_HOTPLUG_EXT = 0,
+    VK_DEVICE_EVENT_TYPE_BEGIN_RANGE_EXT = VK_DEVICE_EVENT_TYPE_DISPLAY_HOTPLUG_EXT,
+    VK_DEVICE_EVENT_TYPE_END_RANGE_EXT = VK_DEVICE_EVENT_TYPE_DISPLAY_HOTPLUG_EXT,
+    VK_DEVICE_EVENT_TYPE_RANGE_SIZE_EXT = (VK_DEVICE_EVENT_TYPE_DISPLAY_HOTPLUG_EXT - VK_DEVICE_EVENT_TYPE_DISPLAY_HOTPLUG_EXT + 1),
+    VK_DEVICE_EVENT_TYPE_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkDeviceEventTypeEXT;
+
+typedef enum VkDisplayEventTypeEXT {
+    VK_DISPLAY_EVENT_TYPE_FIRST_PIXEL_OUT_EXT = 0,
+    VK_DISPLAY_EVENT_TYPE_BEGIN_RANGE_EXT = VK_DISPLAY_EVENT_TYPE_FIRST_PIXEL_OUT_EXT,
+    VK_DISPLAY_EVENT_TYPE_END_RANGE_EXT = VK_DISPLAY_EVENT_TYPE_FIRST_PIXEL_OUT_EXT,
+    VK_DISPLAY_EVENT_TYPE_RANGE_SIZE_EXT = (VK_DISPLAY_EVENT_TYPE_FIRST_PIXEL_OUT_EXT - VK_DISPLAY_EVENT_TYPE_FIRST_PIXEL_OUT_EXT + 1),
+    VK_DISPLAY_EVENT_TYPE_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkDisplayEventTypeEXT;
+
+typedef struct VkDisplayPowerInfoEXT {
+    VkStructureType sType;
+    const void* pNext;
+    VkDisplayPowerStateEXT powerState;
+} VkDisplayPowerInfoEXT;
+
+typedef struct VkDeviceEventInfoEXT {
+    VkStructureType sType;
+    const void* pNext;
+    VkDeviceEventTypeEXT deviceEvent;
+} VkDeviceEventInfoEXT;
+
+typedef struct VkDisplayEventInfoEXT {
+    VkStructureType sType;
+    const void* pNext;
+    VkDisplayEventTypeEXT displayEvent;
+} VkDisplayEventInfoEXT;
+
+typedef struct VkSwapchainCounterCreateInfoEXT {
+    VkStructureType sType;
+    const void* pNext;
+    VkSurfaceCounterFlagsEXT surfaceCounters;
+} VkSwapchainCounterCreateInfoEXT;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkDisplayPowerControlEXT)(VkDevice device, VkDisplayKHR display, const VkDisplayPowerInfoEXT* pDisplayPowerInfo);
+typedef VkResult (VKAPI_PTR *PFN_vkRegisterDeviceEventEXT)(VkDevice device, const VkDeviceEventInfoEXT* pDeviceEventInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence);
+typedef VkResult (VKAPI_PTR *PFN_vkRegisterDisplayEventEXT)(VkDevice device, VkDisplayKHR display, const VkDisplayEventInfoEXT* pDisplayEventInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence);
+typedef VkResult (VKAPI_PTR *PFN_vkGetSwapchainCounterEXT)(VkDevice device, VkSwapchainKHR swapchain, VkSurfaceCounterFlagBitsEXT counter, uint64_t* pCounterValue);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkDisplayPowerControlEXT(
+    VkDevice device,
+    VkDisplayKHR display,
+    const VkDisplayPowerInfoEXT* pDisplayPowerInfo);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkRegisterDeviceEventEXT(
+    VkDevice device,
+    const VkDeviceEventInfoEXT* pDeviceEventInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkFence* pFence);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkRegisterDisplayEventEXT(
+    VkDevice device,
+    VkDisplayKHR display,
+    const VkDisplayEventInfoEXT* pDisplayEventInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkFence* pFence);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainCounterEXT(
+    VkDevice device,
+    VkSwapchainKHR swapchain,
+    VkSurfaceCounterFlagBitsEXT counter,
+    uint64_t* pCounterValue);
+#endif
+
+#define VK_GOOGLE_display_timing 1
+#define VK_GOOGLE_DISPLAY_TIMING_SPEC_VERSION 1
+#define VK_GOOGLE_DISPLAY_TIMING_EXTENSION_NAME "VK_GOOGLE_display_timing"
+
+typedef struct VkRefreshCycleDurationGOOGLE {
+    uint64_t refreshDuration;
+} VkRefreshCycleDurationGOOGLE;
+
+typedef struct VkPastPresentationTimingGOOGLE {
+    uint32_t presentID;
+    uint64_t desiredPresentTime;
+    uint64_t actualPresentTime;
+    uint64_t earliestPresentTime;
+    uint64_t presentMargin;
+} VkPastPresentationTimingGOOGLE;
+
+typedef struct VkPresentTimeGOOGLE {
+    uint32_t presentID;
+    uint64_t desiredPresentTime;
+} VkPresentTimeGOOGLE;
+
+typedef struct VkPresentTimesInfoGOOGLE {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t swapchainCount;
+    const VkPresentTimeGOOGLE* pTimes;
+} VkPresentTimesInfoGOOGLE;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkGetRefreshCycleDurationGOOGLE)(VkDevice device, VkSwapchainKHR swapchain, VkRefreshCycleDurationGOOGLE* pDisplayTimingProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkGetPastPresentationTimingGOOGLE)(VkDevice device, VkSwapchainKHR swapchain, uint32_t* pPresentationTimingCount, VkPastPresentationTimingGOOGLE* pPresentationTimings);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetRefreshCycleDurationGOOGLE(
+    VkDevice device,
+    VkSwapchainKHR swapchain,
+    VkRefreshCycleDurationGOOGLE* pDisplayTimingProperties);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPastPresentationTimingGOOGLE(
+    VkDevice device,
+    VkSwapchainKHR swapchain,
+    uint32_t* pPresentationTimingCount,
+    VkPastPresentationTimingGOOGLE* pPresentationTimings);
+#endif
+
+#define VK_NV_sample_mask_override_coverage 1
+#define VK_NV_SAMPLE_MASK_OVERRIDE_COVERAGE_SPEC_VERSION 1
+#define VK_NV_SAMPLE_MASK_OVERRIDE_COVERAGE_EXTENSION_NAME "VK_NV_sample_mask_override_coverage"
+
+
+#define VK_NV_geometry_shader_passthrough 1
+#define VK_NV_GEOMETRY_SHADER_PASSTHROUGH_SPEC_VERSION 1
+#define VK_NV_GEOMETRY_SHADER_PASSTHROUGH_EXTENSION_NAME "VK_NV_geometry_shader_passthrough"
+
+
+#define VK_NV_viewport_array2 1
+#define VK_NV_VIEWPORT_ARRAY2_SPEC_VERSION 1
+#define VK_NV_VIEWPORT_ARRAY2_EXTENSION_NAME "VK_NV_viewport_array2"
+
+
+#define VK_NVX_multiview_per_view_attributes 1
+#define VK_NVX_MULTIVIEW_PER_VIEW_ATTRIBUTES_SPEC_VERSION 1
+#define VK_NVX_MULTIVIEW_PER_VIEW_ATTRIBUTES_EXTENSION_NAME "VK_NVX_multiview_per_view_attributes"
+
+typedef struct VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 perViewPositionAllComponents;
+} VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX;
+
+
+
+#define VK_NV_viewport_swizzle 1
+#define VK_NV_VIEWPORT_SWIZZLE_SPEC_VERSION 1
+#define VK_NV_VIEWPORT_SWIZZLE_EXTENSION_NAME "VK_NV_viewport_swizzle"
+
+
+typedef enum VkViewportCoordinateSwizzleNV {
+    VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_X_NV = 0,
+    VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_X_NV = 1,
+    VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_Y_NV = 2,
+    VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_Y_NV = 3,
+    VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_Z_NV = 4,
+    VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_Z_NV = 5,
+    VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_W_NV = 6,
+    VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_W_NV = 7,
+    VK_VIEWPORT_COORDINATE_SWIZZLE_BEGIN_RANGE_NV = VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_X_NV,
+    VK_VIEWPORT_COORDINATE_SWIZZLE_END_RANGE_NV = VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_W_NV,
+    VK_VIEWPORT_COORDINATE_SWIZZLE_RANGE_SIZE_NV = (VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_W_NV - VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_X_NV + 1),
+    VK_VIEWPORT_COORDINATE_SWIZZLE_MAX_ENUM_NV = 0x7FFFFFFF
+} VkViewportCoordinateSwizzleNV;
+
+typedef VkFlags VkPipelineViewportSwizzleStateCreateFlagsNV;
+
+typedef struct VkViewportSwizzleNV {
+    VkViewportCoordinateSwizzleNV x;
+    VkViewportCoordinateSwizzleNV y;
+    VkViewportCoordinateSwizzleNV z;
+    VkViewportCoordinateSwizzleNV w;
+} VkViewportSwizzleNV;
+
+typedef struct VkPipelineViewportSwizzleStateCreateInfoNV {
+    VkStructureType sType;
+    const void* pNext;
+    VkPipelineViewportSwizzleStateCreateFlagsNV flags;
+    uint32_t viewportCount;
+    const VkViewportSwizzleNV* pViewportSwizzles;
+} VkPipelineViewportSwizzleStateCreateInfoNV;
+
+
+
+#define VK_EXT_discard_rectangles 1
+#define VK_EXT_DISCARD_RECTANGLES_SPEC_VERSION 1
+#define VK_EXT_DISCARD_RECTANGLES_EXTENSION_NAME "VK_EXT_discard_rectangles"
+
+
+typedef enum VkDiscardRectangleModeEXT {
+    VK_DISCARD_RECTANGLE_MODE_INCLUSIVE_EXT = 0,
+    VK_DISCARD_RECTANGLE_MODE_EXCLUSIVE_EXT = 1,
+    VK_DISCARD_RECTANGLE_MODE_BEGIN_RANGE_EXT = VK_DISCARD_RECTANGLE_MODE_INCLUSIVE_EXT,
+    VK_DISCARD_RECTANGLE_MODE_END_RANGE_EXT = VK_DISCARD_RECTANGLE_MODE_EXCLUSIVE_EXT,
+    VK_DISCARD_RECTANGLE_MODE_RANGE_SIZE_EXT = (VK_DISCARD_RECTANGLE_MODE_EXCLUSIVE_EXT - VK_DISCARD_RECTANGLE_MODE_INCLUSIVE_EXT + 1),
+    VK_DISCARD_RECTANGLE_MODE_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkDiscardRectangleModeEXT;
+
+typedef VkFlags VkPipelineDiscardRectangleStateCreateFlagsEXT;
+
+typedef struct VkPhysicalDeviceDiscardRectanglePropertiesEXT {
+    VkStructureType sType;
+    void* pNext;
+    uint32_t maxDiscardRectangles;
+} VkPhysicalDeviceDiscardRectanglePropertiesEXT;
+
+typedef struct VkPipelineDiscardRectangleStateCreateInfoEXT {
+    VkStructureType sType;
+    const void* pNext;
+    VkPipelineDiscardRectangleStateCreateFlagsEXT flags;
+    VkDiscardRectangleModeEXT discardRectangleMode;
+    uint32_t discardRectangleCount;
+    const VkRect2D* pDiscardRectangles;
+} VkPipelineDiscardRectangleStateCreateInfoEXT;
+
+
+typedef void (VKAPI_PTR *PFN_vkCmdSetDiscardRectangleEXT)(VkCommandBuffer commandBuffer, uint32_t firstDiscardRectangle, uint32_t discardRectangleCount, const VkRect2D* pDiscardRectangles);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdSetDiscardRectangleEXT(
+    VkCommandBuffer commandBuffer,
+    uint32_t firstDiscardRectangle,
+    uint32_t discardRectangleCount,
+    const VkRect2D* pDiscardRectangles);
+#endif
+
+#define VK_EXT_swapchain_colorspace 1
+#define VK_EXT_SWAPCHAIN_COLOR_SPACE_SPEC_VERSION 3
+#define VK_EXT_SWAPCHAIN_COLOR_SPACE_EXTENSION_NAME "VK_EXT_swapchain_colorspace"
+
+
+#define VK_EXT_hdr_metadata 1
+#define VK_EXT_HDR_METADATA_SPEC_VERSION 1
+#define VK_EXT_HDR_METADATA_EXTENSION_NAME "VK_EXT_hdr_metadata"
+
+typedef struct VkXYColorEXT {
+    float x;
+    float y;
+} VkXYColorEXT;
+
+typedef struct VkHdrMetadataEXT {
+    VkStructureType sType;
+    const void* pNext;
+    VkXYColorEXT displayPrimaryRed;
+    VkXYColorEXT displayPrimaryGreen;
+    VkXYColorEXT displayPrimaryBlue;
+    VkXYColorEXT whitePoint;
+    float maxLuminance;
+    float minLuminance;
+    float maxContentLightLevel;
+    float maxFrameAverageLightLevel;
+} VkHdrMetadataEXT;
+
+
+typedef void (VKAPI_PTR *PFN_vkSetHdrMetadataEXT)(VkDevice device, uint32_t swapchainCount, const VkSwapchainKHR* pSwapchains, const VkHdrMetadataEXT* pMetadata);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkSetHdrMetadataEXT(
+    VkDevice device,
+    uint32_t swapchainCount,
+    const VkSwapchainKHR* pSwapchains,
+    const VkHdrMetadataEXT* pMetadata);
+#endif
+
+#ifdef VK_USE_PLATFORM_IOS_MVK
+#define VK_MVK_ios_surface 1
+#define VK_MVK_IOS_SURFACE_SPEC_VERSION 2
+#define VK_MVK_IOS_SURFACE_EXTENSION_NAME "VK_MVK_ios_surface"
+
+typedef VkFlags VkIOSSurfaceCreateFlagsMVK;
+
+typedef struct VkIOSSurfaceCreateInfoMVK {
+    VkStructureType sType;
+    const void* pNext;
+    VkIOSSurfaceCreateFlagsMVK flags;
+    const void* pView;
+} VkIOSSurfaceCreateInfoMVK;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateIOSSurfaceMVK)(VkInstance instance, const VkIOSSurfaceCreateInfoMVK* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateIOSSurfaceMVK(
+    VkInstance instance,
+    const VkIOSSurfaceCreateInfoMVK* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkSurfaceKHR* pSurface);
+#endif
+#endif /* VK_USE_PLATFORM_IOS_MVK */
+
+#ifdef VK_USE_PLATFORM_MACOS_MVK
+#define VK_MVK_macos_surface 1
+#define VK_MVK_MACOS_SURFACE_SPEC_VERSION 2
+#define VK_MVK_MACOS_SURFACE_EXTENSION_NAME "VK_MVK_macos_surface"
+
+typedef VkFlags VkMacOSSurfaceCreateFlagsMVK;
+
+typedef struct VkMacOSSurfaceCreateInfoMVK {
+    VkStructureType sType;
+    const void* pNext;
+    VkMacOSSurfaceCreateFlagsMVK flags;
+    const void* pView;
+} VkMacOSSurfaceCreateInfoMVK;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateMacOSSurfaceMVK)(VkInstance instance, const VkMacOSSurfaceCreateInfoMVK* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateMacOSSurfaceMVK(
+    VkInstance instance,
+    const VkMacOSSurfaceCreateInfoMVK* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkSurfaceKHR* pSurface);
+#endif
+#endif /* VK_USE_PLATFORM_MACOS_MVK */
+
+#define VK_EXT_sampler_filter_minmax 1
+#define VK_EXT_SAMPLER_FILTER_MINMAX_SPEC_VERSION 1
+#define VK_EXT_SAMPLER_FILTER_MINMAX_EXTENSION_NAME "VK_EXT_sampler_filter_minmax"
+
+
+typedef enum VkSamplerReductionModeEXT {
+    VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE_EXT = 0,
+    VK_SAMPLER_REDUCTION_MODE_MIN_EXT = 1,
+    VK_SAMPLER_REDUCTION_MODE_MAX_EXT = 2,
+    VK_SAMPLER_REDUCTION_MODE_BEGIN_RANGE_EXT = VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE_EXT,
+    VK_SAMPLER_REDUCTION_MODE_END_RANGE_EXT = VK_SAMPLER_REDUCTION_MODE_MAX_EXT,
+    VK_SAMPLER_REDUCTION_MODE_RANGE_SIZE_EXT = (VK_SAMPLER_REDUCTION_MODE_MAX_EXT - VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE_EXT + 1),
+    VK_SAMPLER_REDUCTION_MODE_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkSamplerReductionModeEXT;
+
+typedef struct VkSamplerReductionModeCreateInfoEXT {
+    VkStructureType sType;
+    const void* pNext;
+    VkSamplerReductionModeEXT reductionMode;
+} VkSamplerReductionModeCreateInfoEXT;
+
+typedef struct VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 filterMinmaxSingleComponentFormats;
+    VkBool32 filterMinmaxImageComponentMapping;
+} VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT;
+
+
+
+#define VK_AMD_gpu_shader_int16 1
+#define VK_AMD_GPU_SHADER_INT16_SPEC_VERSION 1
+#define VK_AMD_GPU_SHADER_INT16_EXTENSION_NAME "VK_AMD_gpu_shader_int16"
+
+
+#define VK_AMD_mixed_attachment_samples 1
+#define VK_AMD_MIXED_ATTACHMENT_SAMPLES_SPEC_VERSION 1
+#define VK_AMD_MIXED_ATTACHMENT_SAMPLES_EXTENSION_NAME "VK_AMD_mixed_attachment_samples"
+
+
+#define VK_AMD_shader_fragment_mask 1
+#define VK_AMD_SHADER_FRAGMENT_MASK_SPEC_VERSION 1
+#define VK_AMD_SHADER_FRAGMENT_MASK_EXTENSION_NAME "VK_AMD_shader_fragment_mask"
+
+
+#define VK_EXT_shader_stencil_export 1
+#define VK_EXT_SHADER_STENCIL_EXPORT_SPEC_VERSION 1
+#define VK_EXT_SHADER_STENCIL_EXPORT_EXTENSION_NAME "VK_EXT_shader_stencil_export"
+
+
+#define VK_EXT_sample_locations 1
+#define VK_EXT_SAMPLE_LOCATIONS_SPEC_VERSION 1
+#define VK_EXT_SAMPLE_LOCATIONS_EXTENSION_NAME "VK_EXT_sample_locations"
+
+typedef struct VkSampleLocationEXT {
+    float x;
+    float y;
+} VkSampleLocationEXT;
+
+typedef struct VkSampleLocationsInfoEXT {
+    VkStructureType sType;
+    const void* pNext;
+    VkSampleCountFlagBits sampleLocationsPerPixel;
+    VkExtent2D sampleLocationGridSize;
+    uint32_t sampleLocationsCount;
+    const VkSampleLocationEXT* pSampleLocations;
+} VkSampleLocationsInfoEXT;
+
+typedef struct VkAttachmentSampleLocationsEXT {
+    uint32_t attachmentIndex;
+    VkSampleLocationsInfoEXT sampleLocationsInfo;
+} VkAttachmentSampleLocationsEXT;
+
+typedef struct VkSubpassSampleLocationsEXT {
+    uint32_t subpassIndex;
+    VkSampleLocationsInfoEXT sampleLocationsInfo;
+} VkSubpassSampleLocationsEXT;
+
+typedef struct VkRenderPassSampleLocationsBeginInfoEXT {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t attachmentInitialSampleLocationsCount;
+    const VkAttachmentSampleLocationsEXT* pAttachmentInitialSampleLocations;
+    uint32_t postSubpassSampleLocationsCount;
+    const VkSubpassSampleLocationsEXT* pSubpassSampleLocations;
+} VkRenderPassSampleLocationsBeginInfoEXT;
+
+typedef struct VkPipelineSampleLocationsStateCreateInfoEXT {
+    VkStructureType sType;
+    const void* pNext;
+    VkBool32 sampleLocationsEnable;
+    VkSampleLocationsInfoEXT sampleLocationsInfo;
+} VkPipelineSampleLocationsStateCreateInfoEXT;
+
+typedef struct
VkPhysicalDeviceSampleLocationsPropertiesEXT { + VkStructureType sType; + void* pNext; + VkSampleCountFlags sampleLocationSampleCounts; + VkExtent2D maxSampleLocationGridSize; + float sampleLocationCoordinateRange[2]; + uint32_t sampleLocationSubPixelBits; + VkBool32 variableSampleLocations; +} VkPhysicalDeviceSampleLocationsPropertiesEXT; + +typedef struct VkMultisamplePropertiesEXT { + VkStructureType sType; + void* pNext; + VkExtent2D maxSampleLocationGridSize; +} VkMultisamplePropertiesEXT; + + +typedef void (VKAPI_PTR *PFN_vkCmdSetSampleLocationsEXT)(VkCommandBuffer commandBuffer, const VkSampleLocationsInfoEXT* pSampleLocationsInfo); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceMultisamplePropertiesEXT)(VkPhysicalDevice physicalDevice, VkSampleCountFlagBits samples, VkMultisamplePropertiesEXT* pMultisampleProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetSampleLocationsEXT( + VkCommandBuffer commandBuffer, + const VkSampleLocationsInfoEXT* pSampleLocationsInfo); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMultisamplePropertiesEXT( + VkPhysicalDevice physicalDevice, + VkSampleCountFlagBits samples, + VkMultisamplePropertiesEXT* pMultisampleProperties); +#endif + +#define VK_EXT_blend_operation_advanced 1 +#define VK_EXT_BLEND_OPERATION_ADVANCED_SPEC_VERSION 2 +#define VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME "VK_EXT_blend_operation_advanced" + + +typedef enum VkBlendOverlapEXT { + VK_BLEND_OVERLAP_UNCORRELATED_EXT = 0, + VK_BLEND_OVERLAP_DISJOINT_EXT = 1, + VK_BLEND_OVERLAP_CONJOINT_EXT = 2, + VK_BLEND_OVERLAP_BEGIN_RANGE_EXT = VK_BLEND_OVERLAP_UNCORRELATED_EXT, + VK_BLEND_OVERLAP_END_RANGE_EXT = VK_BLEND_OVERLAP_CONJOINT_EXT, + VK_BLEND_OVERLAP_RANGE_SIZE_EXT = (VK_BLEND_OVERLAP_CONJOINT_EXT - VK_BLEND_OVERLAP_UNCORRELATED_EXT + 1), + VK_BLEND_OVERLAP_MAX_ENUM_EXT = 0x7FFFFFFF +} VkBlendOverlapEXT; + +typedef struct VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 advancedBlendCoherentOperations; +} VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT; + +typedef struct VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t advancedBlendMaxColorAttachments; + VkBool32 advancedBlendIndependentBlend; + VkBool32 advancedBlendNonPremultipliedSrcColor; + VkBool32 advancedBlendNonPremultipliedDstColor; + VkBool32 advancedBlendCorrelatedOverlap; + VkBool32 advancedBlendAllOperations; +} VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT; + +typedef struct VkPipelineColorBlendAdvancedStateCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkBool32 srcPremultiplied; + VkBool32 dstPremultiplied; + VkBlendOverlapEXT blendOverlap; +} VkPipelineColorBlendAdvancedStateCreateInfoEXT; + + + +#define VK_NV_fragment_coverage_to_color 1 +#define VK_NV_FRAGMENT_COVERAGE_TO_COLOR_SPEC_VERSION 1 +#define VK_NV_FRAGMENT_COVERAGE_TO_COLOR_EXTENSION_NAME "VK_NV_fragment_coverage_to_color" + +typedef VkFlags VkPipelineCoverageToColorStateCreateFlagsNV; + +typedef struct VkPipelineCoverageToColorStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkPipelineCoverageToColorStateCreateFlagsNV flags; + VkBool32 coverageToColorEnable; + uint32_t coverageToColorLocation; +} VkPipelineCoverageToColorStateCreateInfoNV; + + + +#define VK_NV_framebuffer_mixed_samples 1 +#define VK_NV_FRAMEBUFFER_MIXED_SAMPLES_SPEC_VERSION 1 +#define VK_NV_FRAMEBUFFER_MIXED_SAMPLES_EXTENSION_NAME "VK_NV_framebuffer_mixed_samples" + + +typedef enum 
VkCoverageModulationModeNV { + VK_COVERAGE_MODULATION_MODE_NONE_NV = 0, + VK_COVERAGE_MODULATION_MODE_RGB_NV = 1, + VK_COVERAGE_MODULATION_MODE_ALPHA_NV = 2, + VK_COVERAGE_MODULATION_MODE_RGBA_NV = 3, + VK_COVERAGE_MODULATION_MODE_BEGIN_RANGE_NV = VK_COVERAGE_MODULATION_MODE_NONE_NV, + VK_COVERAGE_MODULATION_MODE_END_RANGE_NV = VK_COVERAGE_MODULATION_MODE_RGBA_NV, + VK_COVERAGE_MODULATION_MODE_RANGE_SIZE_NV = (VK_COVERAGE_MODULATION_MODE_RGBA_NV - VK_COVERAGE_MODULATION_MODE_NONE_NV + 1), + VK_COVERAGE_MODULATION_MODE_MAX_ENUM_NV = 0x7FFFFFFF +} VkCoverageModulationModeNV; + +typedef VkFlags VkPipelineCoverageModulationStateCreateFlagsNV; + +typedef struct VkPipelineCoverageModulationStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkPipelineCoverageModulationStateCreateFlagsNV flags; + VkCoverageModulationModeNV coverageModulationMode; + VkBool32 coverageModulationTableEnable; + uint32_t coverageModulationTableCount; + const float* pCoverageModulationTable; +} VkPipelineCoverageModulationStateCreateInfoNV; + + + +#define VK_NV_fill_rectangle 1 +#define VK_NV_FILL_RECTANGLE_SPEC_VERSION 1 +#define VK_NV_FILL_RECTANGLE_EXTENSION_NAME "VK_NV_fill_rectangle" + + +#define VK_EXT_post_depth_coverage 1 +#define VK_EXT_POST_DEPTH_COVERAGE_SPEC_VERSION 1 +#define VK_EXT_POST_DEPTH_COVERAGE_EXTENSION_NAME "VK_EXT_post_depth_coverage" + + +#define VK_EXT_validation_cache 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkValidationCacheEXT) + +#define VK_EXT_VALIDATION_CACHE_SPEC_VERSION 1 +#define VK_EXT_VALIDATION_CACHE_EXTENSION_NAME "VK_EXT_validation_cache" + + +typedef enum VkValidationCacheHeaderVersionEXT { + VK_VALIDATION_CACHE_HEADER_VERSION_ONE_EXT = 1, + VK_VALIDATION_CACHE_HEADER_VERSION_BEGIN_RANGE_EXT = VK_VALIDATION_CACHE_HEADER_VERSION_ONE_EXT, + VK_VALIDATION_CACHE_HEADER_VERSION_END_RANGE_EXT = VK_VALIDATION_CACHE_HEADER_VERSION_ONE_EXT, + VK_VALIDATION_CACHE_HEADER_VERSION_RANGE_SIZE_EXT = (VK_VALIDATION_CACHE_HEADER_VERSION_ONE_EXT - VK_VALIDATION_CACHE_HEADER_VERSION_ONE_EXT + 1), + VK_VALIDATION_CACHE_HEADER_VERSION_MAX_ENUM_EXT = 0x7FFFFFFF +} VkValidationCacheHeaderVersionEXT; + +typedef VkFlags VkValidationCacheCreateFlagsEXT; + +typedef struct VkValidationCacheCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkValidationCacheCreateFlagsEXT flags; + size_t initialDataSize; + const void* pInitialData; +} VkValidationCacheCreateInfoEXT; + +typedef struct VkShaderModuleValidationCacheCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkValidationCacheEXT validationCache; +} VkShaderModuleValidationCacheCreateInfoEXT; + + +typedef VkResult (VKAPI_PTR *PFN_vkCreateValidationCacheEXT)(VkDevice device, const VkValidationCacheCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkValidationCacheEXT* pValidationCache); +typedef void (VKAPI_PTR *PFN_vkDestroyValidationCacheEXT)(VkDevice device, VkValidationCacheEXT validationCache, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkMergeValidationCachesEXT)(VkDevice device, VkValidationCacheEXT dstCache, uint32_t srcCacheCount, const VkValidationCacheEXT* pSrcCaches); +typedef VkResult (VKAPI_PTR *PFN_vkGetValidationCacheDataEXT)(VkDevice device, VkValidationCacheEXT validationCache, size_t* pDataSize, void* pData); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateValidationCacheEXT( + VkDevice device, + const VkValidationCacheCreateInfoEXT* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkValidationCacheEXT* pValidationCache); 
+ +VKAPI_ATTR void VKAPI_CALL vkDestroyValidationCacheEXT( + VkDevice device, + VkValidationCacheEXT validationCache, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkMergeValidationCachesEXT( + VkDevice device, + VkValidationCacheEXT dstCache, + uint32_t srcCacheCount, + const VkValidationCacheEXT* pSrcCaches); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetValidationCacheDataEXT( + VkDevice device, + VkValidationCacheEXT validationCache, + size_t* pDataSize, + void* pData); +#endif + +#define VK_EXT_shader_viewport_index_layer 1 +#define VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_SPEC_VERSION 1 +#define VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_EXTENSION_NAME "VK_EXT_shader_viewport_index_layer" + + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/sources/nedmalloc/malloc.c.h b/sources/nedmalloc/malloc.c.h new file mode 100755 index 00000000..4803a782 --- /dev/null +++ b/sources/nedmalloc/malloc.c.h @@ -0,0 +1,5709 @@ +/* + This is a version (aka dlmalloc) of malloc/free/realloc written by + Doug Lea and released to the public domain, as explained at + http://creativecommons.org/licenses/publicdomain. Send questions, + comments, complaints, performance data, etc to dl@cs.oswego.edu + + ** Modified by OGRE to support MinGW 5 Jan 2009 + +* Version pre-2.8.4 Mon Nov 27 11:22:37 2006 (dl at gee) + + Note: There may be an updated version of this malloc obtainable at + ftp://gee.cs.oswego.edu/pub/misc/malloc.c + Check before installing! + +* Quickstart + + This library is all in one file to simplify the most common usage: + ftp it, compile it (-O3), and link it into another program. All of + the compile-time options default to reasonable values for use on + most platforms. You might later want to step through various + compile-time and dynamic tuning options. + + For convenience, an include file for code using this malloc is at: + ftp://gee.cs.oswego.edu/pub/misc/malloc-2.8.4.h + You don't really need this .h file unless you call functions not + defined in your system include files. The .h file contains only the + excerpts from this file needed for using this malloc on ANSI C/C++ + systems, so long as you haven't changed compile-time options about + naming and tuning parameters. If you do, then you can create your + own malloc.h that does include all settings by cutting at the point + indicated below. Note that you may already by default be using a C + library containing a malloc that is based on some version of this + malloc (for example in linux). You might still want to use the one + in this file to customize settings or to avoid overheads associated + with library versions. + +* Vital statistics: + + Supported pointer/size_t representation: 4 or 8 bytes + size_t MUST be an unsigned type of the same width as + pointers. (If you are using an ancient system that declares + size_t as a signed type, or need it to be a different width + than pointers, you can use a previous release of this malloc + (e.g. 2.7.2) supporting these.) + + Alignment: 8 bytes (default) + This suffices for nearly all current machines and C compilers. + However, you can define MALLOC_ALIGNMENT to be wider than this + if necessary (up to 128bytes), at the expense of using more space. + + Minimum overhead per allocated chunk: 4 or 8 bytes (if 4byte sizes) + 8 or 16 bytes (if 8byte sizes) + Each malloced chunk has a hidden word of overhead holding size + and status information, and additional cross-check word + if FOOTERS is defined. 
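+
+ For illustration, the per-chunk overhead and alignment slop can be
+ observed with malloc_usable_size, documented further below. This is
+ only a sketch (show_granted is not part of this file), and the exact
+ numbers printed vary with platform, pointer width, and the
+ compile-time options described later:
+
+   #include <stdio.h>
+   void show_granted(size_t n) {
+     void* p = malloc(n);  // or dlmalloc(n) with USE_DL_PREFIX
+     if (p != 0) {
+       // usable size is >= n; the gap is overhead plus rounding
+       printf("requested %lu, usable %lu\n",
+              (unsigned long)n, (unsigned long)malloc_usable_size(p));
+       free(p);
+     }
+   }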
+
+ Minimum allocated size: 4-byte ptrs: 16 bytes (including overhead)
+ 8-byte ptrs: 32 bytes (including overhead)
+
+ Even a request for zero bytes (i.e., malloc(0)) returns a
+ pointer to something of the minimum allocatable size.
+ The maximum overhead wastage (i.e., number of extra bytes
+ allocated than were requested in malloc) is less than or equal
+ to the minimum size, except for requests >= mmap_threshold that
+ are serviced via mmap(), where the worst case wastage is about
+ 32 bytes plus the remainder from a system page (the minimal
+ mmap unit); typically 4096 or 8192 bytes.
+
+ Security: static-safe; optionally more or less
+ The "security" of malloc refers to the ability of malicious
+ code to accentuate the effects of errors (for example, freeing
+ space that is not currently malloc'ed or overwriting past the
+ ends of chunks) in code that calls malloc. This malloc
+ guarantees not to modify any memory locations below the base of
+ heap, i.e., static variables, even in the presence of usage
+ errors. The routines additionally detect most improper frees
+ and reallocs. All this holds as long as the static bookkeeping
+ for malloc itself is not corrupted by some other means. This
+ is only one aspect of security -- these checks do not, and
+ cannot, detect all possible programming errors.
+
+ If FOOTERS is defined nonzero, then each allocated chunk
+ carries an additional check word to verify that it was malloced
+ from its space. These check words are the same within each
+ execution of a program using malloc, but differ across
+ executions, so externally crafted fake chunks cannot be
+ freed. This improves security by rejecting frees/reallocs that
+ could corrupt heap memory, in addition to the checks preventing
+ writes to statics that are always on. This may further improve
+ security at the expense of time and space overhead. (Note that
+ FOOTERS may also be worth using with MSPACES.)
+
+ By default detected errors cause the program to abort (calling
+ "abort()"). You can override this to instead proceed past
+ errors by defining PROCEED_ON_ERROR. In this case, a bad free
+ has no effect, and a malloc that encounters a bad address
+ caused by user overwrites will ignore the bad address by
+ dropping pointers and indices to all known memory. This may
+ be appropriate for programs that should continue if at all
+ possible in the face of programming errors, although they may
+ run out of memory because dropped memory is never reclaimed.
+
+ If you don't like either of these options, you can define
+ CORRUPTION_ERROR_ACTION and USAGE_ERROR_ACTION to do anything
+ else. And if you are sure that your program using malloc has
+ no errors or vulnerabilities, you can define INSECURE to 1,
+ which might (or might not) provide a small performance improvement.
+
+ Thread-safety: NOT thread-safe unless USE_LOCKS defined
+ When USE_LOCKS is defined, each public call to malloc, free,
+ etc. is surrounded with either a pthread mutex or a win32
+ spinlock (depending on WIN32). This is not especially fast, and
+ can be a major bottleneck. It is designed only to provide
+ minimal protection in concurrent environments, and to provide a
+ basis for extensions. If you are using malloc in a concurrent
+ program, consider instead using nedmalloc
+ (http://www.nedprod.com/programs/portable/nedmalloc/) or
+ ptmalloc (see http://www.malloc.de), which are derived
+ from versions of this malloc.
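+
+ For example (a sketch only; worker and run_two_threads are
+ illustrative names, not part of this file), a build compiled with
+ USE_LOCKS defined to 1 may call the public routines from several
+ threads at once, e.g. with pthreads:
+
+   #include <pthread.h>
+   static void* worker(void* unused) {
+     void* p = malloc(128);  // serialized internally when USE_LOCKS=1
+     free(p);
+     return 0;
+   }
+   static void run_two_threads(void) {
+     pthread_t t1, t2;
+     pthread_create(&t1, 0, worker, 0);
+     pthread_create(&t2, 0, worker, 0);
+     pthread_join(t1, 0);
+     pthread_join(t2, 0);
+   }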
+ + System requirements: Any combination of MORECORE and/or MMAP/MUNMAP + This malloc can use unix sbrk or any emulation (invoked using + the CALL_MORECORE macro) and/or mmap/munmap or any emulation + (invoked using CALL_MMAP/CALL_MUNMAP) to get and release system + memory. On most unix systems, it tends to work best if both + MORECORE and MMAP are enabled. On Win32, it uses emulations + based on VirtualAlloc. It also uses common C library functions + like memset. + + Compliance: I believe it is compliant with the Single Unix Specification + (See http://www.unix.org). Also SVID/XPG, ANSI C, and probably + others as well. + +* Overview of algorithms + + This is not the fastest, most space-conserving, most portable, or + most tunable malloc ever written. However it is among the fastest + while also being among the most space-conserving, portable and + tunable. Consistent balance across these factors results in a good + general-purpose allocator for malloc-intensive programs. + + In most ways, this malloc is a best-fit allocator. Generally, it + chooses the best-fitting existing chunk for a request, with ties + broken in approximately least-recently-used order. (This strategy + normally maintains low fragmentation.) However, for requests less + than 256bytes, it deviates from best-fit when there is not an + exactly fitting available chunk by preferring to use space adjacent + to that used for the previous small request, as well as by breaking + ties in approximately most-recently-used order. (These enhance + locality of series of small allocations.) And for very large requests + (>= 256Kb by default), it relies on system memory mapping + facilities, if supported. (This helps avoid carrying around and + possibly fragmenting memory used only for large chunks.) + + All operations (except malloc_stats and mallinfo) have execution + times that are bounded by a constant factor of the number of bits in + a size_t, not counting any clearing in calloc or copying in realloc, + or actions surrounding MORECORE and MMAP that have times + proportional to the number of non-contiguous regions returned by + system allocation routines, which is often just 1. In real-time + applications, you can optionally suppress segment traversals using + NO_SEGMENT_TRAVERSAL, which assures bounded execution even when + system allocators return non-contiguous spaces, at the typical + expense of carrying around more memory and increased fragmentation. + + The implementation is not very modular and seriously overuses + macros. Perhaps someday all C compilers will do as good a job + inlining modular code as can now be done by brute-force expansion, + but now, enough of them seem not to. + + Some compilers issue a lot of warnings about code that is + dead/unreachable only on some platforms, and also about intentional + uses of negation on unsigned types. All known cases of each can be + ignored. + + For a longer but out of date high-level description, see + http://gee.cs.oswego.edu/dl/html/malloc.html + +* MSPACES + If MSPACES is defined, then in addition to malloc, free, etc., + this file also defines mspace_malloc, mspace_free, etc. These + are versions of malloc routines that take an "mspace" argument + obtained using create_mspace, to control all internal bookkeeping. + If ONLY_MSPACES is defined, only these versions are compiled. + So if you would like to use this allocator for only some allocations, + and your system malloc for others, you can compile with + ONLY_MSPACES and then do something like... 
+ static mspace mymspace = create_mspace(0,0); // for example
+ #define mymalloc(bytes) mspace_malloc(mymspace, bytes)
+
+ (Note: If you only need one instance of an mspace, you can instead
+ use "USE_DL_PREFIX" to relabel the global malloc.)
+
+ You can similarly create thread-local allocators by storing
+ mspaces as thread-locals. For example:
+ static __thread mspace tlms = 0;
+ void* tlmalloc(size_t bytes) {
+ if (tlms == 0) tlms = create_mspace(0, 0);
+ return mspace_malloc(tlms, bytes);
+ }
+ void tlfree(void* mem) { mspace_free(tlms, mem); }
+
+ Unless FOOTERS is defined, each mspace is completely independent.
+ You cannot allocate from one and free to another (although
+ conformance is only weakly checked, so usage errors are not always
+ caught). If FOOTERS is defined, then each chunk carries around a tag
+ indicating its originating mspace, and frees are directed to their
+ originating spaces.
+
+ ------------------------- Compile-time options ---------------------------
+
+Be careful in setting #define values for numerical constants of type
+size_t. On some systems, literal values are not automatically extended
+to size_t precision unless they are explicitly cast. You can also
+use the symbolic values MAX_SIZE_T, SIZE_T_ONE, etc. below.
+
+WIN32 default: defined if _WIN32 defined
+ Defining WIN32 sets up defaults for MS environment and compilers.
+ Otherwise defaults are for unix. Beware that there seem to be some
+ cases where this malloc might not be a pure drop-in replacement for
+ Win32 malloc: Random-looking failures from Win32 GDI APIs (e.g.,
+ SetDIBits()) may be due to bugs in some video driver implementations
+ when pixel buffers are malloc()ed, and the region spans more than
+ one VirtualAlloc()ed region. Because dlmalloc uses a small (64Kb)
+ default granularity, pixel buffers may straddle virtual allocation
+ regions more often than when using the Microsoft allocator. You can
+ avoid this by using VirtualAlloc() and VirtualFree() for all pixel
+ buffers rather than using malloc(). If this is not possible,
+ recompile this malloc with a larger DEFAULT_GRANULARITY.
+
+MALLOC_ALIGNMENT default: (size_t)8
+ Controls the minimum alignment for malloc'ed chunks. It must be a
+ power of two and at least 8, even on machines for which smaller
+ alignments would suffice. It may be defined as larger than this
+ though. Note however that code and data structures are optimized for
+ the case of 8-byte alignment.
+
+MSPACES default: 0 (false)
+ If true, compile in support for independent allocation spaces.
+ This is only supported if HAVE_MMAP is true.
+
+ONLY_MSPACES default: 0 (false)
+ If true, only compile in mspace versions, not regular versions.
+
+USE_LOCKS default: 0 (false)
+ Causes each call to each public routine to be surrounded with
+ pthread or WIN32 mutex lock/unlock. (If set true, this can be
+ overridden on a per-mspace basis for mspace versions.) If set to a
+ non-zero value other than 1, locks are used, but their
+ implementation is left out, so lock functions must be supplied manually.
+
+USE_SPIN_LOCKS default: 1 iff USE_LOCKS and on x86 using gcc or MSC
+ If true, uses custom spin locks for locking. This is currently
+ supported only for x86 platforms using gcc or recent MS compilers.
+ Otherwise, posix locks or win32 critical sections are used.
+
+FOOTERS default: 0
+ If true, provide extra checking and dispatching by placing
+ information in the footers of allocated chunks. This adds
+ space and time overhead.
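+
+ As a concrete illustration, an embedding might configure a few of
+ the options documented in this section in a small wrapper file
+ before including this one. The option names below are the real
+ ones; the chosen values (and the wrapper file itself) are only an
+ example:
+
+   // my_malloc.c -- hypothetical wrapper, values for illustration only
+   #define USE_LOCKS 1            // thread-safe public routines
+   #define FOOTERS 1              // extra per-chunk checking
+   #define USE_DL_PREFIX          // expose dlmalloc()/dlfree() etc.
+   #define MALLOC_ALIGNMENT ((size_t)16U)
+   #include "malloc.c.h"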
+
+INSECURE default: 0
+ If true, omit checks for usage errors and heap space overwrites.
+
+USE_DL_PREFIX default: NOT defined
+ Causes compiler to prefix all public routines with the string 'dl'.
+ This can be useful when you only want to use this malloc in one part
+ of a program, using your regular system malloc elsewhere.
+
+ABORT default: defined as abort()
+ Defines how to abort on failed checks. On most systems, a failed
+ check cannot die with an "assert" or even print an informative
+ message, because the underlying print routines in turn call malloc,
+ which will fail again. Generally, the best policy is to simply call
+ abort(). It's not very useful to do more than this because many
+ errors due to overwriting will show up as address faults (null, odd
+ addresses etc) rather than malloc-triggered checks, so will also
+ abort. Also, most compilers know that abort() does not return, so
+ can better optimize code conditionally calling it.
+
+PROCEED_ON_ERROR default: defined as 0 (false)
+ Controls whether detected bad addresses cause them to be bypassed
+ rather than aborting. If set, detected bad arguments to free and
+ realloc are ignored. And all bookkeeping information is zeroed out
+ upon a detected overwrite of freed heap space, thus losing the
+ ability to ever return it from malloc again, but enabling the
+ application to proceed. If PROCEED_ON_ERROR is defined, the
+ static variable malloc_corruption_error_count is compiled in
+ and can be examined to see if errors have occurred. This option
+ generates slower code than the default abort policy.
+
+DEBUG default: NOT defined
+ The DEBUG setting is mainly intended for people trying to modify
+ this code or diagnose problems when porting to new platforms.
+ However, it may also be able to better isolate user errors than just
+ using runtime checks. The assertions in the check routines spell
+ out in more detail the assumptions and invariants underlying the
+ algorithms. The checking is fairly extensive, and will slow down
+ execution noticeably. Calling malloc_stats or mallinfo with DEBUG
+ set will attempt to check every non-mmapped allocated and free chunk
+ in the course of computing the summaries.
+
+ABORT_ON_ASSERT_FAILURE default: defined as 1 (true)
+ Debugging assertion failures can be nearly impossible if your
+ version of the assert macro causes malloc to be called, which will
+ lead to a cascade of further failures, blowing the runtime stack.
+ ABORT_ON_ASSERT_FAILURE causes assertion failures to call abort(),
+ which will usually make debugging easier.
+
+MALLOC_FAILURE_ACTION default: sets errno to ENOMEM, or no-op on win32
+ The action to take before "return 0" when malloc fails because no
+ memory is available.
+
+HAVE_MORECORE default: 1 (true) unless win32 or ONLY_MSPACES
+ True if this system supports sbrk or an emulation of it.
+
+MORECORE default: sbrk
+ The name of the sbrk-style system routine to call to obtain more
+ memory. See below for guidance on writing custom MORECORE
+ functions. The type of the argument to sbrk/MORECORE varies across
+ systems. It cannot be size_t, because it supports negative
+ arguments, so it is normally the signed type of the same width as
+ size_t (sometimes declared as "intptr_t"). It doesn't much matter
+ though. Internally, we only call it with arguments less than half
+ the max value of a size_t, which should work across all reasonable
+ possibilities, although sometimes generating compiler warnings.
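+
+ For instance, a hand-crafted MORECORE over a fixed static arena
+ might look like the sketch below (my_morecore and my_arena are
+ illustrative, not part of this file; see also the fuller guidance
+ below). Failure is signalled by returning MAX_SIZE_T cast to a
+ pointer, and negative (trimming) requests are refused, so such a
+ build would also define MORECORE_CANNOT_TRIM, described next:
+
+   // sketch: contiguous, never-trimming MORECORE over a static arena
+   static char my_arena[1 << 20];   // arbitrary 1MB arena
+   static size_t my_top = 0;
+   void* my_morecore(long nb) {     // argument type varies; see above
+     char* base = my_arena + my_top;
+     if (nb < 0) return (void*)(~(size_t)0);   // cannot trim
+     if ((size_t)nb > sizeof(my_arena) - my_top)
+       return (void*)(~(size_t)0);             // arena exhausted
+     my_top += (size_t)nb;
+     return base;
+   }
+   // e.g. compile with:
+   //   -DMORECORE=my_morecore -DHAVE_MMAP=0 -DMORECORE_CANNOT_TRIM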
+
+MORECORE_CONTIGUOUS default: 1 (true) if HAVE_MORECORE
+ If true, take advantage of the fact that consecutive calls to MORECORE
+ with positive arguments always return contiguous increasing
+ addresses. This is true of unix sbrk. It does not hurt too much to
+ set it true anyway, since malloc copes with non-contiguities.
+ Setting it false when definitely non-contiguous saves time
+ and possibly wasted space it would take to discover this though.
+
+MORECORE_CANNOT_TRIM default: NOT defined
+ True if MORECORE cannot release space back to the system when given
+ negative arguments. This is generally necessary only if you are
+ using a hand-crafted MORECORE function that cannot handle negative
+ arguments.
+
+NO_SEGMENT_TRAVERSAL default: 0
+ If non-zero, suppresses traversals of memory segments
+ returned by either MORECORE or CALL_MMAP. This disables
+ merging of segments that are contiguous, and selectively
+ releasing them to the OS if unused, but bounds execution times.
+
+HAVE_MMAP default: 1 (true)
+ True if this system supports mmap or an emulation of it. If so, and
+ HAVE_MORECORE is not true, MMAP is used for all system
+ allocation. If set and HAVE_MORECORE is true as well, MMAP is
+ primarily used to directly allocate very large blocks. It is also
+ used as a backup strategy in cases where MORECORE fails to provide
+ space from system. Note: A single call to MUNMAP is assumed to be
+ able to unmap memory that may have been allocated using multiple calls
+ to MMAP, so long as they are adjacent.
+
+HAVE_MREMAP default: 1 on linux, else 0
+ If true, realloc() uses mremap() to re-allocate large blocks and
+ extend or shrink allocation spaces.
+
+MMAP_CLEARS default: 1 except on WINCE.
+ True if mmap clears memory so calloc doesn't need to. This is true
+ for standard unix mmap using /dev/zero and on WIN32 except for WINCE.
+
+USE_BUILTIN_FFS default: 0 (i.e., not used)
+ Causes malloc to use the builtin ffs() function to compute indices.
+ Some compilers may recognize and intrinsify ffs to be faster than the
+ supplied C version. Also, the case of x86 using gcc is special-cased
+ to an asm instruction, so is already as fast as it can be, and so
+ this setting has no effect. Similarly for Win32 under recent MS compilers.
+ (On most x86s, the asm version is only slightly faster than the C version.)
+
+malloc_getpagesize default: derive from system includes, or 4096.
+ The system page size. To the extent possible, this malloc manages
+ memory from the system in page-size units. This may be (and
+ usually is) a function rather than a constant. This is ignored
+ if WIN32, where page size is determined using GetSystemInfo during
+ initialization.
+
+USE_DEV_RANDOM default: 0 (i.e., not used)
+ Causes malloc to use /dev/random to initialize secure magic seed for
+ stamping footers. Otherwise, the current time is used.
+
+NO_MALLINFO default: 0
+ If defined, don't compile "mallinfo". This can be a simple way
+ of dealing with mismatches between system declarations and
+ those in this file.
+
+MALLINFO_FIELD_TYPE default: size_t
+ The type of the fields in the mallinfo struct. This was originally
+ defined as "int" in SVID etc, but is more usefully defined as
+ size_t. The value is used only if HAVE_USR_INCLUDE_MALLOC_H is not set.
+
+REALLOC_ZERO_BYTES_FREES default: not defined
+ This should be set if a call to realloc with zero bytes should
+ be the same as a call to free. Some people think it should. Otherwise,
+ since this malloc returns a unique pointer for malloc(0), so does
+ realloc(p, 0).
+
+LACKS_UNISTD_H, LACKS_FCNTL_H, LACKS_SYS_PARAM_H, LACKS_SYS_MMAN_H
+LACKS_STRINGS_H, LACKS_STRING_H, LACKS_SYS_TYPES_H, LACKS_ERRNO_H
+LACKS_STDLIB_H default: NOT defined unless on WIN32
+ Define these if your system does not have these header files.
+ You might need to manually insert some of the declarations they provide.
+
+DEFAULT_GRANULARITY default: page size if MORECORE_CONTIGUOUS,
+ system_info.dwAllocationGranularity in WIN32,
+ otherwise 64K.
+ Also settable using mallopt(M_GRANULARITY, x)
+ The unit for allocating and deallocating memory from the system. On
+ most systems with contiguous MORECORE, there is no reason to
+ make this more than a page. However, systems with MMAP tend to
+ either require or encourage larger granularities. You can increase
+ this value to keep system allocation functions from being called so
+ often, especially if they are slow. The value must be at least one
+ page and must be a power of two. Setting to 0 causes initialization
+ to either page size or win32 region size. (Note: In previous
+ versions of malloc, the equivalent of this option was called
+ "TOP_PAD".)
+
+DEFAULT_TRIM_THRESHOLD default: 2MB
+ Also settable using mallopt(M_TRIM_THRESHOLD, x)
+ The maximum amount of unused top-most memory to keep before
+ releasing via malloc_trim in free(). Automatic trimming is mainly
+ useful in long-lived programs using contiguous MORECORE. Because
+ trimming via sbrk can be slow on some systems, and can sometimes be
+ wasteful (in cases where programs immediately afterward allocate
+ more large chunks) the value should be high enough so that your
+ overall system performance would improve by releasing this much
+ memory. As a rough guide, you might set to a value close to the
+ average size of a process (program) running on your system.
+ Releasing this much memory would allow such a process to run in
+ memory. Generally, it is worth tuning trim thresholds when a
+ program undergoes phases where several large chunks are allocated
+ and released in ways that can reuse each other's storage, perhaps
+ mixed with phases where there are no such chunks at all. The trim
+ value must be greater than page size to have any useful effect. To
+ disable trimming completely, you can set to MAX_SIZE_T. Note that the trick
+ some people use of mallocing a huge space and then freeing it at
+ program startup, in an attempt to reserve system memory, doesn't
+ have the intended effect under automatic trimming, since that memory
+ will immediately be returned to the system.
+
+DEFAULT_MMAP_THRESHOLD default: 256K
+ Also settable using mallopt(M_MMAP_THRESHOLD, x)
+ The request size threshold for using MMAP to directly service a
+ request. Requests of at least this size that cannot be allocated
+ using already-existing space will be serviced via mmap. (If enough
+ normal freed space already exists it is used instead.) Using mmap
+ segregates relatively large chunks of memory so that they can be
+ individually obtained and released from the host system. A request
+ serviced through mmap is never reused by any other request (at least
+ not directly; the system may just so happen to remap successive
+ requests to the same locations). Segregating space in this way has
+ the benefits that: Mmapped space can always be individually released
+ back to the system, which helps keep the system level memory demands
+ of a long-lived program low. Also, mapped memory doesn't become
+ `locked' between other chunks, as can happen with normally allocated
+ chunks, which means that even trimming via malloc_trim would not
+ release them. However, it has the disadvantage that the space
+ cannot be reclaimed, consolidated, and then used to service later
+ requests, as happens with normal chunks. The advantages of mmap
+ nearly always outweigh disadvantages for "large" chunks, but the
+ value of "large" may vary across systems. The default is an
+ empirically derived value that works well in most systems. You can
+ disable mmap by setting to MAX_SIZE_T.
+
+MAX_RELEASE_CHECK_RATE default: 4095 unless not HAVE_MMAP
+ The number of consolidated frees between checks to release
+ unused segments when freeing. When using non-contiguous segments,
+ especially with multiple mspaces, checking only for topmost space
+ doesn't always suffice to trigger trimming. To compensate for this,
+ free() will, with a period of MAX_RELEASE_CHECK_RATE (or the
+ current number of segments, if greater) try to release unused
+ segments to the OS when freeing chunks that result in
+ consolidation. The best value for this parameter is a compromise
+ between slowing down frees with relatively costly checks that
+ rarely trigger versus holding on to unused memory. To effectively
+ disable, set to MAX_SIZE_T. This may lead to a very slight speed
+ improvement at the expense of carrying around more memory.
+*/
+
+/* Version identifier to allow people to support multiple versions */
+#ifndef DLMALLOC_VERSION
+#define DLMALLOC_VERSION 20804
+#endif /* DLMALLOC_VERSION */
+
+#ifndef WIN32
+#ifdef _WIN32
+#define WIN32 1
+#endif /* _WIN32 */
+#ifdef _WIN32_WCE
+#define LACKS_FCNTL_H
+#define WIN32 1
+#endif /* _WIN32_WCE */
+#endif /* WIN32 */
+#ifdef WIN32
+#define WIN32_LEAN_AND_MEAN
+#if (_WIN32_WINNT < 0x403)
+#define _WIN32_WINNT 0x403
+#endif
+#include <windows.h>
+#define HAVE_MMAP 1
+#define HAVE_MORECORE 0
+#define LACKS_UNISTD_H
+#define LACKS_SYS_PARAM_H
+#define LACKS_SYS_MMAN_H
+#define LACKS_STRING_H
+#define LACKS_STRINGS_H
+#define LACKS_SYS_TYPES_H
+#define LACKS_ERRNO_H
+#ifndef MALLOC_FAILURE_ACTION
+#define MALLOC_FAILURE_ACTION
+#endif /* MALLOC_FAILURE_ACTION */
+#ifdef _WIN32_WCE /* WINCE reportedly does not clear */
+#define MMAP_CLEARS 0
+#else
+#define MMAP_CLEARS 1
+#endif /* _WIN32_WCE */
+#endif /* WIN32 */
+
+#if defined(DARWIN) || defined(_DARWIN)
+/* Mac OSX docs advise not to use sbrk; it seems better to use mmap */
+#ifndef HAVE_MORECORE
+#define HAVE_MORECORE 0
+#define HAVE_MMAP 1
+/* OSX allocators provide 16 byte alignment */
+#ifndef MALLOC_ALIGNMENT
+#define MALLOC_ALIGNMENT ((size_t)16U)
+#endif
+#endif /* HAVE_MORECORE */
+#endif /* DARWIN */
+
+#ifndef LACKS_SYS_TYPES_H
+#include <sys/types.h> /* For size_t */
+#endif /* LACKS_SYS_TYPES_H */
+
+/* The maximum possible size_t value has all bits set */
+#define MAX_SIZE_T (~(size_t)0)
+
+#ifndef ONLY_MSPACES
+#define ONLY_MSPACES 0 /* define to a value */
+#else
+#define ONLY_MSPACES 1
+#endif /* ONLY_MSPACES */
+#ifndef MSPACES
+#if ONLY_MSPACES
+#define MSPACES 1
+#else /* ONLY_MSPACES */
+#define MSPACES 0
+#endif /* ONLY_MSPACES */
+#endif /* MSPACES */
+#ifndef MALLOC_ALIGNMENT
+#define MALLOC_ALIGNMENT ((size_t)8U)
+#endif /* MALLOC_ALIGNMENT */
+#ifndef FOOTERS
+#define FOOTERS 0
+#endif /* FOOTERS */
+#ifndef ABORT
+#define ABORT abort()
+#endif /* ABORT */
+#ifndef ABORT_ON_ASSERT_FAILURE
+#define ABORT_ON_ASSERT_FAILURE 1
+#endif /* ABORT_ON_ASSERT_FAILURE */
+#ifndef PROCEED_ON_ERROR
+#define
PROCEED_ON_ERROR 0 +#endif /* PROCEED_ON_ERROR */ +#ifndef USE_LOCKS +#define USE_LOCKS 0 +#endif /* USE_LOCKS */ +#ifndef USE_SPIN_LOCKS +#if USE_LOCKS && (defined(__GNUC__) && ((defined(__i386__) || defined(__x86_64__)))) || (defined(_MSC_VER) && _MSC_VER>=1310) +#define USE_SPIN_LOCKS 1 +#else +#define USE_SPIN_LOCKS 0 +#endif /* USE_LOCKS && ... */ +#endif /* USE_SPIN_LOCKS */ +#ifndef INSECURE +#define INSECURE 0 +#endif /* INSECURE */ +#ifndef HAVE_MMAP +#define HAVE_MMAP 1 +#endif /* HAVE_MMAP */ +#ifndef MMAP_CLEARS +#define MMAP_CLEARS 1 +#endif /* MMAP_CLEARS */ +#ifndef HAVE_MREMAP +#ifdef linux +#define HAVE_MREMAP 1 +#else /* linux */ +#define HAVE_MREMAP 0 +#endif /* linux */ +#endif /* HAVE_MREMAP */ +#ifndef MALLOC_FAILURE_ACTION +#define MALLOC_FAILURE_ACTION errno = ENOMEM; +#endif /* MALLOC_FAILURE_ACTION */ +#ifndef HAVE_MORECORE +#if ONLY_MSPACES +#define HAVE_MORECORE 0 +#else /* ONLY_MSPACES */ +#define HAVE_MORECORE 1 +#endif /* ONLY_MSPACES */ +#endif /* HAVE_MORECORE */ +#if !HAVE_MORECORE +#define MORECORE_CONTIGUOUS 0 +#else /* !HAVE_MORECORE */ +#define MORECORE_DEFAULT sbrk +#ifndef MORECORE_CONTIGUOUS +#define MORECORE_CONTIGUOUS 1 +#endif /* MORECORE_CONTIGUOUS */ +#endif /* HAVE_MORECORE */ +#ifndef DEFAULT_GRANULARITY +#if (MORECORE_CONTIGUOUS || defined(WIN32)) +#define DEFAULT_GRANULARITY (0) /* 0 means to compute in init_mparams */ +#else /* MORECORE_CONTIGUOUS */ +#define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U) +#endif /* MORECORE_CONTIGUOUS */ +#endif /* DEFAULT_GRANULARITY */ +#ifndef DEFAULT_TRIM_THRESHOLD +#ifndef MORECORE_CANNOT_TRIM +#define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U) +#else /* MORECORE_CANNOT_TRIM */ +#define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T +#endif /* MORECORE_CANNOT_TRIM */ +#endif /* DEFAULT_TRIM_THRESHOLD */ +#ifndef DEFAULT_MMAP_THRESHOLD +#if HAVE_MMAP +#define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U) +#else /* HAVE_MMAP */ +#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T +#endif /* HAVE_MMAP */ +#endif /* DEFAULT_MMAP_THRESHOLD */ +#ifndef MAX_RELEASE_CHECK_RATE +#if HAVE_MMAP +#define MAX_RELEASE_CHECK_RATE 4095 +#else +#define MAX_RELEASE_CHECK_RATE MAX_SIZE_T +#endif /* HAVE_MMAP */ +#endif /* MAX_RELEASE_CHECK_RATE */ +#ifndef USE_BUILTIN_FFS +#define USE_BUILTIN_FFS 0 +#endif /* USE_BUILTIN_FFS */ +#ifndef USE_DEV_RANDOM +#define USE_DEV_RANDOM 0 +#endif /* USE_DEV_RANDOM */ +#ifndef NO_MALLINFO +#define NO_MALLINFO 0 +#endif /* NO_MALLINFO */ +#ifndef MALLINFO_FIELD_TYPE +#define MALLINFO_FIELD_TYPE size_t +#endif /* MALLINFO_FIELD_TYPE */ +#ifndef NO_SEGMENT_TRAVERSAL +#define NO_SEGMENT_TRAVERSAL 0 +#endif /* NO_SEGMENT_TRAVERSAL */ + +/* + mallopt tuning options. SVID/XPG defines four standard parameter + numbers for mallopt, normally defined in malloc.h. None of these + are used in this malloc, so setting them has no effect. But this + malloc does support the following options. +*/ + +#define M_TRIM_THRESHOLD (-1) +#define M_GRANULARITY (-2) +#define M_MMAP_THRESHOLD (-3) + +/* ------------------------ Mallinfo declarations ------------------------ */ + +#if !NO_MALLINFO +/* + This version of malloc supports the standard SVID/XPG mallinfo + routine that returns a struct containing usage properties and + statistics. It should work on any system that has a + /usr/include/malloc.h defining struct mallinfo. The main + declaration needed is the mallinfo struct that is returned (by-copy) + by mallinfo(). 
The mallinfo struct contains a bunch of fields that
+ are not even meaningful in this version of malloc. These fields
+ are instead filled by mallinfo() with other numbers that might be of
+ interest.
+
+ HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
+ /usr/include/malloc.h file that includes a declaration of struct
+ mallinfo. If so, it is included; else a compliant version is
+ declared below. These must be precisely the same for mallinfo() to
+ work. The original SVID version of this struct, defined on most
+ systems with mallinfo, declares all fields as ints. But some others
+ define them as unsigned long. If your system defines the fields using a
+ type of different width than listed here, you MUST #include your
+ system version and #define HAVE_USR_INCLUDE_MALLOC_H.
+*/
+
+/* #define HAVE_USR_INCLUDE_MALLOC_H */
+
+#ifdef HAVE_USR_INCLUDE_MALLOC_H
+#include "/usr/include/malloc.h"
+#elif defined(__ANDROID__) || defined(ANDROID)
+#include <malloc.h>
+#define STRUCT_MALLINFO_DECLARED 1
+#else /* HAVE_USR_INCLUDE_MALLOC_H */
+#ifndef STRUCT_MALLINFO_DECLARED
+#define STRUCT_MALLINFO_DECLARED 1
+struct mallinfo {
+ MALLINFO_FIELD_TYPE arena; /* non-mmapped space allocated from system */
+ MALLINFO_FIELD_TYPE ordblks; /* number of free chunks */
+ MALLINFO_FIELD_TYPE smblks; /* always 0 */
+ MALLINFO_FIELD_TYPE hblks; /* always 0 */
+ MALLINFO_FIELD_TYPE hblkhd; /* space in mmapped regions */
+ MALLINFO_FIELD_TYPE usmblks; /* maximum total allocated space */
+ MALLINFO_FIELD_TYPE fsmblks; /* always 0 */
+ MALLINFO_FIELD_TYPE uordblks; /* total allocated space */
+ MALLINFO_FIELD_TYPE fordblks; /* total free space */
+ MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */
+};
+#endif /* STRUCT_MALLINFO_DECLARED */
+#endif /* HAVE_USR_INCLUDE_MALLOC_H */
+#endif /* NO_MALLINFO */
+
+/*
+ Try to persuade compilers to inline. The most critical functions for
+ inlining are defined as macros, so these aren't used for them.
+*/
+
+#ifndef FORCEINLINE
+ #if defined(__GNUC__)
+#define FORCEINLINE __inline __attribute__ ((always_inline))
+ #elif defined(_MSC_VER)
+ #define FORCEINLINE __forceinline
+ #endif
+#endif
+#ifndef NOINLINE
+ #if defined(__GNUC__)
+ #define NOINLINE __attribute__ ((noinline))
+ #elif defined(_MSC_VER)
+ #define NOINLINE __declspec(noinline)
+ #else
+ #define NOINLINE
+ #endif
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#ifndef FORCEINLINE
+ #define FORCEINLINE inline
+#endif
+#endif /* __cplusplus */
+#ifndef FORCEINLINE
+ #define FORCEINLINE
+#endif
+
+#if !ONLY_MSPACES
+
+/* ------------------- Declarations of public routines ------------------- */
+
+#ifndef USE_DL_PREFIX
+#define dlcalloc calloc
+#define dlfree free
+#define dlmalloc malloc
+#define dlmemalign memalign
+#define dlrealloc realloc
+#define dlvalloc valloc
+#define dlpvalloc pvalloc
+#define dlmallinfo mallinfo
+#define dlmallopt mallopt
+#define dlmalloc_trim malloc_trim
+#define dlmalloc_stats malloc_stats
+#define dlmalloc_usable_size malloc_usable_size
+#define dlmalloc_footprint malloc_footprint
+#define dlmalloc_max_footprint malloc_max_footprint
+#define dlindependent_calloc independent_calloc
+#define dlindependent_comalloc independent_comalloc
+#endif /* USE_DL_PREFIX */
+
+
+/*
+ malloc(size_t n)
+ Returns a pointer to a newly allocated chunk of at least n bytes, or
+ null if no space is available, in which case errno is set to ENOMEM
+ on ANSI C systems.
+
+ If n is zero, malloc returns a minimum-sized chunk. (The minimum
+ size is 16 bytes on most 32bit systems, and 32 bytes on 64bit
+ systems.) Note that size_t is an unsigned type, so calls with
+ arguments that would be negative if signed are interpreted as
+ requests for huge amounts of space, which will often fail. The
+ maximum supported value of n differs across systems, but is in all
+ cases less than the maximum representable value of a size_t.
+*/
+void* dlmalloc(size_t);
+
+/*
+ free(void* p)
+ Releases the chunk of memory pointed to by p, that had been previously
+ allocated using malloc or a related routine such as realloc.
+ It has no effect if p is null. If p was not malloced or already
+ freed, free(p) will by default cause the current program to abort.
+*/
+void dlfree(void*);
+
+/*
+ calloc(size_t n_elements, size_t element_size);
+ Returns a pointer to n_elements * element_size bytes, with all locations
+ set to zero.
+*/
+void* dlcalloc(size_t, size_t);
+
+/*
+ realloc(void* p, size_t n)
+ Returns a pointer to a chunk of size n that contains the same data
+ as does chunk p up to the minimum of (n, p's size) bytes, or null
+ if no space is available.
+
+ The returned pointer may or may not be the same as p. The algorithm
+ prefers extending p in most cases when possible, otherwise it
+ employs the equivalent of a malloc-copy-free sequence.
+
+ If p is null, realloc is equivalent to malloc.
+
+ If space is not available, realloc returns null, errno is set (if on
+ ANSI) and p is NOT freed.
+
+ If n is for fewer bytes than already held by p, the newly unused
+ space is lopped off and freed if possible. realloc with a size
+ argument of zero (re)allocates a minimum-sized chunk.
+
+ The old unix realloc convention of allowing the last-free'd chunk
+ to be used as an argument to realloc is not supported.
+*/
+
+void* dlrealloc(void*, size_t);
+
+/*
+ memalign(size_t alignment, size_t n);
+ Returns a pointer to a newly allocated chunk of n bytes, aligned
+ in accord with the alignment argument.
+
+ The alignment argument should be a power of two. If the argument is
+ not a power of two, the nearest greater power is used.
+ 8-byte alignment is guaranteed by normal malloc calls, so don't
+ bother calling memalign with an argument of 8 or less.
+
+ Overreliance on memalign is a sure way to fragment space.
+*/
+void* dlmemalign(size_t, size_t);
+
+/*
+ valloc(size_t n);
+ Equivalent to memalign(pagesize, n), where pagesize is the page
+ size of the system. If the pagesize is unknown, 4096 is used.
+*/
+void* dlvalloc(size_t);
+
+/*
+ mallopt(int parameter_number, int parameter_value)
+ Sets tunable parameters. The format is to provide a
+ (parameter-number, parameter-value) pair. mallopt then sets the
+ corresponding parameter to the argument value if it can (i.e., so
+ long as the value is meaningful), and returns 1 if successful else
+ 0. To work around the fact that mallopt is specified to use int,
+ not size_t parameters, the value -1 is specially treated as the
+ maximum unsigned size_t value.
+
+ SVID/XPG/ANSI defines four standard param numbers for mallopt,
+ normally defined in malloc.h. None of these are used in this malloc,
+ so setting them has no effect. But this malloc also supports other
+ options in mallopt. See below for details. Briefly, supported
+ parameters are as follows (listed defaults are for "typical"
+ configurations).
+ + Symbol param # default allowed param values + M_TRIM_THRESHOLD -1 2*1024*1024 any (-1 disables) + M_GRANULARITY -2 page size any power of 2 >= page size + M_MMAP_THRESHOLD -3 256*1024 any (or 0 if no MMAP support) +*/ +int dlmallopt(int, int); + +/* + malloc_footprint(); + Returns the number of bytes obtained from the system. The total + number of bytes allocated by malloc, realloc etc., is less than this + value. Unlike mallinfo, this function returns only a precomputed + result, so can be called frequently to monitor memory consumption. + Even if locks are otherwise defined, this function does not use them, + so results might not be up to date. +*/ +size_t dlmalloc_footprint(void); + +/* + malloc_max_footprint(); + Returns the maximum number of bytes obtained from the system. This + value will be greater than current footprint if deallocated space + has been reclaimed by the system. The peak number of bytes allocated + by malloc, realloc etc., is less than this value. Unlike mallinfo, + this function returns only a precomputed result, so can be called + frequently to monitor memory consumption. Even if locks are + otherwise defined, this function does not use them, so results might + not be up to date. +*/ +size_t dlmalloc_max_footprint(void); + +#if !NO_MALLINFO +/* + mallinfo() + Returns (by copy) a struct containing various summary statistics: + + arena: current total non-mmapped bytes allocated from system + ordblks: the number of free chunks + smblks: always zero. + hblks: current number of mmapped regions + hblkhd: total bytes held in mmapped regions + usmblks: the maximum total allocated space. This will be greater + than current total if trimming has occurred. + fsmblks: always zero + uordblks: current total allocated space (normal or mmapped) + fordblks: total free space + keepcost: the maximum number of bytes that could ideally be released + back to system via malloc_trim. ("ideally" means that + it ignores page restrictions etc.) + + Because these fields are ints, but internal bookkeeping may + be kept as longs, the reported values may wrap around zero and + thus be inaccurate. +*/ +struct mallinfo dlmallinfo(void); +#endif /* NO_MALLINFO */ + +/* + independent_calloc(size_t n_elements, size_t element_size, void* chunks[]); + + independent_calloc is similar to calloc, but instead of returning a + single cleared space, it returns an array of pointers to n_elements + independent elements that can hold contents of size elem_size, each + of which starts out cleared, and can be independently freed, + realloc'ed etc. The elements are guaranteed to be adjacently + allocated (this is not guaranteed to occur with multiple callocs or + mallocs), which may also improve cache locality in some + applications. + + The "chunks" argument is optional (i.e., may be null, which is + probably the most typical usage). If it is null, the returned array + is itself dynamically allocated and should also be freed when it is + no longer needed. Otherwise, the chunks array must be of at least + n_elements in length. It is filled in with the pointers to the + chunks. + + In either case, independent_calloc returns this pointer array, or + null if the allocation failed. If n_elements is zero and "chunks" + is null, it returns a chunk representing an array with zero elements + (which should be freed if not wanted). + + Each element must be individually freed when it is no longer + needed. 
If you'd like to instead be able to free all at once, you
+ should instead use regular calloc and assign pointers into this
+ space to represent elements. (In this case though, you cannot
+ independently free elements.)
+
+ independent_calloc simplifies and speeds up implementations of many
+ kinds of pools. It may also be useful when constructing large data
+ structures that initially have a fixed number of fixed-sized nodes,
+ but the number is not known at compile time, and some of the nodes
+ may later need to be freed. For example:
+
+ struct Node { int item; struct Node* next; };
+
+ struct Node* build_list() {
+ struct Node** pool;
+ int i;
+ int n = read_number_of_nodes_needed();
+ if (n <= 0) return 0;
+ pool = (struct Node**) independent_calloc(n, sizeof(struct Node), 0);
+ if (pool == 0) die();
+ // organize into a linked list...
+ struct Node* first = pool[0];
+ for (i = 0; i < n-1; ++i)
+ pool[i]->next = pool[i+1];
+ free(pool); // Can now free the array (or not, if it is needed later)
+ return first;
+ }
+*/
+void** dlindependent_calloc(size_t, size_t, void**);
+
+/*
+ independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
+
+ independent_comalloc allocates, all at once, a set of n_elements
+ chunks with sizes indicated in the "sizes" array. It returns
+ an array of pointers to these elements, each of which can be
+ independently freed, realloc'ed etc. The elements are guaranteed to
+ be adjacently allocated (this is not guaranteed to occur with
+ multiple callocs or mallocs), which may also improve cache locality
+ in some applications.
+
+ The "chunks" argument is optional (i.e., may be null). If it is null
+ the returned array is itself dynamically allocated and should also
+ be freed when it is no longer needed. Otherwise, the chunks array
+ must be of at least n_elements in length. It is filled in with the
+ pointers to the chunks.
+
+ In either case, independent_comalloc returns this pointer array, or
+ null if the allocation failed. If n_elements is zero and chunks is
+ null, it returns a chunk representing an array with zero elements
+ (which should be freed if not wanted).
+
+ Each element must be individually freed when it is no longer
+ needed. If you'd like to instead be able to free all at once, you
+ should instead use a single regular malloc, and assign pointers at
+ particular offsets in the aggregate space. (In this case though, you
+ cannot independently free elements.)
+
+ independent_comalloc differs from independent_calloc in that each
+ element may have a different size, and also that it does not
+ automatically clear elements.
+
+ independent_comalloc can be used to speed up allocation in cases
+ where several structs or objects must always be allocated at the
+ same time. For example:
+
+ struct Head { ... };
+ struct Foot { ... };
+
+ void send_message(char* msg) {
+ int msglen = strlen(msg);
+ size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
+ void* chunks[3];
+ if (independent_comalloc(3, sizes, chunks) == 0)
+ die();
+ struct Head* head = (struct Head*)(chunks[0]);
+ char* body = (char*)(chunks[1]);
+ struct Foot* foot = (struct Foot*)(chunks[2]);
+ // ...
+ }
+
+ In general though, independent_comalloc is worth using only for
+ larger values of n_elements. For small values, you probably won't
+ detect enough difference from series of malloc calls to bother.
+ + Overuse of independent_comalloc can increase overall memory usage, + since it cannot reuse existing noncontiguous small chunks that + might be available for some of the elements. +*/ +void** dlindependent_comalloc(size_t, size_t*, void**); + + +/* + pvalloc(size_t n); + Equivalent to valloc(minimum-page-that-holds(n)), that is, + round up n to nearest pagesize. + */ +void* dlpvalloc(size_t); + +/* + malloc_trim(size_t pad); + + If possible, gives memory back to the system (via negative arguments + to sbrk) if there is unused memory at the `high' end of the malloc + pool or in unused MMAP segments. You can call this after freeing + large blocks of memory to potentially reduce the system-level memory + requirements of a program. However, it cannot guarantee to reduce + memory. Under some allocation patterns, some large free blocks of + memory will be locked between two used chunks, so they cannot be + given back to the system. + + The `pad' argument to malloc_trim represents the amount of free + trailing space to leave untrimmed. If this argument is zero, only + the minimum amount of memory to maintain internal data structures + will be left. Non-zero arguments can be supplied to maintain enough + trailing space to service future expected allocations without having + to re-obtain memory from the system. + + Malloc_trim returns 1 if it actually released any memory, else 0. +*/ +int dlmalloc_trim(size_t); + +/* + malloc_stats(); + Prints on stderr the amount of space obtained from the system (both + via sbrk and mmap), the maximum amount (which may be more than + current if malloc_trim and/or munmap got called), and the current + number of bytes allocated via malloc (or realloc, etc) but not yet + freed. Note that this is the number of bytes allocated, not the + number requested. It will be larger than the number requested + because of alignment and bookkeeping overhead. Because it includes + alignment wastage as being in use, this figure may be greater than + zero even when no user-level chunks are allocated. + + The reported current and maximum system memory can be inaccurate if + a program makes other calls to system memory allocation functions + (normally sbrk) outside of malloc. + + malloc_stats prints only the most commonly interesting statistics. + More information can be obtained by calling mallinfo. +*/ +void dlmalloc_stats(void); + +#endif /* ONLY_MSPACES */ + +/* + malloc_usable_size(void* p); + + Returns the number of bytes you can actually use in + an allocated chunk, which may be more than you requested (although + often not) due to alignment and minimum size constraints. + You can use this many bytes without worrying about + overwriting other allocated objects. This is not a particularly great + programming practice. malloc_usable_size can be more useful in + debugging and assertions, for example: + + p = malloc(n); + assert(malloc_usable_size(p) >= 256); +*/ +size_t dlmalloc_usable_size(void*); + + +#if MSPACES + +/* + mspace is an opaque type representing an independent + region of space that supports mspace_malloc, etc. +*/ +typedef void* mspace; + +/* + create_mspace creates and returns a new independent space with the + given initial capacity, or, if 0, the default granularity size. It + returns null if there is no system memory available to create the + space. If argument locked is non-zero, the space uses a separate + lock to control access. The capacity of the space will grow + dynamically as needed to service mspace_malloc requests. 
You can + control the sizes of incremental increases of this space by + compiling with a different DEFAULT_GRANULARITY or dynamically + setting with mallopt(M_GRANULARITY, value). +*/ +mspace create_mspace(size_t capacity, int locked); + +/* + destroy_mspace destroys the given space, and attempts to return all + of its memory back to the system, returning the total number of + bytes freed. After destruction, the results of access to all memory + used by the space become undefined. +*/ +size_t destroy_mspace(mspace msp); + +/* + create_mspace_with_base uses the memory supplied as the initial base + of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this + space is used for bookkeeping, so the capacity must be at least this + large. (Otherwise 0 is returned.) When this initial space is + exhausted, additional memory will be obtained from the system. + Destroying this space will deallocate all additionally allocated + space (if possible) but not the initial base. +*/ +mspace create_mspace_with_base(void* base, size_t capacity, int locked); + +/* + mspace_mmap_large_chunks controls whether requests for large chunks + are allocated in their own mmapped regions, separate from others in + this mspace. By default this is enabled, which reduces + fragmentation. However, such chunks are not necessarily released to + the system upon destroy_mspace. Disabling by setting to false may + increase fragmentation, but avoids leakage when relying on + destroy_mspace to release all memory allocated using this space. +*/ +int mspace_mmap_large_chunks(mspace msp, int enable); + + +/* + mspace_malloc behaves as malloc, but operates within + the given space. +*/ +void* mspace_malloc(mspace msp, size_t bytes); + +/* + mspace_free behaves as free, but operates within + the given space. + + If compiled with FOOTERS==1, mspace_free is not actually needed. + free may be called instead of mspace_free because freed chunks from + any space are handled by their originating spaces. +*/ +void mspace_free(mspace msp, void* mem); + +/* + mspace_realloc behaves as realloc, but operates within + the given space. + + If compiled with FOOTERS==1, mspace_realloc is not actually + needed. realloc may be called instead of mspace_realloc because + realloced chunks from any space are handled by their originating + spaces. +*/ +void* mspace_realloc(mspace msp, void* mem, size_t newsize); + +/* + mspace_calloc behaves as calloc, but operates within + the given space. +*/ +void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size); + +/* + mspace_memalign behaves as memalign, but operates within + the given space. +*/ +void* mspace_memalign(mspace msp, size_t alignment, size_t bytes); + +/* + mspace_independent_calloc behaves as independent_calloc, but + operates within the given space. +*/ +void** mspace_independent_calloc(mspace msp, size_t n_elements, + size_t elem_size, void* chunks[]); + +/* + mspace_independent_comalloc behaves as independent_comalloc, but + operates within the given space. +*/ +void** mspace_independent_comalloc(mspace msp, size_t n_elements, + size_t sizes[], void* chunks[]); + +/* + mspace_footprint() returns the number of bytes obtained from the + system for this space. +*/ +size_t mspace_footprint(mspace msp); + +/* + mspace_max_footprint() returns the peak number of bytes obtained from the + system for this space. +*/ +size_t mspace_max_footprint(mspace msp); + + +#if !NO_MALLINFO +/* + mspace_mallinfo behaves as mallinfo, but reports properties of + the given space. 
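+
+  For example (a sketch; assumes ms is an mspace created earlier and
+  that mallinfo support is compiled in):
+
+    struct mallinfo mi = mspace_mallinfo(ms);
+    printf("arena: %d, in use: %d, free: %d\n",
+           mi.arena, mi.uordblks, mi.fordblks);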
+
+*/
+struct mallinfo mspace_mallinfo(mspace msp);
+#endif /* NO_MALLINFO */
+
+/*
+  mspace_usable_size(void* p) behaves the same as malloc_usable_size;
+*/
+ size_t mspace_usable_size(void* mem);
+
+/*
+  mspace_malloc_stats behaves as malloc_stats, but reports
+  properties of the given space.
+*/
+void mspace_malloc_stats(mspace msp);
+
+/*
+  mspace_trim behaves as malloc_trim, but
+  operates within the given space.
+*/
+int mspace_trim(mspace msp, size_t pad);
+
+/*
+  An alias for mallopt.
+*/
+int mspace_mallopt(int, int);
+
+#endif /* MSPACES */
+
+#ifdef __cplusplus
+}  /* end of extern "C" */
+#endif /* __cplusplus */
+
+/*
+  ========================================================================
+  To make a fully customizable malloc.h header file, cut everything
+  above this line, put into file malloc.h, edit to suit, and #include it
+  on the next line, as well as in programs that use this malloc.
+  ========================================================================
+*/
+
+/* #include "malloc.h" */
+
+/*------------------------------ internal #includes ---------------------- */
+
+#ifdef WIN32
+#pragma warning( disable : 4146 ) /* no "unsigned" warnings */
+#endif /* WIN32 */
+
+#include <stdio.h>       /* for printing in malloc_stats */
+
+#ifndef LACKS_ERRNO_H
+#include <errno.h>       /* for MALLOC_FAILURE_ACTION */
+#endif /* LACKS_ERRNO_H */
+#if FOOTERS
+#include <time.h>        /* for magic initialization */
+#endif /* FOOTERS */
+#ifndef LACKS_STDLIB_H
+#include <stdlib.h>      /* for abort() */
+#endif /* LACKS_STDLIB_H */
+#ifdef DEBUG
+#if ABORT_ON_ASSERT_FAILURE
+#undef assert
+#define assert(x) if(!(x)) ABORT
+#else /* ABORT_ON_ASSERT_FAILURE */
+#include <assert.h>
+#endif /* ABORT_ON_ASSERT_FAILURE */
+#else  /* DEBUG */
+#ifndef assert
+#define assert(x)
+#endif
+#define DEBUG 0
+#endif /* DEBUG */
+#ifndef LACKS_STRING_H
+#include <string.h>      /* for memset etc */
+#endif /* LACKS_STRING_H */
+#if USE_BUILTIN_FFS
+#ifndef LACKS_STRINGS_H
+#include <strings.h>     /* for ffs */
+#endif /* LACKS_STRINGS_H */
+#endif /* USE_BUILTIN_FFS */
+#if HAVE_MMAP
+#ifndef LACKS_SYS_MMAN_H
+#include <sys/mman.h>    /* for mmap */
+#endif /* LACKS_SYS_MMAN_H */
+#ifndef LACKS_FCNTL_H
+#include <fcntl.h>
+#endif /* LACKS_FCNTL_H */
+#endif /* HAVE_MMAP */
+#ifndef LACKS_UNISTD_H
+#include <unistd.h>      /* for sbrk, sysconf */
+#else /* LACKS_UNISTD_H */
+#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
+extern void* sbrk(ptrdiff_t);
+#endif /* FreeBSD etc */
+#endif /* LACKS_UNISTD_H */
+
+/* Declarations for locking */
+#if USE_LOCKS
+#ifndef WIN32
+#include <pthread.h>
+#if defined (__SVR4) && defined (__sun)  /* solaris */
+#include <thread.h>
+#endif /* solaris */
+#else
+#ifndef _M_AMD64
+/* These are already defined on AMD64 builds */
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+LONG __cdecl _InterlockedCompareExchange(LONG volatile *Dest, LONG Exchange, LONG Comp);
+LONG __cdecl _InterlockedExchange(LONG volatile *Target, LONG Value);
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+#endif /* _M_AMD64 */
+#pragma intrinsic (_InterlockedCompareExchange)
+#pragma intrinsic (_InterlockedExchange)
+#define interlockedcompareexchange _InterlockedCompareExchange
+#define interlockedexchange _InterlockedExchange
+#endif /* Win32 */
+// --- BEGIN OGRE MODIFICATION ---
+// MinGW compatibility
+#ifdef __MINGW32__
+#define interlockedcompareexchange InterlockedCompareExchange
+#define interlockedexchange InterlockedExchange
+#endif /* __MINGW32__ */
+// --- END OGRE MODIFICATION ---
+#endif /* USE_LOCKS */
+
+/* Declarations for bit scanning on win32 */
+#if defined(_MSC_VER) && _MSC_VER>=1300
+#ifndef 
BitScanForward /* Try to avoid pulling in WinNT.h */
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+unsigned char _BitScanForward(unsigned long *index, unsigned long mask);
+unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#define BitScanForward _BitScanForward
+#define BitScanReverse _BitScanReverse
+#pragma intrinsic(_BitScanForward)
+#pragma intrinsic(_BitScanReverse)
+#endif /* BitScanForward */
+#endif /* defined(_MSC_VER) && _MSC_VER>=1300 */
+
+#ifndef WIN32
+#ifndef malloc_getpagesize
+#  ifdef _SC_PAGESIZE         /* some SVR4 systems omit an underscore */
+#    ifndef _SC_PAGE_SIZE
+#      define _SC_PAGE_SIZE _SC_PAGESIZE
+#    endif
+#  endif
+#  ifdef _SC_PAGE_SIZE
+#    define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
+#  else
+#    if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
+       extern size_t getpagesize();
+#      define malloc_getpagesize getpagesize()
+#    else
+#      ifdef WIN32 /* use supplied emulation of getpagesize */
+#        define malloc_getpagesize getpagesize()
+#      else
+#        ifndef LACKS_SYS_PARAM_H
+#          include <sys/param.h>
+#        endif
+#        ifdef EXEC_PAGESIZE
+#          define malloc_getpagesize EXEC_PAGESIZE
+#        else
+#          ifdef NBPG
+#            ifndef CLSIZE
+#              define malloc_getpagesize NBPG
+#            else
+#              define malloc_getpagesize (NBPG * CLSIZE)
+#            endif
+#          else
+#            ifdef NBPC
+#              define malloc_getpagesize NBPC
+#            else
+#              ifdef PAGESIZE
+#                define malloc_getpagesize PAGESIZE
+#              else /* just guess */
+#                define malloc_getpagesize ((size_t)4096U)
+#              endif
+#            endif
+#          endif
+#        endif
+#      endif
+#    endif
+#  endif
+#endif
+#endif
+
+
+
+/* ------------------- size_t and alignment properties -------------------- */
+
+/* The byte and bit size of a size_t */
+#define SIZE_T_SIZE         (sizeof(size_t))
+#define SIZE_T_BITSIZE      (sizeof(size_t) << 3)
+
+/* Some constants coerced to size_t */
+/* Annoying but necessary to avoid errors on some platforms */
+#define SIZE_T_ZERO         ((size_t)0)
+#define SIZE_T_ONE          ((size_t)1)
+#define SIZE_T_TWO          ((size_t)2)
+#define SIZE_T_FOUR         ((size_t)4)
+#define TWO_SIZE_T_SIZES    (SIZE_T_SIZE<<1)
+#define FOUR_SIZE_T_SIZES   (SIZE_T_SIZE<<2)
+#define SIX_SIZE_T_SIZES    (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
+#define HALF_MAX_SIZE_T     (MAX_SIZE_T / 2U)
+
+/* The bit mask value corresponding to MALLOC_ALIGNMENT */
+#define CHUNK_ALIGN_MASK    (MALLOC_ALIGNMENT - SIZE_T_ONE)
+
+/* True if address a has acceptable alignment */
+#define is_aligned(A)       (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
+
+/* the number of bytes to offset an address to align it */
+#define align_offset(A)\
+ ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
+  ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
+
+/* -------------------------- MMAP preliminaries ------------------------- */
+
+/*
+   If HAVE_MORECORE or HAVE_MMAP are false, we just define calls and
+   checks to fail so compiler optimizer can delete code rather than
+   using so many "#if"s. 
+
+*/
+
+
+/* MORECORE and MMAP must return MFAIL on failure */
+#define MFAIL                ((void*)(MAX_SIZE_T))
+#define CMFAIL               ((char*)(MFAIL)) /* defined for convenience */
+
+#if HAVE_MMAP
+
+#ifndef WIN32
+#define MUNMAP_DEFAULT(a, s)  munmap((a), (s))
+#define MMAP_PROT            (PROT_READ|PROT_WRITE)
+#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
+#define MAP_ANONYMOUS        MAP_ANON
+#endif /* MAP_ANON */
+#ifdef MAP_ANONYMOUS
+#define MMAP_FLAGS           (MAP_PRIVATE|MAP_ANONYMOUS)
+#define MMAP_DEFAULT(s)       mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0)
+#else /* MAP_ANONYMOUS */
+/*
+   Nearly all versions of mmap support MAP_ANONYMOUS, so the following
+   is unlikely to be needed, but is supplied just in case.
+*/
+#define MMAP_FLAGS           (MAP_PRIVATE)
+static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
+#define MMAP_DEFAULT(s) ((dev_zero_fd < 0) ? \
+           (dev_zero_fd = open("/dev/zero", O_RDWR), \
+            mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
+            mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
+#endif /* MAP_ANONYMOUS */
+
+#define DIRECT_MMAP_DEFAULT(s) MMAP_DEFAULT(s)
+
+#else /* WIN32 */
+
+/* Win32 MMAP via VirtualAlloc */
+static FORCEINLINE void* win32mmap(size_t size) {
+  void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
+  return (ptr != 0)? ptr: MFAIL;
+}
+
+/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
+static FORCEINLINE void* win32direct_mmap(size_t size) {
+  void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
+                           PAGE_READWRITE);
+  return (ptr != 0)? ptr: MFAIL;
+}
+
+/* This function supports releasing coalesced segments */
+static FORCEINLINE int win32munmap(void* ptr, size_t size) {
+  MEMORY_BASIC_INFORMATION minfo;
+  char* cptr = (char*)ptr;
+  while (size) {
+    if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
+      return -1;
+    if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
+        minfo.State != MEM_COMMIT || minfo.RegionSize > size)
+      return -1;
+    if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
+      return -1;
+    cptr += minfo.RegionSize;
+    size -= minfo.RegionSize;
+  }
+  return 0;
+}
+
+#define MMAP_DEFAULT(s)             win32mmap(s)
+#define MUNMAP_DEFAULT(a, s)        win32munmap((a), (s))
+#define DIRECT_MMAP_DEFAULT(s)      win32direct_mmap(s)
+#endif /* WIN32 */
+#endif /* HAVE_MMAP */
+
+#if HAVE_MREMAP
+#ifndef WIN32
+#define MREMAP_DEFAULT(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
+#endif /* WIN32 */
+#endif /* HAVE_MREMAP */
+
+
+/**
+ * Define CALL_MORECORE
+ */
+#if HAVE_MORECORE
+    #ifdef MORECORE
+        #define CALL_MORECORE(S)    MORECORE(S)
+    #else  /* MORECORE */
+        #define CALL_MORECORE(S)    MORECORE_DEFAULT(S)
+    #endif /* MORECORE */
+#else  /* HAVE_MORECORE */
+    #define CALL_MORECORE(S)        MFAIL
+#endif /* HAVE_MORECORE */
+
+/**
+ * Define CALL_MMAP/CALL_MUNMAP/CALL_DIRECT_MMAP
+ */
+#if HAVE_MMAP
+    #define IS_MMAPPED_BIT          (SIZE_T_ONE)
+    #define USE_MMAP_BIT            (SIZE_T_ONE)
+
+    #ifdef MMAP
+        #define CALL_MMAP(s)        MMAP(s)
+    #else /* MMAP */
+        #define CALL_MMAP(s)        MMAP_DEFAULT(s)
+    #endif /* MMAP */
+    #ifdef MUNMAP
+        #define CALL_MUNMAP(a, s)   MUNMAP((a), (s))
+    #else /* MUNMAP */
+        #define CALL_MUNMAP(a, s)   MUNMAP_DEFAULT((a), (s))
+    #endif /* MUNMAP */
+    #ifdef DIRECT_MMAP
+        #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s)
+    #else /* DIRECT_MMAP */
+        #define CALL_DIRECT_MMAP(s) DIRECT_MMAP_DEFAULT(s)
+    #endif /* DIRECT_MMAP */
+#else  /* HAVE_MMAP */
+    #define IS_MMAPPED_BIT          (SIZE_T_ZERO)
+    #define USE_MMAP_BIT            (SIZE_T_ZERO)
+
+    #define MMAP(s)                 MFAIL
+    #define MUNMAP(a, s)            (-1)
+    #define DIRECT_MMAP(s)          MFAIL
+    #define 
CALL_DIRECT_MMAP(s) DIRECT_MMAP(s)
+    #define CALL_MMAP(s)            MMAP(s)
+    #define CALL_MUNMAP(a, s)       MUNMAP((a), (s))
+#endif /* HAVE_MMAP */
+
+/**
+ * Define CALL_MREMAP
+ */
+#if HAVE_MMAP && HAVE_MREMAP
+    #ifdef MREMAP
+        #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP((addr), (osz), (nsz), (mv))
+    #else /* MREMAP */
+        #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP_DEFAULT((addr), (osz), (nsz), (mv))
+    #endif /* MREMAP */
+#else  /* HAVE_MMAP && HAVE_MREMAP */
+    #define CALL_MREMAP(addr, osz, nsz, mv)     MFAIL
+#endif /* HAVE_MMAP && HAVE_MREMAP */
+
+/* mstate bit set if contiguous morecore disabled or failed */
+#define USE_NONCONTIGUOUS_BIT (4U)
+
+/* segment bit set in create_mspace_with_base */
+#define EXTERN_BIT            (8U)
+
+
+/* --------------------------- Lock preliminaries ------------------------ */
+
+/*
+  When locks are defined, there is one global lock, plus
+  one per-mspace lock.
+
+  The global lock ensures that mparams.magic and other unique
+  mparams values are initialized only once. It also protects
+  sequences of calls to MORECORE.  In many cases sys_alloc requires
+  two calls that should not be interleaved with calls by other
+  threads.  This does not protect against direct calls to MORECORE
+  by other threads not using this lock, so there is still code to
+  cope the best we can on interference.
+
+  Per-mspace locks surround calls to malloc, free, etc.  To enable use
+  in layered extensions, per-mspace locks are reentrant.
+
+  Because lock-protected regions generally have bounded times, it is
+  OK to use the supplied simple spinlocks in the custom versions for
+  x86.
+
+  If USE_LOCKS is > 1, the definitions of lock routines here are
+  bypassed, in which case you will need to define at least
+  INITIAL_LOCK, ACQUIRE_LOCK, RELEASE_LOCK and possibly TRY_LOCK
+  (which is not used in this malloc, but commonly needed in
+  extensions.) 
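+
+  For example, a USE_LOCKS > 1 definition layered on plain pthreads
+  mutexes might look like the following sketch (illustrative only;
+  unlike the built-in versions below, it does not make the locks
+  reentrant):
+
+    #define MLOCK_T           pthread_mutex_t
+    #define INITIAL_LOCK(sl)  pthread_mutex_init(sl, NULL)
+    #define ACQUIRE_LOCK(sl)  pthread_mutex_lock(sl)
+    #define RELEASE_LOCK(sl)  pthread_mutex_unlock(sl)
+    #define TRY_LOCK(sl)      (!pthread_mutex_trylock(sl))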
+
+*/
+
+#if USE_LOCKS == 1
+
+#if USE_SPIN_LOCKS
+#ifndef WIN32
+
+/* Custom pthread-style spin locks on x86 and x64 for gcc */
+struct pthread_mlock_t {
+  volatile unsigned int l;
+  volatile unsigned int c;
+  volatile pthread_t threadid;
+};
+#define MLOCK_T               struct pthread_mlock_t
+#define CURRENT_THREAD        pthread_self()
+#define INITIAL_LOCK(sl)      (memset(sl, 0, sizeof(MLOCK_T)), 0)
+#define ACQUIRE_LOCK(sl)      pthread_acquire_lock(sl)
+#define RELEASE_LOCK(sl)      pthread_release_lock(sl)
+#define TRY_LOCK(sl)          pthread_try_lock(sl)
+#define SPINS_PER_YIELD       63
+
+static MLOCK_T malloc_global_mutex = { 0, 0, 0};
+
+static FORCEINLINE int pthread_acquire_lock (MLOCK_T *sl) {
+  int spins = 0;
+  volatile unsigned int* lp = &sl->l;
+  for (;;) {
+    if (*lp != 0) {
+      if (sl->threadid == CURRENT_THREAD) {
+        ++sl->c;
+        return 0;
+      }
+    }
+    else {
+      /* place args to cmpxchgl in locals to evade oddities in some gccs */
+      int cmp = 0;
+      int val = 1;
+      int ret;
+      __asm__ __volatile__ ("lock; cmpxchgl %1, %2"
+                            : "=a" (ret)
+                            : "r" (val), "m" (*(lp)), "0"(cmp)
+                            : "memory", "cc");
+      if (!ret) {
+        assert(!sl->threadid);
+        sl->c = 1;
+        sl->threadid = CURRENT_THREAD;
+        return 0;
+      }
+      if ((++spins & SPINS_PER_YIELD) == 0) {
+#if defined (__SVR4) && defined (__sun) /* solaris */
+        thr_yield();
+#else
+#if defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__)
+        sched_yield();
+#else  /* no-op yield on unknown systems */
+        ;
+#endif /* __linux__ || __FreeBSD__ || __APPLE__ */
+#endif /* solaris */
+      }
+    }
+  }
+}
+
+static FORCEINLINE void pthread_release_lock (MLOCK_T *sl) {
+  volatile unsigned int* lp = &sl->l;
+  assert(sl->l != 0);
+  assert(sl->threadid == CURRENT_THREAD);
+  if (--sl->c == 0) {
+    int prev = 0;
+    int ret;
+    sl->threadid = 0;
+    __asm__ __volatile__ ("lock; xchgl %0, %1"
+                          : "=r" (ret)
+                          : "m" (*(lp)), "0"(prev)
+                          : "memory");
+  }
+}
+
+static FORCEINLINE int pthread_try_lock (MLOCK_T *sl) {
+  volatile unsigned int* lp = &sl->l;
+  if (*lp != 0) {
+    if (sl->threadid == CURRENT_THREAD) {
+      ++sl->c;
+      return 1;
+    }
+  }
+  else {
+    int cmp = 0;
+    int val = 1;
+    int ret;
+    __asm__ __volatile__ ("lock; cmpxchgl %1, %2"
+                          : "=a" (ret)
+                          : "r" (val), "m" (*(lp)), "0"(cmp)
+                          : "memory", "cc");
+    if (!ret) {
+      assert(!sl->threadid);
+      sl->c = 1;
+      sl->threadid = CURRENT_THREAD;
+      return 1;
+    }
+  }
+  return 0;
+}
+
+
+#else /* WIN32 */
+/* Custom win32-style spin locks on x86 and x64 for MSC */
+struct win32_mlock_t
+{
+  volatile long l;
+  volatile unsigned int c;
+  volatile long threadid;
+};
+
+#define MLOCK_T               struct win32_mlock_t
+#define CURRENT_THREAD        win32_getcurrentthreadid()
+#define INITIAL_LOCK(sl)      (memset(sl, 0, sizeof(MLOCK_T)), 0)
+#define ACQUIRE_LOCK(sl)      win32_acquire_lock(sl)
+#define RELEASE_LOCK(sl)      win32_release_lock(sl)
+#define TRY_LOCK(sl)          win32_try_lock(sl)
+#define SPINS_PER_YIELD       63
+
+static MLOCK_T malloc_global_mutex = { 0, 0, 0};
+
+static FORCEINLINE long win32_getcurrentthreadid() {
+#ifdef _MSC_VER
+#if defined(_M_IX86)
+  long *threadstruct=(long *)__readfsdword(0x18);
+  long threadid=threadstruct[0x24/sizeof(long)];
+  return threadid;
+#elif defined(_M_X64)
+  /* todo */
+  return GetCurrentThreadId();
+#else
+  return GetCurrentThreadId();
+#endif
+#else
+  return GetCurrentThreadId();
+#endif
+}
+
+static FORCEINLINE int win32_acquire_lock (MLOCK_T *sl) {
+  int spins = 0;
+  for (;;) {
+    if (sl->l != 0) {
+      if (sl->threadid == CURRENT_THREAD) {
+        ++sl->c;
+        return 0;
+      }
+    }
+    else {
+      if (!interlockedexchange(&sl->l, 1)) {
+        assert(!sl->threadid);
+        
sl->threadid = CURRENT_THREAD; + sl->c = 1; + return 0; + } + } + if ((++spins & SPINS_PER_YIELD) == 0) + SleepEx(0, FALSE); + } +} + +static FORCEINLINE void win32_release_lock (MLOCK_T *sl) { + assert(sl->threadid == CURRENT_THREAD); + assert(sl->l != 0); + if (--sl->c == 0) { + sl->threadid = 0; + interlockedexchange (&sl->l, 0); + } +} + +static FORCEINLINE int win32_try_lock (MLOCK_T *sl) { + if(sl->l != 0) { + if (sl->threadid == CURRENT_THREAD) { + ++sl->c; + return 1; + } + } + else { + if (!interlockedexchange(&sl->l, 1)){ + assert(!sl->threadid); + sl->threadid = CURRENT_THREAD; + sl->c = 1; + return 1; + } + } + return 0; +} + +#endif /* WIN32 */ +#else /* USE_SPIN_LOCKS */ + +#ifndef WIN32 +/* pthreads-based locks */ + +#define MLOCK_T pthread_mutex_t +#define CURRENT_THREAD pthread_self() +#define INITIAL_LOCK(sl) pthread_init_lock(sl) +#define ACQUIRE_LOCK(sl) pthread_mutex_lock(sl) +#define RELEASE_LOCK(sl) pthread_mutex_unlock(sl) +#define TRY_LOCK(sl) (!pthread_mutex_trylock(sl)) + +static MLOCK_T malloc_global_mutex = PTHREAD_MUTEX_INITIALIZER; + +/* Cope with old-style linux recursive lock initialization by adding */ +/* skipped internal declaration from pthread.h */ +#ifdef linux +#if !defined (PTHREAD_MUTEX_RECURSIVE) && defined (PTHREAD_MUTEX_RECURSIVE_NP) +extern int pthread_mutexattr_setkind_np __P ((pthread_mutexattr_t *__attr, + int __kind)); +#define PTHREAD_MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE_NP +#define pthread_mutexattr_settype(x,y) pthread_mutexattr_setkind_np(x,y) +#endif +#endif + +static int pthread_init_lock (MLOCK_T *sl) { + pthread_mutexattr_t attr; + if (pthread_mutexattr_init(&attr)) return 1; + if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)) return 1; + if (pthread_mutex_init(sl, &attr)) return 1; + if (pthread_mutexattr_destroy(&attr)) return 1; + return 0; +} + +#else /* WIN32 */ +/* Win32 critical sections */ +#define MLOCK_T CRITICAL_SECTION +#define CURRENT_THREAD GetCurrentThreadId() +#define INITIAL_LOCK(s) (!InitializeCriticalSectionAndSpinCount((s), 0x80000000|4000)) +#define ACQUIRE_LOCK(s) (EnterCriticalSection(s), 0) +#define RELEASE_LOCK(s) LeaveCriticalSection(s) +#define TRY_LOCK(s) TryEnterCriticalSection(s) +#define NEED_GLOBAL_LOCK_INIT + +static MLOCK_T malloc_global_mutex; +static volatile long malloc_global_mutex_status; + +/* Use spin loop to initialize global lock */ +static void init_malloc_global_mutex() { + for (;;) { + long stat = malloc_global_mutex_status; + if (stat > 0) + return; + /* transition to < 0 while initializing, then to > 0) */ + if (stat == 0 && + interlockedcompareexchange(&malloc_global_mutex_status, -1, 0) == 0) { + InitializeCriticalSection(&malloc_global_mutex); + interlockedexchange(&malloc_global_mutex_status,1); + return; + } + SleepEx(0, FALSE); + } +} + +#endif /* WIN32 */ +#endif /* USE_SPIN_LOCKS */ +#endif /* USE_LOCKS == 1 */ + +/* ----------------------- User-defined locks ------------------------ */ + +#if USE_LOCKS > 1 +/* Define your own lock implementation here */ +/* #define INITIAL_LOCK(sl) ... */ +/* #define ACQUIRE_LOCK(sl) ... */ +/* #define RELEASE_LOCK(sl) ... */ +/* #define TRY_LOCK(sl) ... */ +/* static MLOCK_T malloc_global_mutex = ... 
*/ +#endif /* USE_LOCKS > 1 */ + +/* ----------------------- Lock-based state ------------------------ */ + +#if USE_LOCKS +#define USE_LOCK_BIT (2U) +#else /* USE_LOCKS */ +#define USE_LOCK_BIT (0U) +#define INITIAL_LOCK(l) +#endif /* USE_LOCKS */ + +#if USE_LOCKS +#define ACQUIRE_MALLOC_GLOBAL_LOCK() ACQUIRE_LOCK(&malloc_global_mutex); +#define RELEASE_MALLOC_GLOBAL_LOCK() RELEASE_LOCK(&malloc_global_mutex); +#else /* USE_LOCKS */ +#define ACQUIRE_MALLOC_GLOBAL_LOCK() +#define RELEASE_MALLOC_GLOBAL_LOCK() +#endif /* USE_LOCKS */ + + +/* ----------------------- Chunk representations ------------------------ */ + +/* + (The following includes lightly edited explanations by Colin Plumb.) + + The malloc_chunk declaration below is misleading (but accurate and + necessary). It declares a "view" into memory allowing access to + necessary fields at known offsets from a given base. + + Chunks of memory are maintained using a `boundary tag' method as + originally described by Knuth. (See the paper by Paul Wilson + ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such + techniques.) Sizes of free chunks are stored both in the front of + each chunk and at the end. This makes consolidating fragmented + chunks into bigger chunks fast. The head fields also hold bits + representing whether chunks are free or in use. + + Here are some pictures to make it clearer. They are "exploded" to + show that the state of a chunk can be thought of as extending from + the high 31 bits of the head field of its header through the + prev_foot and PINUSE_BIT bit of the following chunk header. + + A chunk that's in use looks like: + + chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Size of previous chunk (if P = 0) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P| + | Size of this chunk 1| +-+ + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | | + +- -+ + | | + +- -+ + | : + +- size - sizeof(size_t) available payload bytes -+ + : | + chunk-> +- -+ + | | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1| + | Size of next chunk (may or may not be in use) | +-+ + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + + And if it's free, it looks like this: + + chunk-> +- -+ + | User payload (must be in use, or we would have merged!) 
|
+           +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
+         | Size of this chunk                                         0| +-+
+   mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+         | Next pointer                                                  |
+         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+         | Prev pointer                                                  |
+         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+         |                                                               :
+         +-      size - sizeof(struct chunk) unused bytes               -+
+         :                                                               |
+ chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+         | Size of this chunk                                            |
+         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0|
+       | Size of next chunk (must be in use, or we would have merged)| +-+
+ mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+       |                                                               :
+       +- User payload                                                -+
+       :                                                               |
+       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+                                                                     |0|
+                                                                     +-+
+  Note that since we always merge adjacent free chunks, the chunks
+  adjacent to a free chunk must be in use.
+
+  Given a pointer to a chunk (which can be derived trivially from the
+  payload pointer) we can, in O(1) time, find out whether the adjacent
+  chunks are free, and if so, unlink them from the lists that they
+  are on and merge them with the current chunk.
+
+  Chunks always begin on even word boundaries, so the mem portion
+  (which is returned to the user) is also on an even word boundary, and
+  thus at least double-word aligned.
+
+  The P (PINUSE_BIT) bit, stored in the unused low-order bit of the
+  chunk size (which is always a multiple of two words), is an in-use
+  bit for the *previous* chunk.  If that bit is *clear*, then the
+  word before the current chunk size contains the previous chunk
+  size, and can be used to find the front of the previous chunk.
+  The very first chunk allocated always has this bit set, preventing
+  access to non-existent (or non-owned) memory. If pinuse is set for
+  any given chunk, then you CANNOT determine the size of the
+  previous chunk, and might even get a memory addressing fault when
+  trying to do so.
+
+  The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of
+  the chunk size redundantly records whether the current chunk is
+  inuse. This redundancy enables usage checks within free and realloc,
+  and reduces indirection when freeing and consolidating chunks.
+
+  Each freshly allocated chunk must have both cinuse and pinuse set.
+  That is, each allocated chunk borders either a previously allocated
+  and still in-use chunk, or the base of its memory arena. This is
+  ensured by making all allocations from the `lowest' part of any
+  found chunk.  Further, no free chunk physically borders another one,
+  so each free chunk is known to be preceded and followed by either
+  inuse chunks or the ends of memory.
+
+  Note that the `foot' of the current chunk is actually represented
+  as the prev_foot of the NEXT chunk. This makes it easier to
+  deal with alignments etc but can be very confusing when trying
+  to extend or adapt this code.
+
+  The exceptions to all this are
+
+     1. The special chunk `top' is the top-most available chunk (i.e.,
+        the one bordering the end of available memory). It is treated
+        specially.  Top is never included in any bin, is used only if
+        no other chunk is available, and is released back to the
+        system if it is very large (see M_TRIM_THRESHOLD).  In effect,
+        the top chunk is treated as larger (and thus less well
+        fitting) than any other available chunk. 
The top chunk
+        doesn't update its trailing size field since there is no next
+        contiguous chunk that would have to index off it. However,
+        space is still allocated for it (TOP_FOOT_SIZE) to enable
+        separation or merging when space is extended.
+
+     2. Chunks allocated via mmap, which have the lowest-order bit
+        (IS_MMAPPED_BIT) set in their prev_foot fields, and do not set
+        PINUSE_BIT in their head fields.  Because they are allocated
+        one-by-one, each must carry its own prev_foot field, which is
+        also used to hold the offset this chunk has within its mmapped
+        region, which is needed to preserve alignment. Each mmapped
+        chunk is trailed by the first two fields of a fake next-chunk
+        for sake of usage checks.
+
+*/
+
+struct malloc_chunk {
+  size_t               prev_foot;  /* Size of previous chunk (if free).  */
+  size_t               head;       /* Size and inuse bits. */
+  struct malloc_chunk* fd;         /* double links -- used only if free. */
+  struct malloc_chunk* bk;
+};
+
+typedef struct malloc_chunk  mchunk;
+typedef struct malloc_chunk* mchunkptr;
+typedef struct malloc_chunk* sbinptr;  /* The type of bins of chunks */
+typedef unsigned int bindex_t;         /* Described below */
+typedef unsigned int binmap_t;         /* Described below */
+typedef unsigned int flag_t;           /* The type of various bit flag sets */
+
+/* ------------------- Chunks sizes and alignments ----------------------- */
+
+#define MCHUNK_SIZE         (sizeof(mchunk))
+
+#if FOOTERS
+#define CHUNK_OVERHEAD      (TWO_SIZE_T_SIZES)
+#else /* FOOTERS */
+#define CHUNK_OVERHEAD      (SIZE_T_SIZE)
+#endif /* FOOTERS */
+
+/* MMapped chunks need a second word of overhead ... */
+#define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
+/* ... and additional padding for fake next-chunk at foot */
+#define MMAP_FOOT_PAD       (FOUR_SIZE_T_SIZES)
+
+/* The smallest size we can malloc is an aligned minimal chunk */
+#define MIN_CHUNK_SIZE\
+  ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
+
+/* conversion from malloc headers to user pointers, and back */
+#define chunk2mem(p)        ((void*)((char*)(p)       + TWO_SIZE_T_SIZES))
+#define mem2chunk(mem)      ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
+/* chunk associated with aligned address A */
+#define align_as_chunk(A)   (mchunkptr)((A) + align_offset(chunk2mem(A)))
+
+/* Bounds on request (not chunk) sizes. */
+#define MAX_REQUEST         ((-MIN_CHUNK_SIZE) << 2)
+#define MIN_REQUEST         (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
+
+/* pad request bytes into a usable size */
+#define pad_request(req) \
+   (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
+
+/* pad request, checking for minimum (but not maximum) */
+#define request2size(req) \
+  (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
+
+
+/* ------------------ Operations on head and foot fields ----------------- */
+
+/*
+  The head field of a chunk is or'ed with PINUSE_BIT when the previous
+  adjacent chunk is in use, and or'ed with CINUSE_BIT if this chunk is
+  in use. If the chunk was obtained with mmap, the prev_foot field has
+  IS_MMAPPED_BIT set, otherwise holding the offset of the base of the
+  mmapped region to the base of the chunk.
+
+  FLAG4_BIT is not used by this malloc, but might be useful in extensions. 
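+
+  As a worked illustration (numbers chosen for example only): on a
+  32-bit build, an in-use 24-byte chunk whose predecessor is also in
+  use stores head == 24|PINUSE_BIT|CINUSE_BIT == 0x1B, and the
+  chunksize() macro below recovers 0x1B & ~FLAG_BITS == 24.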
+*/ + +#define PINUSE_BIT (SIZE_T_ONE) +#define CINUSE_BIT (SIZE_T_TWO) +#define FLAG4_BIT (SIZE_T_FOUR) +#define INUSE_BITS (PINUSE_BIT|CINUSE_BIT) +#define FLAG_BITS (PINUSE_BIT|CINUSE_BIT|FLAG4_BIT) + +/* Head value for fenceposts */ +#define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE) + +/* extraction of fields from head words */ +#define cinuse(p) ((p)->head & CINUSE_BIT) +#define pinuse(p) ((p)->head & PINUSE_BIT) +#define chunksize(p) ((p)->head & ~(FLAG_BITS)) + +#define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT) +#define clear_cinuse(p) ((p)->head &= ~CINUSE_BIT) + +/* Treat space at ptr +/- offset as a chunk */ +#define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s))) +#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s))) + +/* Ptr to next or previous physical malloc_chunk. */ +#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~FLAG_BITS))) +#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) )) + +/* extract next chunk's pinuse bit */ +#define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT) + +/* Get/set size at footer */ +#define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot) +#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s)) + +/* Set size, pinuse bit, and foot */ +#define set_size_and_pinuse_of_free_chunk(p, s)\ + ((p)->head = (s|PINUSE_BIT), set_foot(p, s)) + +/* Set size, pinuse bit, foot, and clear next pinuse */ +#define set_free_with_pinuse(p, s, n)\ + (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s)) + +#define is_mmapped(p)\ + (!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_MMAPPED_BIT)) + +/* Get the internal overhead associated with chunk p */ +#define overhead_for(p)\ + (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD) + +/* Return true if malloced space is not necessarily cleared */ +#if MMAP_CLEARS +#define calloc_must_clear(p) (!is_mmapped(p)) +#else /* MMAP_CLEARS */ +#define calloc_must_clear(p) (1) +#endif /* MMAP_CLEARS */ + +/* ---------------------- Overlaid data structures ----------------------- */ + +/* + When chunks are not in use, they are treated as nodes of either + lists or trees. + + "Small" chunks are stored in circular doubly-linked lists, and look + like this: + + chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Size of previous chunk | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + `head:' | Size of chunk, in bytes |P| + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Forward pointer to next chunk in list | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Back pointer to previous chunk in list | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Unused space (may be 0 bytes long) . + . . + . | +nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + `foot:' | Size of chunk, in bytes | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + + Larger chunks are kept in a form of bitwise digital trees (aka + tries) keyed on chunksizes. Because malloc_tree_chunks are only for + free chunks greater than 256 bytes, their size doesn't impose any + constraints on user chunk sizes. 
Each node looks like:
+
+ chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+         | Size of previous chunk                                        |
+         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ `head:' |                             Size of chunk, in bytes         |P|
+   mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+         | Forward pointer to next chunk of same size                    |
+         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+         | Back pointer to previous chunk of same size                   |
+         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+         | Pointer to left child (child[0])                              |
+         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+         | Pointer to right child (child[1])                             |
+         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+         | Pointer to parent                                             |
+         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+         | bin index of this chunk                                       |
+         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+         | Unused space                                                  .
+         .                                                               |
+nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ `foot:' |                             Size of chunk, in bytes           |
+         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+  Each tree holding treenodes is a tree of unique chunk sizes.  Chunks
+  of the same size are arranged in a circularly-linked list, with only
+  the oldest chunk (the next to be used, in our FIFO ordering)
+  actually in the tree.  (Tree members are distinguished by a non-null
+  parent pointer.)  If a chunk with the same size as an existing node
+  is inserted, it is linked off the existing node using pointers that
+  work in the same way as fd/bk pointers of small chunks.
+
+  Each tree contains a power of 2 sized range of chunk sizes (the
+  smallest is 0x100 <= x < 0x180), which is divided in half at each
+  tree level, with the chunks in the smaller half of the range (0x100
+  <= x < 0x140 for the top node) in the left subtree and the larger
+  half (0x140 <= x < 0x180) in the right subtree.  This is, of course,
+  done by inspecting individual bits.
+
+  Using these rules, each node's left subtree contains all smaller
+  sizes than its right subtree.  However, the node at the root of each
+  subtree has no particular ordering relationship to either.  (The
+  dividing line between the subtree sizes is based on trie relation.)
+  If we remove the last chunk of a given size from the interior of the
+  tree, we need to replace it with a leaf node.  The tree ordering
+  rules permit a node to be replaced by any leaf below it.
+
+  The smallest chunk in a tree (a common operation in a best-fit
+  allocator) can be found by walking a path to the leftmost leaf in
+  the tree.  Unlike a usual binary tree, where we follow left child
+  pointers until we reach a null, here we follow the right child
+  pointer any time the left one is null, until we reach a leaf with
+  both child pointers null. The smallest chunk in the tree will be
+  somewhere along that path.
+
+  The worst case number of steps to add, find, or remove a node is
+  bounded by the number of bits differentiating chunks within
+  bins. Under current bin calculations, this ranges from 6 up to 21
+  (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case
+  is of course much better. 
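+
+  As a concrete illustration (sizes chosen for example only): a free
+  chunk of size 0x120 satisfies 0x120 >> TREEBIN_SHIFT == 1, placing
+  it in the first tree range (0x100 <= x < 0x180); since 0x120 <
+  0x140, a lookup for it descends into the left subtree of that
+  tree's root.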
+*/ + +struct malloc_tree_chunk { + /* The first four fields must be compatible with malloc_chunk */ + size_t prev_foot; + size_t head; + struct malloc_tree_chunk* fd; + struct malloc_tree_chunk* bk; + + struct malloc_tree_chunk* child[2]; + struct malloc_tree_chunk* parent; + bindex_t index; +}; + +typedef struct malloc_tree_chunk tchunk; +typedef struct malloc_tree_chunk* tchunkptr; +typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */ + +/* A little helper macro for trees */ +#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1]) + +/* ----------------------------- Segments -------------------------------- */ + +/* + Each malloc space may include non-contiguous segments, held in a + list headed by an embedded malloc_segment record representing the + top-most space. Segments also include flags holding properties of + the space. Large chunks that are directly allocated by mmap are not + included in this list. They are instead independently created and + destroyed without otherwise keeping track of them. + + Segment management mainly comes into play for spaces allocated by + MMAP. Any call to MMAP might or might not return memory that is + adjacent to an existing segment. MORECORE normally contiguously + extends the current space, so this space is almost always adjacent, + which is simpler and faster to deal with. (This is why MORECORE is + used preferentially to MMAP when both are available -- see + sys_alloc.) When allocating using MMAP, we don't use any of the + hinting mechanisms (inconsistently) supported in various + implementations of unix mmap, or distinguish reserving from + committing memory. Instead, we just ask for space, and exploit + contiguity when we get it. It is probably possible to do + better than this on some systems, but no general scheme seems + to be significantly better. + + Management entails a simpler variant of the consolidation scheme + used for chunks to reduce fragmentation -- new adjacent memory is + normally prepended or appended to an existing segment. However, + there are limitations compared to chunk consolidation that mostly + reflect the fact that segment processing is relatively infrequent + (occurring only when getting memory from system) and that we + don't expect to have huge numbers of segments: + + * Segments are not indexed, so traversal requires linear scans. (It + would be possible to index these, but is not worth the extra + overhead and complexity for most programs on most platforms.) + * New segments are only appended to old ones when holding top-most + memory; if they cannot be prepended to others, they are held in + different segments. + + Except for the top-most segment of an mstate, each segment record + is kept at the tail of its segment. Segments are added by pushing + segment records onto the list headed by &mstate.seg for the + containing mstate. + + Segment flags control allocation/merge/deallocation policies: + * If EXTERN_BIT set, then we did not allocate this segment, + and so should not try to deallocate or merge with others. + (This currently holds only for the initial segment passed + into create_mspace_with_base.) + * If IS_MMAPPED_BIT set, the segment may be merged with + other surrounding mmapped segments and trimmed/de-allocated + using munmap. + * If neither bit is set, then the segment was obtained using + MORECORE so can be merged with surrounding MORECORE'd segments + and deallocated/trimmed using MORECORE with negative arguments. 
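+
+  For instance (an illustrative reading of the flags above, not new
+  behavior): a segment obtained through CALL_MMAP in sys_alloc carries
+  IS_MMAPPED_BIT in its sflags, so is_mmapped_segment() below is true
+  for it and it is a candidate for release via munmap, while a
+  MORECORE'd segment carries neither bit.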
+
+*/
+
+struct malloc_segment {
+  char*        base;             /* base address */
+  size_t       size;             /* allocated size */
+  struct malloc_segment* next;   /* ptr to next segment */
+  flag_t       sflags;           /* mmap and extern flag */
+};
+
+#define is_mmapped_segment(S)  ((S)->sflags & IS_MMAPPED_BIT)
+#define is_extern_segment(S)   ((S)->sflags & EXTERN_BIT)
+
+typedef struct malloc_segment  msegment;
+typedef struct malloc_segment* msegmentptr;
+
+/* ---------------------------- malloc_state ----------------------------- */
+
+/*
+   A malloc_state holds all of the bookkeeping for a space.
+   The main fields are:
+
+  Top
+    The topmost chunk of the currently active segment. Its size is
+    cached in topsize.  The actual size of topmost space is
+    topsize+TOP_FOOT_SIZE, which includes space reserved for adding
+    fenceposts and segment records if necessary when getting more
+    space from the system.  The size at which to autotrim top is
+    cached from mparams in trim_check, except that it is disabled if
+    an autotrim fails.
+
+  Designated victim (dv)
+    This is the preferred chunk for servicing small requests that
+    don't have exact fits.  It is normally the chunk split off most
+    recently to service another small request.  Its size is cached in
+    dvsize. The link fields of this chunk are not maintained since it
+    is not kept in a bin.
+
+  SmallBins
+    An array of bin headers for free chunks.  These bins hold chunks
+    with sizes less than MIN_LARGE_SIZE bytes. Each bin contains
+    chunks of all the same size, spaced 8 bytes apart.  To simplify
+    use in double-linked lists, each bin header acts as a malloc_chunk
+    pointing to the real first node, if it exists (else pointing to
+    itself).  This avoids special-casing for headers.  But to avoid
+    waste, we allocate only the fd/bk pointers of bins, and then use
+    repositioning tricks to treat these as the fields of a chunk.
+
+  TreeBins
+    Treebins are pointers to the roots of trees holding a range of
+    sizes. There are 2 equally spaced treebins for each power of two
+    from TREE_SHIFT to TREE_SHIFT+16. The last bin holds anything
+    larger.
+
+  Bin maps
+    There is one bit map for small bins ("smallmap") and one for
+    treebins ("treemap").  Each bin sets its bit when non-empty, and
+    clears the bit when empty.  Bit operations are then used to avoid
+    bin-by-bin searching -- nearly all "search" is done without ever
+    looking at bins that won't be selected.  The bit maps
+    conservatively use 32 bits per map word, even on a 64-bit system.
+    For a good description of some of the bit-based techniques used
+    here, see Henry S. Warren Jr's book "Hacker's Delight" (and
+    supplement at http://hackersdelight.org/). Many of these are
+    intended to reduce the branchiness of paths through malloc etc, as
+    well as to reduce the number of memory locations read or written.
+
+  Segments
+    A list of segments headed by an embedded malloc_segment record
+    representing the initial space.
+
+  Address check support
+    The least_addr field is the least address ever obtained from
+    MORECORE or MMAP. Attempted frees and reallocs of any address less
+    than this are trapped (unless INSECURE is defined).
+
+  Magic tag
+    A cross-check field that should always hold the same value as
+    mparams.magic.
+
+  Flags
+    Bits recording whether to use MMAP, locks, or contiguous MORECORE
+
+  Statistics
+    Each space keeps track of current and maximum system memory
+    obtained via MORECORE or MMAP. 
+
+
+  Trim support
+    Fields holding the amount of unused topmost memory that should
+    trigger trimming, and a counter to force periodic scanning to
+    release unused non-topmost segments.
+
+  Locking
+    If USE_LOCKS is defined, the "mutex" lock is acquired and released
+    around every public call using this mspace.
+
+  Extension support
+    A void* pointer and a size_t field that can be used to help implement
+    extensions to this malloc.
+*/
+
+/* Bin types, widths and sizes */
+#define NSMALLBINS        (32U)
+#define NTREEBINS         (32U)
+#define SMALLBIN_SHIFT    (3U)
+#define SMALLBIN_WIDTH    (SIZE_T_ONE << SMALLBIN_SHIFT)
+#define TREEBIN_SHIFT     (8U)
+#define MIN_LARGE_SIZE    (SIZE_T_ONE << TREEBIN_SHIFT)
+#define MAX_SMALL_SIZE    (MIN_LARGE_SIZE - SIZE_T_ONE)
+#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
+
+struct malloc_state {
+  binmap_t   smallmap;
+  binmap_t   treemap;
+  size_t     dvsize;
+  size_t     topsize;
+  char*      least_addr;
+  mchunkptr  dv;
+  mchunkptr  top;
+  size_t     trim_check;
+  size_t     release_checks;
+  size_t     magic;
+  mchunkptr  smallbins[(NSMALLBINS+1)*2];
+  tbinptr    treebins[NTREEBINS];
+  size_t     footprint;
+  size_t     max_footprint;
+  flag_t     mflags;
+#if USE_LOCKS
+  MLOCK_T    mutex;     /* locate lock among fields that rarely change */
+#endif /* USE_LOCKS */
+  msegment   seg;
+  void*      extp;      /* Unused but available for extensions */
+  size_t     exts;
+};
+
+typedef struct malloc_state* mstate;
+
+/* ------------- Global malloc_state and malloc_params ------------------- */
+
+/*
+  malloc_params holds global properties, including those that can be
+  dynamically set using mallopt. There is a single instance, mparams,
+  initialized in init_mparams. Note that the non-zeroness of "magic"
+  also serves as an initialization flag.
+*/
+
+struct malloc_params {
+  volatile size_t magic;
+  size_t page_size;
+  size_t granularity;
+  size_t mmap_threshold;
+  size_t trim_threshold;
+  flag_t default_mflags;
+};
+
+static struct malloc_params mparams;
+
+/* Ensure mparams initialized */
+#define ensure_initialization() (mparams.magic != 0 || init_mparams())
+
+#if !ONLY_MSPACES
+
+/* The global malloc_state used for all non-"mspace" calls */
+static struct malloc_state _gm_;
+#define gm                 (&_gm_)
+#define is_global(M)       ((M) == &_gm_)
+
+#endif /* !ONLY_MSPACES */
+
+#define is_initialized(M)  ((M)->top != 0)
+
+/* -------------------------- system alloc setup ------------------------- */
+
+/* Operations on mflags */
+
+#define use_lock(M)           ((M)->mflags &   USE_LOCK_BIT)
+#define enable_lock(M)        ((M)->mflags |=  USE_LOCK_BIT)
+#define disable_lock(M)       ((M)->mflags &= ~USE_LOCK_BIT)
+
+#define use_mmap(M)           ((M)->mflags &   USE_MMAP_BIT)
+#define enable_mmap(M)        ((M)->mflags |=  USE_MMAP_BIT)
+#define disable_mmap(M)       ((M)->mflags &= ~USE_MMAP_BIT)
+
+#define use_noncontiguous(M)  ((M)->mflags &   USE_NONCONTIGUOUS_BIT)
+#define disable_contiguous(M) ((M)->mflags |=  USE_NONCONTIGUOUS_BIT)
+
+#define set_lock(M,L)\
+ ((M)->mflags = (L)?\
+  ((M)->mflags | USE_LOCK_BIT) :\
+  ((M)->mflags & ~USE_LOCK_BIT))
+
+/* page-align a size */
+#define page_align(S)\
+ (((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE))
+
+/* granularity-align a size */
+#define granularity_align(S)\
+  (((S) + (mparams.granularity - SIZE_T_ONE))\
+   & ~(mparams.granularity - SIZE_T_ONE))
+
+
+/* For mmap, use granularity alignment on windows, else page-align */
+#ifdef WIN32
+#define mmap_align(S) granularity_align(S)
+#else
+#define mmap_align(S) page_align(S)
+#endif
+
+/* For sys_alloc, enough padding to ensure can malloc request on 
success */ +#define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT) + +#define is_page_aligned(S)\ + (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0) +#define is_granularity_aligned(S)\ + (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0) + +/* True if segment S holds address A */ +#define segment_holds(S, A)\ + ((char*)(A) >= S->base && (char*)(A) < S->base + S->size) + +/* Return segment holding given address */ +static msegmentptr segment_holding(mstate m, char* addr) { + msegmentptr sp = &m->seg; + for (;;) { + if (addr >= sp->base && addr < sp->base + sp->size) + return sp; + if ((sp = sp->next) == 0) + return 0; + } +} + +/* Return true if segment contains a segment link */ +static int has_segment_link(mstate m, msegmentptr ss) { + msegmentptr sp = &m->seg; + for (;;) { + if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size) + return 1; + if ((sp = sp->next) == 0) + return 0; + } +} + +#ifndef MORECORE_CANNOT_TRIM +#define should_trim(M,s) ((s) > (M)->trim_check) +#else /* MORECORE_CANNOT_TRIM */ +#define should_trim(M,s) (0) +#endif /* MORECORE_CANNOT_TRIM */ + +/* + TOP_FOOT_SIZE is padding at the end of a segment, including space + that may be needed to place segment records and fenceposts when new + noncontiguous segments are added. +*/ +#define TOP_FOOT_SIZE\ + (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE) + + +/* ------------------------------- Hooks -------------------------------- */ + +/* + PREACTION should be defined to return 0 on success, and nonzero on + failure. If you are not using locking, you can redefine these to do + anything you like. +*/ + +#if USE_LOCKS + +#define PREACTION(M) ((use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0) +#define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); } +#else /* USE_LOCKS */ + +#ifndef PREACTION +#define PREACTION(M) (0) +#endif /* PREACTION */ + +#ifndef POSTACTION +#define POSTACTION(M) +#endif /* POSTACTION */ + +#endif /* USE_LOCKS */ + +/* + CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses. + USAGE_ERROR_ACTION is triggered on detected bad frees and + reallocs. The argument p is an address that might have triggered the + fault. It is ignored by the two predefined actions, but might be + useful in custom actions that try to help diagnose errors. +*/ + +#if PROCEED_ON_ERROR + +/* A count of the number of corruption errors causing resets */ +int malloc_corruption_error_count; + +/* default corruption action */ +static void reset_on_error(mstate m); + +#define CORRUPTION_ERROR_ACTION(m) reset_on_error(m) +#define USAGE_ERROR_ACTION(m, p) + +#else /* PROCEED_ON_ERROR */ + +#ifndef CORRUPTION_ERROR_ACTION +#define CORRUPTION_ERROR_ACTION(m) ABORT +#endif /* CORRUPTION_ERROR_ACTION */ + +#ifndef USAGE_ERROR_ACTION +#define USAGE_ERROR_ACTION(m,p) ABORT +#endif /* USAGE_ERROR_ACTION */ + +#endif /* PROCEED_ON_ERROR */ + +/* -------------------------- Debugging setup ---------------------------- */ + +#if ! 
DEBUG + +#define check_free_chunk(M,P) +#define check_inuse_chunk(M,P) +#define check_malloced_chunk(M,P,N) +#define check_mmapped_chunk(M,P) +#define check_malloc_state(M) +#define check_top_chunk(M,P) + +#else /* DEBUG */ +#define check_free_chunk(M,P) do_check_free_chunk(M,P) +#define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P) +#define check_top_chunk(M,P) do_check_top_chunk(M,P) +#define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N) +#define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P) +#define check_malloc_state(M) do_check_malloc_state(M) + +static void do_check_any_chunk(mstate m, mchunkptr p); +static void do_check_top_chunk(mstate m, mchunkptr p); +static void do_check_mmapped_chunk(mstate m, mchunkptr p); +static void do_check_inuse_chunk(mstate m, mchunkptr p); +static void do_check_free_chunk(mstate m, mchunkptr p); +static void do_check_malloced_chunk(mstate m, void* mem, size_t s); +static void do_check_tree(mstate m, tchunkptr t); +static void do_check_treebin(mstate m, bindex_t i); +static void do_check_smallbin(mstate m, bindex_t i); +static void do_check_malloc_state(mstate m); +static int bin_find(mstate m, mchunkptr x); +static size_t traverse_and_check(mstate m); +#endif /* DEBUG */ + +/* ---------------------------- Indexing Bins ---------------------------- */ + +#define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS) +#define small_index(s) ((s) >> SMALLBIN_SHIFT) +#define small_index2size(i) ((i) << SMALLBIN_SHIFT) +#define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE)) + +/* addressing by index. See above about smallbin repositioning */ +#define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i)<<1]))) +#define treebin_at(M,i) (&((M)->treebins[i])) + +/* assign tree index for size S to variable I. Use x86 asm if possible */ +#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) +#define compute_tree_index(S, I)\ +{\ + unsigned int X = S >> TREEBIN_SHIFT;\ + if (X == 0)\ + I = 0;\ + else if (X > 0xFFFF)\ + I = NTREEBINS-1;\ + else {\ + unsigned int K;\ + __asm__("bsrl\t%1, %0\n\t" : "=r" (K) : "rm" (X));\ + I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ + }\ +} + +#elif defined (__INTEL_COMPILER) +#define compute_tree_index(S, I)\ +{\ + size_t X = S >> TREEBIN_SHIFT;\ + if (X == 0)\ + I = 0;\ + else if (X > 0xFFFF)\ + I = NTREEBINS-1;\ + else {\ + unsigned int K = _bit_scan_reverse (X); \ + I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ + }\ +} + +#elif defined(_MSC_VER) && _MSC_VER>=1300 +#define compute_tree_index(S, I)\ +{\ + size_t X = S >> TREEBIN_SHIFT;\ + if (X == 0)\ + I = 0;\ + else if (X > 0xFFFF)\ + I = NTREEBINS-1;\ + else {\ + unsigned int K;\ + _BitScanReverse((DWORD *) &K, X);\ + I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ + }\ +} + +#else /* GNUC */ +#define compute_tree_index(S, I)\ +{\ + size_t X = S >> TREEBIN_SHIFT;\ + if (X == 0)\ + I = 0;\ + else if (X > 0xFFFF)\ + I = NTREEBINS-1;\ + else {\ + unsigned int Y = (unsigned int)X;\ + unsigned int N = ((Y - 0x100) >> 16) & 8;\ + unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\ + N += K;\ + N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\ + K = 14 - N + ((Y <<= K) >> 15);\ + I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\ + }\ +} +#endif /* GNUC */ + +/* Bit representing maximum resolved size in a treebin at i */ +#define bit_for_tree_index(i) \ + (i == NTREEBINS-1)? 
(SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
+
+/* Shift placing maximum resolved bit in a treebin at i as sign bit */
+#define leftshift_for_tree_index(i) \
+ ((i == NTREEBINS-1)? 0 : \
+ ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
+
+/* The size of the smallest chunk held in bin with index i */
+#define minsize_for_tree_index(i) \
+ ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \
+ (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
+
+
+/* ------------------------ Operations on bin maps ----------------------- */
+
+/* bit corresponding to given index */
+#define idx2bit(i) ((binmap_t)(1) << (i))
+
+/* Mark/Clear bits with given index */
+#define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i))
+#define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i))
+#define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i))
+
+#define mark_treemap(M,i) ((M)->treemap |= idx2bit(i))
+#define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i))
+#define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i))
+
+/* isolate the least set bit of a bitmap */
+#define least_bit(x) ((x) & -(x))
+
+/* mask with all bits to left of least bit of x on */
+#define left_bits(x) ((x<<1) | -(x<<1))
+
+/* mask with all bits to left of or equal to least bit of x on */
+#define same_or_left_bits(x) ((x) | -(x))
+
+/* index corresponding to given bit. Use x86 asm if possible */
+
+#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
+#define compute_bit2idx(X, I)\
+{\
+ unsigned int J;\
+ __asm__("bsfl\t%1, %0\n\t" : "=r" (J) : "rm" (X));\
+ I = (bindex_t)J;\
+}
+
+#elif defined (__INTEL_COMPILER)
+#define compute_bit2idx(X, I)\
+{\
+ unsigned int J;\
+ J = _bit_scan_forward (X); \
+ I = (bindex_t)J;\
+}
+
+#elif defined(_MSC_VER) && _MSC_VER>=1300
+#define compute_bit2idx(X, I)\
+{\
+ unsigned int J;\
+ _BitScanForward((DWORD *) &J, X);\
+ I = (bindex_t)J;\
+}
+
+#elif USE_BUILTIN_FFS
+#define compute_bit2idx(X, I) I = ffs(X)-1
+
+#else
+#define compute_bit2idx(X, I)\
+{\
+ unsigned int Y = X - 1;\
+ unsigned int K = Y >> (16-4) & 16;\
+ unsigned int N = K; Y >>= K;\
+ N += K = Y >> (8-3) & 8; Y >>= K;\
+ N += K = Y >> (4-2) & 4; Y >>= K;\
+ N += K = Y >> (2-1) & 2; Y >>= K;\
+ N += K = Y >> (1-0) & 1; Y >>= K;\
+ I = (bindex_t)(N + Y);\
+}
+#endif /* GNUC */
+
+
+/* ----------------------- Runtime Check Support ------------------------- */
+
+/*
+ For security, the main invariant is that malloc/free/etc never
+ writes to a static address other than malloc_state, unless static
+ malloc_state itself has been corrupted, which cannot occur via
+ malloc (because of these checks). In essence this means that we
+ believe all pointers, sizes, maps etc held in malloc_state, but
+ check all of those linked or offset from other embedded data
+ structures. These checks are interspersed with main code in a way
+ that tends to minimize their run-time cost.
+
+ When FOOTERS is defined, in addition to range checking, we also
+ verify footer fields of inuse chunks, which can be used to guarantee
+ that the mstate controlling malloc/free is intact. This is a
+ streamlined version of the approach described by William Robertson
+ et al. in "Run-time Detection of Heap-based Overflows" LISA'03
+ http://www.usenix.org/events/lisa03/tech/robertson.html The footer
+ of an inuse chunk holds the xor of its mstate and a random seed,
+ which is checked upon calls to free() and realloc().
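+
+ For illustration, the check performed on deallocation amounts to
+ this sketch (it mirrors the FOOTERS path at the top of dlfree
+ below; get_mstate_for and ok_magic are defined later in this file):
+
+     mchunkptr p = mem2chunk(mem);
+     mstate fm = get_mstate_for(p);    recovers the mstate by xoring
+     if (!ok_magic(fm)) {              the footer with the seed
+       USAGE_ERROR_ACTION(fm, p);
+       return;
+     }
+
+ so the footer itself is just ((size_t)m ^ mparams.magic).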
This value is
+ (probabilistically) unguessable from outside the program, but can be
+ computed by any code successfully malloc'ing any chunk, so does not
+ itself provide protection against code that has already broken
+ security through some other means. Unlike Robertson et al., we
+ always dynamically check addresses of all offset chunks (previous,
+ next, etc). This turns out to be cheaper than relying on hashes.
+*/
+
+#if !INSECURE
+/* Check if address a is at least as high as any from MORECORE or MMAP */
+#define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
+/* Check if address of next chunk n is higher than base chunk p */
+#define ok_next(p, n) ((char*)(p) < (char*)(n))
+/* Check if p has its cinuse bit on */
+#define ok_cinuse(p) cinuse(p)
+/* Check if p has its pinuse bit on */
+#define ok_pinuse(p) pinuse(p)
+
+#else /* !INSECURE */
+#define ok_address(M, a) (1)
+#define ok_next(b, n) (1)
+#define ok_cinuse(p) (1)
+#define ok_pinuse(p) (1)
+#endif /* !INSECURE */
+
+#if (FOOTERS && !INSECURE)
+/* Check if (alleged) mstate m has expected magic field */
+#define ok_magic(M) ((M)->magic == mparams.magic)
+#else /* (FOOTERS && !INSECURE) */
+#define ok_magic(M) (1)
+#endif /* (FOOTERS && !INSECURE) */
+
+
+/* In gcc, use __builtin_expect to minimize impact of checks */
+#if !INSECURE
+#if defined(__GNUC__) && __GNUC__ >= 3
+#define RTCHECK(e) __builtin_expect(e, 1)
+#else /* GNUC */
+#define RTCHECK(e) (e)
+#endif /* GNUC */
+#else /* !INSECURE */
+#define RTCHECK(e) (1)
+#endif /* !INSECURE */
+
+/* macros to set up inuse chunks with or without footers */
+
+#if !FOOTERS
+
+#define mark_inuse_foot(M,p,s)
+
+/* Set cinuse bit and pinuse bit of next chunk */
+#define set_inuse(M,p,s)\
+ ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
+ ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
+
+/* Set cinuse and pinuse of this chunk and pinuse of next chunk */
+#define set_inuse_and_pinuse(M,p,s)\
+ ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
+ ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
+
+/* Set size, cinuse and pinuse bit of this chunk */
+#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
+ ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
+
+#else /* FOOTERS */
+
+/* Set foot of inuse chunk to be xor of mstate and seed */
+#define mark_inuse_foot(M,p,s)\
+ (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))
+
+#define get_mstate_for(p)\
+ ((mstate)(((mchunkptr)((char*)(p) +\
+ (chunksize(p))))->prev_foot ^ mparams.magic))
+
+#define set_inuse(M,p,s)\
+ ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
+ (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
+ mark_inuse_foot(M,p,s))
+
+#define set_inuse_and_pinuse(M,p,s)\
+ ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
+ (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\
+ mark_inuse_foot(M,p,s))
+
+#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
+ ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
+ mark_inuse_foot(M, p, s))
+
+#endif /* !FOOTERS */
+
+/* ---------------------------- setting mparams -------------------------- */
+
+/* Initialize mparams */
+static int init_mparams(void) {
+#ifdef NEED_GLOBAL_LOCK_INIT
+ if (malloc_global_mutex_status <= 0)
+ init_malloc_global_mutex();
+#endif
+
+ ACQUIRE_MALLOC_GLOBAL_LOCK();
+ if (mparams.magic == 0) {
+ size_t magic;
+ size_t psize;
+ size_t gsize;
+
+#ifndef WIN32
+ psize = malloc_getpagesize;
+ gsize = ((DEFAULT_GRANULARITY != 0)?
DEFAULT_GRANULARITY : psize); +#else /* WIN32 */ + { + SYSTEM_INFO system_info; + GetSystemInfo(&system_info); + psize = system_info.dwPageSize; + gsize = ((DEFAULT_GRANULARITY != 0)? + DEFAULT_GRANULARITY : system_info.dwAllocationGranularity); + } +#endif /* WIN32 */ + + /* Sanity-check configuration: + size_t must be unsigned and as wide as pointer type. + ints must be at least 4 bytes. + alignment must be at least 8. + Alignment, min chunk size, and page size must all be powers of 2. + */ + if ((sizeof(size_t) != sizeof(char*)) || + (MAX_SIZE_T < MIN_CHUNK_SIZE) || + (sizeof(int) < 4) || + (MALLOC_ALIGNMENT < (size_t)8U) || + ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-SIZE_T_ONE)) != 0) || + ((MCHUNK_SIZE & (MCHUNK_SIZE-SIZE_T_ONE)) != 0) || + ((gsize & (gsize-SIZE_T_ONE)) != 0) || + ((psize & (psize-SIZE_T_ONE)) != 0)) + ABORT; + + mparams.granularity = gsize; + mparams.page_size = psize; + mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD; + mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD; +#if MORECORE_CONTIGUOUS + mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT; +#else /* MORECORE_CONTIGUOUS */ + mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT; +#endif /* MORECORE_CONTIGUOUS */ + +#if !ONLY_MSPACES + /* Set up lock for main malloc area */ + gm->mflags = mparams.default_mflags; + INITIAL_LOCK(&gm->mutex); +#endif + +#if (FOOTERS && !INSECURE) + { +#if USE_DEV_RANDOM + int fd; + unsigned char buf[sizeof(size_t)]; + /* Try to use /dev/urandom, else fall back on using time */ + if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 && + read(fd, buf, sizeof(buf)) == sizeof(buf)) { + magic = *((size_t *) buf); + close(fd); + } + else +#endif /* USE_DEV_RANDOM */ +#ifdef WIN32 + magic = (size_t)(GetTickCount() ^ (size_t)0x55555555U); +#else + magic = (size_t)(time(0) ^ (size_t)0x55555555U); +#endif + magic |= (size_t)8U; /* ensure nonzero */ + magic &= ~(size_t)7U; /* improve chances of fault for bad values */ + } +#else /* (FOOTERS && !INSECURE) */ + magic = (size_t)0x58585858U; +#endif /* (FOOTERS && !INSECURE) */ + + mparams.magic = magic; + } + + RELEASE_MALLOC_GLOBAL_LOCK(); + return 1; +} + +/* support for mallopt */ +static int change_mparam(int param_number, int value) { + size_t val = (value == -1)? MAX_SIZE_T : (size_t)value; + ensure_initialization(); + switch(param_number) { + case M_TRIM_THRESHOLD: + mparams.trim_threshold = val; + return 1; + case M_GRANULARITY: + if (val >= mparams.page_size && ((val & (val-1)) == 0)) { + mparams.granularity = val; + return 1; + } + else + return 0; + case M_MMAP_THRESHOLD: + mparams.mmap_threshold = val; + return 1; + default: + return 0; + } +} + +#if DEBUG +/* ------------------------- Debugging Support --------------------------- */ + +/* Check properties of any chunk, whether free, inuse, mmapped etc */ +static void do_check_any_chunk(mstate m, mchunkptr p) { + assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); + assert(ok_address(m, p)); +} + +/* Check properties of top chunk */ +static void do_check_top_chunk(mstate m, mchunkptr p) { + msegmentptr sp = segment_holding(m, (char*)p); + size_t sz = p->head & ~INUSE_BITS; /* third-lowest bit can be set! 
*/ + assert(sp != 0); + assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); + assert(ok_address(m, p)); + assert(sz == m->topsize); + assert(sz > 0); + assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE); + assert(pinuse(p)); + assert(!pinuse(chunk_plus_offset(p, sz))); +} + +/* Check properties of (inuse) mmapped chunks */ +static void do_check_mmapped_chunk(mstate m, mchunkptr p) { + size_t sz = chunksize(p); + size_t len = (sz + (p->prev_foot & ~IS_MMAPPED_BIT) + MMAP_FOOT_PAD); + assert(is_mmapped(p)); + assert(use_mmap(m)); + assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); + assert(ok_address(m, p)); + assert(!is_small(sz)); + assert((len & (mparams.page_size-SIZE_T_ONE)) == 0); + assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD); + assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0); +} + +/* Check properties of inuse chunks */ +static void do_check_inuse_chunk(mstate m, mchunkptr p) { + do_check_any_chunk(m, p); + assert(cinuse(p)); + assert(next_pinuse(p)); + /* If not pinuse and not mmapped, previous chunk has OK offset */ + assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p); + if (is_mmapped(p)) + do_check_mmapped_chunk(m, p); +} + +/* Check properties of free chunks */ +static void do_check_free_chunk(mstate m, mchunkptr p) { + size_t sz = chunksize(p); + mchunkptr next = chunk_plus_offset(p, sz); + do_check_any_chunk(m, p); + assert(!cinuse(p)); + assert(!next_pinuse(p)); + assert (!is_mmapped(p)); + if (p != m->dv && p != m->top) { + if (sz >= MIN_CHUNK_SIZE) { + assert((sz & CHUNK_ALIGN_MASK) == 0); + assert(is_aligned(chunk2mem(p))); + assert(next->prev_foot == sz); + assert(pinuse(p)); + assert (next == m->top || cinuse(next)); + assert(p->fd->bk == p); + assert(p->bk->fd == p); + } + else /* markers are always of size SIZE_T_SIZE */ + assert(sz == SIZE_T_SIZE); + } +} + +/* Check properties of malloced chunks at the point they are malloced */ +static void do_check_malloced_chunk(mstate m, void* mem, size_t s) { + if (mem != 0) { + mchunkptr p = mem2chunk(mem); + size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT); + do_check_inuse_chunk(m, p); + assert((sz & CHUNK_ALIGN_MASK) == 0); + assert(sz >= MIN_CHUNK_SIZE); + assert(sz >= s); + /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */ + assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE)); + } +} + +/* Check a tree and its subtrees. 
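+ Each treebin is a bitwise trie keyed on chunk size: each level
+ down consumes one bit of the size (most significant first), all
+ chunks of a given size hang off a single tree node in a circular
+ fd/bk list, and only that node carries parent/child links. As a
+ worked example of the index math checked below (assuming the
+ default TREEBIN_SHIFT of 8): compute_tree_index maps size 512 to
+ index 2, and minsize_for_tree_index(2) is likewise 512, which is
+ what the tindex and tsize assertions rely on.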
*/ +static void do_check_tree(mstate m, tchunkptr t) { + tchunkptr head = 0; + tchunkptr u = t; + bindex_t tindex = t->index; + size_t tsize = chunksize(t); + bindex_t idx; + compute_tree_index(tsize, idx); + assert(tindex == idx); + assert(tsize >= MIN_LARGE_SIZE); + assert(tsize >= minsize_for_tree_index(idx)); + assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1)))); + + do { /* traverse through chain of same-sized nodes */ + do_check_any_chunk(m, ((mchunkptr)u)); + assert(u->index == tindex); + assert(chunksize(u) == tsize); + assert(!cinuse(u)); + assert(!next_pinuse(u)); + assert(u->fd->bk == u); + assert(u->bk->fd == u); + if (u->parent == 0) { + assert(u->child[0] == 0); + assert(u->child[1] == 0); + } + else { + assert(head == 0); /* only one node on chain has parent */ + head = u; + assert(u->parent != u); + assert (u->parent->child[0] == u || + u->parent->child[1] == u || + *((tbinptr*)(u->parent)) == u); + if (u->child[0] != 0) { + assert(u->child[0]->parent == u); + assert(u->child[0] != u); + do_check_tree(m, u->child[0]); + } + if (u->child[1] != 0) { + assert(u->child[1]->parent == u); + assert(u->child[1] != u); + do_check_tree(m, u->child[1]); + } + if (u->child[0] != 0 && u->child[1] != 0) { + assert(chunksize(u->child[0]) < chunksize(u->child[1])); + } + } + u = u->fd; + } while (u != t); + assert(head != 0); +} + +/* Check all the chunks in a treebin. */ +static void do_check_treebin(mstate m, bindex_t i) { + tbinptr* tb = treebin_at(m, i); + tchunkptr t = *tb; + int empty = (m->treemap & (1U << i)) == 0; + if (t == 0) + assert(empty); + if (!empty) + do_check_tree(m, t); +} + +/* Check all the chunks in a smallbin. */ +static void do_check_smallbin(mstate m, bindex_t i) { + sbinptr b = smallbin_at(m, i); + mchunkptr p = b->bk; + unsigned int empty = (m->smallmap & (1U << i)) == 0; + if (p == b) + assert(empty); + if (!empty) { + for (; p != b; p = p->bk) { + size_t size = chunksize(p); + mchunkptr q; + /* each chunk claims to be free */ + do_check_free_chunk(m, p); + /* chunk belongs in bin */ + assert(small_index(size) == i); + assert(p->bk == b || chunksize(p->bk) == chunksize(p)); + /* chunk is followed by an inuse chunk */ + q = next_chunk(p); + if (q->head != FENCEPOST_HEAD) + do_check_inuse_chunk(m, q); + } + } +} + +/* Find x in a bin. Used in other check functions. 
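+ The lookup mirrors the allocator's own search: smallbins are walked
+ linearly around their circular list, while treebins are descended
+ by shifting the size so that the bit for the current level selects
+ child[0] or child[1]. Typical use is the assertion in
+ traverse_and_check below,
+     assert(q == m->dv || bin_find(m, q));
+ applied to every free chunk q that is encountered.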
*/ +static int bin_find(mstate m, mchunkptr x) { + size_t size = chunksize(x); + if (is_small(size)) { + bindex_t sidx = small_index(size); + sbinptr b = smallbin_at(m, sidx); + if (smallmap_is_marked(m, sidx)) { + mchunkptr p = b; + do { + if (p == x) + return 1; + } while ((p = p->fd) != b); + } + } + else { + bindex_t tidx; + compute_tree_index(size, tidx); + if (treemap_is_marked(m, tidx)) { + tchunkptr t = *treebin_at(m, tidx); + size_t sizebits = size << leftshift_for_tree_index(tidx); + while (t != 0 && chunksize(t) != size) { + t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]; + sizebits <<= 1; + } + if (t != 0) { + tchunkptr u = t; + do { + if (u == (tchunkptr)x) + return 1; + } while ((u = u->fd) != t); + } + } + } + return 0; +} + +/* Traverse each chunk and check it; return total */ +static size_t traverse_and_check(mstate m) { + size_t sum = 0; + if (is_initialized(m)) { + msegmentptr s = &m->seg; + sum += m->topsize + TOP_FOOT_SIZE; + while (s != 0) { + mchunkptr q = align_as_chunk(s->base); + mchunkptr lastq = 0; + assert(pinuse(q)); + while (segment_holds(s, q) && + q != m->top && q->head != FENCEPOST_HEAD) { + sum += chunksize(q); + if (cinuse(q)) { + assert(!bin_find(m, q)); + do_check_inuse_chunk(m, q); + } + else { + assert(q == m->dv || bin_find(m, q)); + assert(lastq == 0 || cinuse(lastq)); /* Not 2 consecutive free */ + do_check_free_chunk(m, q); + } + lastq = q; + q = next_chunk(q); + } + s = s->next; + } + } + return sum; +} + +/* Check all properties of malloc_state. */ +static void do_check_malloc_state(mstate m) { + bindex_t i; + size_t total; + /* check bins */ + for (i = 0; i < NSMALLBINS; ++i) + do_check_smallbin(m, i); + for (i = 0; i < NTREEBINS; ++i) + do_check_treebin(m, i); + + if (m->dvsize != 0) { /* check dv chunk */ + do_check_any_chunk(m, m->dv); + assert(m->dvsize == chunksize(m->dv)); + assert(m->dvsize >= MIN_CHUNK_SIZE); + assert(bin_find(m, m->dv) == 0); + } + + if (m->top != 0) { /* check top chunk */ + do_check_top_chunk(m, m->top); + /*assert(m->topsize == chunksize(m->top)); redundant */ + assert(m->topsize > 0); + assert(bin_find(m, m->top) == 0); + } + + total = traverse_and_check(m); + assert(total <= m->footprint); + assert(m->footprint <= m->max_footprint); +} +#endif /* DEBUG */ + +/* ----------------------------- statistics ------------------------------ */ + +#if !NO_MALLINFO +static struct mallinfo internal_mallinfo(mstate m) { + struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + ensure_initialization(); + if (!PREACTION(m)) { + check_malloc_state(m); + if (is_initialized(m)) { + size_t nfree = SIZE_T_ONE; /* top always free */ + size_t mfree = m->topsize + TOP_FOOT_SIZE; + size_t sum = mfree; + msegmentptr s = &m->seg; + while (s != 0) { + mchunkptr q = align_as_chunk(s->base); + while (segment_holds(s, q) && + q != m->top && q->head != FENCEPOST_HEAD) { + size_t sz = chunksize(q); + sum += sz; + if (!cinuse(q)) { + mfree += sz; + ++nfree; + } + q = next_chunk(q); + } + s = s->next; + } + + nm.arena = sum; + nm.ordblks = nfree; + nm.hblkhd = m->footprint - sum; + nm.usmblks = m->max_footprint; + nm.uordblks = m->footprint - mfree; + nm.fordblks = mfree; + nm.keepcost = m->topsize; + } + + POSTACTION(m); + } + return nm; +} +#endif /* !NO_MALLINFO */ + +static void internal_malloc_stats(mstate m) { + ensure_initialization(); + if (!PREACTION(m)) { + size_t maxfp = 0; + size_t fp = 0; + size_t used = 0; + check_malloc_state(m); + if (is_initialized(m)) { + msegmentptr s = &m->seg; + maxfp = m->max_footprint; + fp = 
m->footprint; + used = fp - (m->topsize + TOP_FOOT_SIZE); + + while (s != 0) { + mchunkptr q = align_as_chunk(s->base); + while (segment_holds(s, q) && + q != m->top && q->head != FENCEPOST_HEAD) { + if (!cinuse(q)) + used -= chunksize(q); + q = next_chunk(q); + } + s = s->next; + } + } + + fprintf(stderr, "max system bytes = %10lu\n", (unsigned long)(maxfp)); + fprintf(stderr, "system bytes = %10lu\n", (unsigned long)(fp)); + fprintf(stderr, "in use bytes = %10lu\n", (unsigned long)(used)); + + POSTACTION(m); + } +} + +/* ----------------------- Operations on smallbins ----------------------- */ + +/* + Various forms of linking and unlinking are defined as macros. Even + the ones for trees, which are very long but have very short typical + paths. This is ugly but reduces reliance on inlining support of + compilers. +*/ + +/* Link a free chunk into a smallbin */ +#define insert_small_chunk(M, P, S) {\ + bindex_t I = small_index(S);\ + mchunkptr B = smallbin_at(M, I);\ + mchunkptr F = B;\ + assert(S >= MIN_CHUNK_SIZE);\ + if (!smallmap_is_marked(M, I))\ + mark_smallmap(M, I);\ + else if (RTCHECK(ok_address(M, B->fd)))\ + F = B->fd;\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + B->fd = P;\ + F->bk = P;\ + P->fd = F;\ + P->bk = B;\ +} + +/* Unlink a chunk from a smallbin */ +#define unlink_small_chunk(M, P, S) {\ + mchunkptr F = P->fd;\ + mchunkptr B = P->bk;\ + bindex_t I = small_index(S);\ + assert(P != B);\ + assert(P != F);\ + assert(chunksize(P) == small_index2size(I));\ + if (F == B)\ + clear_smallmap(M, I);\ + else if (RTCHECK((F == smallbin_at(M,I) || ok_address(M, F)) &&\ + (B == smallbin_at(M,I) || ok_address(M, B)))) {\ + F->bk = B;\ + B->fd = F;\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + }\ +} + +/* Unlink the first chunk from a smallbin */ +#define unlink_first_small_chunk(M, B, P, I) {\ + mchunkptr F = P->fd;\ + assert(P != B);\ + assert(P != F);\ + assert(chunksize(P) == small_index2size(I));\ + if (B == F)\ + clear_smallmap(M, I);\ + else if (RTCHECK(ok_address(M, F))) {\ + B->fd = F;\ + F->bk = B;\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + }\ +} + + + +/* Replace dv node, binning the old one */ +/* Used only when dvsize known to be small */ +#define replace_dv(M, P, S) {\ + size_t DVS = M->dvsize;\ + if (DVS != 0) {\ + mchunkptr DV = M->dv;\ + assert(is_small(DVS));\ + insert_small_chunk(M, DV, DVS);\ + }\ + M->dvsize = S;\ + M->dv = P;\ +} + +/* ------------------------- Operations on trees ------------------------- */ + +/* Insert chunk into tree */ +#define insert_large_chunk(M, X, S) {\ + tbinptr* H;\ + bindex_t I;\ + compute_tree_index(S, I);\ + H = treebin_at(M, I);\ + X->index = I;\ + X->child[0] = X->child[1] = 0;\ + if (!treemap_is_marked(M, I)) {\ + mark_treemap(M, I);\ + *H = X;\ + X->parent = (tchunkptr)H;\ + X->fd = X->bk = X;\ + }\ + else {\ + tchunkptr T = *H;\ + size_t K = S << leftshift_for_tree_index(I);\ + for (;;) {\ + if (chunksize(T) != S) {\ + tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\ + K <<= 1;\ + if (*C != 0)\ + T = *C;\ + else if (RTCHECK(ok_address(M, C))) {\ + *C = X;\ + X->parent = T;\ + X->fd = X->bk = X;\ + break;\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + break;\ + }\ + }\ + else {\ + tchunkptr F = T->fd;\ + if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\ + T->fd = F->bk = X;\ + X->fd = F;\ + X->bk = T;\ + X->parent = 0;\ + break;\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + break;\ + }\ + }\ + }\ + }\ +} + +/* + Unlink steps: + + 1. 
If x is a chained node, unlink it from its same-sized fd/bk links
+ and choose its bk node as its replacement.
+ 2. If x was the last node of its size, but not a leaf node, it must
+ be replaced with a leaf node (not merely one with an open left or
+ right), to make sure that lefts and rights of descendants
+ correspond properly to bit masks. We use the rightmost descendant
+ of x. We could use any other leaf, but this is easy to locate and
+ tends to counteract removal of leftmosts elsewhere, and so keeps
+ paths shorter than minimally guaranteed. This doesn't loop much
+ because on average a node in a tree is near the bottom.
+ 3. If x is the base of a chain (i.e., has parent links) relink
+ x's parent and children to x's replacement (or null if none).
+*/
+
+#define unlink_large_chunk(M, X) {\
+ tchunkptr XP = X->parent;\
+ tchunkptr R;\
+ if (X->bk != X) {\
+ tchunkptr F = X->fd;\
+ R = X->bk;\
+ if (RTCHECK(ok_address(M, F))) {\
+ F->bk = R;\
+ R->fd = F;\
+ }\
+ else {\
+ CORRUPTION_ERROR_ACTION(M);\
+ }\
+ }\
+ else {\
+ tchunkptr* RP;\
+ if (((R = *(RP = &(X->child[1]))) != 0) ||\
+ ((R = *(RP = &(X->child[0]))) != 0)) {\
+ tchunkptr* CP;\
+ while ((*(CP = &(R->child[1])) != 0) ||\
+ (*(CP = &(R->child[0])) != 0)) {\
+ R = *(RP = CP);\
+ }\
+ if (RTCHECK(ok_address(M, RP)))\
+ *RP = 0;\
+ else {\
+ CORRUPTION_ERROR_ACTION(M);\
+ }\
+ }\
+ }\
+ if (XP != 0) {\
+ tbinptr* H = treebin_at(M, X->index);\
+ if (X == *H) {\
+ if ((*H = R) == 0) \
+ clear_treemap(M, X->index);\
+ }\
+ else if (RTCHECK(ok_address(M, XP))) {\
+ if (XP->child[0] == X) \
+ XP->child[0] = R;\
+ else \
+ XP->child[1] = R;\
+ }\
+ else\
+ CORRUPTION_ERROR_ACTION(M);\
+ if (R != 0) {\
+ if (RTCHECK(ok_address(M, R))) {\
+ tchunkptr C0, C1;\
+ R->parent = XP;\
+ if ((C0 = X->child[0]) != 0) {\
+ if (RTCHECK(ok_address(M, C0))) {\
+ R->child[0] = C0;\
+ C0->parent = R;\
+ }\
+ else\
+ CORRUPTION_ERROR_ACTION(M);\
+ }\
+ if ((C1 = X->child[1]) != 0) {\
+ if (RTCHECK(ok_address(M, C1))) {\
+ R->child[1] = C1;\
+ C1->parent = R;\
+ }\
+ else\
+ CORRUPTION_ERROR_ACTION(M);\
+ }\
+ }\
+ else\
+ CORRUPTION_ERROR_ACTION(M);\
+ }\
+ }\
+}
+
+/* Relays to large vs small bin operations */
+
+#define insert_chunk(M, P, S)\
+ if (is_small(S)) insert_small_chunk(M, P, S)\
+ else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
+
+#define unlink_chunk(M, P, S)\
+ if (is_small(S)) unlink_small_chunk(M, P, S)\
+ else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
+
+
+/* Relays to internal calls to malloc/free from realloc, memalign etc */
+
+#if ONLY_MSPACES
+#define internal_malloc(m, b) mspace_malloc(m, b)
+#define internal_free(m, mem) mspace_free(m,mem);
+#else /* ONLY_MSPACES */
+#if MSPACES
+#define internal_malloc(m, b)\
+ (m == gm)? dlmalloc(b) : mspace_malloc(m, b)
+#define internal_free(m, mem)\
+ if (m == gm) dlfree(mem); else mspace_free(m,mem);
+#else /* MSPACES */
+#define internal_malloc(m, b) dlmalloc(b)
+#define internal_free(m, mem) dlfree(mem)
+#endif /* MSPACES */
+#endif /* ONLY_MSPACES */
+
+/* ----------------------- Direct-mmapping chunks ----------------------- */
+
+/*
+ Directly mmapped chunks are set up with an offset to the start of
+ the mmapped region stored in the prev_foot field of the chunk. This
+ allows reconstruction of the required argument to MUNMAP when freed,
+ and also allows adjustment of the returned chunk to meet alignment
+ requirements (especially in memalign).
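+ The resulting layout, as set up by mmap_alloc below, is roughly:
+
+     mm                        start of the mmapped region
+     p = mm + offset           prev_foot = offset | IS_MMAPPED_BIT,
+                               head = psize | CINUSE_BIT
+     p + psize                 trailing fencepost, head = FENCEPOST_HEAD
+     p + psize + SIZE_T_SIZE   head = 0
+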
There is also enough space + allocated to hold a fake next chunk of size SIZE_T_SIZE to maintain + the PINUSE bit so frees can be checked. +*/ + +/* Malloc using mmap */ +static void* mmap_alloc(mstate m, size_t nb) { + size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK); + if (mmsize > nb) { /* Check for wrap around 0 */ + char* mm = (char*)(CALL_DIRECT_MMAP(mmsize)); + if (mm != CMFAIL) { + size_t offset = align_offset(chunk2mem(mm)); + size_t psize = mmsize - offset - MMAP_FOOT_PAD; + mchunkptr p = (mchunkptr)(mm + offset); + p->prev_foot = offset | IS_MMAPPED_BIT; + (p)->head = (psize|CINUSE_BIT); + mark_inuse_foot(m, p, psize); + chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD; + chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0; + + if (mm < m->least_addr) + m->least_addr = mm; + if ((m->footprint += mmsize) > m->max_footprint) + m->max_footprint = m->footprint; + assert(is_aligned(chunk2mem(p))); + check_mmapped_chunk(m, p); + return chunk2mem(p); + } + } + return 0; +} + +/* Realloc using mmap */ +static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb) { + size_t oldsize = chunksize(oldp); + if (is_small(nb)) /* Can't shrink mmap regions below small size */ + return 0; + /* Keep old chunk if big enough but not too big */ + if (oldsize >= nb + SIZE_T_SIZE && + (oldsize - nb) <= (mparams.granularity << 1)) + return oldp; + else { + size_t offset = oldp->prev_foot & ~IS_MMAPPED_BIT; + size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD; + size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK); + char* cp = (char*)CALL_MREMAP((char*)oldp - offset, + oldmmsize, newmmsize, 1); + if (cp != CMFAIL) { + mchunkptr newp = (mchunkptr)(cp + offset); + size_t psize = newmmsize - offset - MMAP_FOOT_PAD; + newp->head = (psize|CINUSE_BIT); + mark_inuse_foot(m, newp, psize); + chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD; + chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0; + + if (cp < m->least_addr) + m->least_addr = cp; + if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint) + m->max_footprint = m->footprint; + check_mmapped_chunk(m, newp); + return newp; + } + } + return 0; +} + +/* -------------------------- mspace management -------------------------- */ + +/* Initialize top chunk and its size */ +static void init_top(mstate m, mchunkptr p, size_t psize) { + /* Ensure alignment */ + size_t offset = align_offset(chunk2mem(p)); + p = (mchunkptr)((char*)p + offset); + psize -= offset; + + m->top = p; + m->topsize = psize; + p->head = psize | PINUSE_BIT; + /* set size of fake trailing chunk holding overhead space only once */ + chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE; + m->trim_check = mparams.trim_threshold; /* reset on each update */ +} + +/* Initialize bins for a new mstate that is otherwise zeroed out */ +static void init_bins(mstate m) { + /* Establish circular links for smallbins */ + bindex_t i; + for (i = 0; i < NSMALLBINS; ++i) { + sbinptr bin = smallbin_at(m,i); + bin->fd = bin->bk = bin; + } +} + +#if PROCEED_ON_ERROR + +/* default corruption action */ +static void reset_on_error(mstate m) { + int i; + ++malloc_corruption_error_count; + /* Reinitialize fields to forget about all memory */ + m->smallbins = m->treebins = 0; + m->dvsize = m->topsize = 0; + m->seg.base = 0; + m->seg.size = 0; + m->seg.next = 0; + m->top = m->dv = 0; + for (i = 0; i < NTREEBINS; ++i) + *treebin_at(m, i) = 0; + init_bins(m); +} +#endif /* PROCEED_ON_ERROR */ + +/* Allocate chunk and prepend remainder with chunk in successor base. 
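+ Given a new segment [newbase, oldbase) obtained directly below an
+ existing one, the first nb bytes at newbase become the returned
+ chunk p, and the remainder q of size psize - nb is consolidated
+ with oldfirst, the first chunk at oldbase: q is absorbed into top
+ or dv when oldfirst is top or dv, otherwise it is coalesced with
+ oldfirst if that chunk is free, and then binned.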
*/ +static void* prepend_alloc(mstate m, char* newbase, char* oldbase, + size_t nb) { + mchunkptr p = align_as_chunk(newbase); + mchunkptr oldfirst = align_as_chunk(oldbase); + size_t psize = (char*)oldfirst - (char*)p; + mchunkptr q = chunk_plus_offset(p, nb); + size_t qsize = psize - nb; + set_size_and_pinuse_of_inuse_chunk(m, p, nb); + + assert((char*)oldfirst > (char*)q); + assert(pinuse(oldfirst)); + assert(qsize >= MIN_CHUNK_SIZE); + + /* consolidate remainder with first chunk of old base */ + if (oldfirst == m->top) { + size_t tsize = m->topsize += qsize; + m->top = q; + q->head = tsize | PINUSE_BIT; + check_top_chunk(m, q); + } + else if (oldfirst == m->dv) { + size_t dsize = m->dvsize += qsize; + m->dv = q; + set_size_and_pinuse_of_free_chunk(q, dsize); + } + else { + if (!cinuse(oldfirst)) { + size_t nsize = chunksize(oldfirst); + unlink_chunk(m, oldfirst, nsize); + oldfirst = chunk_plus_offset(oldfirst, nsize); + qsize += nsize; + } + set_free_with_pinuse(q, qsize, oldfirst); + insert_chunk(m, q, qsize); + check_free_chunk(m, q); + } + + check_malloced_chunk(m, chunk2mem(p), nb); + return chunk2mem(p); +} + +/* Add a segment to hold a new noncontiguous region */ +static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) { + /* Determine locations and sizes of segment, fenceposts, old top */ + char* old_top = (char*)m->top; + msegmentptr oldsp = segment_holding(m, old_top); + char* old_end = oldsp->base + oldsp->size; + size_t ssize = pad_request(sizeof(struct malloc_segment)); + char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK); + size_t offset = align_offset(chunk2mem(rawsp)); + char* asp = rawsp + offset; + char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp; + mchunkptr sp = (mchunkptr)csp; + msegmentptr ss = (msegmentptr)(chunk2mem(sp)); + mchunkptr tnext = chunk_plus_offset(sp, ssize); + mchunkptr p = tnext; + int nfences = 0; + + /* reset top to new space */ + init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE); + + /* Set up segment record */ + assert(is_aligned(ss)); + set_size_and_pinuse_of_inuse_chunk(m, sp, ssize); + *ss = m->seg; /* Push current record */ + m->seg.base = tbase; + m->seg.size = tsize; + m->seg.sflags = mmapped; + m->seg.next = ss; + + /* Insert trailing fenceposts */ + for (;;) { + mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE); + p->head = FENCEPOST_HEAD; + ++nfences; + if ((char*)(&(nextp->head)) < old_end) + p = nextp; + else + break; + } + assert(nfences >= 2); + + /* Insert the rest of old top into a bin as an ordinary free chunk */ + if (csp != old_top) { + mchunkptr q = (mchunkptr)old_top; + size_t psize = csp - old_top; + mchunkptr tn = chunk_plus_offset(q, psize); + set_free_with_pinuse(q, psize, tn); + insert_chunk(m, q, psize); + } + + check_top_chunk(m, m->top); +} + +/* -------------------------- System allocation -------------------------- */ + +/* Get memory from system using MORECORE or MMAP */ +static void* sys_alloc(mstate m, size_t nb) { + char* tbase = CMFAIL; + size_t tsize = 0; + flag_t mmap_flag = 0; + + ensure_initialization(); + + /* Directly map large chunks */ + if (use_mmap(m) && nb >= mparams.mmap_threshold) { + void* mem = mmap_alloc(m, nb); + if (mem != 0) + return mem; + } + + /* + Try getting memory in any of three ways (in most-preferred to + least-preferred order): + 1. A call to MORECORE that can normally contiguously extend memory. 
+ (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE
+ or main space is mmapped or a previous contiguous call failed)
+ 2. A call to MMAP new space (disabled if not HAVE_MMAP).
+ Note that under the default settings, if MORECORE is unable to
+ fulfill a request, and HAVE_MMAP is true, then mmap is
+ used as a noncontiguous system allocator. This is a useful backup
+ strategy for systems with holes in address spaces -- in this case
+ sbrk cannot contiguously expand the heap, but mmap may be able to
+ find space.
+ 3. A call to MORECORE that cannot usually contiguously extend memory.
+ (disabled if not HAVE_MORECORE)
+
+ In all cases, we need to request enough bytes from system to ensure
+ we can malloc nb bytes upon success, so pad with enough space for
+ top_foot, plus alignment-pad to make sure we don't lose bytes if
+ not on boundary, and round this up to a granularity unit.
+ */
+
+ if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) {
+ char* br = CMFAIL;
+ msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (char*)m->top);
+ size_t asize = 0;
+ ACQUIRE_MALLOC_GLOBAL_LOCK();
+
+ if (ss == 0) { /* First time through or recovery */
+ char* base = (char*)CALL_MORECORE(0);
+ if (base != CMFAIL) {
+ asize = granularity_align(nb + SYS_ALLOC_PADDING);
+ /* Adjust to end on a page boundary */
+ if (!is_page_aligned(base))
+ asize += (page_align((size_t)base) - (size_t)base);
+ /* Can't call MORECORE if size is negative when treated as signed */
+ if (asize < HALF_MAX_SIZE_T &&
+ (br = (char*)(CALL_MORECORE(asize))) == base) {
+ tbase = base;
+ tsize = asize;
+ }
+ }
+ }
+ else {
+ /* Subtract out existing available top space from MORECORE request. */
+ asize = granularity_align(nb - m->topsize + SYS_ALLOC_PADDING);
+ /* Use mem here only if it did contiguously extend old space */
+ if (asize < HALF_MAX_SIZE_T &&
+ (br = (char*)(CALL_MORECORE(asize))) == ss->base+ss->size) {
+ tbase = br;
+ tsize = asize;
+ }
+ }
+
+ if (tbase == CMFAIL) { /* Cope with partial failure */
+ if (br != CMFAIL) { /* Try to use/extend the space we did get */
+ if (asize < HALF_MAX_SIZE_T &&
+ asize < nb + SYS_ALLOC_PADDING) {
+ size_t esize = granularity_align(nb + SYS_ALLOC_PADDING - asize);
+ if (esize < HALF_MAX_SIZE_T) {
+ char* end = (char*)CALL_MORECORE(esize);
+ if (end != CMFAIL)
+ asize += esize;
+ else { /* Can't use; try to release */
+ (void) CALL_MORECORE(-asize);
+ br = CMFAIL;
+ }
+ }
+ }
+ }
+ if (br != CMFAIL) { /* Use the space we did get */
+ tbase = br;
+ tsize = asize;
+ }
+ else
+ disable_contiguous(m); /* Don't try contiguous path in the future */
+ }
+
+ RELEASE_MALLOC_GLOBAL_LOCK();
+ }
+
+ if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */
+ size_t rsize = granularity_align(nb + SYS_ALLOC_PADDING);
+ if (rsize > nb) { /* Fail if wraps around zero */
+ char* mp = (char*)(CALL_MMAP(rsize));
+ if (mp != CMFAIL) {
+ tbase = mp;
+ tsize = rsize;
+ mmap_flag = IS_MMAPPED_BIT;
+ }
+ }
+ }
+
+ if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */
+ size_t asize = granularity_align(nb + SYS_ALLOC_PADDING);
+ if (asize < HALF_MAX_SIZE_T) {
+ char* br = CMFAIL;
+ char* end = CMFAIL;
+ ACQUIRE_MALLOC_GLOBAL_LOCK();
+ br = (char*)(CALL_MORECORE(asize));
+ end = (char*)(CALL_MORECORE(0));
+ RELEASE_MALLOC_GLOBAL_LOCK();
+ if (br != CMFAIL && end != CMFAIL && br < end) {
+ size_t ssize = end - br;
+ if (ssize > nb + TOP_FOOT_SIZE) {
+ tbase = br;
+ tsize = ssize;
+ }
+ }
+ }
+ }
+
+ if (tbase != CMFAIL) {
+
+ if ((m->footprint += tsize) > m->max_footprint)
+ m->max_footprint
= m->footprint; + + if (!is_initialized(m)) { /* first-time initialization */ + m->seg.base = m->least_addr = tbase; + m->seg.size = tsize; + m->seg.sflags = mmap_flag; + m->magic = mparams.magic; + m->release_checks = MAX_RELEASE_CHECK_RATE; + init_bins(m); +#if !ONLY_MSPACES + if (is_global(m)) + init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE); + else +#endif + { + /* Offset top by embedded malloc_state */ + mchunkptr mn = next_chunk(mem2chunk(m)); + init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE); + } + } + + else { + /* Try to merge with an existing segment */ + msegmentptr sp = &m->seg; + /* Only consider most recent segment if traversal suppressed */ + while (sp != 0 && tbase != sp->base + sp->size) + sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next; + if (sp != 0 && + !is_extern_segment(sp) && + (sp->sflags & IS_MMAPPED_BIT) == mmap_flag && + segment_holds(sp, m->top)) { /* append */ + sp->size += tsize; + init_top(m, m->top, m->topsize + tsize); + } + else { + if (tbase < m->least_addr) + m->least_addr = tbase; + sp = &m->seg; + while (sp != 0 && sp->base != tbase + tsize) + sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next; + if (sp != 0 && + !is_extern_segment(sp) && + (sp->sflags & IS_MMAPPED_BIT) == mmap_flag) { + char* oldbase = sp->base; + sp->base = tbase; + sp->size += tsize; + return prepend_alloc(m, tbase, oldbase, nb); + } + else + add_segment(m, tbase, tsize, mmap_flag); + } + } + + if (nb < m->topsize) { /* Allocate from new or extended top space */ + size_t rsize = m->topsize -= nb; + mchunkptr p = m->top; + mchunkptr r = m->top = chunk_plus_offset(p, nb); + r->head = rsize | PINUSE_BIT; + set_size_and_pinuse_of_inuse_chunk(m, p, nb); + check_top_chunk(m, m->top); + check_malloced_chunk(m, chunk2mem(p), nb); + return chunk2mem(p); + } + } + + MALLOC_FAILURE_ACTION; + return 0; +} + +/* ----------------------- system deallocation -------------------------- */ + +/* Unmap and unlink any mmapped segments that don't contain used chunks */ +static size_t release_unused_segments(mstate m) { + size_t released = 0; + int nsegs = 0; + msegmentptr pred = &m->seg; + msegmentptr sp = pred->next; + while (sp != 0) { + char* base = sp->base; + size_t size = sp->size; + msegmentptr next = sp->next; + ++nsegs; + if (is_mmapped_segment(sp) && !is_extern_segment(sp)) { + mchunkptr p = align_as_chunk(base); + size_t psize = chunksize(p); + /* Can unmap if first chunk holds entire segment and not pinned */ + if (!cinuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) { + tchunkptr tp = (tchunkptr)p; + assert(segment_holds(sp, (char*)sp)); + if (p == m->dv) { + m->dv = 0; + m->dvsize = 0; + } + else { + unlink_large_chunk(m, tp); + } + if (CALL_MUNMAP(base, size) == 0) { + released += size; + m->footprint -= size; + /* unlink obsoleted record */ + sp = pred; + sp->next = next; + } + else { /* back out if cannot unmap */ + insert_large_chunk(m, tp, psize); + } + } + } + if (NO_SEGMENT_TRAVERSAL) /* scan only first segment */ + break; + pred = sp; + sp = next; + } + /* Reset check counter */ + m->release_checks = ((nsegs > MAX_RELEASE_CHECK_RATE)? 
+ nsegs : MAX_RELEASE_CHECK_RATE); + return released; +} + +static int sys_trim(mstate m, size_t pad) { + size_t released = 0; + ensure_initialization(); + if (pad < MAX_REQUEST && is_initialized(m)) { + pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */ + + if (m->topsize > pad) { + /* Shrink top space in granularity-size units, keeping at least one */ + size_t unit = mparams.granularity; + size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit - + SIZE_T_ONE) * unit; + msegmentptr sp = segment_holding(m, (char*)m->top); + + if (!is_extern_segment(sp)) { + if (is_mmapped_segment(sp)) { + if (HAVE_MMAP && + sp->size >= extra && + !has_segment_link(m, sp)) { /* can't shrink if pinned */ + size_t newsize = sp->size - extra; + /* Prefer mremap, fall back to munmap */ + if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) || + (CALL_MUNMAP(sp->base + newsize, extra) == 0)) { + released = extra; + } + } + } + else if (HAVE_MORECORE) { + if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */ + extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit; + ACQUIRE_MALLOC_GLOBAL_LOCK(); + { + /* Make sure end of memory is where we last set it. */ + char* old_br = (char*)(CALL_MORECORE(0)); + if (old_br == sp->base + sp->size) { + char* rel_br = (char*)(CALL_MORECORE(-extra)); + char* new_br = (char*)(CALL_MORECORE(0)); + if (rel_br != CMFAIL && new_br < old_br) + released = old_br - new_br; + } + } + RELEASE_MALLOC_GLOBAL_LOCK(); + } + } + + if (released != 0) { + sp->size -= released; + m->footprint -= released; + init_top(m, m->top, m->topsize - released); + check_top_chunk(m, m->top); + } + } + + /* Unmap any unused mmapped segments */ + if (HAVE_MMAP) + released += release_unused_segments(m); + + /* On failure, disable autotrim to avoid repeated failed future calls */ + if (released == 0 && m->topsize > m->trim_check) + m->trim_check = MAX_SIZE_T; + } + + return (released != 0)? 
1 : 0; +} + + +/* ---------------------------- malloc support --------------------------- */ + +/* allocate a large request from the best fitting chunk in a treebin */ +static void* tmalloc_large(mstate m, size_t nb) { + tchunkptr v = 0; + size_t rsize = -nb; /* Unsigned negation */ + tchunkptr t; + bindex_t idx; + compute_tree_index(nb, idx); + if ((t = *treebin_at(m, idx)) != 0) { + /* Traverse tree for this bin looking for node with size == nb */ + size_t sizebits = nb << leftshift_for_tree_index(idx); + tchunkptr rst = 0; /* The deepest untaken right subtree */ + for (;;) { + tchunkptr rt; + size_t trem = chunksize(t) - nb; + if (trem < rsize) { + v = t; + if ((rsize = trem) == 0) + break; + } + rt = t->child[1]; + t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]; + if (rt != 0 && rt != t) + rst = rt; + if (t == 0) { + t = rst; /* set t to least subtree holding sizes > nb */ + break; + } + sizebits <<= 1; + } + } + if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */ + binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap; + if (leftbits != 0) { + bindex_t i; + binmap_t leastbit = least_bit(leftbits); + compute_bit2idx(leastbit, i); + t = *treebin_at(m, i); + } + } + + while (t != 0) { /* find smallest of tree or subtree */ + size_t trem = chunksize(t) - nb; + if (trem < rsize) { + rsize = trem; + v = t; + } + t = leftmost_child(t); + } + + /* If dv is a better fit, return 0 so malloc will use it */ + if (v != 0 && rsize < (size_t)(m->dvsize - nb)) { + if (RTCHECK(ok_address(m, v))) { /* split */ + mchunkptr r = chunk_plus_offset(v, nb); + assert(chunksize(v) == rsize + nb); + if (RTCHECK(ok_next(v, r))) { + unlink_large_chunk(m, v); + if (rsize < MIN_CHUNK_SIZE) + set_inuse_and_pinuse(m, v, (rsize + nb)); + else { + set_size_and_pinuse_of_inuse_chunk(m, v, nb); + set_size_and_pinuse_of_free_chunk(r, rsize); + insert_chunk(m, r, rsize); + } + return chunk2mem(v); + } + } + CORRUPTION_ERROR_ACTION(m); + } + return 0; +} + +/* allocate a small request from the best fitting chunk in a treebin */ +static void* tmalloc_small(mstate m, size_t nb) { + tchunkptr t, v; + size_t rsize; + bindex_t i; + binmap_t leastbit = least_bit(m->treemap); + compute_bit2idx(leastbit, i); + v = t = *treebin_at(m, i); + rsize = chunksize(t) - nb; + + while ((t = leftmost_child(t)) != 0) { + size_t trem = chunksize(t) - nb; + if (trem < rsize) { + rsize = trem; + v = t; + } + } + + if (RTCHECK(ok_address(m, v))) { + mchunkptr r = chunk_plus_offset(v, nb); + assert(chunksize(v) == rsize + nb); + if (RTCHECK(ok_next(v, r))) { + unlink_large_chunk(m, v); + if (rsize < MIN_CHUNK_SIZE) + set_inuse_and_pinuse(m, v, (rsize + nb)); + else { + set_size_and_pinuse_of_inuse_chunk(m, v, nb); + set_size_and_pinuse_of_free_chunk(r, rsize); + replace_dv(m, r, rsize); + } + return chunk2mem(v); + } + } + + CORRUPTION_ERROR_ACTION(m); + return 0; +} + +/* --------------------------- realloc support --------------------------- */ + +static void* internal_realloc(mstate m, void* oldmem, size_t bytes) { + if (bytes >= MAX_REQUEST) { + MALLOC_FAILURE_ACTION; + return 0; + } + if (!PREACTION(m)) { + mchunkptr oldp = mem2chunk(oldmem); + size_t oldsize = chunksize(oldp); + mchunkptr next = chunk_plus_offset(oldp, oldsize); + mchunkptr newp = 0; + void* extra = 0; + + /* Try to either shrink or extend into top. 
Else malloc-copy-free */ + + if (RTCHECK(ok_address(m, oldp) && ok_cinuse(oldp) && + ok_next(oldp, next) && ok_pinuse(next))) { + size_t nb = request2size(bytes); + if (is_mmapped(oldp)) + newp = mmap_resize(m, oldp, nb); + else if (oldsize >= nb) { /* already big enough */ + size_t rsize = oldsize - nb; + newp = oldp; + if (rsize >= MIN_CHUNK_SIZE) { + mchunkptr remainder = chunk_plus_offset(newp, nb); + set_inuse(m, newp, nb); + set_inuse(m, remainder, rsize); + extra = chunk2mem(remainder); + } + } + else if (next == m->top && oldsize + m->topsize > nb) { + /* Expand into top */ + size_t newsize = oldsize + m->topsize; + size_t newtopsize = newsize - nb; + mchunkptr newtop = chunk_plus_offset(oldp, nb); + set_inuse(m, oldp, nb); + newtop->head = newtopsize |PINUSE_BIT; + m->top = newtop; + m->topsize = newtopsize; + newp = oldp; + } + } + else { + USAGE_ERROR_ACTION(m, oldmem); + POSTACTION(m); + return 0; + } + + POSTACTION(m); + + if (newp != 0) { + if (extra != 0) { + internal_free(m, extra); + } + check_inuse_chunk(m, newp); + return chunk2mem(newp); + } + else { + void* newmem = internal_malloc(m, bytes); + if (newmem != 0) { + size_t oc = oldsize - overhead_for(oldp); + memcpy(newmem, oldmem, (oc < bytes)? oc : bytes); + internal_free(m, oldmem); + } + return newmem; + } + } + return 0; +} + +/* --------------------------- memalign support -------------------------- */ + +static void* internal_memalign(mstate m, size_t alignment, size_t bytes) { + if (alignment <= MALLOC_ALIGNMENT) /* Can just use malloc */ + return internal_malloc(m, bytes); + if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */ + alignment = MIN_CHUNK_SIZE; + if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */ + size_t a = MALLOC_ALIGNMENT << 1; + while (a < alignment) a <<= 1; + alignment = a; + } + + if (bytes >= MAX_REQUEST - alignment) { + if (m != 0) { /* Test isn't needed but avoids compiler warning */ + MALLOC_FAILURE_ACTION; + } + } + else { + size_t nb = request2size(bytes); + size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD; + char* mem = (char*)internal_malloc(m, req); + if (mem != 0) { + void* leader = 0; + void* trailer = 0; + mchunkptr p = mem2chunk(mem); + + if (PREACTION(m)) return 0; + if ((((size_t)(mem)) % alignment) != 0) { /* misaligned */ + /* + Find an aligned spot inside chunk. Since we need to give + back leading space in a chunk of at least MIN_CHUNK_SIZE, if + the first calculation places us at a spot with less than + MIN_CHUNK_SIZE leader, we can move to the next aligned spot. + We've allocated enough total room so that this is always + possible. + */ + char* br = (char*)mem2chunk((size_t)(((size_t)(mem + + alignment - + SIZE_T_ONE)) & + -alignment)); + char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)? 
+ br : br+alignment; + mchunkptr newp = (mchunkptr)pos; + size_t leadsize = pos - (char*)(p); + size_t newsize = chunksize(p) - leadsize; + + if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */ + newp->prev_foot = p->prev_foot + leadsize; + newp->head = (newsize|CINUSE_BIT); + } + else { /* Otherwise, give back leader, use the rest */ + set_inuse(m, newp, newsize); + set_inuse(m, p, leadsize); + leader = chunk2mem(p); + } + p = newp; + } + + /* Give back spare room at the end */ + if (!is_mmapped(p)) { + size_t size = chunksize(p); + if (size > nb + MIN_CHUNK_SIZE) { + size_t remainder_size = size - nb; + mchunkptr remainder = chunk_plus_offset(p, nb); + set_inuse(m, p, nb); + set_inuse(m, remainder, remainder_size); + trailer = chunk2mem(remainder); + } + } + + assert (chunksize(p) >= nb); + assert((((size_t)(chunk2mem(p))) % alignment) == 0); + check_inuse_chunk(m, p); + POSTACTION(m); + if (leader != 0) { + internal_free(m, leader); + } + if (trailer != 0) { + internal_free(m, trailer); + } + return chunk2mem(p); + } + } + return 0; +} + +/* ------------------------ comalloc/coalloc support --------------------- */ + +static void** ialloc(mstate m, + size_t n_elements, + size_t* sizes, + int opts, + void* chunks[]) { + /* + This provides common support for independent_X routines, handling + all of the combinations that can result. + + The opts arg has: + bit 0 set if all elements are same size (using sizes[0]) + bit 1 set if elements should be zeroed + */ + + size_t element_size; /* chunksize of each element, if all same */ + size_t contents_size; /* total size of elements */ + size_t array_size; /* request size of pointer array */ + void* mem; /* malloced aggregate space */ + mchunkptr p; /* corresponding chunk */ + size_t remainder_size; /* remaining bytes while splitting */ + void** marray; /* either "chunks" or malloced ptr array */ + mchunkptr array_chunk; /* chunk for malloced ptr array */ + flag_t was_enabled; /* to disable mmap */ + size_t size; + size_t i; + + ensure_initialization(); + /* compute array length, if needed */ + if (chunks != 0) { + if (n_elements == 0) + return chunks; /* nothing to do */ + marray = chunks; + array_size = 0; + } + else { + /* if empty req, must still return chunk representing empty array */ + if (n_elements == 0) + return (void**)internal_malloc(m, 0); + marray = 0; + array_size = request2size(n_elements * (sizeof(void*))); + } + + /* compute total element size */ + if (opts & 0x1) { /* all-same-size */ + element_size = request2size(*sizes); + contents_size = n_elements * element_size; + } + else { /* add up all the sizes */ + element_size = 0; + contents_size = 0; + for (i = 0; i != n_elements; ++i) + contents_size += request2size(sizes[i]); + } + + size = contents_size + array_size; + + /* + Allocate the aggregate chunk. First disable direct-mmapping so + malloc won't use it, since we would not be able to later + free/realloc space internal to a segregated mmap region. 
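+ As a caller-side usage sketch (struct Header and n are hypothetical
+ names, not part of this file), the comalloc entry points below let
+ one chunk back several independently freeable objects:
+
+     size_t sizes[3] = { sizeof(struct Header),
+                         n * sizeof(int),
+                         n * sizeof(double) };
+     void* parts[3];
+     if (dlindependent_comalloc(3, sizes, parts) != 0)
+       use parts[0], parts[1], parts[2]; free each individually
+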
+ */ + was_enabled = use_mmap(m); + disable_mmap(m); + mem = internal_malloc(m, size - CHUNK_OVERHEAD); + if (was_enabled) + enable_mmap(m); + if (mem == 0) + return 0; + + if (PREACTION(m)) return 0; + p = mem2chunk(mem); + remainder_size = chunksize(p); + + assert(!is_mmapped(p)); + + if (opts & 0x2) { /* optionally clear the elements */ + memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size); + } + + /* If not provided, allocate the pointer array as final part of chunk */ + if (marray == 0) { + size_t array_chunk_size; + array_chunk = chunk_plus_offset(p, contents_size); + array_chunk_size = remainder_size - contents_size; + marray = (void**) (chunk2mem(array_chunk)); + set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size); + remainder_size = contents_size; + } + + /* split out elements */ + for (i = 0; ; ++i) { + marray[i] = chunk2mem(p); + if (i != n_elements-1) { + if (element_size != 0) + size = element_size; + else + size = request2size(sizes[i]); + remainder_size -= size; + set_size_and_pinuse_of_inuse_chunk(m, p, size); + p = chunk_plus_offset(p, size); + } + else { /* the final element absorbs any overallocation slop */ + set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size); + break; + } + } + +#if DEBUG + if (marray != chunks) { + /* final element must have exactly exhausted chunk */ + if (element_size != 0) { + assert(remainder_size == element_size); + } + else { + assert(remainder_size == request2size(sizes[i])); + } + check_inuse_chunk(m, mem2chunk(marray)); + } + for (i = 0; i != n_elements; ++i) + check_inuse_chunk(m, mem2chunk(marray[i])); + +#endif /* DEBUG */ + + POSTACTION(m); + return marray; +} + + +/* -------------------------- public routines ---------------------------- */ + +#if !ONLY_MSPACES + +void* dlmalloc(size_t bytes) { + /* + Basic algorithm: + If a small request (< 256 bytes minus per-chunk overhead): + 1. If one exists, use a remainderless chunk in associated smallbin. + (Remainderless means that there are too few excess bytes to + represent as a chunk.) + 2. If it is big enough, use the dv chunk, which is normally the + chunk adjacent to the one used for the most recent small request. + 3. If one exists, split the smallest available chunk in a bin, + saving remainder in dv. + 4. If it is big enough, use the top chunk. + 5. If available, get memory from system and use it + Otherwise, for a large request: + 1. Find the smallest available binned chunk that fits, and use it + if it is better fitting than dv chunk, splitting if necessary. + 2. If better fitting than any binned chunk, use the dv chunk. + 3. If it is big enough, use the top chunk. + 4. If request size >= mmap threshold, try to directly mmap this chunk. + 5. If available, get memory from system and use it + + The ugly goto's here ensure that postaction occurs along all paths. + */ + +#if USE_LOCKS + ensure_initialization(); /* initialize in sys_alloc if not using locks */ +#endif + + if (!PREACTION(gm)) { + void* mem; + size_t nb; + if (bytes <= MAX_SMALL_REQUEST) { + bindex_t idx; + binmap_t smallbits; + nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes); + idx = small_index(nb); + smallbits = gm->smallmap >> idx; + + if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. 
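+ The low two bits of smallbits say whether the exact bin (bit 0) or
+ the next larger bin (bit 1) is nonempty; a chunk from the next bin
+ is only one smallbin width larger than the request, too small a
+ surplus to split off as a free chunk, so either bin yields a
+ remainderless fit. The adjustment idx += ~smallbits & 1 below
+ selects the neighbor exactly when the exact bin is empty.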
*/
+ mchunkptr b, p;
+ idx += ~smallbits & 1; /* Uses next bin if idx empty */
+ b = smallbin_at(gm, idx);
+ p = b->fd;
+ assert(chunksize(p) == small_index2size(idx));
+ unlink_first_small_chunk(gm, b, p, idx);
+ set_inuse_and_pinuse(gm, p, small_index2size(idx));
+ mem = chunk2mem(p);
+ check_malloced_chunk(gm, mem, nb);
+ goto postaction;
+ }
+
+ else if (nb > gm->dvsize) {
+ if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
+ mchunkptr b, p, r;
+ size_t rsize;
+ bindex_t i;
+ binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
+ binmap_t leastbit = least_bit(leftbits);
+ compute_bit2idx(leastbit, i);
+ b = smallbin_at(gm, i);
+ p = b->fd;
+ assert(chunksize(p) == small_index2size(i));
+ unlink_first_small_chunk(gm, b, p, i);
+ rsize = small_index2size(i) - nb;
+ /* Fit here cannot be remainderless if 4byte sizes */
+ if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
+ set_inuse_and_pinuse(gm, p, small_index2size(i));
+ else {
+ set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
+ r = chunk_plus_offset(p, nb);
+ set_size_and_pinuse_of_free_chunk(r, rsize);
+ replace_dv(gm, r, rsize);
+ }
+ mem = chunk2mem(p);
+ check_malloced_chunk(gm, mem, nb);
+ goto postaction;
+ }
+
+ else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) {
+ check_malloced_chunk(gm, mem, nb);
+ goto postaction;
+ }
+ }
+ }
+ else if (bytes >= MAX_REQUEST)
+ nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
+ else {
+ nb = pad_request(bytes);
+ if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {
+ check_malloced_chunk(gm, mem, nb);
+ goto postaction;
+ }
+ }
+
+ if (nb <= gm->dvsize) {
+ size_t rsize = gm->dvsize - nb;
+ mchunkptr p = gm->dv;
+ if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
+ mchunkptr r = gm->dv = chunk_plus_offset(p, nb);
+ gm->dvsize = rsize;
+ set_size_and_pinuse_of_free_chunk(r, rsize);
+ set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
+ }
+ else { /* exhaust dv */
+ size_t dvs = gm->dvsize;
+ gm->dvsize = 0;
+ gm->dv = 0;
+ set_inuse_and_pinuse(gm, p, dvs);
+ }
+ mem = chunk2mem(p);
+ check_malloced_chunk(gm, mem, nb);
+ goto postaction;
+ }
+
+ else if (nb < gm->topsize) { /* Split top */
+ size_t rsize = gm->topsize -= nb;
+ mchunkptr p = gm->top;
+ mchunkptr r = gm->top = chunk_plus_offset(p, nb);
+ r->head = rsize | PINUSE_BIT;
+ set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
+ mem = chunk2mem(p);
+ check_top_chunk(gm, gm->top);
+ check_malloced_chunk(gm, mem, nb);
+ goto postaction;
+ }
+
+ mem = sys_alloc(gm, nb);
+
+ postaction:
+ POSTACTION(gm);
+ return mem;
+ }
+
+ return 0;
+}
+
+void dlfree(void* mem) {
+ /*
+ Consolidate freed chunks with preceding or succeeding bordering
+ free chunks, if they exist, and then place in a bin. Intermixed
+ with special cases for top, dv, mmapped chunks, and usage errors.
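+ In outline (summarizing the code below, not adding to it):
+     1. If the previous chunk is free, merge backward, unlinking it
+        from its bin, or just growing dv when p adjoins the dv chunk.
+     2. If the next chunk is free, merge forward, absorbing into top
+        or dv when that neighbor is top or dv, else unlinking it.
+     3. Bin the coalesced chunk unless it ended up as top or dv;
+        freeing into top may also trigger sys_trim.
+ Direct-mmapped chunks bypass all of this and go straight to munmap.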
+ */ + + if (mem != 0) { + mchunkptr p = mem2chunk(mem); +#if FOOTERS + mstate fm = get_mstate_for(p); + if (!ok_magic(fm)) { + USAGE_ERROR_ACTION(fm, p); + return; + } +#else /* FOOTERS */ +#define fm gm +#endif /* FOOTERS */ + if (!PREACTION(fm)) { + check_inuse_chunk(fm, p); + if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) { + size_t psize = chunksize(p); + mchunkptr next = chunk_plus_offset(p, psize); + if (!pinuse(p)) { + size_t prevsize = p->prev_foot; + if ((prevsize & IS_MMAPPED_BIT) != 0) { + prevsize &= ~IS_MMAPPED_BIT; + psize += prevsize + MMAP_FOOT_PAD; + if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) + fm->footprint -= psize; + goto postaction; + } + else { + mchunkptr prev = chunk_minus_offset(p, prevsize); + psize += prevsize; + p = prev; + if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */ + if (p != fm->dv) { + unlink_chunk(fm, p, prevsize); + } + else if ((next->head & INUSE_BITS) == INUSE_BITS) { + fm->dvsize = psize; + set_free_with_pinuse(p, psize, next); + goto postaction; + } + } + else + goto erroraction; + } + } + + if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) { + if (!cinuse(next)) { /* consolidate forward */ + if (next == fm->top) { + size_t tsize = fm->topsize += psize; + fm->top = p; + p->head = tsize | PINUSE_BIT; + if (p == fm->dv) { + fm->dv = 0; + fm->dvsize = 0; + } + if (should_trim(fm, tsize)) + sys_trim(fm, 0); + goto postaction; + } + else if (next == fm->dv) { + size_t dsize = fm->dvsize += psize; + fm->dv = p; + set_size_and_pinuse_of_free_chunk(p, dsize); + goto postaction; + } + else { + size_t nsize = chunksize(next); + psize += nsize; + unlink_chunk(fm, next, nsize); + set_size_and_pinuse_of_free_chunk(p, psize); + if (p == fm->dv) { + fm->dvsize = psize; + goto postaction; + } + } + } + else + set_free_with_pinuse(p, psize, next); + + if (is_small(psize)) { + insert_small_chunk(fm, p, psize); + check_free_chunk(fm, p); + } + else { + tchunkptr tp = (tchunkptr)p; + insert_large_chunk(fm, tp, psize); + check_free_chunk(fm, p); + if (--fm->release_checks == 0) + release_unused_segments(fm); + } + goto postaction; + } + } + erroraction: + USAGE_ERROR_ACTION(fm, p); + postaction: + POSTACTION(fm); + } + } +#if !FOOTERS +#undef fm +#endif /* FOOTERS */ +} + +void* dlcalloc(size_t n_elements, size_t elem_size) { + void* mem; + size_t req = 0; + if (n_elements != 0) { + req = n_elements * elem_size; + if (((n_elements | elem_size) & ~(size_t)0xffff) && + (req / n_elements != elem_size)) + req = MAX_SIZE_T; /* force downstream failure on overflow */ + } + mem = dlmalloc(req); + if (mem != 0 && calloc_must_clear(mem2chunk(mem))) + memset(mem, 0, req); + return mem; +} + +void* dlrealloc(void* oldmem, size_t bytes) { + if (oldmem == 0) + return dlmalloc(bytes); +#ifdef REALLOC_ZERO_BYTES_FREES + if (bytes == 0) { + dlfree(oldmem); + return 0; + } +#endif /* REALLOC_ZERO_BYTES_FREES */ + else { +#if ! 
FOOTERS + mstate m = gm; +#else /* FOOTERS */ + mstate m = get_mstate_for(mem2chunk(oldmem)); + if (!ok_magic(m)) { + USAGE_ERROR_ACTION(m, oldmem); + return 0; + } +#endif /* FOOTERS */ + return internal_realloc(m, oldmem, bytes); + } +} + +void* dlmemalign(size_t alignment, size_t bytes) { + return internal_memalign(gm, alignment, bytes); +} + +void** dlindependent_calloc(size_t n_elements, size_t elem_size, + void* chunks[]) { + size_t sz = elem_size; /* serves as 1-element array */ + return ialloc(gm, n_elements, &sz, 3, chunks); +} + +void** dlindependent_comalloc(size_t n_elements, size_t sizes[], + void* chunks[]) { + return ialloc(gm, n_elements, sizes, 0, chunks); +} + +void* dlvalloc(size_t bytes) { + size_t pagesz; + ensure_initialization(); + pagesz = mparams.page_size; + return dlmemalign(pagesz, bytes); +} + +void* dlpvalloc(size_t bytes) { + size_t pagesz; + ensure_initialization(); + pagesz = mparams.page_size; + return dlmemalign(pagesz, (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE)); +} + +int dlmalloc_trim(size_t pad) { + ensure_initialization(); + int result = 0; + if (!PREACTION(gm)) { + result = sys_trim(gm, pad); + POSTACTION(gm); + } + return result; +} + +size_t dlmalloc_footprint(void) { + return gm->footprint; +} + +size_t dlmalloc_max_footprint(void) { + return gm->max_footprint; +} + +#if !NO_MALLINFO +struct mallinfo dlmallinfo(void) { + return internal_mallinfo(gm); +} +#endif /* NO_MALLINFO */ + +void dlmalloc_stats() { + internal_malloc_stats(gm); +} + +int dlmallopt(int param_number, int value) { + return change_mparam(param_number, value); +} + +#endif /* !ONLY_MSPACES */ + +size_t dlmalloc_usable_size(void* mem) { + if (mem != 0) { + mchunkptr p = mem2chunk(mem); + if (cinuse(p)) + return chunksize(p) - overhead_for(p); + } + return 0; +} + +/* ----------------------------- user mspaces ---------------------------- */ + +#if MSPACES + +static mstate init_user_mstate(char* tbase, size_t tsize) { + size_t msize = pad_request(sizeof(struct malloc_state)); + mchunkptr mn; + mchunkptr msp = align_as_chunk(tbase); + mstate m = (mstate)(chunk2mem(msp)); + memset(m, 0, msize); + INITIAL_LOCK(&m->mutex); + msp->head = (msize|PINUSE_BIT|CINUSE_BIT); + m->seg.base = m->least_addr = tbase; + m->seg.size = m->footprint = m->max_footprint = tsize; + m->magic = mparams.magic; + m->release_checks = MAX_RELEASE_CHECK_RATE; + m->mflags = mparams.default_mflags; + m->extp = 0; + m->exts = 0; + disable_contiguous(m); + init_bins(m); + mn = next_chunk(mem2chunk(m)); + init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE); + check_top_chunk(m, m->top); + return m; +} + +mspace create_mspace(size_t capacity, int locked) { + mstate m = 0; + size_t msize; + ensure_initialization(); + msize = pad_request(sizeof(struct malloc_state)); + if (capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) { + size_t rs = ((capacity == 0)? 
mparams.granularity : + (capacity + TOP_FOOT_SIZE + msize)); + size_t tsize = granularity_align(rs); + char* tbase = (char*)(CALL_MMAP(tsize)); + if (tbase != CMFAIL) { + m = init_user_mstate(tbase, tsize); + m->seg.sflags = IS_MMAPPED_BIT; + set_lock(m, locked); + } + } + return (mspace)m; +} + +mspace create_mspace_with_base(void* base, size_t capacity, int locked) { + mstate m = 0; + size_t msize; + ensure_initialization(); + msize = pad_request(sizeof(struct malloc_state)); + if (capacity > msize + TOP_FOOT_SIZE && + capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) { + m = init_user_mstate((char*)base, capacity); + m->seg.sflags = EXTERN_BIT; + set_lock(m, locked); + } + return (mspace)m; +} + +int mspace_mmap_large_chunks(mspace msp, int enable) { + int ret = 0; + mstate ms = (mstate)msp; + if (!PREACTION(ms)) { + if (use_mmap(ms)) + ret = 1; + if (enable) + enable_mmap(ms); + else + disable_mmap(ms); + POSTACTION(ms); + } + return ret; +} + +size_t destroy_mspace(mspace msp) { + size_t freed = 0; + mstate ms = (mstate)msp; + if (ok_magic(ms)) { + msegmentptr sp = &ms->seg; + while (sp != 0) { + char* base = sp->base; + size_t size = sp->size; + flag_t flag = sp->sflags; + sp = sp->next; + if ((flag & IS_MMAPPED_BIT) && !(flag & EXTERN_BIT) && + CALL_MUNMAP(base, size) == 0) + freed += size; + } + } + else { + USAGE_ERROR_ACTION(ms,ms); + } + return freed; +} + +/* + mspace versions of routines are near-clones of the global + versions. This is not so nice but better than the alternatives. +*/ + + +void* mspace_malloc(mspace msp, size_t bytes) { + mstate ms = (mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + return 0; + } + if (!PREACTION(ms)) { + void* mem; + size_t nb; + if (bytes <= MAX_SMALL_REQUEST) { + bindex_t idx; + binmap_t smallbits; + nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes); + idx = small_index(nb); + smallbits = ms->smallmap >> idx; + + if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */ + mchunkptr b, p; + idx += ~smallbits & 1; /* Uses next bin if idx empty */ + b = smallbin_at(ms, idx); + p = b->fd; + assert(chunksize(p) == small_index2size(idx)); + unlink_first_small_chunk(ms, b, p, idx); + set_inuse_and_pinuse(ms, p, small_index2size(idx)); + mem = chunk2mem(p); + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + + else if (nb > ms->dvsize) { + if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ + mchunkptr b, p, r; + size_t rsize; + bindex_t i; + binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); + binmap_t leastbit = least_bit(leftbits); + compute_bit2idx(leastbit, i); + b = smallbin_at(ms, i); + p = b->fd; + assert(chunksize(p) == small_index2size(i)); + unlink_first_small_chunk(ms, b, p, i); + rsize = small_index2size(i) - nb; + /* Fit here cannot be remainderless if 4byte sizes */ + if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) + set_inuse_and_pinuse(ms, p, small_index2size(i)); + else { + set_size_and_pinuse_of_inuse_chunk(ms, p, nb); + r = chunk_plus_offset(p, nb); + set_size_and_pinuse_of_free_chunk(r, rsize); + replace_dv(ms, r, rsize); + } + mem = chunk2mem(p); + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + + else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) { + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + } + } + else if (bytes >= MAX_REQUEST) + nb = MAX_SIZE_T; /* Too big to allocate. 
Force failure (in sys alloc) */ + else { + nb = pad_request(bytes); + if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) { + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + } + + if (nb <= ms->dvsize) { + size_t rsize = ms->dvsize - nb; + mchunkptr p = ms->dv; + if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ + mchunkptr r = ms->dv = chunk_plus_offset(p, nb); + ms->dvsize = rsize; + set_size_and_pinuse_of_free_chunk(r, rsize); + set_size_and_pinuse_of_inuse_chunk(ms, p, nb); + } + else { /* exhaust dv */ + size_t dvs = ms->dvsize; + ms->dvsize = 0; + ms->dv = 0; + set_inuse_and_pinuse(ms, p, dvs); + } + mem = chunk2mem(p); + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + + else if (nb < ms->topsize) { /* Split top */ + size_t rsize = ms->topsize -= nb; + mchunkptr p = ms->top; + mchunkptr r = ms->top = chunk_plus_offset(p, nb); + r->head = rsize | PINUSE_BIT; + set_size_and_pinuse_of_inuse_chunk(ms, p, nb); + mem = chunk2mem(p); + check_top_chunk(ms, ms->top); + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + + mem = sys_alloc(ms, nb); + + postaction: + POSTACTION(ms); + return mem; + } + + return 0; +} + +void mspace_free(mspace msp, void* mem) { + if (mem != 0) { + mchunkptr p = mem2chunk(mem); +#if FOOTERS + mstate fm = get_mstate_for(p); +#else /* FOOTERS */ + mstate fm = (mstate)msp; +#endif /* FOOTERS */ + if (!ok_magic(fm)) { + USAGE_ERROR_ACTION(fm, p); + return; + } + if (!PREACTION(fm)) { + check_inuse_chunk(fm, p); + if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) { + size_t psize = chunksize(p); + mchunkptr next = chunk_plus_offset(p, psize); + if (!pinuse(p)) { + size_t prevsize = p->prev_foot; + if ((prevsize & IS_MMAPPED_BIT) != 0) { + prevsize &= ~IS_MMAPPED_BIT; + psize += prevsize + MMAP_FOOT_PAD; + if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) + fm->footprint -= psize; + goto postaction; + } + else { + mchunkptr prev = chunk_minus_offset(p, prevsize); + psize += prevsize; + p = prev; + if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */ + if (p != fm->dv) { + unlink_chunk(fm, p, prevsize); + } + else if ((next->head & INUSE_BITS) == INUSE_BITS) { + fm->dvsize = psize; + set_free_with_pinuse(p, psize, next); + goto postaction; + } + } + else + goto erroraction; + } + } + + if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) { + if (!cinuse(next)) { /* consolidate forward */ + if (next == fm->top) { + size_t tsize = fm->topsize += psize; + fm->top = p; + p->head = tsize | PINUSE_BIT; + if (p == fm->dv) { + fm->dv = 0; + fm->dvsize = 0; + } + if (should_trim(fm, tsize)) + sys_trim(fm, 0); + goto postaction; + } + else if (next == fm->dv) { + size_t dsize = fm->dvsize += psize; + fm->dv = p; + set_size_and_pinuse_of_free_chunk(p, dsize); + goto postaction; + } + else { + size_t nsize = chunksize(next); + psize += nsize; + unlink_chunk(fm, next, nsize); + set_size_and_pinuse_of_free_chunk(p, psize); + if (p == fm->dv) { + fm->dvsize = psize; + goto postaction; + } + } + } + else + set_free_with_pinuse(p, psize, next); + + if (is_small(psize)) { + insert_small_chunk(fm, p, psize); + check_free_chunk(fm, p); + } + else { + tchunkptr tp = (tchunkptr)p; + insert_large_chunk(fm, tp, psize); + check_free_chunk(fm, p); + if (--fm->release_checks == 0) + release_unused_segments(fm); + } + goto postaction; + } + } + erroraction: + USAGE_ERROR_ACTION(fm, p); + postaction: + POSTACTION(fm); + } + } +} + +void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) { + void* mem; + size_t req = 0; + mstate ms = 
(mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + return 0; + } + if (n_elements != 0) { + req = n_elements * elem_size; + if (((n_elements | elem_size) & ~(size_t)0xffff) && + (req / n_elements != elem_size)) + req = MAX_SIZE_T; /* force downstream failure on overflow */ + } + mem = internal_malloc(ms, req); + if (mem != 0 && calloc_must_clear(mem2chunk(mem))) + memset(mem, 0, req); + return mem; +} + +void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) { + if (oldmem == 0) + return mspace_malloc(msp, bytes); +#ifdef REALLOC_ZERO_BYTES_FREES + if (bytes == 0) { + mspace_free(msp, oldmem); + return 0; + } +#endif /* REALLOC_ZERO_BYTES_FREES */ + else { +#if FOOTERS + mchunkptr p = mem2chunk(oldmem); + mstate ms = get_mstate_for(p); +#else /* FOOTERS */ + mstate ms = (mstate)msp; +#endif /* FOOTERS */ + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + return 0; + } + return internal_realloc(ms, oldmem, bytes); + } +} + +void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) { + mstate ms = (mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + return 0; + } + return internal_memalign(ms, alignment, bytes); +} + +void** mspace_independent_calloc(mspace msp, size_t n_elements, + size_t elem_size, void* chunks[]) { + size_t sz = elem_size; /* serves as 1-element array */ + mstate ms = (mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + return 0; + } + return ialloc(ms, n_elements, &sz, 3, chunks); +} + +void** mspace_independent_comalloc(mspace msp, size_t n_elements, + size_t sizes[], void* chunks[]) { + mstate ms = (mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + return 0; + } + return ialloc(ms, n_elements, sizes, 0, chunks); +} + +int mspace_trim(mspace msp, size_t pad) { + int result = 0; + mstate ms = (mstate)msp; + if (ok_magic(ms)) { + if (!PREACTION(ms)) { + result = sys_trim(ms, pad); + POSTACTION(ms); + } + } + else { + USAGE_ERROR_ACTION(ms,ms); + } + return result; +} + +void mspace_malloc_stats(mspace msp) { + mstate ms = (mstate)msp; + if (ok_magic(ms)) { + internal_malloc_stats(ms); + } + else { + USAGE_ERROR_ACTION(ms,ms); + } +} + +size_t mspace_footprint(mspace msp) { + size_t result = 0; + mstate ms = (mstate)msp; + if (ok_magic(ms)) { + result = ms->footprint; + } + else { + USAGE_ERROR_ACTION(ms,ms); + } + return result; +} + + +size_t mspace_max_footprint(mspace msp) { + size_t result = 0; + mstate ms = (mstate)msp; + if (ok_magic(ms)) { + result = ms->max_footprint; + } + else { + USAGE_ERROR_ACTION(ms,ms); + } + return result; +} + + +#if !NO_MALLINFO +struct mallinfo mspace_mallinfo(mspace msp) { + mstate ms = (mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + } + return internal_mallinfo(ms); +} +#endif /* NO_MALLINFO */ + +size_t mspace_usable_size(void* mem) { + if (mem != 0) { + mchunkptr p = mem2chunk(mem); + if (cinuse(p)) + return chunksize(p) - overhead_for(p); + } + return 0; +} + +int mspace_mallopt(int param_number, int value) { + return change_mparam(param_number, value); +} + +#endif /* MSPACES */ + +/* -------------------- Alternative MORECORE functions ------------------- */ + +/* + Guidelines for creating a custom version of MORECORE: + + * For best performance, MORECORE should allocate in multiples of pagesize. + * MORECORE may allocate more memory than requested. (Or even less, + but this will usually result in a malloc failure.) 
+
+  * MORECORE must not allocate memory when given argument zero, but
+    instead return one past the end address of memory from previous
+    nonzero call.
+  * For best performance, consecutive calls to MORECORE with positive
+    arguments should return increasing addresses, indicating that
+    space has been contiguously extended.
+  * Even though consecutive calls to MORECORE need not return contiguous
+    addresses, it must be OK for malloc'ed chunks to span multiple
+    regions in those cases where they do happen to be contiguous.
+  * MORECORE need not handle negative arguments -- it may instead
+    just return MFAIL when given negative arguments.
+    Negative arguments are always multiples of pagesize. MORECORE
+    must not misinterpret negative args as large positive unsigned
+    args. You can suppress all such calls from even occurring by defining
+    MORECORE_CANNOT_TRIM.
+
+  As an example alternative MORECORE, here is a custom allocator
+  kindly contributed for pre-OSX macOS. It uses virtually but not
+  necessarily physically contiguous non-paged memory (locked in,
+  present and won't get swapped out). You can use it by uncommenting
+  this section, adding some #includes, and setting up the appropriate
+  defines above:
+
+      #define MORECORE osMoreCore
+
+  There is also a shutdown routine that should somehow be called for
+  cleanup upon program exit.
+
+  #define MAX_POOL_ENTRIES 100
+  #define MINIMUM_MORECORE_SIZE  (64 * 1024U)
+  static int next_os_pool;
+  void *our_os_pools[MAX_POOL_ENTRIES];
+
+  void *osMoreCore(int size)
+  {
+    void *ptr = 0;
+    static void *sbrk_top = 0;
+
+    if (size > 0)
+    {
+      if (size < MINIMUM_MORECORE_SIZE)
+         size = MINIMUM_MORECORE_SIZE;
+      if (CurrentExecutionLevel() == kTaskLevel)
+         ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
+      if (ptr == 0)
+      {
+        return (void *) MFAIL;
+      }
+      // save ptrs so they can be freed during cleanup
+      our_os_pools[next_os_pool] = ptr;
+      next_os_pool++;
+      ptr = (void *) ((((size_t) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
+      sbrk_top = (char *) ptr + size;
+      return ptr;
+    }
+    else if (size < 0)
+    {
+      // we don't currently support shrink behavior
+      return (void *) MFAIL;
+    }
+    else
+    {
+      return sbrk_top;
+    }
+  }
+
+  // cleanup any allocated memory pools
+  // called as last thing before shutting down driver
+
+  void osCleanupMem(void)
+  {
+    void **ptr;
+
+    for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
+      if (*ptr)
+      {
+         PoolDeallocate(*ptr);
+         *ptr = 0;
+      }
+  }
+
+*/
+
+
+/* -----------------------------------------------------------------------
+History:
+    V2.8.4 (not yet released)
+      * Add mspace_mmap_large_chunks; thanks to Jean Brouwers
+      * Fix insufficient sys_alloc padding when using 16-byte alignment
+      * Fix bad error check in mspace_footprint
+      * Adaptations for ptmalloc, courtesy of Wolfram Gloger.
+      * Reentrant spin locks, courtesy of Earl Chew and others
+      * Win32 improvements, courtesy of Niall Douglas and Earl Chew
+      * Add NO_SEGMENT_TRAVERSAL and MAX_RELEASE_CHECK_RATE options
+      * Extension hook in malloc_state
+      * Various small adjustments to reduce warnings on some compilers
+      * Various configuration extensions/changes for more platforms. Thanks
+        to all who contributed these.
+
+    V2.8.3 Thu Sep 22 11:16:32 2005  Doug Lea  (dl at gee)
+      * Add max_footprint functions
+      * Ensure all appropriate literals are size_t
+      * Fix conditional compilation problem for some #define settings
+      * Avoid concatenating segments with the one provided
+        in create_mspace_with_base
+      * Rename some variables to avoid compiler shadowing warnings
+      * Use explicit lock initialization.
+      * Better handling of sbrk interference.
+      * Simplify and fix segment insertion, trimming and mspace_destroy
+      * Reinstate REALLOC_ZERO_BYTES_FREES option from 2.7.x
+      * Thanks especially to Dennis Flanagan for help on these.
+
+    V2.8.2 Sun Jun 12 16:01:10 2005  Doug Lea  (dl at gee)
+      * Fix memalign brace error.
+
+    V2.8.1 Wed Jun  8 16:11:46 2005  Doug Lea  (dl at gee)
+      * Fix improper #endif nesting in C++
+      * Add explicit casts needed for C++
+
+    V2.8.0 Mon May 30 14:09:02 2005  Doug Lea  (dl at gee)
+      * Use trees for large bins
+      * Support mspaces
+      * Use segments to unify sbrk-based and mmap-based system allocation,
+        removing need for emulation on most platforms without sbrk.
+      * Default safety checks
+      * Optional footer checks. Thanks to William Robertson for the idea.
+      * Internal code refactoring
+      * Incorporate suggestions and platform-specific changes.
+        Thanks to Dennis Flanagan, Colin Plumb, Niall Douglas,
+        Aaron Bachmann, Emery Berger, and others.
+      * Speed up non-fastbin processing enough to remove fastbins.
+      * Remove useless cfree() to avoid conflicts with other apps.
+      * Remove internal memcpy, memset. Compilers handle builtins better.
+      * Remove some options that no one ever used and rename others.
+
+    V2.7.2 Sat Aug 17 09:07:30 2002  Doug Lea  (dl at gee)
+      * Fix malloc_state bitmap array misdeclaration
+
+    V2.7.1 Thu Jul 25 10:58:03 2002  Doug Lea  (dl at gee)
+      * Allow tuning of FIRST_SORTED_BIN_SIZE
+      * Use PTR_UINT as type for all ptr->int casts. Thanks to John Belmonte.
+      * Better detection and support for non-contiguousness of MORECORE.
+        Thanks to Andreas Mueller, Conal Walsh, and Wolfram Gloger
+      * Bypass most of malloc if no frees. Thanks to Emery Berger.
+      * Fix freeing of old top non-contiguous chunk in sysmalloc.
+      * Raised default trim and map thresholds to 256K.
+      * Fix mmap-related #defines. Thanks to Lubos Lunak.
+      * Fix copy macros; added LACKS_FCNTL_H. Thanks to Neal Walfield.
+      * Branch-free bin calculation
+      * Default trim and mmap thresholds now 256K.
+
+    V2.7.0 Sun Mar 11 14:14:06 2001  Doug Lea  (dl at gee)
+      * Introduce independent_comalloc and independent_calloc.
+        Thanks to Michael Pachos for motivation and help.
+      * Make optional .h file available
+      * Allow > 2GB requests on 32bit systems.
+      * new WIN32 sbrk, mmap, munmap, lock code from .
+        Thanks also to Andreas Mueller ,
+        and Anonymous.
+      * Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for
+        helping test this.)
+      * memalign: check alignment arg
+      * realloc: don't try to shift chunks backwards, since this
+        leads to more fragmentation in some programs and doesn't
+        seem to help in any others.
+      * Collect all cases in malloc requiring system memory into sysmalloc
+      * Use mmap as backup to sbrk
+      * Place all internal state in malloc_state
+      * Introduce fastbins (although similar to 2.5.1)
+      * Many minor tunings and cosmetic improvements
+      * Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK
+      * Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS
+        Thanks to Tony E. Bennett and others.
+      * Include errno.h to support default failure action.
+
+    V2.6.6 Sun Dec  5 07:42:19 1999  Doug Lea  (dl at gee)
+      * return null for negative arguments
+      * Added several WIN32 cleanups from Martin C. Fong
+      * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
+        (e.g. WIN32 platforms)
+      * Cleanup header file inclusion for WIN32 platforms
+      * Cleanup code to avoid Microsoft Visual C++ compiler complaints
+      * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
+        memory allocation routines
+      * Set 'malloc_getpagesize' for WIN32 platforms (needs more work)
+      * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to
+        usage of 'assert' in non-WIN32 code
+      * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to
+        avoid infinite loop
+      * Always call 'fREe()' rather than 'free()'
+
+    V2.6.5 Wed Jun 17 15:57:31 1998  Doug Lea  (dl at gee)
+      * Fixed ordering problem with boundary-stamping
+
+    V2.6.3 Sun May 19 08:17:58 1996  Doug Lea  (dl at gee)
+      * Added pvalloc, as recommended by H.J. Liu
+      * Added 64bit pointer support mainly from Wolfram Gloger
+      * Added anonymously donated WIN32 sbrk emulation
+      * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
+      * malloc_extend_top: fix mask error that caused wastage after
+        foreign sbrks
+      * Add linux mremap support code from HJ Liu
+
+    V2.6.2 Tue Dec  5 06:52:55 1995  Doug Lea  (dl at gee)
+      * Integrated most documentation with the code.
+      * Add support for mmap, with help from
+        Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
+      * Use last_remainder in more cases.
+      * Pack bins using idea from colin@nyx10.cs.du.edu
+      * Use ordered bins instead of best-fit threshold
+      * Eliminate block-local decls to simplify tracing and debugging.
+      * Support another case of realloc via move into top
+      * Fix error occurring when initial sbrk_base not word-aligned.
+      * Rely on page size for units instead of SBRK_UNIT to
+        avoid surprises about sbrk alignment conventions.
+      * Add mallinfo, mallopt. Thanks to Raymond Nijssen
+        (raymond@es.ele.tue.nl) for the suggestion.
+      * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
+      * More precautions for cases where other routines call sbrk,
+        courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
+      * Added macros etc., allowing use in linux libc from
+        H.J. Lu (hjl@gnu.ai.mit.edu)
+      * Inverted this history list
+
+    V2.6.1 Sat Dec  2 14:10:57 1995  Doug Lea  (dl at gee)
+      * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
+      * Removed all preallocation code since under current scheme
+        the work required to undo bad preallocations exceeds
+        the work saved in good cases for most test programs.
+      * No longer use return list or unconsolidated bins since
+        no scheme using them consistently outperforms those that don't
+        given above changes.
+      * Use best fit for very large chunks to prevent some worst-cases.
+      * Added some support for debugging
+
+    V2.6.0 Sat Nov  4 07:05:23 1995  Doug Lea  (dl at gee)
+      * Removed footers when chunks are in use. Thanks to
+        Paul Wilson (wilson@cs.texas.edu) for the suggestion.
+
+    V2.5.4 Wed Nov  1 07:54:51 1995  Doug Lea  (dl at gee)
+      * Added malloc_trim, with help from Wolfram Gloger
+        (wmglo@Dent.MED.Uni-Muenchen.DE).
+ + V2.5.3 Tue Apr 26 10:16:01 1994 Doug Lea (dl at g) + + V2.5.2 Tue Apr 5 16:20:40 1994 Doug Lea (dl at g) + * realloc: try to expand in both directions + * malloc: swap order of clean-bin strategy; + * realloc: only conditionally expand backwards + * Try not to scavenge used bins + * Use bin counts as a guide to preallocation + * Occasionally bin return list chunks in first scan + * Add a few optimizations from colin@nyx10.cs.du.edu + + V2.5.1 Sat Aug 14 15:40:43 1993 Doug Lea (dl at g) + * faster bin computation & slightly different binning + * merged all consolidations to one part of malloc proper + (eliminating old malloc_find_space & malloc_clean_bin) + * Scan 2 returns chunks (not just 1) + * Propagate failure in realloc if malloc returns 0 + * Add stuff to allow compilation on non-ANSI compilers + from kpv@research.att.com + + V2.5 Sat Aug 7 07:41:59 1993 Doug Lea (dl at g.oswego.edu) + * removed potential for odd address access in prev_chunk + * removed dependency on getpagesize.h + * misc cosmetics and a bit more internal documentation + * anticosmetics: mangled names in macros to evade debugger strangeness + * tested on sparc, hp-700, dec-mips, rs6000 + with gcc & native cc (hp, dec only) allowing + Detlefs & Zorn comparison study (in SIGPLAN Notices.) + + Trial version Fri Aug 28 13:14:29 1992 Doug Lea (dl at g.oswego.edu) + * Based loosely on libg++-1.2X malloc. (It retains some of the overall + structure of old version, but most details differ.) + +*/ + + diff --git a/sources/nedmalloc/nedmalloc.c b/sources/nedmalloc/nedmalloc.c new file mode 100755 index 00000000..4dfbded9 --- /dev/null +++ b/sources/nedmalloc/nedmalloc.c @@ -0,0 +1,950 @@ +/* Alternative malloc implementation for multiple threads without +lock contention based on dlmalloc. (C) 2005-2006 Niall Douglas + +Boost Software License - Version 1.0 - August 17th, 2003 + +Permission is hereby granted, free of charge, to any person or organization +obtaining a copy of the software and accompanying documentation covered by +this license (the "Software") to use, reproduce, display, distribute, +execute, and transmit the Software, and to prepare derivative works of the +Software, and to permit third-parties to whom the Software is furnished to +do so, all subject to the following: + +The copyright notices in the Software and this entire statement, including +the above license grant, this restriction and the following disclaimer, +must be included in all copies of the Software, in whole or in part, and +all derivative works of the Software, unless such copies or derivative +works are solely in the form of machine-executable object code generated by +a source language processor. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT +SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE +FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
+*/
+
+#ifdef _MSC_VER
+/* Enable full aliasing on MSVC */
+/*#pragma optimize("a", on)*/
+#endif
+
+/*#define FULLSANITYCHECKS*/
+
+#include "nedmalloc.h"
+#ifdef WIN32
+ #include <windows.h>
+#endif
+#define MSPACES 1
+#define ONLY_MSPACES 1
+#ifndef USE_LOCKS
+ #define USE_LOCKS 1
+#endif
+#define FOOTERS 1           /* Need to enable footers so frees lock the right mspace */
+#undef DEBUG                /* dlmalloc wants DEBUG either 0 or 1 */
+#ifdef _DEBUG
+ #define DEBUG 1
+#else
+ #define DEBUG 0
+#endif
+#ifdef NDEBUG               /* Disable assert checking on release builds */
+ #undef DEBUG
+#endif
+/* The default of 64Kb means we spend too much time kernel-side */
+#ifndef DEFAULT_GRANULARITY
+#define DEFAULT_GRANULARITY (1*1024*1024)
+#endif
+/*#define USE_SPIN_LOCKS 0*/
+
+
+/*#define FORCEINLINE*/
+#include "malloc.c.h"
+#ifdef NDEBUG               /* Disable assert checking on release builds */
+ #undef DEBUG
+#endif
+
+/* The maximum concurrent threads in a pool possible */
+#ifndef MAXTHREADSINPOOL
+#define MAXTHREADSINPOOL 16
+#endif
+/* The maximum number of threadcaches which can be allocated */
+#ifndef THREADCACHEMAXCACHES
+#define THREADCACHEMAXCACHES 256
+#endif
+/* The maximum size to be allocated from the thread cache */
+#ifndef THREADCACHEMAX
+#define THREADCACHEMAX 8192
+#endif
+#if 0
+/* The number of cache entries for finer grained bins. This is (topbitpos(THREADCACHEMAX)-4)*2 */
+#define THREADCACHEMAXBINS ((13-4)*2)
+#else
+/* The number of cache entries. This is (topbitpos(THREADCACHEMAX)-4) */
+#define THREADCACHEMAXBINS (13-4)
+#endif
+/* Point at which the free space in a thread cache is garbage collected */
+#ifndef THREADCACHEMAXFREESPACE
+#define THREADCACHEMAXFREESPACE (512*1024)
+#endif
+
+
+#ifdef WIN32
+ #define TLSVAR          DWORD
+ #define TLSALLOC(k)     (*(k)=TlsAlloc(), TLS_OUT_OF_INDEXES==*(k))
+ #define TLSFREE(k)      (!TlsFree(k))
+ #define TLSGET(k)       TlsGetValue(k)
+ #define TLSSET(k, a)    (!TlsSetValue(k, a))
+ #ifdef DEBUG
+static LPVOID ChkedTlsGetValue(DWORD idx)
+{
+	LPVOID ret=TlsGetValue(idx);
+	assert(S_OK==GetLastError());
+	return ret;
+}
+ #undef TLSGET
+ #define TLSGET(k) ChkedTlsGetValue(k)
+ #endif
+#else
+ #define TLSVAR          pthread_key_t
+ #define TLSALLOC(k)     pthread_key_create(k, 0)
+ #define TLSFREE(k)      pthread_key_delete(k)
+ #define TLSGET(k)       pthread_getspecific(k)
+ #define TLSSET(k, a)    pthread_setspecific(k, a)
+#endif
+
+#if 0
+/* Only enable if testing with valgrind. Causes misoperation */
+#define mspace_malloc(p, s) malloc(s)
+#define mspace_realloc(p, m, s) realloc(m, s)
+#define mspace_calloc(p, n, s) calloc(n, s)
+#define mspace_free(p, m) free(m)
+#endif
+
+
+#if defined(__cplusplus)
+#if !defined(NO_NED_NAMESPACE)
+namespace nedalloc {
+#else
+extern "C" {
+#endif
+#endif
+
+size_t nedblksize(void *mem) THROWSPEC
+{
+#if 0
+	/* Only enable if testing with valgrind.
Causes misoperation */ + return THREADCACHEMAX; +#else + if(mem) + { + mchunkptr p=mem2chunk(mem); + assert(cinuse(p)); /* If this fails, someone tried to free a block twice */ + if(cinuse(p)) + return chunksize(p)-overhead_for(p); + } + return 0; +#endif +} + +void nedsetvalue(void *v) THROWSPEC { nedpsetvalue(0, v); } +void * nedmalloc(size_t size) THROWSPEC { return nedpmalloc(0, size); } +void * nedcalloc(size_t no, size_t size) THROWSPEC { return nedpcalloc(0, no, size); } +void * nedrealloc(void *mem, size_t size) THROWSPEC { return nedprealloc(0, mem, size); } +void nedfree(void *mem) THROWSPEC { nedpfree(0, mem); } +void * nedmemalign(size_t alignment, size_t bytes) THROWSPEC { return nedpmemalign(0, alignment, bytes); } +#if !NO_MALLINFO +struct mallinfo nedmallinfo(void) THROWSPEC { return nedpmallinfo(0); } +#endif +int nedmallopt(int parno, int value) THROWSPEC { return nedpmallopt(0, parno, value); } +int nedmalloc_trim(size_t pad) THROWSPEC { return nedpmalloc_trim(0, pad); } +void nedmalloc_stats() THROWSPEC { nedpmalloc_stats(0); } +size_t nedmalloc_footprint() THROWSPEC { return nedpmalloc_footprint(0); } +void **nedindependent_calloc(size_t elemsno, size_t elemsize, void **chunks) THROWSPEC { return nedpindependent_calloc(0, elemsno, elemsize, chunks); } +void **nedindependent_comalloc(size_t elems, size_t *sizes, void **chunks) THROWSPEC { return nedpindependent_comalloc(0, elems, sizes, chunks); } + +struct threadcacheblk_t; +typedef struct threadcacheblk_t threadcacheblk; +struct threadcacheblk_t +{ /* Keep less than 16 bytes on 32 bit systems and 32 bytes on 64 bit systems */ +#ifdef FULLSANITYCHECKS + unsigned int magic; +#endif + unsigned int lastUsed, size; + threadcacheblk *next, *prev; +}; +typedef struct threadcache_t +{ +#ifdef FULLSANITYCHECKS + unsigned int magic1; +#endif + int mymspace; /* Last mspace entry this thread used */ + long threadid; + unsigned int mallocs, frees, successes; + size_t freeInCache; /* How much free space is stored in this cache */ + threadcacheblk *bins[(THREADCACHEMAXBINS+1)*2]; +#ifdef FULLSANITYCHECKS + unsigned int magic2; +#endif +} threadcache; +struct nedpool_t +{ + MLOCK_T mutex; + void *uservalue; + int threads; /* Max entries in m to use */ + threadcache *caches[THREADCACHEMAXCACHES]; + TLSVAR mycache; /* Thread cache for this thread. 
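Values are stored offset by one so that zero can double as the unset state: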
0 for unset, negative for use mspace-1 directly, otherwise is cache-1 */
+	mstate m[MAXTHREADSINPOOL+1];      /* mspace entries for this pool */
+};
+static nedpool syspool;
+
+static FORCEINLINE unsigned int size2binidx(size_t _size) THROWSPEC
+{	/* 8=1000  16=10000  20=10100  24=11000  32=100000  48=110000  4096=1000000000000 */
+	unsigned int topbit, size=(unsigned int)(_size>>4);
+	/* 16=1  20=1  24=1  32=10  48=11  64=100  96=110  128=1000  4096=100000000 */
+
+#if defined(__GNUC__)
+	topbit = sizeof(size)*__CHAR_BIT__ - 1 - __builtin_clz(size);
+#elif defined(_MSC_VER) && _MSC_VER>=1300
+	{
+		unsigned long bsrTopBit;
+
+		_BitScanReverse(&bsrTopBit, size);
+
+		topbit = bsrTopBit;
+	}
+#else
+#if 0
+	union {
+		unsigned asInt[2];
+		double asDouble;
+	};
+	int n;
+
+	asDouble = (double)size + 0.5;
+	topbit = (asInt[!FOX_BIGENDIAN] >> 20) - 1023;
+#else
+	{
+		unsigned int x=size;
+		x = x | (x >> 1);
+		x = x | (x >> 2);
+		x = x | (x >> 4);
+		x = x | (x >> 8);
+		x = x | (x >>16);
+		x = ~x;
+		x = x - ((x >> 1) & 0x55555555);
+		x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
+		x = (x + (x >> 4)) & 0x0F0F0F0F;
+		x = x + (x << 8);
+		x = x + (x << 16);
+		topbit=31 - (x >> 24);
+	}
+#endif
+#endif
+	return topbit;
+}
+
+
+#ifdef FULLSANITYCHECKS
+static void tcsanitycheck(threadcacheblk **ptr) THROWSPEC
+{
+	assert((ptr[0] && ptr[1]) || (!ptr[0] && !ptr[1]));
+	if(ptr[0] && ptr[1])
+	{
+		assert(nedblksize(ptr[0])>=sizeof(threadcacheblk));
+		assert(nedblksize(ptr[1])>=sizeof(threadcacheblk));
+		assert(*(unsigned int *) "NEDN"==ptr[0]->magic);
+		assert(*(unsigned int *) "NEDN"==ptr[1]->magic);
+		assert(!ptr[0]->prev);
+		assert(!ptr[1]->next);
+		if(ptr[0]==ptr[1])
+		{
+			assert(!ptr[0]->next);
+			assert(!ptr[1]->prev);
+		}
+	}
+}
+static void tcfullsanitycheck(threadcache *tc) THROWSPEC
+{
+	threadcacheblk **tcbptr=tc->bins;
+	int n;
+	for(n=0; n<=THREADCACHEMAXBINS; n++, tcbptr+=2)
+	{
+		threadcacheblk *b, *ob=0;
+		tcsanitycheck(tcbptr);
+		for(b=tcbptr[0]; b; ob=b, b=b->next)
+		{
+			assert(*(unsigned int *) "NEDN"==b->magic);
+			assert(!ob || ob->next==b);
+			assert(!ob || b->prev==ob);
+		}
+	}
+}
+#endif
+
+static NOINLINE void RemoveCacheEntries(nedpool *p, threadcache *tc, unsigned int age) THROWSPEC
+{
+#ifdef FULLSANITYCHECKS
+	tcfullsanitycheck(tc);
+#endif
+	if(tc->freeInCache)
+	{
+		threadcacheblk **tcbptr=tc->bins;
+		int n;
+		for(n=0; n<=THREADCACHEMAXBINS; n++, tcbptr+=2)
+		{
+			threadcacheblk **tcb=tcbptr+1;      /* come from oldest end of list */
+			/*tcsanitycheck(tcbptr);*/
+			for(; *tcb && tc->frees-(*tcb)->lastUsed>=age; )
+			{
+				threadcacheblk *f=*tcb;
+				size_t blksize=f->size; /*nedblksize(f);*/
+				assert(blksize<=nedblksize(f));
+				assert(blksize);
+#ifdef FULLSANITYCHECKS
+				assert(*(unsigned int *) "NEDN"==(*tcb)->magic);
+#endif
+				*tcb=(*tcb)->prev;
+				if(*tcb)
+					(*tcb)->next=0;
+				else
+					*tcbptr=0;
+				tc->freeInCache-=blksize;
+				assert((long) tc->freeInCache>=0);
+				mspace_free(0, f);
+				/*tcsanitycheck(tcbptr);*/
+			}
+		}
+	}
+#ifdef FULLSANITYCHECKS
+	tcfullsanitycheck(tc);
+#endif
+}
+static void DestroyCaches(nedpool *p) THROWSPEC
+{
+	if(p->caches)
+	{
+		threadcache *tc;
+		int n;
+		for(n=0; n<THREADCACHEMAXCACHES; n++)
+		{
+			if((tc=p->caches[n]))
+			{
+				tc->frees++;
+				RemoveCacheEntries(p, tc, 0);
+				assert(!tc->freeInCache);
+				tc->mymspace=-1;
+				tc->threadid=0;
+				mspace_free(0, tc);
+				p->caches[n]=0;
+			}
+		}
+	}
+}
+
+static NOINLINE threadcache *AllocCache(nedpool *p) THROWSPEC
+{
+	threadcache *tc=0;
+	int n, end;
+	ACQUIRE_LOCK(&p->mutex);
+	for(n=0; n<THREADCACHEMAXCACHES && p->caches[n]; n++);
+	if(THREADCACHEMAXCACHES==n)
+	{	/* List exhausted, so disable for this thread */
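+		/* When this happens, GetThreadCache() stores -1 in the TLS slot,
+		so the thread permanently bypasses the cache and allocates from
+		mspace 0 directly. */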
RELEASE_LOCK(&p->mutex);
+		return 0;
+	}
+	tc=p->caches[n]=(threadcache *) mspace_calloc(p->m[0], 1, sizeof(threadcache));
+	if(!tc)
+	{
+		RELEASE_LOCK(&p->mutex);
+		return 0;
+	}
+#ifdef FULLSANITYCHECKS
+	tc->magic1=*(unsigned int *)"NEDMALC1";
+	tc->magic2=*(unsigned int *)"NEDMALC2";
+#endif
+	tc->threadid=(long)(size_t)CURRENT_THREAD;
+	for(end=0; p->m[end]; end++);
+	tc->mymspace=tc->threadid % end;
+	RELEASE_LOCK(&p->mutex);
+	if(TLSSET(p->mycache, (void *)(size_t)(n+1))) abort();
+	return tc;
+}
+
+static void *threadcache_malloc(nedpool *p, threadcache *tc, size_t *size) THROWSPEC
+{
+	void *ret=0;
+	unsigned int bestsize;
+	unsigned int idx=size2binidx(*size);
+	size_t blksize=0;
+	threadcacheblk *blk, **binsptr;
+#ifdef FULLSANITYCHECKS
+	tcfullsanitycheck(tc);
+#endif
+	/* Calculate best fit bin size */
+	bestsize=1<<(idx+4);
+#if 0
+	/* Finer grained bin fit */
+	idx<<=1;
+	if(*size>bestsize)
+	{
+		idx++;
+		bestsize+=bestsize>>1;
+	}
+	if(*size>bestsize)
+	{
+		idx++;
+		bestsize=1<<(4+(idx>>1));
+	}
+#else
+	if(*size>bestsize)
+	{
+		idx++;
+		bestsize<<=1;
+	}
+#endif
+	assert(bestsize>=*size);
+	if(*size<bestsize) *size=bestsize;
+	assert(idx<=THREADCACHEMAXBINS);
+	binsptr=&tc->bins[idx*2];
+	/* Try to match close, but move up a bin if necessary */
+	blk=*binsptr;
+	if(!blk || blk->size<*size)
+	{	/* Bump it up a bin */
+		if(idx<THREADCACHEMAXBINS)
+		{
+			idx++;
+			binsptr+=2;
+			blk=*binsptr;
+		}
+	}
+	if(blk)
+	{
+		blksize=blk->size; /*nedblksize(blk);*/
+		assert(nedblksize(blk)>=blksize);
+		assert(blksize>=*size);
+		if(blk->next)
+			blk->next->prev=0;
+		*binsptr=blk->next;
+		if(!*binsptr)
+			binsptr[1]=0;
+#ifdef FULLSANITYCHECKS
+		blk->magic=0;
+#endif
+		assert(binsptr[0]!=blk && binsptr[1]!=blk);
+		assert(nedblksize(blk)>=sizeof(threadcacheblk) && nedblksize(blk)<=THREADCACHEMAX+CHUNK_OVERHEAD);
+		/*printf("malloc: %p, %p, %p, %lu\n", p, tc, blk, (long) size);*/
+		ret=(void *) blk;
+	}
+	++tc->mallocs;
+	if(ret)
+	{
+		assert(blksize>=*size);
+		++tc->successes;
+		tc->freeInCache-=blksize;
+		assert((long) tc->freeInCache>=0);
+	}
+#if defined(DEBUG) && 0
+	if(!(tc->mallocs & 0xfff))
+	{
+		printf("*** threadcache=%u, mallocs=%u (%f), free=%u (%f), freeInCache=%u\n", (unsigned int) tc->threadid, tc->mallocs,
+			(float) tc->successes/tc->mallocs, tc->frees, (float) tc->successes/tc->frees, (unsigned int) tc->freeInCache);
+	}
+#endif
+#ifdef FULLSANITYCHECKS
+	tcfullsanitycheck(tc);
+#endif
+	return ret;
+}
+static NOINLINE void ReleaseFreeInCache(nedpool *p, threadcache *tc, int mymspace) THROWSPEC
+{
+	unsigned int age=THREADCACHEMAXFREESPACE/8192;
+	/*ACQUIRE_LOCK(&p->m[mymspace]->mutex);*/
+	while(age && tc->freeInCache>=THREADCACHEMAXFREESPACE)
+	{
+		RemoveCacheEntries(p, tc, age);
+		/*printf("*** Removing cache entries older than %u (%u)\n", age, (unsigned int) tc->freeInCache);*/
+		age>>=1;
+	}
+	/*RELEASE_LOCK(&p->m[mymspace]->mutex);*/
+}
+static void threadcache_free(nedpool *p, threadcache *tc, int mymspace, void *mem, size_t size) THROWSPEC
+{
+	unsigned int bestsize;
+	unsigned int idx=size2binidx(size);
+	threadcacheblk **binsptr, *tck=(threadcacheblk *) mem;
+	assert(size>=sizeof(threadcacheblk) && size<=THREADCACHEMAX+CHUNK_OVERHEAD);
+#ifdef DEBUG
+	{	/* Make sure this is a valid memory block */
+		mchunkptr cp = mem2chunk(mem);
+		mstate fm = get_mstate_for(cp);
+		if (!ok_magic(fm)) {
+			USAGE_ERROR_ACTION(fm, cp);
+			return;
+		}
+	}
+#endif
+#ifdef FULLSANITYCHECKS
+	tcfullsanitycheck(tc);
+#endif
+	/* Calculate best fit bin size */
+	bestsize=1<<(idx+4);
+#if 0
+	/* Finer grained bin fit */
+	idx<<=1;
+	if(size>bestsize)
+	{
+		unsigned int biggerbestsize=bestsize+bestsize<<1;
+		if(size>=biggerbestsize)
+		{
+			idx++;
+			bestsize=biggerbestsize;
+ } + } +#endif + if(bestsize!=size) /* dlmalloc can round up, so we round down to preserve indexing */ + size=bestsize; + binsptr=&tc->bins[idx*2]; + assert(idx<=THREADCACHEMAXBINS); + if(tck==*binsptr) + { + fprintf(stderr, "Attempt to free already freed memory block %p - aborting!\n", (void*)tck); + abort(); + } +#ifdef FULLSANITYCHECKS + tck->magic=*(unsigned int *) "NEDN"; +#endif + tck->lastUsed=++tc->frees; + tck->size=(unsigned int) size; + tck->next=*binsptr; + tck->prev=0; + if(tck->next) + tck->next->prev=tck; + else + binsptr[1]=tck; + assert(!*binsptr || (*binsptr)->size==tck->size); + *binsptr=tck; + assert(tck==tc->bins[idx*2]); + assert(tc->bins[idx*2+1]==tck || binsptr[0]->next->prev==tck); + /*printf("free: %p, %p, %p, %lu\n", p, tc, mem, (long) size);*/ + tc->freeInCache+=size; +#ifdef FULLSANITYCHECKS + tcfullsanitycheck(tc); +#endif +#if 1 + if(tc->freeInCache>=THREADCACHEMAXFREESPACE) + ReleaseFreeInCache(p, tc, mymspace); +#endif +} + + + + +static NOINLINE int InitPool(nedpool *p, size_t capacity, int threads) THROWSPEC +{ /* threads is -1 for system pool */ + ensure_initialization(); + ACQUIRE_MALLOC_GLOBAL_LOCK(); + if(p->threads) goto done; + if(INITIAL_LOCK(&p->mutex)) goto err; + if(TLSALLOC(&p->mycache)) goto err; + if(!(p->m[0]=(mstate) create_mspace(capacity, 1))) goto err; + p->m[0]->extp=p; + p->threads=(threads<1 || threads>MAXTHREADSINPOOL) ? MAXTHREADSINPOOL : threads; +done: + RELEASE_MALLOC_GLOBAL_LOCK(); + return 1; +err: + if(threads<0) + abort(); /* If you can't allocate for system pool, we're screwed */ + DestroyCaches(p); + if(p->m[0]) + { + destroy_mspace(p->m[0]); + p->m[0]=0; + } + if(p->mycache) + { + if(TLSFREE(p->mycache)) abort(); + p->mycache=0; + } + RELEASE_MALLOC_GLOBAL_LOCK(); + return 0; +} +static NOINLINE mstate FindMSpace(nedpool *p, threadcache *tc, int *lastUsed, size_t size) THROWSPEC +{ /* Gets called when thread's last used mspace is in use. 
The strategy
+	is to run through the list of all available mspaces looking for an
+	unlocked one and if we fail, we create a new one so long as we don't
+	exceed p->threads */
+	int n, end;
+	for(n=end=*lastUsed+1; p->m[n]; end=++n)
+	{
+		if(TRY_LOCK(&p->m[n]->mutex)) goto found;
+	}
+	for(n=0; n<*lastUsed && p->m[n]; n++)
+	{
+		if(TRY_LOCK(&p->m[n]->mutex)) goto found;
+	}
+	if(end<p->threads)
+	{
+		mstate temp;
+		if(!(temp=(mstate) create_mspace(size, 1)))
+			goto badexit;
+		/* Now we're ready to modify the lists, we lock */
+		ACQUIRE_LOCK(&p->mutex);
+		while(p->m[end] && end<p->threads)
+			end++;
+		if(end>=p->threads)
+		{	/* Drat, must destroy it now */
+			RELEASE_LOCK(&p->mutex);
+			destroy_mspace((mspace) temp);
+			goto badexit;
+		}
+		/* We really want to make sure this goes into memory now but we
+		have to be careful of breaking aliasing rules, so write it twice */
+		*((volatile struct malloc_state **) &p->m[end])=p->m[end]=temp;
+		ACQUIRE_LOCK(&p->m[end]->mutex);
+		/*printf("Created mspace idx %d\n", end);*/
+		RELEASE_LOCK(&p->mutex);
+		n=end;
+		goto found;
+	}
+	/* Let it lock on the last one it used */
+badexit:
+	ACQUIRE_LOCK(&p->m[*lastUsed]->mutex);
+	return p->m[*lastUsed];
+found:
+	*lastUsed=n;
+	if(tc)
+		tc->mymspace=n;
+	else
+	{
+		if(TLSSET(p->mycache, (void *)(size_t)(-(n+1)))) abort();
+	}
+	return p->m[n];
+}
+
+nedpool *nedcreatepool(size_t capacity, int threads) THROWSPEC
+{
+	nedpool *ret;
+	if(!(ret=(nedpool *) nedpcalloc(0, 1, sizeof(nedpool)))) return 0;
+	if(!InitPool(ret, capacity, threads))
+	{
+		nedpfree(0, ret);
+		return 0;
+	}
+	return ret;
+}
+void neddestroypool(nedpool *p) THROWSPEC
+{
+	int n;
+	ACQUIRE_LOCK(&p->mutex);
+	DestroyCaches(p);
+	for(n=0; p->m[n]; n++)
+	{
+		destroy_mspace(p->m[n]);
+		p->m[n]=0;
+	}
+	RELEASE_LOCK(&p->mutex);
+	if(TLSFREE(p->mycache)) abort();
+	nedpfree(0, p);
+}
+
+void nedpsetvalue(nedpool *p, void *v) THROWSPEC
+{
+	if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); }
+	p->uservalue=v;
+}
+void *nedgetvalue(nedpool **p, void *mem) THROWSPEC
+{
+	nedpool *np=0;
+	mchunkptr mcp=mem2chunk(mem);
+	mstate fm;
+	if(!(is_aligned(chunk2mem(mcp))) && mcp->head != FENCEPOST_HEAD) return 0;
+	if(!cinuse(mcp)) return 0;
+	if(!next_pinuse(mcp)) return 0;
+	if(!is_mmapped(mcp) && !pinuse(mcp))
+	{
+		if(next_chunk(prev_chunk(mcp))!=mcp) return 0;
+	}
+	fm=get_mstate_for(mcp);
+	if(!ok_magic(fm)) return 0;
+	if(!ok_address(fm, mcp)) return 0;
+	if(!fm->extp) return 0;
+	np=(nedpool *) fm->extp;
+	if(p) *p=np;
+	return np->uservalue;
+}
+
+void neddisablethreadcache(nedpool *p) THROWSPEC
+{
+	int mycache;
+	if(!p)
+	{
+		p=&syspool;
+		if(!syspool.threads) InitPool(&syspool, 0, -1);
+	}
+	mycache=(int)(size_t) TLSGET(p->mycache);
+	if(!mycache)
+	{	/* Set to mspace 0 */
+		if(TLSSET(p->mycache, (void *)-1)) abort();
+	}
+	else if(mycache>0)
+	{	/* Set to last used mspace */
+		threadcache *tc=p->caches[mycache-1];
+#if defined(DEBUG)
+		printf("Threadcache utilisation: %lf%% in cache with %lf%% lost to other threads\n",
+			100.0*tc->successes/tc->mallocs, 100.0*((double) tc->mallocs-tc->frees)/tc->mallocs);
+#endif
+		if(TLSSET(p->mycache, (void *)(size_t)(-tc->mymspace))) abort();
+		tc->frees++;
+		RemoveCacheEntries(p, tc, 0);
+		assert(!tc->freeInCache);
+		tc->mymspace=-1;
+		tc->threadid=0;
+		mspace_free(0, p->caches[mycache-1]);
+		p->caches[mycache-1]=0;
+	}
+}
+
+#define GETMSPACE(m,p,tc,ms,s,action)            \
+	do                                           \
+	{                                            \
+		mstate m = GetMSpace((p),(tc),(ms),(s)); \
+		action;                                  \
+		RELEASE_LOCK(&m->mutex);                 \
+	} while (0)
+
+static FORCEINLINE
mstate GetMSpace(nedpool *p, threadcache *tc, int mymspace, size_t size) THROWSPEC
+{	/* Returns a locked and ready for use mspace */
+	mstate m=p->m[mymspace];
+	assert(m);
+	if(!TRY_LOCK(&p->m[mymspace]->mutex)) m=FindMSpace(p, tc, &mymspace, size);
+	/*assert(IS_LOCKED(&p->m[mymspace]->mutex));*/
+	return m;
+}
+static FORCEINLINE void GetThreadCache(nedpool **p, threadcache **tc, int *mymspace, size_t *size) THROWSPEC
+{
+	int mycache;
+	if(size && *size<sizeof(threadcacheblk)) *size=sizeof(threadcacheblk);
+	if(!*p) { *p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); }
+	mycache=(int)(size_t) TLSGET((*p)->mycache);
+	if(mycache>0)
+	{
+		*tc=(*p)->caches[mycache-1];
+		*mymspace=(*tc)->mymspace;
+	}
+	else if(!mycache)
+	{
+		*tc=AllocCache(*p);
+		if(!*tc)
+		{	/* Disable */
+			if(TLSSET((*p)->mycache, (void *)-1)) abort();
+			*mymspace=0;
+		}
+		else
+			*mymspace=(*tc)->mymspace;
+	}
+	else
+	{
+		*tc=0;
+		*mymspace=-mycache-1;
+	}
+	assert(*mymspace>=0);
+	assert(!(*tc) || (long)(size_t)CURRENT_THREAD==(*tc)->threadid);
+#ifdef FULLSANITYCHECKS
+	if(*tc)
+	{
+		if(*(unsigned int *)"NEDMALC1"!=(*tc)->magic1 || *(unsigned int *)"NEDMALC2"!=(*tc)->magic2)
+		{
+			abort();
+		}
+	}
+#endif
+}
+
+void * nedpmalloc(nedpool *p, size_t size) THROWSPEC
+{
+	void *ret=0;
+	threadcache *tc;
+	int mymspace;
+	GetThreadCache(&p, &tc, &mymspace, &size);
+#if THREADCACHEMAX
+	if(tc && size<=THREADCACHEMAX)
+	{	/* Use the thread cache */
+		ret=threadcache_malloc(p, tc, &size);
+	}
+#endif
+	if(!ret)
+	{	/* Use this thread's mspace */
+		GETMSPACE(m, p, tc, mymspace, size,
+			ret=mspace_malloc(m, size));
+	}
+	return ret;
+}
+void * nedpcalloc(nedpool *p, size_t no, size_t size) THROWSPEC
+{
+	size_t rsize=size*no;
+	void *ret=0;
+	threadcache *tc;
+	int mymspace;
+	GetThreadCache(&p, &tc, &mymspace, &rsize);
+#if THREADCACHEMAX
+	if(tc && rsize<=THREADCACHEMAX)
+	{	/* Use the thread cache */
+		if((ret=threadcache_malloc(p, tc, &rsize)))
+			memset(ret, 0, rsize);
+	}
+#endif
+	if(!ret)
+	{	/* Use this thread's mspace */
+		GETMSPACE(m, p, tc, mymspace, rsize,
+			ret=mspace_calloc(m, 1, rsize));
+	}
+	return ret;
+}
+void * nedprealloc(nedpool *p, void *mem, size_t size) THROWSPEC
+{
+	void *ret=0;
+	threadcache *tc;
+	int mymspace;
+	if(!mem) return nedpmalloc(p, size);
+	GetThreadCache(&p, &tc, &mymspace, &size);
+#if THREADCACHEMAX
+	if(tc && size && size<=THREADCACHEMAX)
+	{	/* Use the thread cache */
+		size_t memsize=nedblksize(mem);
+		assert(memsize);
+		if((ret=threadcache_malloc(p, tc, &size)))
+		{
+			memcpy(ret, mem, memsize<size ? memsize : size);
+			if(memsize<=THREADCACHEMAX)
+				threadcache_free(p, tc, mymspace, mem, memsize);
+			else
+				mspace_free(0, mem);
+		}
+	}
+#endif
+	if(!ret)
+	{	/* Reallocs always happen in the mspace they happened in, so skip
+		locking the preferred mspace for this thread */
+		ret=mspace_realloc(0, mem, size);
+	}
+	return ret;
+}
+void nedpfree(nedpool *p, void *mem) THROWSPEC
+{	/* Frees always happen in the mspace they happened in, so skip
+	locking the preferred mspace for this thread */
+	threadcache *tc;
+	int mymspace;
+	size_t memsize;
+	assert(mem);
+	GetThreadCache(&p, &tc, &mymspace, 0);
+#if THREADCACHEMAX
+	if(mem && (memsize=nedblksize(mem)) && memsize<=THREADCACHEMAX && tc)
+		threadcache_free(p, tc, mymspace, mem, memsize);
+	else
+#endif
+		mspace_free(0, mem);
+}
+void * nedpmemalign(nedpool *p, size_t alignment, size_t bytes) THROWSPEC
+{
+	void *ret;
+	threadcache *tc;
+	int mymspace;
+	GetThreadCache(&p, &tc, &mymspace, &bytes);
+	{	/* Use this thread's mspace */
+		GETMSPACE(m, p, tc, mymspace, bytes,
+			ret=mspace_memalign(m, alignment, bytes));
+	}
+	return ret;
+}
+#if !NO_MALLINFO
+struct mallinfo nedpmallinfo(nedpool *p) THROWSPEC
+{
+	int n;
+	struct mallinfo ret={0,0,0,0,0,0,0,0,0,0};
+	if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); }
+	for(n=0; p->m[n]; n++)
+	{
+		struct mallinfo t=mspace_mallinfo(p->m[n]);
+		ret.arena+=t.arena;
+		ret.ordblks+=t.ordblks;
+		ret.hblkhd+=t.hblkhd;
+		ret.usmblks+=t.usmblks;
+		ret.uordblks+=t.uordblks;
+		ret.fordblks+=t.fordblks;
+		ret.keepcost+=t.keepcost;
+	}
+	return ret;
+}
+#endif
+int nedpmallopt(nedpool *p, int parno, int value) THROWSPEC
+{
+	return mspace_mallopt(parno, value);
+}
+int nedpmalloc_trim(nedpool *p, size_t pad) THROWSPEC
+{
+	int n, ret=0;
+	if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); }
+	for(n=0; p->m[n]; n++)
+	{
+		ret+=mspace_trim(p->m[n], pad);
+	}
+	return ret;
+}
+void nedpmalloc_stats(nedpool *p) THROWSPEC
+{
+	int n;
+	if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); }
+	for(n=0; p->m[n]; n++)
+	{
+		mspace_malloc_stats(p->m[n]);
+	}
+}
+size_t nedpmalloc_footprint(nedpool *p) THROWSPEC
+{
+	size_t ret=0;
+	int n;
+	if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); }
+	for(n=0; p->m[n]; n++)
+	{
+		ret+=mspace_footprint(p->m[n]);
+	}
+	return ret;
+}
+void **nedpindependent_calloc(nedpool *p, size_t elemsno, size_t elemsize, void **chunks) THROWSPEC
+{
+	void **ret;
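+	/* GETMSPACE passes elemsno*elemsize to GetMSpace() purely as the
+	capacity hint FindMSpace() would use if it had to create a new mspace;
+	the chosen mspace's lock is held only for the duration of the
+	mspace_independent_calloc() call. */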
threadcache *tc;
+	int mymspace;
+	GetThreadCache(&p, &tc, &mymspace, &elemsize);
+	GETMSPACE(m, p, tc, mymspace, elemsno*elemsize,
+		ret=mspace_independent_calloc(m, elemsno, elemsize, chunks));
+	return ret;
+}
+void **nedpindependent_comalloc(nedpool *p, size_t elems, size_t *sizes, void **chunks) THROWSPEC
+{
+	void **ret;
+	threadcache *tc;
+	int mymspace;
+	size_t i, *adjustedsizes=(size_t *) alloca(elems*sizeof(size_t));
+	if(!adjustedsizes) return 0;
+	for(i=0; i<elems; i++)
+		adjustedsizes[i]=sizes[i]<sizeof(threadcacheblk) ? sizeof(threadcacheblk) : sizes[i];
+	GetThreadCache(&p, &tc, &mymspace, 0);
+	GETMSPACE(m, p, tc, mymspace, 0,
+		ret=mspace_independent_comalloc(m, elems, adjustedsizes, chunks));
+	return ret;
+}
+
+#if defined(__cplusplus)
+}
+#endif
diff --git a/sources/nedmalloc/nedmalloc.h b/sources/nedmalloc/nedmalloc.h
new file mode 100755
--- /dev/null
+++ b/sources/nedmalloc/nedmalloc.h
+#ifndef NEDMALLOC_H
+#define NEDMALLOC_H
+
+#include <stddef.h>   /* for size_t */
+
+#ifndef EXTSPEC
+ #define EXTSPEC extern
+#endif
+
+#if defined(_MSC_VER) && _MSC_VER>=1400
+ #define MALLOCATTR __declspec(restrict)
+#endif
+#ifdef __GNUC__
+ #define MALLOCATTR __attribute__ ((malloc))
+#endif
+#ifndef MALLOCATTR
+ #define MALLOCATTR
+#endif
+
+#ifdef REPLACE_SYSTEM_ALLOCATOR
+ #define nedmalloc               malloc
+ #define nedcalloc               calloc
+ #define nedrealloc              realloc
+ #define nedfree                 free
+ #define nedmemalign             memalign
+ #define nedmallinfo             mallinfo
+ #define nedmallopt              mallopt
+ #define nedmalloc_trim          malloc_trim
+ #define nedmalloc_stats         malloc_stats
+ #define nedmalloc_footprint     malloc_footprint
+ #define nedindependent_calloc   independent_calloc
+ #define nedindependent_comalloc independent_comalloc
+ #ifdef _MSC_VER
+  #define nedblksize             _msize
+ #endif
+#endif
+
+#ifndef NO_MALLINFO
+#define NO_MALLINFO 0
+#endif
+
+#if !NO_MALLINFO
+struct mallinfo;
+#endif
+
+#if defined(__cplusplus)
+ #if !defined(NO_NED_NAMESPACE)
+namespace nedalloc {
+ #else
+extern "C" {
+ #endif
+ #define THROWSPEC throw()
+#else
+ #define THROWSPEC
+#endif
+
+/* These are the global functions */
+
+/* Gets the usable size of an allocated block. Note this will always be bigger than what was
+asked for due to rounding etc.
+*/
+EXTSPEC size_t nedblksize(void *mem) THROWSPEC;
+
+EXTSPEC void nedsetvalue(void *v) THROWSPEC;
+
+EXTSPEC MALLOCATTR void * nedmalloc(size_t size) THROWSPEC;
+EXTSPEC MALLOCATTR void * nedcalloc(size_t no, size_t size) THROWSPEC;
+EXTSPEC MALLOCATTR void * nedrealloc(void *mem, size_t size) THROWSPEC;
+EXTSPEC void   nedfree(void *mem) THROWSPEC;
+EXTSPEC MALLOCATTR void * nedmemalign(size_t alignment, size_t bytes) THROWSPEC;
+#if !NO_MALLINFO
+EXTSPEC struct mallinfo nedmallinfo(void) THROWSPEC;
+#endif
+EXTSPEC int    nedmallopt(int parno, int value) THROWSPEC;
+EXTSPEC int    nedmalloc_trim(size_t pad) THROWSPEC;
+EXTSPEC void   nedmalloc_stats(void) THROWSPEC;
+EXTSPEC size_t nedmalloc_footprint(void) THROWSPEC;
+EXTSPEC MALLOCATTR void **nedindependent_calloc(size_t elemsno, size_t elemsize, void **chunks) THROWSPEC;
+EXTSPEC MALLOCATTR void **nedindependent_comalloc(size_t elems, size_t *sizes, void **chunks) THROWSPEC;
+
+/* These are the pool functions */
+struct nedpool_t;
+typedef struct nedpool_t nedpool;
+
+/* Creates a memory pool for use with the nedp* functions below.
+Capacity is how much to allocate immediately (if you know you'll be allocating a lot
+of memory very soon) which you can leave at zero. Threads specifies how many threads
+will *normally* be accessing the pool concurrently. Setting this to zero means it
+extends on demand, but be careful of this as it can rapidly consume system resources
+where bursts of concurrent threads use a pool at once.
+*/
+EXTSPEC MALLOCATTR nedpool *nedcreatepool(size_t capacity, int threads) THROWSPEC;
+
+/* Destroys a memory pool previously created by nedcreatepool().
+*/
+EXTSPEC void neddestroypool(nedpool *p) THROWSPEC;
+
+/* Sets a value to be associated with a pool.
You can retrieve this value by passing +any memory block allocated from that pool. +*/ +EXTSPEC void nedpsetvalue(nedpool *p, void *v) THROWSPEC; +/* Gets a previously set value using nedpsetvalue() or zero if memory is unknown. +Optionally can also retrieve pool. +*/ +EXTSPEC void *nedgetvalue(nedpool **p, void *mem) THROWSPEC; + +/* Disables the thread cache for the calling thread, returning any existing cache +data to the central pool. +*/ +EXTSPEC void neddisablethreadcache(nedpool *p) THROWSPEC; + +EXTSPEC MALLOCATTR void * nedpmalloc(nedpool *p, size_t size) THROWSPEC; +EXTSPEC MALLOCATTR void * nedpcalloc(nedpool *p, size_t no, size_t size) THROWSPEC; +EXTSPEC MALLOCATTR void * nedprealloc(nedpool *p, void *mem, size_t size) THROWSPEC; +EXTSPEC void nedpfree(nedpool *p, void *mem) THROWSPEC; +EXTSPEC MALLOCATTR void * nedpmemalign(nedpool *p, size_t alignment, size_t bytes) THROWSPEC; +#if !NO_MALLINFO +EXTSPEC struct mallinfo nedpmallinfo(nedpool *p) THROWSPEC; +#endif +EXTSPEC int nedpmallopt(nedpool *p, int parno, int value) THROWSPEC; +EXTSPEC int nedpmalloc_trim(nedpool *p, size_t pad) THROWSPEC; +EXTSPEC void nedpmalloc_stats(nedpool *p) THROWSPEC; +EXTSPEC size_t nedpmalloc_footprint(nedpool *p) THROWSPEC; +EXTSPEC MALLOCATTR void **nedpindependent_calloc(nedpool *p, size_t elemsno, size_t elemsize, void **chunks) THROWSPEC; +EXTSPEC MALLOCATTR void **nedpindependent_comalloc(nedpool *p, size_t elems, size_t *sizes, void **chunks) THROWSPEC; + +#if defined(__cplusplus) +} +#endif + +#undef MALLOCATTR +#undef EXTSPEC + +#endif diff --git a/sources/pvrsdk/Include/CL/cl.h b/sources/pvrsdk/Include/CL/cl.h new file mode 100755 index 00000000..3127823a --- /dev/null +++ b/sources/pvrsdk/Include/CL/cl.h @@ -0,0 +1,1466 @@ +/******************************************************************************* + * Copyright (c) 2008-2015 The Khronos Group Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and/or associated documentation files (the + * "Materials"), to deal in the Materials without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Materials, and to + * permit persons to whom the Materials are furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Materials. + * + * MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS + * KHRONOS STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS + * SPECIFICATIONS AND HEADER INFORMATION ARE LOCATED AT + * https://www.khronos.org/registry/ + * + * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS. 
+ ******************************************************************************/
+
+#ifndef __OPENCL_CL_H
+#define __OPENCL_CL_H
+
+#include <CL/cl_platform.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/******************************************************************************/
+
+typedef struct _cl_platform_id *    cl_platform_id;
+typedef struct _cl_device_id *      cl_device_id;
+typedef struct _cl_context *        cl_context;
+typedef struct _cl_command_queue *  cl_command_queue;
+typedef struct _cl_mem *            cl_mem;
+typedef struct _cl_program *        cl_program;
+typedef struct _cl_kernel *         cl_kernel;
+typedef struct _cl_event *          cl_event;
+typedef struct _cl_sampler *        cl_sampler;
+
+typedef cl_uint             cl_bool;    /* WARNING! Unlike cl_ types in cl_platform.h, cl_bool is not guaranteed to be the same size as the bool in kernels. */
+typedef cl_ulong            cl_bitfield;
+typedef cl_bitfield         cl_device_type;
+typedef cl_uint             cl_platform_info;
+typedef cl_uint             cl_device_info;
+typedef cl_bitfield         cl_device_fp_config;
+typedef cl_uint             cl_device_mem_cache_type;
+typedef cl_uint             cl_device_local_mem_type;
+typedef cl_bitfield         cl_device_exec_capabilities;
+typedef cl_bitfield         cl_device_svm_capabilities;
+typedef cl_bitfield         cl_command_queue_properties;
+typedef intptr_t            cl_device_partition_property;
+typedef cl_bitfield         cl_device_affinity_domain;
+
+typedef intptr_t            cl_context_properties;
+typedef cl_uint             cl_context_info;
+typedef cl_bitfield         cl_queue_properties;
+typedef cl_uint             cl_command_queue_info;
+typedef cl_uint             cl_channel_order;
+typedef cl_uint             cl_channel_type;
+typedef cl_bitfield         cl_mem_flags;
+typedef cl_bitfield         cl_svm_mem_flags;
+typedef cl_uint             cl_mem_object_type;
+typedef cl_uint             cl_mem_info;
+typedef cl_bitfield         cl_mem_migration_flags;
+typedef cl_uint             cl_image_info;
+typedef cl_uint             cl_buffer_create_type;
+typedef cl_uint             cl_addressing_mode;
+typedef cl_uint             cl_filter_mode;
+typedef cl_uint             cl_sampler_info;
+typedef cl_bitfield         cl_map_flags;
+typedef intptr_t            cl_pipe_properties;
+typedef cl_uint             cl_pipe_info;
+typedef cl_uint             cl_program_info;
+typedef cl_uint             cl_program_build_info;
+typedef cl_uint             cl_program_binary_type;
+typedef cl_int              cl_build_status;
+typedef cl_uint             cl_kernel_info;
+typedef cl_uint             cl_kernel_arg_info;
+typedef cl_uint             cl_kernel_arg_address_qualifier;
+typedef cl_uint             cl_kernel_arg_access_qualifier;
+typedef cl_bitfield         cl_kernel_arg_type_qualifier;
+typedef cl_uint             cl_kernel_work_group_info;
+typedef cl_uint             cl_kernel_sub_group_info;
+typedef cl_uint             cl_event_info;
+typedef cl_uint             cl_command_type;
+typedef cl_uint             cl_profiling_info;
+typedef cl_bitfield         cl_sampler_properties;
+typedef cl_uint             cl_kernel_exec_info;
+
+typedef struct _cl_image_format {
+    cl_channel_order        image_channel_order;
+    cl_channel_type         image_channel_data_type;
+} cl_image_format;
+
+typedef struct _cl_image_desc {
+    cl_mem_object_type      image_type;
+    size_t                  image_width;
+    size_t                  image_height;
+    size_t                  image_depth;
+    size_t                  image_array_size;
+    size_t                  image_row_pitch;
+    size_t                  image_slice_pitch;
+    cl_uint                 num_mip_levels;
+    cl_uint                 num_samples;
+#ifdef __GNUC__
+    __extension__   /* Prevents warnings about anonymous union in -pedantic builds */
+#endif
+    union {
+      cl_mem                  buffer;
+      cl_mem                  mem_object;
+    };
+} cl_image_desc;
+
+typedef struct _cl_buffer_region {
+    size_t                  origin;
+    size_t                  size;
+} cl_buffer_region;
+
+
+/******************************************************************************/
+
+/* Error Codes */
+#define CL_SUCCESS                                  0
+#define CL_DEVICE_NOT_FOUND                         -1
+#define CL_DEVICE_NOT_AVAILABLE                     -2
+#define
CL_COMPILER_NOT_AVAILABLE -3 +#define CL_MEM_OBJECT_ALLOCATION_FAILURE -4 +#define CL_OUT_OF_RESOURCES -5 +#define CL_OUT_OF_HOST_MEMORY -6 +#define CL_PROFILING_INFO_NOT_AVAILABLE -7 +#define CL_MEM_COPY_OVERLAP -8 +#define CL_IMAGE_FORMAT_MISMATCH -9 +#define CL_IMAGE_FORMAT_NOT_SUPPORTED -10 +#define CL_BUILD_PROGRAM_FAILURE -11 +#define CL_MAP_FAILURE -12 +#define CL_MISALIGNED_SUB_BUFFER_OFFSET -13 +#define CL_EXEC_STATUS_ERROR_FOR_EVENTS_IN_WAIT_LIST -14 +#define CL_COMPILE_PROGRAM_FAILURE -15 +#define CL_LINKER_NOT_AVAILABLE -16 +#define CL_LINK_PROGRAM_FAILURE -17 +#define CL_DEVICE_PARTITION_FAILED -18 +#define CL_KERNEL_ARG_INFO_NOT_AVAILABLE -19 + +#define CL_INVALID_VALUE -30 +#define CL_INVALID_DEVICE_TYPE -31 +#define CL_INVALID_PLATFORM -32 +#define CL_INVALID_DEVICE -33 +#define CL_INVALID_CONTEXT -34 +#define CL_INVALID_QUEUE_PROPERTIES -35 +#define CL_INVALID_COMMAND_QUEUE -36 +#define CL_INVALID_HOST_PTR -37 +#define CL_INVALID_MEM_OBJECT -38 +#define CL_INVALID_IMAGE_FORMAT_DESCRIPTOR -39 +#define CL_INVALID_IMAGE_SIZE -40 +#define CL_INVALID_SAMPLER -41 +#define CL_INVALID_BINARY -42 +#define CL_INVALID_BUILD_OPTIONS -43 +#define CL_INVALID_PROGRAM -44 +#define CL_INVALID_PROGRAM_EXECUTABLE -45 +#define CL_INVALID_KERNEL_NAME -46 +#define CL_INVALID_KERNEL_DEFINITION -47 +#define CL_INVALID_KERNEL -48 +#define CL_INVALID_ARG_INDEX -49 +#define CL_INVALID_ARG_VALUE -50 +#define CL_INVALID_ARG_SIZE -51 +#define CL_INVALID_KERNEL_ARGS -52 +#define CL_INVALID_WORK_DIMENSION -53 +#define CL_INVALID_WORK_GROUP_SIZE -54 +#define CL_INVALID_WORK_ITEM_SIZE -55 +#define CL_INVALID_GLOBAL_OFFSET -56 +#define CL_INVALID_EVENT_WAIT_LIST -57 +#define CL_INVALID_EVENT -58 +#define CL_INVALID_OPERATION -59 +#define CL_INVALID_GL_OBJECT -60 +#define CL_INVALID_BUFFER_SIZE -61 +#define CL_INVALID_MIP_LEVEL -62 +#define CL_INVALID_GLOBAL_WORK_SIZE -63 +#define CL_INVALID_PROPERTY -64 +#define CL_INVALID_IMAGE_DESCRIPTOR -65 +#define CL_INVALID_COMPILER_OPTIONS -66 +#define CL_INVALID_LINKER_OPTIONS -67 +#define CL_INVALID_DEVICE_PARTITION_COUNT -68 +#define CL_INVALID_PIPE_SIZE -69 +#define CL_INVALID_DEVICE_QUEUE -70 +#define CL_INVALID_SPEC_ID -71 +#define CL_MAX_SIZE_RESTRICTION_EXCEEDED -72 + +/* OpenCL Version */ +#define CL_VERSION_1_0 1 +#define CL_VERSION_1_1 1 +#define CL_VERSION_1_2 1 +#define CL_VERSION_2_0 1 +#define CL_VERSION_2_1 1 +#define CL_VERSION_2_2 1 + +/* cl_bool */ +#define CL_FALSE 0 +#define CL_TRUE 1 +#define CL_BLOCKING CL_TRUE +#define CL_NON_BLOCKING CL_FALSE + +/* cl_platform_info */ +#define CL_PLATFORM_PROFILE 0x0900 +#define CL_PLATFORM_VERSION 0x0901 +#define CL_PLATFORM_NAME 0x0902 +#define CL_PLATFORM_VENDOR 0x0903 +#define CL_PLATFORM_EXTENSIONS 0x0904 +#define CL_PLATFORM_HOST_TIMER_RESOLUTION 0x0905 + +/* cl_device_type - bitfield */ +#define CL_DEVICE_TYPE_DEFAULT (1 << 0) +#define CL_DEVICE_TYPE_CPU (1 << 1) +#define CL_DEVICE_TYPE_GPU (1 << 2) +#define CL_DEVICE_TYPE_ACCELERATOR (1 << 3) +#define CL_DEVICE_TYPE_CUSTOM (1 << 4) +#define CL_DEVICE_TYPE_ALL 0xFFFFFFFF + +/* cl_device_info */ +#define CL_DEVICE_TYPE 0x1000 +#define CL_DEVICE_VENDOR_ID 0x1001 +#define CL_DEVICE_MAX_COMPUTE_UNITS 0x1002 +#define CL_DEVICE_MAX_WORK_ITEM_DIMENSIONS 0x1003 +#define CL_DEVICE_MAX_WORK_GROUP_SIZE 0x1004 +#define CL_DEVICE_MAX_WORK_ITEM_SIZES 0x1005 +#define CL_DEVICE_PREFERRED_VECTOR_WIDTH_CHAR 0x1006 +#define CL_DEVICE_PREFERRED_VECTOR_WIDTH_SHORT 0x1007 +#define CL_DEVICE_PREFERRED_VECTOR_WIDTH_INT 0x1008 +#define CL_DEVICE_PREFERRED_VECTOR_WIDTH_LONG 
0x1009 +#define CL_DEVICE_PREFERRED_VECTOR_WIDTH_FLOAT 0x100A +#define CL_DEVICE_PREFERRED_VECTOR_WIDTH_DOUBLE 0x100B +#define CL_DEVICE_MAX_CLOCK_FREQUENCY 0x100C +#define CL_DEVICE_ADDRESS_BITS 0x100D +#define CL_DEVICE_MAX_READ_IMAGE_ARGS 0x100E +#define CL_DEVICE_MAX_WRITE_IMAGE_ARGS 0x100F +#define CL_DEVICE_MAX_MEM_ALLOC_SIZE 0x1010 +#define CL_DEVICE_IMAGE2D_MAX_WIDTH 0x1011 +#define CL_DEVICE_IMAGE2D_MAX_HEIGHT 0x1012 +#define CL_DEVICE_IMAGE3D_MAX_WIDTH 0x1013 +#define CL_DEVICE_IMAGE3D_MAX_HEIGHT 0x1014 +#define CL_DEVICE_IMAGE3D_MAX_DEPTH 0x1015 +#define CL_DEVICE_IMAGE_SUPPORT 0x1016 +#define CL_DEVICE_MAX_PARAMETER_SIZE 0x1017 +#define CL_DEVICE_MAX_SAMPLERS 0x1018 +#define CL_DEVICE_MEM_BASE_ADDR_ALIGN 0x1019 +#define CL_DEVICE_MIN_DATA_TYPE_ALIGN_SIZE 0x101A +#define CL_DEVICE_SINGLE_FP_CONFIG 0x101B +#define CL_DEVICE_GLOBAL_MEM_CACHE_TYPE 0x101C +#define CL_DEVICE_GLOBAL_MEM_CACHELINE_SIZE 0x101D +#define CL_DEVICE_GLOBAL_MEM_CACHE_SIZE 0x101E +#define CL_DEVICE_GLOBAL_MEM_SIZE 0x101F +#define CL_DEVICE_MAX_CONSTANT_BUFFER_SIZE 0x1020 +#define CL_DEVICE_MAX_CONSTANT_ARGS 0x1021 +#define CL_DEVICE_LOCAL_MEM_TYPE 0x1022 +#define CL_DEVICE_LOCAL_MEM_SIZE 0x1023 +#define CL_DEVICE_ERROR_CORRECTION_SUPPORT 0x1024 +#define CL_DEVICE_PROFILING_TIMER_RESOLUTION 0x1025 +#define CL_DEVICE_ENDIAN_LITTLE 0x1026 +#define CL_DEVICE_AVAILABLE 0x1027 +#define CL_DEVICE_COMPILER_AVAILABLE 0x1028 +#define CL_DEVICE_EXECUTION_CAPABILITIES 0x1029 +#define CL_DEVICE_QUEUE_PROPERTIES 0x102A /* deprecated */ +#define CL_DEVICE_QUEUE_ON_HOST_PROPERTIES 0x102A +#define CL_DEVICE_NAME 0x102B +#define CL_DEVICE_VENDOR 0x102C +#define CL_DRIVER_VERSION 0x102D +#define CL_DEVICE_PROFILE 0x102E +#define CL_DEVICE_VERSION 0x102F +#define CL_DEVICE_EXTENSIONS 0x1030 +#define CL_DEVICE_PLATFORM 0x1031 +#define CL_DEVICE_DOUBLE_FP_CONFIG 0x1032 +#define CL_DEVICE_HALF_FP_CONFIG 0x1033 +#define CL_DEVICE_PREFERRED_VECTOR_WIDTH_HALF 0x1034 +#define CL_DEVICE_HOST_UNIFIED_MEMORY 0x1035 /* deprecated */ +#define CL_DEVICE_NATIVE_VECTOR_WIDTH_CHAR 0x1036 +#define CL_DEVICE_NATIVE_VECTOR_WIDTH_SHORT 0x1037 +#define CL_DEVICE_NATIVE_VECTOR_WIDTH_INT 0x1038 +#define CL_DEVICE_NATIVE_VECTOR_WIDTH_LONG 0x1039 +#define CL_DEVICE_NATIVE_VECTOR_WIDTH_FLOAT 0x103A +#define CL_DEVICE_NATIVE_VECTOR_WIDTH_DOUBLE 0x103B +#define CL_DEVICE_NATIVE_VECTOR_WIDTH_HALF 0x103C +#define CL_DEVICE_OPENCL_C_VERSION 0x103D +#define CL_DEVICE_LINKER_AVAILABLE 0x103E +#define CL_DEVICE_BUILT_IN_KERNELS 0x103F +#define CL_DEVICE_IMAGE_MAX_BUFFER_SIZE 0x1040 +#define CL_DEVICE_IMAGE_MAX_ARRAY_SIZE 0x1041 +#define CL_DEVICE_PARENT_DEVICE 0x1042 +#define CL_DEVICE_PARTITION_MAX_SUB_DEVICES 0x1043 +#define CL_DEVICE_PARTITION_PROPERTIES 0x1044 +#define CL_DEVICE_PARTITION_AFFINITY_DOMAIN 0x1045 +#define CL_DEVICE_PARTITION_TYPE 0x1046 +#define CL_DEVICE_REFERENCE_COUNT 0x1047 +#define CL_DEVICE_PREFERRED_INTEROP_USER_SYNC 0x1048 +#define CL_DEVICE_PRINTF_BUFFER_SIZE 0x1049 +#define CL_DEVICE_IMAGE_PITCH_ALIGNMENT 0x104A +#define CL_DEVICE_IMAGE_BASE_ADDRESS_ALIGNMENT 0x104B +#define CL_DEVICE_MAX_READ_WRITE_IMAGE_ARGS 0x104C +#define CL_DEVICE_MAX_GLOBAL_VARIABLE_SIZE 0x104D +#define CL_DEVICE_QUEUE_ON_DEVICE_PROPERTIES 0x104E +#define CL_DEVICE_QUEUE_ON_DEVICE_PREFERRED_SIZE 0x104F +#define CL_DEVICE_QUEUE_ON_DEVICE_MAX_SIZE 0x1050 +#define CL_DEVICE_MAX_ON_DEVICE_QUEUES 0x1051 +#define CL_DEVICE_MAX_ON_DEVICE_EVENTS 0x1052 +#define CL_DEVICE_SVM_CAPABILITIES 0x1053 +#define CL_DEVICE_GLOBAL_VARIABLE_PREFERRED_TOTAL_SIZE 0x1054 +#define 
CL_DEVICE_MAX_PIPE_ARGS 0x1055 +#define CL_DEVICE_PIPE_MAX_ACTIVE_RESERVATIONS 0x1056 +#define CL_DEVICE_PIPE_MAX_PACKET_SIZE 0x1057 +#define CL_DEVICE_PREFERRED_PLATFORM_ATOMIC_ALIGNMENT 0x1058 +#define CL_DEVICE_PREFERRED_GLOBAL_ATOMIC_ALIGNMENT 0x1059 +#define CL_DEVICE_PREFERRED_LOCAL_ATOMIC_ALIGNMENT 0x105A +#define CL_DEVICE_IL_VERSION 0x105B +#define CL_DEVICE_MAX_NUM_SUB_GROUPS 0x105C +#define CL_DEVICE_SUB_GROUP_INDEPENDENT_FORWARD_PROGRESS 0x105D + +/* cl_device_fp_config - bitfield */ +#define CL_FP_DENORM (1 << 0) +#define CL_FP_INF_NAN (1 << 1) +#define CL_FP_ROUND_TO_NEAREST (1 << 2) +#define CL_FP_ROUND_TO_ZERO (1 << 3) +#define CL_FP_ROUND_TO_INF (1 << 4) +#define CL_FP_FMA (1 << 5) +#define CL_FP_SOFT_FLOAT (1 << 6) +#define CL_FP_CORRECTLY_ROUNDED_DIVIDE_SQRT (1 << 7) + +/* cl_device_mem_cache_type */ +#define CL_NONE 0x0 +#define CL_READ_ONLY_CACHE 0x1 +#define CL_READ_WRITE_CACHE 0x2 + +/* cl_device_local_mem_type */ +#define CL_LOCAL 0x1 +#define CL_GLOBAL 0x2 + +/* cl_device_exec_capabilities - bitfield */ +#define CL_EXEC_KERNEL (1 << 0) +#define CL_EXEC_NATIVE_KERNEL (1 << 1) + +/* cl_command_queue_properties - bitfield */ +#define CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE (1 << 0) +#define CL_QUEUE_PROFILING_ENABLE (1 << 1) +#define CL_QUEUE_ON_DEVICE (1 << 2) +#define CL_QUEUE_ON_DEVICE_DEFAULT (1 << 3) + +/* cl_context_info */ +#define CL_CONTEXT_REFERENCE_COUNT 0x1080 +#define CL_CONTEXT_DEVICES 0x1081 +#define CL_CONTEXT_PROPERTIES 0x1082 +#define CL_CONTEXT_NUM_DEVICES 0x1083 + +/* cl_context_properties */ +#define CL_CONTEXT_PLATFORM 0x1084 +#define CL_CONTEXT_INTEROP_USER_SYNC 0x1085 + +/* cl_device_partition_property */ +#define CL_DEVICE_PARTITION_EQUALLY 0x1086 +#define CL_DEVICE_PARTITION_BY_COUNTS 0x1087 +#define CL_DEVICE_PARTITION_BY_COUNTS_LIST_END 0x0 +#define CL_DEVICE_PARTITION_BY_AFFINITY_DOMAIN 0x1088 + +/* cl_device_affinity_domain */ +#define CL_DEVICE_AFFINITY_DOMAIN_NUMA (1 << 0) +#define CL_DEVICE_AFFINITY_DOMAIN_L4_CACHE (1 << 1) +#define CL_DEVICE_AFFINITY_DOMAIN_L3_CACHE (1 << 2) +#define CL_DEVICE_AFFINITY_DOMAIN_L2_CACHE (1 << 3) +#define CL_DEVICE_AFFINITY_DOMAIN_L1_CACHE (1 << 4) +#define CL_DEVICE_AFFINITY_DOMAIN_NEXT_PARTITIONABLE (1 << 5) + +/* cl_device_svm_capabilities */ +#define CL_DEVICE_SVM_COARSE_GRAIN_BUFFER (1 << 0) +#define CL_DEVICE_SVM_FINE_GRAIN_BUFFER (1 << 1) +#define CL_DEVICE_SVM_FINE_GRAIN_SYSTEM (1 << 2) +#define CL_DEVICE_SVM_ATOMICS (1 << 3) + +/* cl_command_queue_info */ +#define CL_QUEUE_CONTEXT 0x1090 +#define CL_QUEUE_DEVICE 0x1091 +#define CL_QUEUE_REFERENCE_COUNT 0x1092 +#define CL_QUEUE_PROPERTIES 0x1093 +#define CL_QUEUE_SIZE 0x1094 +#define CL_QUEUE_DEVICE_DEFAULT 0x1095 + +/* cl_mem_flags and cl_svm_mem_flags - bitfield */ +#define CL_MEM_READ_WRITE (1 << 0) +#define CL_MEM_WRITE_ONLY (1 << 1) +#define CL_MEM_READ_ONLY (1 << 2) +#define CL_MEM_USE_HOST_PTR (1 << 3) +#define CL_MEM_ALLOC_HOST_PTR (1 << 4) +#define CL_MEM_COPY_HOST_PTR (1 << 5) +/* reserved (1 << 6) */ +#define CL_MEM_HOST_WRITE_ONLY (1 << 7) +#define CL_MEM_HOST_READ_ONLY (1 << 8) +#define CL_MEM_HOST_NO_ACCESS (1 << 9) +#define CL_MEM_SVM_FINE_GRAIN_BUFFER (1 << 10) /* used by cl_svm_mem_flags only */ +#define CL_MEM_SVM_ATOMICS (1 << 11) /* used by cl_svm_mem_flags only */ +#define CL_MEM_KERNEL_READ_AND_WRITE (1 << 12) + +/* cl_mem_migration_flags - bitfield */ +#define CL_MIGRATE_MEM_OBJECT_HOST (1 << 0) +#define CL_MIGRATE_MEM_OBJECT_CONTENT_UNDEFINED (1 << 1) + +/* cl_channel_order */ +#define CL_R 0x10B0 +#define CL_A 0x10B1 
+#define CL_RG 0x10B2 +#define CL_RA 0x10B3 +#define CL_RGB 0x10B4 +#define CL_RGBA 0x10B5 +#define CL_BGRA 0x10B6 +#define CL_ARGB 0x10B7 +#define CL_INTENSITY 0x10B8 +#define CL_LUMINANCE 0x10B9 +#define CL_Rx 0x10BA +#define CL_RGx 0x10BB +#define CL_RGBx 0x10BC +#define CL_DEPTH 0x10BD +#define CL_DEPTH_STENCIL 0x10BE +#define CL_sRGB 0x10BF +#define CL_sRGBx 0x10C0 +#define CL_sRGBA 0x10C1 +#define CL_sBGRA 0x10C2 +#define CL_ABGR 0x10C3 + +/* cl_channel_type */ +#define CL_SNORM_INT8 0x10D0 +#define CL_SNORM_INT16 0x10D1 +#define CL_UNORM_INT8 0x10D2 +#define CL_UNORM_INT16 0x10D3 +#define CL_UNORM_SHORT_565 0x10D4 +#define CL_UNORM_SHORT_555 0x10D5 +#define CL_UNORM_INT_101010 0x10D6 +#define CL_SIGNED_INT8 0x10D7 +#define CL_SIGNED_INT16 0x10D8 +#define CL_SIGNED_INT32 0x10D9 +#define CL_UNSIGNED_INT8 0x10DA +#define CL_UNSIGNED_INT16 0x10DB +#define CL_UNSIGNED_INT32 0x10DC +#define CL_HALF_FLOAT 0x10DD +#define CL_FLOAT 0x10DE +#define CL_UNORM_INT24 0x10DF +#define CL_UNORM_INT_101010_2 0x10E0 + +/* cl_mem_object_type */ +#define CL_MEM_OBJECT_BUFFER 0x10F0 +#define CL_MEM_OBJECT_IMAGE2D 0x10F1 +#define CL_MEM_OBJECT_IMAGE3D 0x10F2 +#define CL_MEM_OBJECT_IMAGE2D_ARRAY 0x10F3 +#define CL_MEM_OBJECT_IMAGE1D 0x10F4 +#define CL_MEM_OBJECT_IMAGE1D_ARRAY 0x10F5 +#define CL_MEM_OBJECT_IMAGE1D_BUFFER 0x10F6 +#define CL_MEM_OBJECT_PIPE 0x10F7 + +/* cl_mem_info */ +#define CL_MEM_TYPE 0x1100 +#define CL_MEM_FLAGS 0x1101 +#define CL_MEM_SIZE 0x1102 +#define CL_MEM_HOST_PTR 0x1103 +#define CL_MEM_MAP_COUNT 0x1104 +#define CL_MEM_REFERENCE_COUNT 0x1105 +#define CL_MEM_CONTEXT 0x1106 +#define CL_MEM_ASSOCIATED_MEMOBJECT 0x1107 +#define CL_MEM_OFFSET 0x1108 +#define CL_MEM_USES_SVM_POINTER 0x1109 + +/* cl_image_info */ +#define CL_IMAGE_FORMAT 0x1110 +#define CL_IMAGE_ELEMENT_SIZE 0x1111 +#define CL_IMAGE_ROW_PITCH 0x1112 +#define CL_IMAGE_SLICE_PITCH 0x1113 +#define CL_IMAGE_WIDTH 0x1114 +#define CL_IMAGE_HEIGHT 0x1115 +#define CL_IMAGE_DEPTH 0x1116 +#define CL_IMAGE_ARRAY_SIZE 0x1117 +#define CL_IMAGE_BUFFER 0x1118 +#define CL_IMAGE_NUM_MIP_LEVELS 0x1119 +#define CL_IMAGE_NUM_SAMPLES 0x111A + +/* cl_pipe_info */ +#define CL_PIPE_PACKET_SIZE 0x1120 +#define CL_PIPE_MAX_PACKETS 0x1121 + +/* cl_addressing_mode */ +#define CL_ADDRESS_NONE 0x1130 +#define CL_ADDRESS_CLAMP_TO_EDGE 0x1131 +#define CL_ADDRESS_CLAMP 0x1132 +#define CL_ADDRESS_REPEAT 0x1133 +#define CL_ADDRESS_MIRRORED_REPEAT 0x1134 + +/* cl_filter_mode */ +#define CL_FILTER_NEAREST 0x1140 +#define CL_FILTER_LINEAR 0x1141 + +/* cl_sampler_info */ +#define CL_SAMPLER_REFERENCE_COUNT 0x1150 +#define CL_SAMPLER_CONTEXT 0x1151 +#define CL_SAMPLER_NORMALIZED_COORDS 0x1152 +#define CL_SAMPLER_ADDRESSING_MODE 0x1153 +#define CL_SAMPLER_FILTER_MODE 0x1154 +#define CL_SAMPLER_MIP_FILTER_MODE 0x1155 +#define CL_SAMPLER_LOD_MIN 0x1156 +#define CL_SAMPLER_LOD_MAX 0x1157 + +/* cl_map_flags - bitfield */ +#define CL_MAP_READ (1 << 0) +#define CL_MAP_WRITE (1 << 1) +#define CL_MAP_WRITE_INVALIDATE_REGION (1 << 2) + +/* cl_program_info */ +#define CL_PROGRAM_REFERENCE_COUNT 0x1160 +#define CL_PROGRAM_CONTEXT 0x1161 +#define CL_PROGRAM_NUM_DEVICES 0x1162 +#define CL_PROGRAM_DEVICES 0x1163 +#define CL_PROGRAM_SOURCE 0x1164 +#define CL_PROGRAM_BINARY_SIZES 0x1165 +#define CL_PROGRAM_BINARIES 0x1166 +#define CL_PROGRAM_NUM_KERNELS 0x1167 +#define CL_PROGRAM_KERNEL_NAMES 0x1168 +#define CL_PROGRAM_IL 0x1169 +#define CL_PROGRAM_SCOPE_GLOBAL_CTORS_PRESENT 0x116A +#define CL_PROGRAM_SCOPE_GLOBAL_DTORS_PRESENT 0x116B + +/* cl_program_build_info */ +#define 
CL_PROGRAM_BUILD_STATUS 0x1181 +#define CL_PROGRAM_BUILD_OPTIONS 0x1182 +#define CL_PROGRAM_BUILD_LOG 0x1183 +#define CL_PROGRAM_BINARY_TYPE 0x1184 +#define CL_PROGRAM_BUILD_GLOBAL_VARIABLE_TOTAL_SIZE 0x1185 + +/* cl_program_binary_type */ +#define CL_PROGRAM_BINARY_TYPE_NONE 0x0 +#define CL_PROGRAM_BINARY_TYPE_COMPILED_OBJECT 0x1 +#define CL_PROGRAM_BINARY_TYPE_LIBRARY 0x2 +#define CL_PROGRAM_BINARY_TYPE_EXECUTABLE 0x4 + +/* cl_build_status */ +#define CL_BUILD_SUCCESS 0 +#define CL_BUILD_NONE -1 +#define CL_BUILD_ERROR -2 +#define CL_BUILD_IN_PROGRESS -3 + +/* cl_kernel_info */ +#define CL_KERNEL_FUNCTION_NAME 0x1190 +#define CL_KERNEL_NUM_ARGS 0x1191 +#define CL_KERNEL_REFERENCE_COUNT 0x1192 +#define CL_KERNEL_CONTEXT 0x1193 +#define CL_KERNEL_PROGRAM 0x1194 +#define CL_KERNEL_ATTRIBUTES 0x1195 +#define CL_KERNEL_MAX_NUM_SUB_GROUPS 0x11B9 +#define CL_KERNEL_COMPILE_NUM_SUB_GROUPS 0x11BA + +/* cl_kernel_arg_info */ +#define CL_KERNEL_ARG_ADDRESS_QUALIFIER 0x1196 +#define CL_KERNEL_ARG_ACCESS_QUALIFIER 0x1197 +#define CL_KERNEL_ARG_TYPE_NAME 0x1198 +#define CL_KERNEL_ARG_TYPE_QUALIFIER 0x1199 +#define CL_KERNEL_ARG_NAME 0x119A + +/* cl_kernel_arg_address_qualifier */ +#define CL_KERNEL_ARG_ADDRESS_GLOBAL 0x119B +#define CL_KERNEL_ARG_ADDRESS_LOCAL 0x119C +#define CL_KERNEL_ARG_ADDRESS_CONSTANT 0x119D +#define CL_KERNEL_ARG_ADDRESS_PRIVATE 0x119E + +/* cl_kernel_arg_access_qualifier */ +#define CL_KERNEL_ARG_ACCESS_READ_ONLY 0x11A0 +#define CL_KERNEL_ARG_ACCESS_WRITE_ONLY 0x11A1 +#define CL_KERNEL_ARG_ACCESS_READ_WRITE 0x11A2 +#define CL_KERNEL_ARG_ACCESS_NONE 0x11A3 + +/* cl_kernel_arg_type_qualifier */ +#define CL_KERNEL_ARG_TYPE_NONE 0 +#define CL_KERNEL_ARG_TYPE_CONST (1 << 0) +#define CL_KERNEL_ARG_TYPE_RESTRICT (1 << 1) +#define CL_KERNEL_ARG_TYPE_VOLATILE (1 << 2) +#define CL_KERNEL_ARG_TYPE_PIPE (1 << 3) + +/* cl_kernel_work_group_info */ +#define CL_KERNEL_WORK_GROUP_SIZE 0x11B0 +#define CL_KERNEL_COMPILE_WORK_GROUP_SIZE 0x11B1 +#define CL_KERNEL_LOCAL_MEM_SIZE 0x11B2 +#define CL_KERNEL_PREFERRED_WORK_GROUP_SIZE_MULTIPLE 0x11B3 +#define CL_KERNEL_PRIVATE_MEM_SIZE 0x11B4 +#define CL_KERNEL_GLOBAL_WORK_SIZE 0x11B5 + +/* cl_kernel_sub_group_info */ +#define CL_KERNEL_MAX_SUB_GROUP_SIZE_FOR_NDRANGE 0x2033 +#define CL_KERNEL_SUB_GROUP_COUNT_FOR_NDRANGE 0x2034 +#define CL_KERNEL_LOCAL_SIZE_FOR_SUB_GROUP_COUNT 0x11B8 + +/* cl_kernel_exec_info */ +#define CL_KERNEL_EXEC_INFO_SVM_PTRS 0x11B6 +#define CL_KERNEL_EXEC_INFO_SVM_FINE_GRAIN_SYSTEM 0x11B7 + +/* cl_event_info */ +#define CL_EVENT_COMMAND_QUEUE 0x11D0 +#define CL_EVENT_COMMAND_TYPE 0x11D1 +#define CL_EVENT_REFERENCE_COUNT 0x11D2 +#define CL_EVENT_COMMAND_EXECUTION_STATUS 0x11D3 +#define CL_EVENT_CONTEXT 0x11D4 + +/* cl_command_type */ +#define CL_COMMAND_NDRANGE_KERNEL 0x11F0 +#define CL_COMMAND_TASK 0x11F1 +#define CL_COMMAND_NATIVE_KERNEL 0x11F2 +#define CL_COMMAND_READ_BUFFER 0x11F3 +#define CL_COMMAND_WRITE_BUFFER 0x11F4 +#define CL_COMMAND_COPY_BUFFER 0x11F5 +#define CL_COMMAND_READ_IMAGE 0x11F6 +#define CL_COMMAND_WRITE_IMAGE 0x11F7 +#define CL_COMMAND_COPY_IMAGE 0x11F8 +#define CL_COMMAND_COPY_IMAGE_TO_BUFFER 0x11F9 +#define CL_COMMAND_COPY_BUFFER_TO_IMAGE 0x11FA +#define CL_COMMAND_MAP_BUFFER 0x11FB +#define CL_COMMAND_MAP_IMAGE 0x11FC +#define CL_COMMAND_UNMAP_MEM_OBJECT 0x11FD +#define CL_COMMAND_MARKER 0x11FE +#define CL_COMMAND_ACQUIRE_GL_OBJECTS 0x11FF +#define CL_COMMAND_RELEASE_GL_OBJECTS 0x1200 +#define CL_COMMAND_READ_BUFFER_RECT 0x1201 +#define CL_COMMAND_WRITE_BUFFER_RECT 0x1202 +#define 
CL_COMMAND_COPY_BUFFER_RECT 0x1203 +#define CL_COMMAND_USER 0x1204 +#define CL_COMMAND_BARRIER 0x1205 +#define CL_COMMAND_MIGRATE_MEM_OBJECTS 0x1206 +#define CL_COMMAND_FILL_BUFFER 0x1207 +#define CL_COMMAND_FILL_IMAGE 0x1208 +#define CL_COMMAND_SVM_FREE 0x1209 +#define CL_COMMAND_SVM_MEMCPY 0x120A +#define CL_COMMAND_SVM_MEMFILL 0x120B +#define CL_COMMAND_SVM_MAP 0x120C +#define CL_COMMAND_SVM_UNMAP 0x120D + +/* command execution status */ +#define CL_COMPLETE 0x0 +#define CL_RUNNING 0x1 +#define CL_SUBMITTED 0x2 +#define CL_QUEUED 0x3 + +/* cl_buffer_create_type */ +#define CL_BUFFER_CREATE_TYPE_REGION 0x1220 + +/* cl_profiling_info */ +#define CL_PROFILING_COMMAND_QUEUED 0x1280 +#define CL_PROFILING_COMMAND_SUBMIT 0x1281 +#define CL_PROFILING_COMMAND_START 0x1282 +#define CL_PROFILING_COMMAND_END 0x1283 +#define CL_PROFILING_COMMAND_COMPLETE 0x1284 + +#ifndef CL_NO_PROTOTYPES +/********************************************************************************************************/ + +/* Platform API */ +extern CL_API_ENTRY cl_int CL_API_CALL +clGetPlatformIDs(cl_uint /* num_entries */, + cl_platform_id * /* platforms */, + cl_uint * /* num_platforms */) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetPlatformInfo(cl_platform_id /* platform */, + cl_platform_info /* param_name */, + size_t /* param_value_size */, + void * /* param_value */, + size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0; + +/* Device APIs */ +extern CL_API_ENTRY cl_int CL_API_CALL +clGetDeviceIDs(cl_platform_id /* platform */, + cl_device_type /* device_type */, + cl_uint /* num_entries */, + cl_device_id * /* devices */, + cl_uint * /* num_devices */) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetDeviceInfo(cl_device_id /* device */, + cl_device_info /* param_name */, + size_t /* param_value_size */, + void * /* param_value */, + size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clCreateSubDevices(cl_device_id /* in_device */, + const cl_device_partition_property * /* properties */, + cl_uint /* num_devices */, + cl_device_id * /* out_devices */, + cl_uint * /* num_devices_ret */) CL_API_SUFFIX__VERSION_1_2; + +extern CL_API_ENTRY cl_int CL_API_CALL +clRetainDevice(cl_device_id /* device */) CL_API_SUFFIX__VERSION_1_2; + +extern CL_API_ENTRY cl_int CL_API_CALL +clReleaseDevice(cl_device_id /* device */) CL_API_SUFFIX__VERSION_1_2; + +extern CL_API_ENTRY cl_int CL_API_CALL +clSetDefaultDeviceCommandQueue(cl_context /* context */, + cl_device_id /* device */, + cl_command_queue /* command_queue */) CL_API_SUFFIX__VERSION_2_1; + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetDeviceAndHostTimer(cl_device_id /* device */, + cl_ulong* /* device_timestamp */, + cl_ulong* /* host_timestamp */) CL_API_SUFFIX__VERSION_2_1; + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetHostTimer(cl_device_id /* device */, + cl_ulong * /* host_timestamp */) CL_API_SUFFIX__VERSION_2_1; + + +/* Context APIs */ +extern CL_API_ENTRY cl_context CL_API_CALL +clCreateContext(const cl_context_properties * /* properties */, + cl_uint /* num_devices */, + const cl_device_id * /* devices */, + void (CL_CALLBACK * /* pfn_notify */)(const char *, const void *, size_t, void *), + void * /* user_data */, + cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_context CL_API_CALL +clCreateContextFromType(const cl_context_properties * /* properties */, + cl_device_type /* device_type */, + void 
(CL_CALLBACK * /* pfn_notify*/ )(const char *, const void *, size_t, void *), + void * /* user_data */, + cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clRetainContext(cl_context /* context */) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clReleaseContext(cl_context /* context */) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetContextInfo(cl_context /* context */, + cl_context_info /* param_name */, + size_t /* param_value_size */, + void * /* param_value */, + size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0; + +/* Command Queue APIs */ +extern CL_API_ENTRY cl_command_queue CL_API_CALL +clCreateCommandQueueWithProperties(cl_context /* context */, + cl_device_id /* device */, + const cl_queue_properties * /* properties */, + cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_2_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clRetainCommandQueue(cl_command_queue /* command_queue */) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clReleaseCommandQueue(cl_command_queue /* command_queue */) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetCommandQueueInfo(cl_command_queue /* command_queue */, + cl_command_queue_info /* param_name */, + size_t /* param_value_size */, + void * /* param_value */, + size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0; + +/* Memory Object APIs */ +extern CL_API_ENTRY cl_mem CL_API_CALL +clCreateBuffer(cl_context /* context */, + cl_mem_flags /* flags */, + size_t /* size */, + void * /* host_ptr */, + cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_mem CL_API_CALL +clCreateSubBuffer(cl_mem /* buffer */, + cl_mem_flags /* flags */, + cl_buffer_create_type /* buffer_create_type */, + const void * /* buffer_create_info */, + cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_1; + +extern CL_API_ENTRY cl_mem CL_API_CALL +clCreateImage(cl_context /* context */, + cl_mem_flags /* flags */, + const cl_image_format * /* image_format */, + const cl_image_desc * /* image_desc */, + void * /* host_ptr */, + cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_2; + +extern CL_API_ENTRY cl_mem CL_API_CALL +clCreatePipe(cl_context /* context */, + cl_mem_flags /* flags */, + cl_uint /* pipe_packet_size */, + cl_uint /* pipe_max_packets */, + const cl_pipe_properties * /* properties */, + cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_2_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clRetainMemObject(cl_mem /* memobj */) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clReleaseMemObject(cl_mem /* memobj */) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetSupportedImageFormats(cl_context /* context */, + cl_mem_flags /* flags */, + cl_mem_object_type /* image_type */, + cl_uint /* num_entries */, + cl_image_format * /* image_formats */, + cl_uint * /* num_image_formats */) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetMemObjectInfo(cl_mem /* memobj */, + cl_mem_info /* param_name */, + size_t /* param_value_size */, + void * /* param_value */, + size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetImageInfo(cl_mem /* image */, + cl_image_info /* param_name */, + size_t /* param_value_size */, + void * /* param_value */, + size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int 
CL_API_CALL +clGetPipeInfo(cl_mem /* pipe */, + cl_pipe_info /* param_name */, + size_t /* param_value_size */, + void * /* param_value */, + size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_2_0; + + +extern CL_API_ENTRY cl_int CL_API_CALL +clSetMemObjectDestructorCallback(cl_mem /* memobj */, + void (CL_CALLBACK * /*pfn_notify*/)( cl_mem /* memobj */, void* /*user_data*/), + void * /*user_data */ ) CL_API_SUFFIX__VERSION_1_1; + +/* SVM Allocation APIs */ +extern CL_API_ENTRY void * CL_API_CALL +clSVMAlloc(cl_context /* context */, + cl_svm_mem_flags /* flags */, + size_t /* size */, + cl_uint /* alignment */) CL_API_SUFFIX__VERSION_2_0; + +extern CL_API_ENTRY void CL_API_CALL +clSVMFree(cl_context /* context */, + void * /* svm_pointer */) CL_API_SUFFIX__VERSION_2_0; + +/* Sampler APIs */ +extern CL_API_ENTRY cl_sampler CL_API_CALL +clCreateSamplerWithProperties(cl_context /* context */, + const cl_sampler_properties * /* normalized_coords */, + cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_2_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clRetainSampler(cl_sampler /* sampler */) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clReleaseSampler(cl_sampler /* sampler */) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetSamplerInfo(cl_sampler /* sampler */, + cl_sampler_info /* param_name */, + size_t /* param_value_size */, + void * /* param_value */, + size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0; + +/* Program Object APIs */ +extern CL_API_ENTRY cl_program CL_API_CALL +clCreateProgramWithSource(cl_context /* context */, + cl_uint /* count */, + const char ** /* strings */, + const size_t * /* lengths */, + cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_program CL_API_CALL +clCreateProgramWithBinary(cl_context /* context */, + cl_uint /* num_devices */, + const cl_device_id * /* device_list */, + const size_t * /* lengths */, + const unsigned char ** /* binaries */, + cl_int * /* binary_status */, + cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_program CL_API_CALL +clCreateProgramWithBuiltInKernels(cl_context /* context */, + cl_uint /* num_devices */, + const cl_device_id * /* device_list */, + const char * /* kernel_names */, + cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_2; + +extern CL_API_ENTRY cl_program CL_API_CALL +clCreateProgramWithIL(cl_context /* context */, + const void* /* il */, + size_t /* length */, + cl_int* /* errcode_ret */) CL_API_SUFFIX__VERSION_2_1; + + +extern CL_API_ENTRY cl_int CL_API_CALL +clRetainProgram(cl_program /* program */) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clReleaseProgram(cl_program /* program */) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clBuildProgram(cl_program /* program */, + cl_uint /* num_devices */, + const cl_device_id * /* device_list */, + const char * /* options */, + void (CL_CALLBACK * /* pfn_notify */)(cl_program /* program */, void * /* user_data */), + void * /* user_data */) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clCompileProgram(cl_program /* program */, + cl_uint /* num_devices */, + const cl_device_id * /* device_list */, + const char * /* options */, + cl_uint /* num_input_headers */, + const cl_program * /* input_headers */, + const char ** /* header_include_names */, + void (CL_CALLBACK * /* pfn_notify */)(cl_program /* program */, void * /* user_data */), + void * /* 
user_data */) CL_API_SUFFIX__VERSION_1_2; + +extern CL_API_ENTRY cl_program CL_API_CALL +clLinkProgram(cl_context /* context */, + cl_uint /* num_devices */, + const cl_device_id * /* device_list */, + const char * /* options */, + cl_uint /* num_input_programs */, + const cl_program * /* input_programs */, + void (CL_CALLBACK * /* pfn_notify */)(cl_program /* program */, void * /* user_data */), + void * /* user_data */, + cl_int * /* errcode_ret */ ) CL_API_SUFFIX__VERSION_1_2; + +extern CL_API_ENTRY cl_int CL_API_CALL +clSetProgramReleaseCallback(cl_program /* program */, + void (CL_CALLBACK * /* pfn_notify */)(cl_program /* program */, void * /* user_data */), + void * /* user_data */) CL_API_SUFFIX__VERSION_2_2; + +extern CL_API_ENTRY cl_int CL_API_CALL +clSetProgramSpecializationConstant(cl_program /* program */, + cl_uint /* spec_id */, + size_t /* spec_size */, + const void* /* spec_value */) CL_API_SUFFIX__VERSION_2_2; + + +extern CL_API_ENTRY cl_int CL_API_CALL +clUnloadPlatformCompiler(cl_platform_id /* platform */) CL_API_SUFFIX__VERSION_1_2; + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetProgramInfo(cl_program /* program */, + cl_program_info /* param_name */, + size_t /* param_value_size */, + void * /* param_value */, + size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetProgramBuildInfo(cl_program /* program */, + cl_device_id /* device */, + cl_program_build_info /* param_name */, + size_t /* param_value_size */, + void * /* param_value */, + size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0; + +/* Kernel Object APIs */ +extern CL_API_ENTRY cl_kernel CL_API_CALL +clCreateKernel(cl_program /* program */, + const char * /* kernel_name */, + cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clCreateKernelsInProgram(cl_program /* program */, + cl_uint /* num_kernels */, + cl_kernel * /* kernels */, + cl_uint * /* num_kernels_ret */) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_kernel CL_API_CALL +clCloneKernel(cl_kernel /* source_kernel */, + cl_int* /* errcode_ret */) CL_API_SUFFIX__VERSION_2_1; + +extern CL_API_ENTRY cl_int CL_API_CALL +clRetainKernel(cl_kernel /* kernel */) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clReleaseKernel(cl_kernel /* kernel */) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clSetKernelArg(cl_kernel /* kernel */, + cl_uint /* arg_index */, + size_t /* arg_size */, + const void * /* arg_value */) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clSetKernelArgSVMPointer(cl_kernel /* kernel */, + cl_uint /* arg_index */, + const void * /* arg_value */) CL_API_SUFFIX__VERSION_2_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clSetKernelExecInfo(cl_kernel /* kernel */, + cl_kernel_exec_info /* param_name */, + size_t /* param_value_size */, + const void * /* param_value */) CL_API_SUFFIX__VERSION_2_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetKernelInfo(cl_kernel /* kernel */, + cl_kernel_info /* param_name */, + size_t /* param_value_size */, + void * /* param_value */, + size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetKernelArgInfo(cl_kernel /* kernel */, + cl_uint /* arg_indx */, + cl_kernel_arg_info /* param_name */, + size_t /* param_value_size */, + void * /* param_value */, + size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_2; + +extern CL_API_ENTRY cl_int 
CL_API_CALL +clGetKernelWorkGroupInfo(cl_kernel /* kernel */, + cl_device_id /* device */, + cl_kernel_work_group_info /* param_name */, + size_t /* param_value_size */, + void * /* param_value */, + size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetKernelSubGroupInfo(cl_kernel /* kernel */, + cl_device_id /* device */, + cl_kernel_sub_group_info /* param_name */, + size_t /* input_value_size */, + const void* /*input_value */, + size_t /* param_value_size */, + void* /* param_value */, + size_t* /* param_value_size_ret */ ) CL_API_SUFFIX__VERSION_2_1; + + +/* Event Object APIs */ +extern CL_API_ENTRY cl_int CL_API_CALL +clWaitForEvents(cl_uint /* num_events */, + const cl_event * /* event_list */) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetEventInfo(cl_event /* event */, + cl_event_info /* param_name */, + size_t /* param_value_size */, + void * /* param_value */, + size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_event CL_API_CALL +clCreateUserEvent(cl_context /* context */, + cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_1; + +extern CL_API_ENTRY cl_int CL_API_CALL +clRetainEvent(cl_event /* event */) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clReleaseEvent(cl_event /* event */) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clSetUserEventStatus(cl_event /* event */, + cl_int /* execution_status */) CL_API_SUFFIX__VERSION_1_1; + +extern CL_API_ENTRY cl_int CL_API_CALL +clSetEventCallback( cl_event /* event */, + cl_int /* command_exec_callback_type */, + void (CL_CALLBACK * /* pfn_notify */)(cl_event, cl_int, void *), + void * /* user_data */) CL_API_SUFFIX__VERSION_1_1; + +/* Profiling APIs */ +extern CL_API_ENTRY cl_int CL_API_CALL +clGetEventProfilingInfo(cl_event /* event */, + cl_profiling_info /* param_name */, + size_t /* param_value_size */, + void * /* param_value */, + size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0; + +/* Flush and Finish APIs */ +extern CL_API_ENTRY cl_int CL_API_CALL +clFlush(cl_command_queue /* command_queue */) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clFinish(cl_command_queue /* command_queue */) CL_API_SUFFIX__VERSION_1_0; + +/* Enqueued Commands APIs */ +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueReadBuffer(cl_command_queue /* command_queue */, + cl_mem /* buffer */, + cl_bool /* blocking_read */, + size_t /* offset */, + size_t /* size */, + void * /* ptr */, + cl_uint /* num_events_in_wait_list */, + const cl_event * /* event_wait_list */, + cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueReadBufferRect(cl_command_queue /* command_queue */, + cl_mem /* buffer */, + cl_bool /* blocking_read */, + const size_t * /* buffer_offset */, + const size_t * /* host_offset */, + const size_t * /* region */, + size_t /* buffer_row_pitch */, + size_t /* buffer_slice_pitch */, + size_t /* host_row_pitch */, + size_t /* host_slice_pitch */, + void * /* ptr */, + cl_uint /* num_events_in_wait_list */, + const cl_event * /* event_wait_list */, + cl_event * /* event */) CL_API_SUFFIX__VERSION_1_1; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueWriteBuffer(cl_command_queue /* command_queue */, + cl_mem /* buffer */, + cl_bool /* blocking_write */, + size_t /* offset */, + size_t /* size */, + const void * /* ptr */, + cl_uint /* num_events_in_wait_list */, + const 
cl_event * /* event_wait_list */, + cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueWriteBufferRect(cl_command_queue /* command_queue */, + cl_mem /* buffer */, + cl_bool /* blocking_write */, + const size_t * /* buffer_offset */, + const size_t * /* host_offset */, + const size_t * /* region */, + size_t /* buffer_row_pitch */, + size_t /* buffer_slice_pitch */, + size_t /* host_row_pitch */, + size_t /* host_slice_pitch */, + const void * /* ptr */, + cl_uint /* num_events_in_wait_list */, + const cl_event * /* event_wait_list */, + cl_event * /* event */) CL_API_SUFFIX__VERSION_1_1; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueFillBuffer(cl_command_queue /* command_queue */, + cl_mem /* buffer */, + const void * /* pattern */, + size_t /* pattern_size */, + size_t /* offset */, + size_t /* size */, + cl_uint /* num_events_in_wait_list */, + const cl_event * /* event_wait_list */, + cl_event * /* event */) CL_API_SUFFIX__VERSION_1_2; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueCopyBuffer(cl_command_queue /* command_queue */, + cl_mem /* src_buffer */, + cl_mem /* dst_buffer */, + size_t /* src_offset */, + size_t /* dst_offset */, + size_t /* size */, + cl_uint /* num_events_in_wait_list */, + const cl_event * /* event_wait_list */, + cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueCopyBufferRect(cl_command_queue /* command_queue */, + cl_mem /* src_buffer */, + cl_mem /* dst_buffer */, + const size_t * /* src_origin */, + const size_t * /* dst_origin */, + const size_t * /* region */, + size_t /* src_row_pitch */, + size_t /* src_slice_pitch */, + size_t /* dst_row_pitch */, + size_t /* dst_slice_pitch */, + cl_uint /* num_events_in_wait_list */, + const cl_event * /* event_wait_list */, + cl_event * /* event */) CL_API_SUFFIX__VERSION_1_1; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueReadImage(cl_command_queue /* command_queue */, + cl_mem /* image */, + cl_bool /* blocking_read */, + const size_t * /* origin[3] */, + const size_t * /* region[3] */, + size_t /* row_pitch */, + size_t /* slice_pitch */, + void * /* ptr */, + cl_uint /* num_events_in_wait_list */, + const cl_event * /* event_wait_list */, + cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueWriteImage(cl_command_queue /* command_queue */, + cl_mem /* image */, + cl_bool /* blocking_write */, + const size_t * /* origin[3] */, + const size_t * /* region[3] */, + size_t /* input_row_pitch */, + size_t /* input_slice_pitch */, + const void * /* ptr */, + cl_uint /* num_events_in_wait_list */, + const cl_event * /* event_wait_list */, + cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueFillImage(cl_command_queue /* command_queue */, + cl_mem /* image */, + const void * /* fill_color */, + const size_t * /* origin[3] */, + const size_t * /* region[3] */, + cl_uint /* num_events_in_wait_list */, + const cl_event * /* event_wait_list */, + cl_event * /* event */) CL_API_SUFFIX__VERSION_1_2; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueCopyImage(cl_command_queue /* command_queue */, + cl_mem /* src_image */, + cl_mem /* dst_image */, + const size_t * /* src_origin[3] */, + const size_t * /* dst_origin[3] */, + const size_t * /* region[3] */, + cl_uint /* num_events_in_wait_list */, + const cl_event * /* event_wait_list */, + cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0; + +extern 
CL_API_ENTRY cl_int CL_API_CALL +clEnqueueCopyImageToBuffer(cl_command_queue /* command_queue */, + cl_mem /* src_image */, + cl_mem /* dst_buffer */, + const size_t * /* src_origin[3] */, + const size_t * /* region[3] */, + size_t /* dst_offset */, + cl_uint /* num_events_in_wait_list */, + const cl_event * /* event_wait_list */, + cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueCopyBufferToImage(cl_command_queue /* command_queue */, + cl_mem /* src_buffer */, + cl_mem /* dst_image */, + size_t /* src_offset */, + const size_t * /* dst_origin[3] */, + const size_t * /* region[3] */, + cl_uint /* num_events_in_wait_list */, + const cl_event * /* event_wait_list */, + cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY void * CL_API_CALL +clEnqueueMapBuffer(cl_command_queue /* command_queue */, + cl_mem /* buffer */, + cl_bool /* blocking_map */, + cl_map_flags /* map_flags */, + size_t /* offset */, + size_t /* size */, + cl_uint /* num_events_in_wait_list */, + const cl_event * /* event_wait_list */, + cl_event * /* event */, + cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY void * CL_API_CALL +clEnqueueMapImage(cl_command_queue /* command_queue */, + cl_mem /* image */, + cl_bool /* blocking_map */, + cl_map_flags /* map_flags */, + const size_t * /* origin[3] */, + const size_t * /* region[3] */, + size_t * /* image_row_pitch */, + size_t * /* image_slice_pitch */, + cl_uint /* num_events_in_wait_list */, + const cl_event * /* event_wait_list */, + cl_event * /* event */, + cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueUnmapMemObject(cl_command_queue /* command_queue */, + cl_mem /* memobj */, + void * /* mapped_ptr */, + cl_uint /* num_events_in_wait_list */, + const cl_event * /* event_wait_list */, + cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueMigrateMemObjects(cl_command_queue /* command_queue */, + cl_uint /* num_mem_objects */, + const cl_mem * /* mem_objects */, + cl_mem_migration_flags /* flags */, + cl_uint /* num_events_in_wait_list */, + const cl_event * /* event_wait_list */, + cl_event * /* event */) CL_API_SUFFIX__VERSION_1_2; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueNDRangeKernel(cl_command_queue /* command_queue */, + cl_kernel /* kernel */, + cl_uint /* work_dim */, + const size_t * /* global_work_offset */, + const size_t * /* global_work_size */, + const size_t * /* local_work_size */, + cl_uint /* num_events_in_wait_list */, + const cl_event * /* event_wait_list */, + cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueNativeKernel(cl_command_queue /* command_queue */, + void (CL_CALLBACK * /*user_func*/)(void *), + void * /* args */, + size_t /* cb_args */, + cl_uint /* num_mem_objects */, + const cl_mem * /* mem_list */, + const void ** /* args_mem_loc */, + cl_uint /* num_events_in_wait_list */, + const cl_event * /* event_wait_list */, + cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueMarkerWithWaitList(cl_command_queue /* command_queue */, + cl_uint /* num_events_in_wait_list */, + const cl_event * /* event_wait_list */, + cl_event * /* event */) CL_API_SUFFIX__VERSION_1_2; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueBarrierWithWaitList(cl_command_queue /* command_queue */, + cl_uint /* num_events_in_wait_list 
*/, + const cl_event * /* event_wait_list */, + cl_event * /* event */) CL_API_SUFFIX__VERSION_1_2; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueSVMFree(cl_command_queue /* command_queue */, + cl_uint /* num_svm_pointers */, + void *[] /* svm_pointers[] */, + void (CL_CALLBACK * /*pfn_free_func*/)(cl_command_queue /* queue */, + cl_uint /* num_svm_pointers */, + void *[] /* svm_pointers[] */, + void * /* user_data */), + void * /* user_data */, + cl_uint /* num_events_in_wait_list */, + const cl_event * /* event_wait_list */, + cl_event * /* event */) CL_API_SUFFIX__VERSION_2_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueSVMMemcpy(cl_command_queue /* command_queue */, + cl_bool /* blocking_copy */, + void * /* dst_ptr */, + const void * /* src_ptr */, + size_t /* size */, + cl_uint /* num_events_in_wait_list */, + const cl_event * /* event_wait_list */, + cl_event * /* event */) CL_API_SUFFIX__VERSION_2_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueSVMMemFill(cl_command_queue /* command_queue */, + void * /* svm_ptr */, + const void * /* pattern */, + size_t /* pattern_size */, + size_t /* size */, + cl_uint /* num_events_in_wait_list */, + const cl_event * /* event_wait_list */, + cl_event * /* event */) CL_API_SUFFIX__VERSION_2_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueSVMMap(cl_command_queue /* command_queue */, + cl_bool /* blocking_map */, + cl_map_flags /* flags */, + void * /* svm_ptr */, + size_t /* size */, + cl_uint /* num_events_in_wait_list */, + const cl_event * /* event_wait_list */, + cl_event * /* event */) CL_API_SUFFIX__VERSION_2_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueSVMUnmap(cl_command_queue /* command_queue */, + void * /* svm_ptr */, + cl_uint /* num_events_in_wait_list */, + const cl_event * /* event_wait_list */, + cl_event * /* event */) CL_API_SUFFIX__VERSION_2_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueSVMMigrateMem(cl_command_queue /* command_queue */, + cl_uint /* num_svm_pointers */, + const void ** /* svm_pointers */, + const size_t * /* sizes */, + cl_mem_migration_flags /* flags */, + cl_uint /* num_events_in_wait_list */, + const cl_event * /* event_wait_list */, + cl_event * /* event */) CL_API_SUFFIX__VERSION_2_1; + + +/* Extension function access + * + * Returns the extension function address for the given function name, + * or NULL if a valid function can not be found. The client must + * check to make sure the address is not NULL, before using or + * calling the returned function address. 
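+ *
+ * For example (an illustrative sketch only, not normative header text:
+ * clIcdGetPlatformIDsKHR from the cl_khr_icd extension is used merely as
+ * a sample entry point, and platform is an in-scope cl_platform_id):
+ *
+ *   typedef cl_int (CL_API_CALL *clIcdGetPlatformIDsKHR_fn)(
+ *       cl_uint num_entries, cl_platform_id *platforms, cl_uint *num_platforms);
+ *
+ *   cl_uint num_platforms = 0;
+ *   clIcdGetPlatformIDsKHR_fn fn = (clIcdGetPlatformIDsKHR_fn)
+ *       clGetExtensionFunctionAddressForPlatform(platform, "clIcdGetPlatformIDsKHR");
+ *   if (fn != NULL)
+ *       fn(0, NULL, &num_platforms);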
+ */ +extern CL_API_ENTRY void * CL_API_CALL +clGetExtensionFunctionAddressForPlatform(cl_platform_id /* platform */, + const char * /* func_name */) CL_API_SUFFIX__VERSION_1_2; + + +/* Deprecated OpenCL 1.1 APIs */ +extern CL_API_ENTRY CL_EXT_PREFIX__VERSION_1_1_DEPRECATED cl_mem CL_API_CALL +clCreateImage2D(cl_context /* context */, + cl_mem_flags /* flags */, + const cl_image_format * /* image_format */, + size_t /* image_width */, + size_t /* image_height */, + size_t /* image_row_pitch */, + void * /* host_ptr */, + cl_int * /* errcode_ret */) CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED; + +extern CL_API_ENTRY CL_EXT_PREFIX__VERSION_1_1_DEPRECATED cl_mem CL_API_CALL +clCreateImage3D(cl_context /* context */, + cl_mem_flags /* flags */, + const cl_image_format * /* image_format */, + size_t /* image_width */, + size_t /* image_height */, + size_t /* image_depth */, + size_t /* image_row_pitch */, + size_t /* image_slice_pitch */, + void * /* host_ptr */, + cl_int * /* errcode_ret */) CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED; + +extern CL_API_ENTRY CL_EXT_PREFIX__VERSION_1_1_DEPRECATED cl_int CL_API_CALL +clEnqueueMarker(cl_command_queue /* command_queue */, + cl_event * /* event */) CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED; + +extern CL_API_ENTRY CL_EXT_PREFIX__VERSION_1_1_DEPRECATED cl_int CL_API_CALL +clEnqueueWaitForEvents(cl_command_queue /* command_queue */, + cl_uint /* num_events */, + const cl_event * /* event_list */) CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED; + +extern CL_API_ENTRY CL_EXT_PREFIX__VERSION_1_1_DEPRECATED cl_int CL_API_CALL +clEnqueueBarrier(cl_command_queue /* command_queue */) CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED; + +extern CL_API_ENTRY CL_EXT_PREFIX__VERSION_1_1_DEPRECATED cl_int CL_API_CALL +clUnloadCompiler(void) CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED; + +extern CL_API_ENTRY CL_EXT_PREFIX__VERSION_1_1_DEPRECATED void * CL_API_CALL +clGetExtensionFunctionAddress(const char * /* func_name */) CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED; + +/* Deprecated OpenCL 2.0 APIs */ +extern CL_API_ENTRY CL_EXT_PREFIX__VERSION_1_2_DEPRECATED cl_command_queue CL_API_CALL +clCreateCommandQueue(cl_context /* context */, + cl_device_id /* device */, + cl_command_queue_properties /* properties */, + cl_int * /* errcode_ret */) CL_EXT_SUFFIX__VERSION_1_2_DEPRECATED; + + +extern CL_API_ENTRY CL_EXT_PREFIX__VERSION_1_2_DEPRECATED cl_sampler CL_API_CALL +clCreateSampler(cl_context /* context */, + cl_bool /* normalized_coords */, + cl_addressing_mode /* addressing_mode */, + cl_filter_mode /* filter_mode */, + cl_int * /* errcode_ret */) CL_EXT_SUFFIX__VERSION_1_2_DEPRECATED; + +extern CL_API_ENTRY CL_EXT_PREFIX__VERSION_1_2_DEPRECATED cl_int CL_API_CALL +clEnqueueTask(cl_command_queue /* command_queue */, + cl_kernel /* kernel */, + cl_uint /* num_events_in_wait_list */, + const cl_event * /* event_wait_list */, + cl_event * /* event */) CL_EXT_SUFFIX__VERSION_1_2_DEPRECATED; +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* __OPENCL_CL_H */ + diff --git a/sources/pvrsdk/Include/CL/cl2.hpp b/sources/pvrsdk/Include/CL/cl2.hpp new file mode 100755 index 00000000..70bd5984 --- /dev/null +++ b/sources/pvrsdk/Include/CL/cl2.hpp @@ -0,0 +1,9569 @@ +/******************************************************************************* + * Copyright (c) 2008-2016 The Khronos Group Inc. 
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Materials.
+ *
+ * MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS
+ * KHRONOS STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS
+ * SPECIFICATIONS AND HEADER INFORMATION ARE LOCATED AT
+ * https://www.khronos.org/registry/
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ ******************************************************************************/
+
+/*! \file
+ *
+ * \brief C++ bindings for OpenCL 1.0 (rev 48), OpenCL 1.1 (rev 33),
+ * OpenCL 1.2 (rev 15) and OpenCL 2.0 (rev 29)
+ * \author Lee Howes and Bruce Merry
+ *
+ * Derived from the OpenCL 1.x C++ bindings written by
+ * Benedict R. Gaster, Laurent Morichetti and Lee Howes
+ * With additions and fixes from:
+ * Brian Cole, March 3rd 2010 and April 2012
+ * Matt Gruenke, April 2012.
+ * Bruce Merry, February 2013.
+ * Tom Deakin and Simon McIntosh-Smith, July 2013
+ * James Price, 2015-
+ *
+ * \version 2.0.10
+ * \date 2016-07-20
+ *
+ * Optional extension support
+ *
+ * cl_ext_device_fission
+ * #define CL_HPP_USE_CL_DEVICE_FISSION
+ * cl_khr_d3d10_sharing
+ * #define CL_HPP_USE_DX_INTEROP
+ * cl_khr_sub_groups
+ * #define CL_HPP_USE_CL_SUB_GROUPS_KHR
+ *
+ * Doxygen documentation for this header is available here:
+ *
+ * http://khronosgroup.github.io/OpenCL-CLHPP/
+ *
+ * The latest version of this header can be found on the GitHub releases page:
+ *
+ * https://github.com/KhronosGroup/OpenCL-CLHPP/releases
+ *
+ * Bugs and patches can be submitted to the GitHub repository:
+ *
+ * https://github.com/KhronosGroup/OpenCL-CLHPP
+ */
+
+/*! \mainpage
+ * \section intro Introduction
+ * For many large applications C++ is the language of choice and so it seems
+ * reasonable to define C++ bindings for OpenCL.
+ *
+ * The interface is contained within a single C++ header file \em cl2.hpp and all
+ * definitions are contained within the namespace \em cl. There is no additional
+ * requirement to include \em cl.h and to use either the C++ or original C
+ * bindings; it is enough to simply include \em cl2.hpp.
+ *
+ * The bindings themselves are lightweight and correspond closely to the
+ * underlying C API. Using the C++ bindings introduces no additional execution
+ * overhead.
+ *
+ * There are numerous compatibility, portability and memory management
+ * fixes in the new header as well as additional OpenCL 2.0 features.
+ * As a result the header is not directly backward compatible and for this
+ * reason we release it as cl2.hpp rather than a new version of cl.hpp.
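+ *
+ * As a minimal sketch of this single-include model (assuming only that at
+ * least one OpenCL platform is installed; the full example further below
+ * shows realistic usage):
+ *
+ * \code
+ #include <CL/cl2.hpp>
+
+ int main()
+ {
+ cl::Platform plat = cl::Platform::getDefault();
+ return plat() != nullptr ? 0 : 1;
+ }
+ * \endcode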
+ *
+ *
+ * \section compatibility Compatibility
+ * Due to the evolution of the underlying OpenCL API the 2.0 C++ bindings
+ * include an updated approach to defining supported feature versions
+ * and the range of valid underlying OpenCL runtime versions supported.
+ *
+ * The combination of preprocessor macros CL_HPP_TARGET_OPENCL_VERSION and
+ * CL_HPP_MINIMUM_OPENCL_VERSION control this range. These are three digit
+ * decimal values representing OpenCL runtime versions. The default for
+ * the target is 200, representing OpenCL 2.0 and the minimum is also
+ * defined as 200. These settings would use 2.0 API calls only.
+ * If backward compatibility with a 1.2 runtime is required, the minimum
+ * version may be set to 120.
+ *
+ * Note that this is a compile-time setting, and so affects linking against
+ * a particular SDK version rather than the versioning of the loaded runtime.
+ *
+ * The earlier versions of the header included basic vector and string
+ * classes based loosely on STL versions. These were difficult to
+ * maintain and very rarely used. For the 2.0 header we now assume
+ * the presence of the standard library unless requested otherwise.
+ * We use std::array, std::vector, std::shared_ptr and std::string
+ * throughout to safely manage memory and reduce the chance of a
+ * recurrence of earlier memory management bugs.
+ *
+ * These classes are used through typedefs in the cl namespace:
+ * cl::array, cl::vector, cl::pointer and cl::string.
+ * In addition cl::allocate_pointer forwards to std::allocate_shared
+ * by default.
+ * In all cases these standard library classes can be replaced with
+ * custom interface-compatible versions using the CL_HPP_NO_STD_ARRAY,
+ * CL_HPP_NO_STD_VECTOR, CL_HPP_NO_STD_UNIQUE_PTR and
+ * CL_HPP_NO_STD_STRING macros.
+ *
+ * The OpenCL 1.x versions of the C++ bindings included a size_t wrapper
+ * class to interface with kernel enqueue. This caused unpleasant interactions
+ * with the standard size_t declaration and led to namespacing bugs.
+ * In the 2.0 version we have replaced this with a std::array-based interface.
+ * However, the old behaviour can be regained for backward compatibility
+ * using the CL_HPP_ENABLE_SIZE_T_COMPATIBILITY macro.
+ *
+ * Finally, the program construction interface used a clumsy vector-of-pairs
+ * design in the earlier versions. We have replaced that with a cleaner
+ * vector-of-vectors and vector-of-strings design. However, for backward
+ * compatibility old behaviour can be regained with the
+ * CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY macro.
+ *
+ * In OpenCL 2.0 OpenCL C is not entirely backward compatible with
+ * earlier versions. As a result a flag must be passed to the OpenCL C
+ * compiler to request OpenCL 2.0 compilation of kernels with 1.2 as
+ * the default in the absence of the flag.
+ * In some cases the C++ bindings automatically compile code for ease.
+ * For those cases the compilation defaults to OpenCL C 2.0.
+ * If this is not wanted, the CL_HPP_CL_1_2_DEFAULT_BUILD macro may
+ * be specified to assume 1.2 compilation.
+ * If more fine-grained decisions on a per-kernel basis are required
+ * then explicit build operations that take the flag should be used.
+ *
+ *
+ * \section parameterization Parameters
+ * This header may be parameterized by a set of preprocessor macros.
+ *
+ * - CL_HPP_TARGET_OPENCL_VERSION
+ *
+ * Defines the target OpenCL runtime version to build the header
+ * against. Defaults to 200, representing OpenCL 2.0.
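+ *
+ * For example, a translation unit that must also run against OpenCL 1.2
+ * runtimes could set both version macros before the first include (a
+ * minimal sketch of the version selection described in the Compatibility
+ * section above):
+ *
+ * \code
+ #define CL_HPP_MINIMUM_OPENCL_VERSION 120
+ #define CL_HPP_TARGET_OPENCL_VERSION 200
+ #include <CL/cl2.hpp>
+ * \endcode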
+ *
+ * - CL_HPP_NO_STD_STRING
+ *
+ * Do not use the standard library string class. cl::string is not
+ * defined and may be defined by the user before cl2.hpp is
+ * included.
+ *
+ * - CL_HPP_NO_STD_VECTOR
+ *
+ * Do not use the standard library vector class. cl::vector is not
+ * defined and may be defined by the user before cl2.hpp is
+ * included.
+ *
+ * - CL_HPP_NO_STD_ARRAY
+ *
+ * Do not use the standard library array class. cl::array is not
+ * defined and may be defined by the user before cl2.hpp is
+ * included.
+ *
+ * - CL_HPP_NO_STD_UNIQUE_PTR
+ *
+ * Do not use the standard library unique_ptr class. cl::pointer and
+ * the cl::allocate_pointer functions are not defined and may be
+ * defined by the user before cl2.hpp is included.
+ *
+ * - CL_HPP_ENABLE_DEVICE_FISSION
+ *
+ * Enables device fission for OpenCL 1.2 platforms.
+ *
+ * - CL_HPP_ENABLE_EXCEPTIONS
+ *
+ * Enable exceptions for use in the C++ bindings header. This is the
+ * preferred error handling mechanism but is not required.
+ *
+ * - CL_HPP_ENABLE_SIZE_T_COMPATIBILITY
+ *
+ * Backward compatibility option to support cl.hpp-style size_t
+ * class. Replaces the updated std::array derived version and
+ * removal of size_t from the namespace. Note that in this case the
+ * new size_t class is placed in the cl::compatibility namespace and
+ * thus requires an additional using declaration for direct backward
+ * compatibility.
+ *
+ * - CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY
+ *
+ * Enable older vector of pairs interface for construction of
+ * programs.
+ *
+ * - CL_HPP_CL_1_2_DEFAULT_BUILD
+ *
+ * Default to OpenCL C 1.2 compilation rather than OpenCL C 2.0;
+ * applies to use of cl::Program construction and other program
+ * build variants.
+ *
+ *
+ * \section example Example
+ *
+ * The following example shows a general use case for the C++
+ * bindings, including support for the optional exception feature and
+ * also the supplied vector and string classes; see the following sections
+ * for descriptions of these features.
+ *
+ * \code
+    #define CL_HPP_ENABLE_EXCEPTIONS
+    #define CL_HPP_TARGET_OPENCL_VERSION 200
+
+    #include <CL/cl2.hpp>
+    #include <iostream>
+    #include <vector>
+    #include <memory>
+    #include <algorithm>
+
+    const int numElements = 32;
+
+    int main(void)
+    {
+        // Filter for a 2.0 platform and set it as the default
+        std::vector<cl::Platform> platforms;
+        cl::Platform::get(&platforms);
+        cl::Platform plat;
+        for (auto &p : platforms) {
+            std::string platver = p.getInfo<CL_PLATFORM_VERSION>();
+            if (platver.find("OpenCL 2.") != std::string::npos) {
+                plat = p;
+            }
+        }
+        if (plat() == 0) {
+            std::cout << "No OpenCL 2.0 platform found.";
+            return -1;
+        }
+
+        cl::Platform newP = cl::Platform::setDefault(plat);
+        if (newP != plat) {
+            std::cout << "Error setting default platform.";
+            return -1;
+        }
+
+        // Use C++11 raw string literals for kernel source code
+        std::string kernel1{R"CLC(
+            global int globalA;
+            kernel void updateGlobal()
+            {
+                globalA = 75;
+            }
+        )CLC"};
+        std::string kernel2{R"CLC(
+            typedef struct { global int *bar; } Foo;
+            kernel void vectorAdd(global const Foo* aNum, global const int *inputA, global const int *inputB,
+                                  global int *output, int val, write_only pipe int outPipe, queue_t childQueue)
+            {
+                output[get_global_id(0)] = inputA[get_global_id(0)] + inputB[get_global_id(0)] + val + *(aNum->bar);
+                write_pipe(outPipe, &val);
+                queue_t default_queue = get_default_queue();
+                ndrange_t ndrange = ndrange_1D(get_global_size(0)/2, get_global_size(0)/2);
+
+                // Have a child kernel write into third quarter of output
+                enqueue_kernel(default_queue, CLK_ENQUEUE_FLAGS_WAIT_KERNEL, ndrange,
+                    ^{
+                        output[get_global_size(0)*2 + get_global_id(0)] =
+                            inputA[get_global_size(0)*2 + get_global_id(0)] + inputB[get_global_size(0)*2 + get_global_id(0)] + globalA;
+                    });
+
+                // Have a child kernel write into last quarter of output
+                enqueue_kernel(childQueue, CLK_ENQUEUE_FLAGS_WAIT_KERNEL, ndrange,
+                    ^{
+                        output[get_global_size(0)*3 + get_global_id(0)] =
+                            inputA[get_global_size(0)*3 + get_global_id(0)] + inputB[get_global_size(0)*3 + get_global_id(0)] + globalA + 2;
+                    });
+            }
+        )CLC"};
+
+        // New simpler string interface style
+        std::vector<std::string> programStrings {kernel1, kernel2};
+
+        cl::Program vectorAddProgram(programStrings);
+        try {
+            vectorAddProgram.build("-cl-std=CL2.0");
+        }
+        catch (...) {
+            // Print build info for all devices
+            cl_int buildErr = CL_SUCCESS;
+            auto buildInfo = vectorAddProgram.getBuildInfo<CL_PROGRAM_BUILD_LOG>(&buildErr);
+            for (auto &pair : buildInfo) {
+                std::cerr << pair.second << std::endl << std::endl;
+            }
+
+            return 1;
+        }
+
+        typedef struct { int *bar; } Foo;
+
+        // Get and run kernel that initializes the program-scope global
+        // A test for kernels that take no arguments
+        auto program2Kernel =
+            cl::KernelFunctor<>(vectorAddProgram, "updateGlobal");
+        program2Kernel(
+            cl::EnqueueArgs(
+                cl::NDRange(1)));
+
+        //////////////////
+        // SVM allocations
+
+        auto anSVMInt = cl::allocate_svm<int, cl::SVMTraitCoarse<>>();
+        *anSVMInt = 5;
+        cl::SVMAllocator<Foo, cl::SVMTraitCoarse<cl::SVMTraitReadOnly<>>> svmAllocReadOnly;
+        auto fooPointer = cl::allocate_pointer<Foo>(svmAllocReadOnly);
+        fooPointer->bar = anSVMInt.get();
+        cl::SVMAllocator<int, cl::SVMTraitCoarse<>> svmAlloc;
+        std::vector<int, cl::SVMAllocator<int, cl::SVMTraitCoarse<>>> inputA(numElements, 1, svmAlloc);
+        cl::coarse_svm_vector<int> inputB(numElements, 2, svmAlloc);
+
+        //
+        //////////////
+
+        // Traditional cl_mem allocations
+        std::vector<int> output(numElements, 0xdeadbeef);
+        cl::Buffer outputBuffer(begin(output), end(output), false);
+        cl::Pipe aPipe(sizeof(cl_int), numElements / 2);
+
+        // Default command queue, also passed in as a parameter
+        cl::DeviceCommandQueue defaultDeviceQueue = cl::DeviceCommandQueue::makeDefault(
+            cl::Context::getDefault(), cl::Device::getDefault());
+
+        auto vectorAddKernel =
+            cl::KernelFunctor<
+                decltype(fooPointer)&,
+                int*,
+                cl::coarse_svm_vector<int>&,
+                cl::Buffer,
+                int,
+                cl::Pipe&,
+                cl::DeviceCommandQueue
+                >(vectorAddProgram, "vectorAdd");
+
+        // Ensure that the additional SVM pointer is available to the kernel
+        // This one was not passed as a parameter
+        vectorAddKernel.setSVMPointers(anSVMInt);
+
+        // Hand control of coarse allocations to runtime
+        cl::enqueueUnmapSVM(anSVMInt);
+        cl::enqueueUnmapSVM(fooPointer);
+        cl::unmapSVM(inputB);
+
+        cl_int error;
+        vectorAddKernel(
+            cl::EnqueueArgs(
+                cl::NDRange(numElements/2),
+                cl::NDRange(numElements/2)),
+            fooPointer,
+            inputA.data(),
+            inputB,
+            outputBuffer,
+            3,
+            aPipe,
+            defaultDeviceQueue,
+            error
+            );
+
+        cl::copy(outputBuffer, begin(output), end(output));
+
+        cl::Device d = cl::Device::getDefault();
+
+        std::cout << "Output:\n";
+        for (int i = 0; i < numElements; ++i) {
+            std::cout << "\t" << output[i] << "\n";
+        }
+        std::cout << "\n\n";
+
+        return 0;
+    }
+ *
+ * \endcode
+ *
+ */
+#ifndef CL_HPP_
+#define CL_HPP_
+
+/* Handle deprecated preprocessor definitions. In each case, we only check for
+ * the old name if the new name is not defined, so that user code can define
+ * both and hence work with either version of the bindings.
+ */
+#if !defined(CL_HPP_USE_DX_INTEROP) && defined(USE_DX_INTEROP)
+# pragma message("cl2.hpp: USE_DX_INTEROP is deprecated. Define CL_HPP_USE_DX_INTEROP instead")
+# define CL_HPP_USE_DX_INTEROP
+#endif
+#if !defined(CL_HPP_USE_CL_DEVICE_FISSION) && defined(USE_CL_DEVICE_FISSION)
+# pragma message("cl2.hpp: USE_CL_DEVICE_FISSION is deprecated. Define CL_HPP_USE_CL_DEVICE_FISSION instead")
+# define CL_HPP_USE_CL_DEVICE_FISSION
+#endif
+#if !defined(CL_HPP_ENABLE_EXCEPTIONS) && defined(__CL_ENABLE_EXCEPTIONS)
+# pragma message("cl2.hpp: __CL_ENABLE_EXCEPTIONS is deprecated. Define CL_HPP_ENABLE_EXCEPTIONS instead")
+# define CL_HPP_ENABLE_EXCEPTIONS
+#endif
+#if !defined(CL_HPP_NO_STD_VECTOR) && defined(__NO_STD_VECTOR)
+# pragma message("cl2.hpp: __NO_STD_VECTOR is deprecated.
Define CL_HPP_NO_STD_VECTOR instead") +# define CL_HPP_NO_STD_VECTOR +#endif +#if !defined(CL_HPP_NO_STD_STRING) && defined(__NO_STD_STRING) +# pragma message("cl2.hpp: __NO_STD_STRING is deprecated. Define CL_HPP_NO_STD_STRING instead") +# define CL_HPP_NO_STD_STRING +#endif +#if defined(VECTOR_CLASS) +# pragma message("cl2.hpp: VECTOR_CLASS is deprecated. Alias cl::vector instead") +#endif +#if defined(STRING_CLASS) +# pragma message("cl2.hpp: STRING_CLASS is deprecated. Alias cl::string instead.") +#endif +#if !defined(CL_HPP_USER_OVERRIDE_ERROR_STRINGS) && defined(__CL_USER_OVERRIDE_ERROR_STRINGS) +# pragma message("cl2.hpp: __CL_USER_OVERRIDE_ERROR_STRINGS is deprecated. Define CL_HPP_USER_OVERRIDE_ERROR_STRINGS instead") +# define CL_HPP_USER_OVERRIDE_ERROR_STRINGS +#endif + +/* Warn about features that are no longer supported + */ +#if defined(__USE_DEV_VECTOR) +# pragma message("cl2.hpp: __USE_DEV_VECTOR is no longer supported. Expect compilation errors") +#endif +#if defined(__USE_DEV_STRING) +# pragma message("cl2.hpp: __USE_DEV_STRING is no longer supported. Expect compilation errors") +#endif + +/* Detect which version to target */ +#if !defined(CL_HPP_TARGET_OPENCL_VERSION) +# pragma message("cl2.hpp: CL_HPP_TARGET_OPENCL_VERSION is not defined. It will default to 200 (OpenCL 2.0)") +# define CL_HPP_TARGET_OPENCL_VERSION 200 +#endif +#if CL_HPP_TARGET_OPENCL_VERSION != 100 && CL_HPP_TARGET_OPENCL_VERSION != 110 && CL_HPP_TARGET_OPENCL_VERSION != 120 && CL_HPP_TARGET_OPENCL_VERSION != 200 +# pragma message("cl2.hpp: CL_HPP_TARGET_OPENCL_VERSION is not a valid value (100, 110, 120 or 200). It will be set to 200") +# undef CL_HPP_TARGET_OPENCL_VERSION +# define CL_HPP_TARGET_OPENCL_VERSION 200 +#endif + +#if !defined(CL_HPP_MINIMUM_OPENCL_VERSION) +# define CL_HPP_MINIMUM_OPENCL_VERSION 200 +#endif +#if CL_HPP_MINIMUM_OPENCL_VERSION != 100 && CL_HPP_MINIMUM_OPENCL_VERSION != 110 && CL_HPP_MINIMUM_OPENCL_VERSION != 120 && CL_HPP_MINIMUM_OPENCL_VERSION != 200 +# pragma message("cl2.hpp: CL_HPP_MINIMUM_OPENCL_VERSION is not a valid value (100, 110, 120 or 200). 
It will be set to 100")
+# undef CL_HPP_MINIMUM_OPENCL_VERSION
+# define CL_HPP_MINIMUM_OPENCL_VERSION 100
+#endif
+#if CL_HPP_MINIMUM_OPENCL_VERSION > CL_HPP_TARGET_OPENCL_VERSION
+# error "CL_HPP_MINIMUM_OPENCL_VERSION must not be greater than CL_HPP_TARGET_OPENCL_VERSION"
+#endif
+
+#if CL_HPP_MINIMUM_OPENCL_VERSION <= 100 && !defined(CL_USE_DEPRECATED_OPENCL_1_0_APIS)
+# define CL_USE_DEPRECATED_OPENCL_1_0_APIS
+#endif
+#if CL_HPP_MINIMUM_OPENCL_VERSION <= 110 && !defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
+# define CL_USE_DEPRECATED_OPENCL_1_1_APIS
+#endif
+#if CL_HPP_MINIMUM_OPENCL_VERSION <= 120 && !defined(CL_USE_DEPRECATED_OPENCL_1_2_APIS)
+# define CL_USE_DEPRECATED_OPENCL_1_2_APIS
+#endif
+#if CL_HPP_MINIMUM_OPENCL_VERSION <= 200 && !defined(CL_USE_DEPRECATED_OPENCL_2_0_APIS)
+# define CL_USE_DEPRECATED_OPENCL_2_0_APIS
+#endif
+
+#ifdef _WIN32
+
+#include <malloc.h>
+
+#if defined(CL_HPP_USE_DX_INTEROP)
+#include <CL/cl_d3d10.h>
+#include <CL/cl_dx9_media_sharing.h>
+#endif
+#endif // _WIN32
+
+#if defined(_MSC_VER)
+#include <intrin.h>
+#endif // _MSC_VER
+
+// Check for a valid C++ version
+
+// Need to do both tests here because for some reason __cplusplus is not
+// updated in visual studio
+#if (!defined(_MSC_VER) && __cplusplus < 201103L) || (defined(_MSC_VER) && _MSC_VER < 1700)
+#error Visual Studio 2012 or another C++11-supporting compiler required
+#endif
+
+// Both the Apple and non-Apple paths use the dynamic OpenCL loader header,
+// so no platform check is needed here
+#include "DynamicOCL.h"
+
+#if defined(CL_HPP_USE_CL_DEVICE_FISSION) || defined(CL_HPP_USE_CL_SUB_GROUPS_KHR)
+#include <CL/cl_ext.h>
+#endif
+
+#if (__cplusplus >= 201103L)
+#define CL_HPP_NOEXCEPT_ noexcept
+#else
+#define CL_HPP_NOEXCEPT_
+#endif
+
+#if defined(_MSC_VER)
+# define CL_HPP_DEFINE_STATIC_MEMBER_ __declspec(selectany)
+#else
+# define CL_HPP_DEFINE_STATIC_MEMBER_ __attribute__((weak))
+#endif // !_MSC_VER
+
+// Define deprecated prefixes and suffixes to ensure compilation
+// in case they are not pre-defined
+#if !defined(CL_EXT_PREFIX__VERSION_1_1_DEPRECATED)
+#define CL_EXT_PREFIX__VERSION_1_1_DEPRECATED
+#endif // #if !defined(CL_EXT_PREFIX__VERSION_1_1_DEPRECATED)
+#if !defined(CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED)
+#define CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED
+#endif // #if !defined(CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED)
+
+#if !defined(CL_EXT_PREFIX__VERSION_1_2_DEPRECATED)
+#define CL_EXT_PREFIX__VERSION_1_2_DEPRECATED
+#endif // #if !defined(CL_EXT_PREFIX__VERSION_1_2_DEPRECATED)
+#if !defined(CL_EXT_SUFFIX__VERSION_1_2_DEPRECATED)
+#define CL_EXT_SUFFIX__VERSION_1_2_DEPRECATED
+#endif // #if !defined(CL_EXT_SUFFIX__VERSION_1_2_DEPRECATED)
+
+#if !defined(CL_CALLBACK)
+#define CL_CALLBACK
+#endif //CL_CALLBACK
+
+#include <utility>
+#include <limits>
+#include <iterator>
+#include <mutex>
+#include <cstring>
+#include <functional>
+
+
+// Define a size_type to represent a correctly resolved size_t
+#if defined(CL_HPP_ENABLE_SIZE_T_COMPATIBILITY)
+namespace cl {
+    using size_type = ::size_t;
+} // namespace cl
+#else // #if defined(CL_HPP_ENABLE_SIZE_T_COMPATIBILITY)
+namespace cl {
+    using size_type = size_t;
+} // namespace cl
+#endif // #if defined(CL_HPP_ENABLE_SIZE_T_COMPATIBILITY)
+
+
+#if defined(CL_HPP_ENABLE_EXCEPTIONS)
+#include <exception>
+#endif // #if defined(CL_HPP_ENABLE_EXCEPTIONS)
+
+#if !defined(CL_HPP_NO_STD_VECTOR)
+#include <vector>
+namespace cl {
+    template < class T, class Alloc = std::allocator<T> >
+    using vector = std::vector<T, Alloc>;
+} // namespace cl
+#endif // #if !defined(CL_HPP_NO_STD_VECTOR)
+
+#if !defined(CL_HPP_NO_STD_STRING)
+#include <string>
+namespace cl {
+    using string = std::string;
+} // namespace cl
+#endif // #if !defined(CL_HPP_NO_STD_STRING)
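+
+/* Example: supplying a custom, interface-compatible vector before including
+ * the header (a sketch; MyVector and MyAllocator are hypothetical user types
+ * standing in for any std::vector-compatible implementation):
+ *
+ *   #define CL_HPP_NO_STD_VECTOR
+ *   namespace cl {
+ *       template<class T, class Alloc = MyAllocator<T>>
+ *       using vector = MyVector<T, Alloc>;
+ *   }
+ *   #include <CL/cl2.hpp>
+ */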
+#if CL_HPP_TARGET_OPENCL_VERSION >= 200
+
+#if !defined(CL_HPP_NO_STD_UNIQUE_PTR)
+#include <memory>
+namespace cl {
+    // Replace unique_ptr and allocate_pointer for internal use
+    // to allow user to replace them
+    template<class T, class D>
+    using pointer = std::unique_ptr<T, D>;
+} // namespace cl
+#endif
+#endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
+#if !defined(CL_HPP_NO_STD_ARRAY)
+#include <array>
+namespace cl {
+    template < class T, size_type N >
+    using array = std::array<T, N>;
+} // namespace cl
+#endif // #if !defined(CL_HPP_NO_STD_ARRAY)
+
+// Define size_type appropriately to allow backward-compatibility
+// use of the old size_t interface class
+#if defined(CL_HPP_ENABLE_SIZE_T_COMPATIBILITY)
+namespace cl {
+    namespace compatibility {
+        /*! \brief class used to interface between C++ and
+         *  OpenCL C calls that require arrays of size_t values, whose
+         *  size is known statically.
+         */
+        template <int N>
+        class size_t
+        {
+        private:
+            size_type data_[N];
+
+        public:
+            //! \brief Initialize size_t to all 0s
+            size_t()
+            {
+                for (int i = 0; i < N; ++i) {
+                    data_[i] = 0;
+                }
+            }
+
+            size_t(const array<size_type, N> &rhs)
+            {
+                for (int i = 0; i < N; ++i) {
+                    data_[i] = rhs[i];
+                }
+            }
+
+            size_type& operator[](int index)
+            {
+                return data_[index];
+            }
+
+            const size_type& operator[](int index) const
+            {
+                return data_[index];
+            }
+
+            //! \brief Conversion operator to T*.
+            operator size_type* ()             { return data_; }
+
+            //! \brief Conversion operator to const T*.
+            operator const size_type* () const { return data_; }
+
+            operator array<size_type, N>() const
+            {
+                array<size_type, N> ret;
+
+                for (int i = 0; i < N; ++i) {
+                    ret[i] = data_[i];
+                }
+                return ret;
+            }
+        };
+    } // namespace compatibility
+
+    template<int N>
+    using size_t = compatibility::size_t<N>;
+} // namespace cl
+#endif // #if defined(CL_HPP_ENABLE_SIZE_T_COMPATIBILITY)
+
+// Helper alias to avoid confusing the macros
+namespace cl {
+    namespace detail {
+        using size_t_array = array<size_type, 3>;
+    } // namespace detail
+} // namespace cl
+
+
+/*! \namespace cl
+ *
+ * \brief The OpenCL C++ bindings are defined within this namespace.
+ *
+ */
+namespace cl {
+    class Memory;
+
+#define CL_HPP_INIT_CL_EXT_FCN_PTR_(name) \
+    if (!pfn_##name) { \
+        pfn_##name = (PFN_##name) \
+            clGetExtensionFunctionAddress(#name); \
+        if (!pfn_##name) { \
+        } \
+    }
+
+#define CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(platform, name) \
+    if (!pfn_##name) { \
+        pfn_##name = (PFN_##name) \
+            clGetExtensionFunctionAddressForPlatform(platform, #name); \
+        if (!pfn_##name) { \
+        } \
+    }
+
+    class Program;
+    class Device;
+    class Context;
+    class CommandQueue;
+    class DeviceCommandQueue;
+    class Memory;
+    class Buffer;
+    class Pipe;
+
+#if defined(CL_HPP_ENABLE_EXCEPTIONS)
+    /*! \brief Exception class
+     *
+     *  This may be thrown by API functions when CL_HPP_ENABLE_EXCEPTIONS is defined.
+     */
+    class Error : public std::exception
+    {
+    private:
+        cl_int err_;
+        const char * errStr_;
+    public:
+        /*! \brief Create a new CL error exception for a given error code
+         *  and corresponding message.
+         *
+         *  \param err error code value.
+         *
+         *  \param errStr a descriptive string that must remain in scope until
+         *                handling of the exception has concluded. If set, it
+         *                will be returned by what().
+         */
+        Error(cl_int err, const char * errStr = NULL) : err_(err), errStr_(errStr)
+        {}
+
+        ~Error() throw() {}
+
+        /*! \brief Get error string associated with exception
+         *
+         * \return A memory pointer to the error message string.
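+         *
+         * A usage sketch, assuming CL_HPP_ENABLE_EXCEPTIONS is defined and a
+         * cl::CommandQueue named queue exists:
+         *   try { queue.finish(); }
+         *   catch (const cl::Error &e) {
+         *       std::cerr << e.what() << " (" << e.err() << ")" << std::endl;
+         *   }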
+ */ + virtual const char * what() const throw () + { + if (errStr_ == NULL) { + return "empty"; + } + else { + return errStr_; + } + } + + /*! \brief Get error code associated with exception + * + * \return The error code. + */ + cl_int err(void) const { return err_; } + }; +#define CL_HPP_ERR_STR_(x) #x +#else +#define CL_HPP_ERR_STR_(x) NULL +#endif // CL_HPP_ENABLE_EXCEPTIONS + + +namespace detail +{ +#if defined(CL_HPP_ENABLE_EXCEPTIONS) +static inline cl_int errHandler ( + cl_int err, + const char * errStr = NULL) +{ + if (err != CL_SUCCESS) { + throw Error(err, errStr); + } + return err; +} +#else +static inline cl_int errHandler (cl_int err, const char * errStr = NULL) +{ + (void) errStr; // suppress unused variable warning + return err; +} +#endif // CL_HPP_ENABLE_EXCEPTIONS +} + + + +//! \cond DOXYGEN_DETAIL +#if !defined(CL_HPP_USER_OVERRIDE_ERROR_STRINGS) +#define __GET_DEVICE_INFO_ERR CL_HPP_ERR_STR_(clGetDeviceInfo) +#define __GET_PLATFORM_INFO_ERR CL_HPP_ERR_STR_(clGetPlatformInfo) +#define __GET_DEVICE_IDS_ERR CL_HPP_ERR_STR_(clGetDeviceIDs) +#define __GET_PLATFORM_IDS_ERR CL_HPP_ERR_STR_(clGetPlatformIDs) +#define __GET_CONTEXT_INFO_ERR CL_HPP_ERR_STR_(clGetContextInfo) +#define __GET_EVENT_INFO_ERR CL_HPP_ERR_STR_(clGetEventInfo) +#define __GET_EVENT_PROFILE_INFO_ERR CL_HPP_ERR_STR_(clGetEventProfileInfo) +#define __GET_MEM_OBJECT_INFO_ERR CL_HPP_ERR_STR_(clGetMemObjectInfo) +#define __GET_IMAGE_INFO_ERR CL_HPP_ERR_STR_(clGetImageInfo) +#define __GET_SAMPLER_INFO_ERR CL_HPP_ERR_STR_(clGetSamplerInfo) +#define __GET_KERNEL_INFO_ERR CL_HPP_ERR_STR_(clGetKernelInfo) +#if CL_HPP_TARGET_OPENCL_VERSION >= 120 +#define __GET_KERNEL_ARG_INFO_ERR CL_HPP_ERR_STR_(clGetKernelArgInfo) +#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120 +#define __GET_KERNEL_WORK_GROUP_INFO_ERR CL_HPP_ERR_STR_(clGetKernelWorkGroupInfo) +#define __GET_PROGRAM_INFO_ERR CL_HPP_ERR_STR_(clGetProgramInfo) +#define __GET_PROGRAM_BUILD_INFO_ERR CL_HPP_ERR_STR_(clGetProgramBuildInfo) +#define __GET_COMMAND_QUEUE_INFO_ERR CL_HPP_ERR_STR_(clGetCommandQueueInfo) + +#define __CREATE_CONTEXT_ERR CL_HPP_ERR_STR_(clCreateContext) +#define __CREATE_CONTEXT_FROM_TYPE_ERR CL_HPP_ERR_STR_(clCreateContextFromType) +#define __GET_SUPPORTED_IMAGE_FORMATS_ERR CL_HPP_ERR_STR_(clGetSupportedImageFormats) + +#define __CREATE_BUFFER_ERR CL_HPP_ERR_STR_(clCreateBuffer) +#define __COPY_ERR CL_HPP_ERR_STR_(cl::copy) +#define __CREATE_SUBBUFFER_ERR CL_HPP_ERR_STR_(clCreateSubBuffer) +#define __CREATE_GL_BUFFER_ERR CL_HPP_ERR_STR_(clCreateFromGLBuffer) +#define __CREATE_GL_RENDER_BUFFER_ERR CL_HPP_ERR_STR_(clCreateFromGLBuffer) +#define __GET_GL_OBJECT_INFO_ERR CL_HPP_ERR_STR_(clGetGLObjectInfo) +#if CL_HPP_TARGET_OPENCL_VERSION >= 120 +#define __CREATE_IMAGE_ERR CL_HPP_ERR_STR_(clCreateImage) +#define __CREATE_GL_TEXTURE_ERR CL_HPP_ERR_STR_(clCreateFromGLTexture) +#define __IMAGE_DIMENSION_ERR CL_HPP_ERR_STR_(Incorrect image dimensions) +#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120 +#define __SET_MEM_OBJECT_DESTRUCTOR_CALLBACK_ERR CL_HPP_ERR_STR_(clSetMemObjectDestructorCallback) + +#define __CREATE_USER_EVENT_ERR CL_HPP_ERR_STR_(clCreateUserEvent) +#define __SET_USER_EVENT_STATUS_ERR CL_HPP_ERR_STR_(clSetUserEventStatus) +#define __SET_EVENT_CALLBACK_ERR CL_HPP_ERR_STR_(clSetEventCallback) +#define __WAIT_FOR_EVENTS_ERR CL_HPP_ERR_STR_(clWaitForEvents) + +#define __CREATE_KERNEL_ERR CL_HPP_ERR_STR_(clCreateKernel) +#define __SET_KERNEL_ARGS_ERR CL_HPP_ERR_STR_(clSetKernelArg) +#define __CREATE_PROGRAM_WITH_SOURCE_ERR 
CL_HPP_ERR_STR_(clCreateProgramWithSource) +#define __CREATE_PROGRAM_WITH_BINARY_ERR CL_HPP_ERR_STR_(clCreateProgramWithBinary) +#if CL_HPP_TARGET_OPENCL_VERSION >= 120 +#define __CREATE_PROGRAM_WITH_BUILT_IN_KERNELS_ERR CL_HPP_ERR_STR_(clCreateProgramWithBuiltInKernels) +#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120 +#define __BUILD_PROGRAM_ERR CL_HPP_ERR_STR_(clBuildProgram) +#if CL_HPP_TARGET_OPENCL_VERSION >= 120 +#define __COMPILE_PROGRAM_ERR CL_HPP_ERR_STR_(clCompileProgram) +#define __LINK_PROGRAM_ERR CL_HPP_ERR_STR_(clLinkProgram) +#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120 +#define __CREATE_KERNELS_IN_PROGRAM_ERR CL_HPP_ERR_STR_(clCreateKernelsInProgram) + +#if CL_HPP_TARGET_OPENCL_VERSION >= 200 +#define __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR CL_HPP_ERR_STR_(clCreateCommandQueueWithProperties) +#define __CREATE_SAMPLER_WITH_PROPERTIES_ERR CL_HPP_ERR_STR_(clCreateSamplerWithProperties) +#endif // CL_HPP_TARGET_OPENCL_VERSION >= 200 +#define __SET_COMMAND_QUEUE_PROPERTY_ERR CL_HPP_ERR_STR_(clSetCommandQueueProperty) +#define __ENQUEUE_READ_BUFFER_ERR CL_HPP_ERR_STR_(clEnqueueReadBuffer) +#define __ENQUEUE_READ_BUFFER_RECT_ERR CL_HPP_ERR_STR_(clEnqueueReadBufferRect) +#define __ENQUEUE_WRITE_BUFFER_ERR CL_HPP_ERR_STR_(clEnqueueWriteBuffer) +#define __ENQUEUE_WRITE_BUFFER_RECT_ERR CL_HPP_ERR_STR_(clEnqueueWriteBufferRect) +#define __ENQEUE_COPY_BUFFER_ERR CL_HPP_ERR_STR_(clEnqueueCopyBuffer) +#define __ENQEUE_COPY_BUFFER_RECT_ERR CL_HPP_ERR_STR_(clEnqueueCopyBufferRect) +#define __ENQUEUE_FILL_BUFFER_ERR CL_HPP_ERR_STR_(clEnqueueFillBuffer) +#define __ENQUEUE_READ_IMAGE_ERR CL_HPP_ERR_STR_(clEnqueueReadImage) +#define __ENQUEUE_WRITE_IMAGE_ERR CL_HPP_ERR_STR_(clEnqueueWriteImage) +#define __ENQUEUE_COPY_IMAGE_ERR CL_HPP_ERR_STR_(clEnqueueCopyImage) +#define __ENQUEUE_FILL_IMAGE_ERR CL_HPP_ERR_STR_(clEnqueueFillImage) +#define __ENQUEUE_COPY_IMAGE_TO_BUFFER_ERR CL_HPP_ERR_STR_(clEnqueueCopyImageToBuffer) +#define __ENQUEUE_COPY_BUFFER_TO_IMAGE_ERR CL_HPP_ERR_STR_(clEnqueueCopyBufferToImage) +#define __ENQUEUE_MAP_BUFFER_ERR CL_HPP_ERR_STR_(clEnqueueMapBuffer) +#define __ENQUEUE_MAP_IMAGE_ERR CL_HPP_ERR_STR_(clEnqueueMapImage) +#define __ENQUEUE_UNMAP_MEM_OBJECT_ERR CL_HPP_ERR_STR_(clEnqueueUnMapMemObject) +#define __ENQUEUE_NDRANGE_KERNEL_ERR CL_HPP_ERR_STR_(clEnqueueNDRangeKernel) +#define __ENQUEUE_NATIVE_KERNEL CL_HPP_ERR_STR_(clEnqueueNativeKernel) +#if CL_HPP_TARGET_OPENCL_VERSION >= 120 +#define __ENQUEUE_MIGRATE_MEM_OBJECTS_ERR CL_HPP_ERR_STR_(clEnqueueMigrateMemObjects) +#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120 + +#define __ENQUEUE_ACQUIRE_GL_ERR CL_HPP_ERR_STR_(clEnqueueAcquireGLObjects) +#define __ENQUEUE_RELEASE_GL_ERR CL_HPP_ERR_STR_(clEnqueueReleaseGLObjects) + +#define __CREATE_PIPE_ERR CL_HPP_ERR_STR_(clCreatePipe) +#define __GET_PIPE_INFO_ERR CL_HPP_ERR_STR_(clGetPipeInfo) + + +#define __RETAIN_ERR CL_HPP_ERR_STR_(Retain Object) +#define __RELEASE_ERR CL_HPP_ERR_STR_(Release Object) +#define __FLUSH_ERR CL_HPP_ERR_STR_(clFlush) +#define __FINISH_ERR CL_HPP_ERR_STR_(clFinish) +#define __VECTOR_CAPACITY_ERR CL_HPP_ERR_STR_(Vector capacity error) + +/** + * CL 1.2 version that uses device fission. 
+ */
+#if CL_HPP_TARGET_OPENCL_VERSION >= 120
+#define __CREATE_SUB_DEVICES_ERR CL_HPP_ERR_STR_(clCreateSubDevices)
+#else
+#define __CREATE_SUB_DEVICES_ERR CL_HPP_ERR_STR_(clCreateSubDevicesEXT)
+#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
+
+/**
+ * Deprecated APIs for 1.2
+ */
+#if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
+#define __ENQUEUE_MARKER_ERR CL_HPP_ERR_STR_(clEnqueueMarker)
+#define __ENQUEUE_WAIT_FOR_EVENTS_ERR CL_HPP_ERR_STR_(clEnqueueWaitForEvents)
+#define __ENQUEUE_BARRIER_ERR CL_HPP_ERR_STR_(clEnqueueBarrier)
+#define __UNLOAD_COMPILER_ERR CL_HPP_ERR_STR_(clUnloadCompiler)
+#define __CREATE_GL_TEXTURE_2D_ERR CL_HPP_ERR_STR_(clCreateFromGLTexture2D)
+#define __CREATE_GL_TEXTURE_3D_ERR CL_HPP_ERR_STR_(clCreateFromGLTexture3D)
+#define __CREATE_IMAGE2D_ERR CL_HPP_ERR_STR_(clCreateImage2D)
+#define __CREATE_IMAGE3D_ERR CL_HPP_ERR_STR_(clCreateImage3D)
+#endif // #if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
+
+/**
+ * Deprecated APIs for 2.0
+ */
+#if defined(CL_USE_DEPRECATED_OPENCL_1_2_APIS)
+#define __CREATE_COMMAND_QUEUE_ERR CL_HPP_ERR_STR_(clCreateCommandQueue)
+#define __ENQUEUE_TASK_ERR CL_HPP_ERR_STR_(clEnqueueTask)
+#define __CREATE_SAMPLER_ERR CL_HPP_ERR_STR_(clCreateSampler)
+#endif // #if defined(CL_USE_DEPRECATED_OPENCL_1_2_APIS)
+
+/**
+ * CL 1.2 marker and barrier commands
+ */
+#if CL_HPP_TARGET_OPENCL_VERSION >= 120
+#define __ENQUEUE_MARKER_WAIT_LIST_ERR CL_HPP_ERR_STR_(clEnqueueMarkerWithWaitList)
+#define __ENQUEUE_BARRIER_WAIT_LIST_ERR CL_HPP_ERR_STR_(clEnqueueBarrierWithWaitList)
+#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
+
+#endif // CL_HPP_USER_OVERRIDE_ERROR_STRINGS
+//! \endcond
+
+
+namespace detail {
+
+// Generic getInfoHelper. The final parameter is used to guide overload
+// resolution: the actual parameter passed is an int, which makes this
+// a worse conversion sequence than a specialization that declares the
+// parameter as an int.
+template<typename Functor, typename T>
+inline cl_int getInfoHelper(Functor f, cl_uint name, T* param, long)
+{
+    return f(name, sizeof(T), param, NULL);
+}
+
+// Specialized for getInfo<CL_PROGRAM_BINARIES>
+// Assumes that the output vector was correctly resized on the way in
+template <typename Func>
+inline cl_int getInfoHelper(Func f, cl_uint name, vector<vector<unsigned char>>* param, int)
+{
+    if (name != CL_PROGRAM_BINARIES) {
+        return CL_INVALID_VALUE;
+    }
+    if (param) {
+        // Create array of pointers, calculate total size and pass pointer array in
+        size_type numBinaries = param->size();
+        vector<unsigned char*> binariesPointers(numBinaries);
+
+        for (size_type i = 0; i < numBinaries; ++i)
+        {
+            binariesPointers[i] = (*param)[i].data();
+        }
+
+        cl_int err = f(name, numBinaries * sizeof(unsigned char*), binariesPointers.data(), NULL);
+
+        if (err != CL_SUCCESS) {
+            return err;
+        }
+    }
+
+    return CL_SUCCESS;
+}
+
+// Specialized getInfoHelper for vector params
+template <typename Func, typename T>
+inline cl_int getInfoHelper(Func f, cl_uint name, vector<T>* param, long)
+{
+    size_type required;
+    cl_int err = f(name, 0, NULL, &required);
+    if (err != CL_SUCCESS) {
+        return err;
+    }
+    const size_type elements = required / sizeof(T);
+
+    // Temporary to avoid changing param on an error
+    vector<T> localData(elements);
+    err = f(name, required, localData.data(), NULL);
+    if (err != CL_SUCCESS) {
+        return err;
+    }
+    if (param) {
+        *param = std::move(localData);
+    }
+
+    return CL_SUCCESS;
+}
+
+/* Specialization for reference-counted types. This depends on the
+ * existence of Wrapper<T>::cl_type, and none of the other types having the
+ * cl_type member. Note that simply specifying the parameter as Wrapper<T>
+ * does not work, because when using a derived type (e.g. Context) the generic
+ * template will provide a better match.
+ */
+template <typename Func, typename T>
+inline cl_int getInfoHelper(
+    Func f, cl_uint name, vector<T>* param, int, typename T::cl_type = 0)
+{
+    size_type required;
+    cl_int err = f(name, 0, NULL, &required);
+    if (err != CL_SUCCESS) {
+        return err;
+    }
+
+    const size_type elements = required / sizeof(typename T::cl_type);
+
+    vector<typename T::cl_type> value(elements);
+    err = f(name, required, value.data(), NULL);
+    if (err != CL_SUCCESS) {
+        return err;
+    }
+
+    if (param) {
+        // Assign to convert CL type to T for each element
+        param->resize(elements);
+
+        // Assign to param, constructing with retain behaviour
+        // to correctly capture each underlying CL object
+        for (size_type i = 0; i < elements; i++) {
+            (*param)[i] = T(value[i], true);
+        }
+    }
+    return CL_SUCCESS;
+}
+
+// Specialized GetInfoHelper for string params
+template <typename Func>
+inline cl_int getInfoHelper(Func f, cl_uint name, string* param, long)
+{
+    size_type required;
+    cl_int err = f(name, 0, NULL, &required);
+    if (err != CL_SUCCESS) {
+        return err;
+    }
+
+    // std::string has a constant data member
+    // a char vector does not
+    if (required > 0) {
+        vector<char> value(required);
+        err = f(name, required, value.data(), NULL);
+        if (err != CL_SUCCESS) {
+            return err;
+        }
+        if (param) {
+            param->assign(begin(value), prev(end(value)));
+        }
+    }
+    else if (param) {
+        param->assign("");
+    }
+    return CL_SUCCESS;
+}
+
+// Specialized GetInfoHelper for clsize_t params
+template <typename Func, size_type N>
+inline cl_int getInfoHelper(Func f, cl_uint name, array<size_type, N>* param, long)
+{
+    size_type required;
+    cl_int err = f(name, 0, NULL, &required);
+    if (err != CL_SUCCESS) {
+        return err;
+    }
+
+    size_type elements = required / sizeof(size_type);
+    vector<size_type> value(elements, 0);
+
+    err = f(name, required, value.data(), NULL);
+    if (err != CL_SUCCESS) {
+        return err;
+    }
+
+    // Bound the copy with N to prevent overruns
+    // if N is larger than the amount returned
+    if (elements > N) {
+        elements = N;
+    }
+    for (size_type i = 0; i < elements; ++i) {
+        (*param)[i] = value[i];
+    }
+
+    return CL_SUCCESS;
+}
+
+template<typename T> struct ReferenceHandler;
+
+/* Specialization for reference-counted types. This depends on the
+ * existence of Wrapper<T>::cl_type, and none of the other types having the
+ * cl_type member. Note that simply specifying the parameter as Wrapper<T>
+ * does not work, because when using a derived type (e.g. Context) the generic
+ * template will provide a better match.
+ */
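+// Worked example of the dispatch above (a sketch): a call such as
+// getInfoHelper(f, name, &param, 0) passes the literal 0 as an int, so for
+// types that declare a cl_type member the int-tagged specializations are
+// preferred; all other types fall back to the long-tagged generic overload
+// via the int -> long conversion.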
+template<typename Func, typename T>
+inline cl_int getInfoHelper(Func f, cl_uint name, T* param, int, typename T::cl_type = 0)
+{
+    typename T::cl_type value;
+    cl_int err = f(name, sizeof(value), &value, NULL);
+    if (err != CL_SUCCESS) {
+        return err;
+    }
+    *param = value;
+    if (value != NULL)
+    {
+        err = param->retain();
+        if (err != CL_SUCCESS) {
+            return err;
+        }
+    }
+    return CL_SUCCESS;
+}
+
+#define CL_HPP_PARAM_NAME_INFO_1_0_(F) \
+    F(cl_platform_info, CL_PLATFORM_PROFILE, string) \
+    F(cl_platform_info, CL_PLATFORM_VERSION, string) \
+    F(cl_platform_info, CL_PLATFORM_NAME, string) \
+    F(cl_platform_info, CL_PLATFORM_VENDOR, string) \
+    F(cl_platform_info, CL_PLATFORM_EXTENSIONS, string) \
+    \
+    F(cl_device_info, CL_DEVICE_TYPE, cl_device_type) \
+    F(cl_device_info, CL_DEVICE_VENDOR_ID, cl_uint) \
+    F(cl_device_info, CL_DEVICE_MAX_COMPUTE_UNITS, cl_uint) \
+    F(cl_device_info, CL_DEVICE_MAX_WORK_ITEM_DIMENSIONS, cl_uint) \
+    F(cl_device_info, CL_DEVICE_MAX_WORK_GROUP_SIZE, size_type) \
+    F(cl_device_info, CL_DEVICE_MAX_WORK_ITEM_SIZES, cl::vector<size_type>) \
+    F(cl_device_info, CL_DEVICE_PREFERRED_VECTOR_WIDTH_CHAR, cl_uint) \
+    F(cl_device_info, CL_DEVICE_PREFERRED_VECTOR_WIDTH_SHORT, cl_uint) \
+    F(cl_device_info, CL_DEVICE_PREFERRED_VECTOR_WIDTH_INT, cl_uint) \
+    F(cl_device_info, CL_DEVICE_PREFERRED_VECTOR_WIDTH_LONG, cl_uint) \
+    F(cl_device_info, CL_DEVICE_PREFERRED_VECTOR_WIDTH_FLOAT, cl_uint) \
+    F(cl_device_info, CL_DEVICE_PREFERRED_VECTOR_WIDTH_DOUBLE, cl_uint) \
+    F(cl_device_info, CL_DEVICE_MAX_CLOCK_FREQUENCY, cl_uint) \
+    F(cl_device_info, CL_DEVICE_ADDRESS_BITS, cl_uint) \
+    F(cl_device_info, CL_DEVICE_MAX_READ_IMAGE_ARGS, cl_uint) \
+    F(cl_device_info, CL_DEVICE_MAX_WRITE_IMAGE_ARGS, cl_uint) \
+    F(cl_device_info, CL_DEVICE_MAX_MEM_ALLOC_SIZE, cl_ulong) \
+    F(cl_device_info, CL_DEVICE_IMAGE2D_MAX_WIDTH, size_type) \
+    F(cl_device_info, CL_DEVICE_IMAGE2D_MAX_HEIGHT, size_type) \
+    F(cl_device_info, CL_DEVICE_IMAGE3D_MAX_WIDTH, size_type) \
+    F(cl_device_info, CL_DEVICE_IMAGE3D_MAX_HEIGHT, size_type) \
+    F(cl_device_info, CL_DEVICE_IMAGE3D_MAX_DEPTH, size_type) \
+    F(cl_device_info, CL_DEVICE_IMAGE_SUPPORT, cl_bool) \
+    F(cl_device_info, CL_DEVICE_MAX_PARAMETER_SIZE, size_type) \
+    F(cl_device_info, CL_DEVICE_MAX_SAMPLERS, cl_uint) \
+    F(cl_device_info, CL_DEVICE_MEM_BASE_ADDR_ALIGN, cl_uint) \
+    F(cl_device_info, CL_DEVICE_MIN_DATA_TYPE_ALIGN_SIZE, cl_uint) \
+    F(cl_device_info, CL_DEVICE_SINGLE_FP_CONFIG, cl_device_fp_config) \
+    F(cl_device_info, CL_DEVICE_GLOBAL_MEM_CACHE_TYPE, cl_device_mem_cache_type) \
+    F(cl_device_info, CL_DEVICE_GLOBAL_MEM_CACHELINE_SIZE, cl_uint) \
+    F(cl_device_info, CL_DEVICE_GLOBAL_MEM_CACHE_SIZE, cl_ulong) \
+    F(cl_device_info, CL_DEVICE_GLOBAL_MEM_SIZE, cl_ulong) \
+    F(cl_device_info, CL_DEVICE_MAX_CONSTANT_BUFFER_SIZE, cl_ulong) \
+    F(cl_device_info, CL_DEVICE_MAX_CONSTANT_ARGS, cl_uint) \
+    F(cl_device_info, CL_DEVICE_LOCAL_MEM_TYPE, cl_device_local_mem_type) \
+    F(cl_device_info, CL_DEVICE_LOCAL_MEM_SIZE, cl_ulong) \
+    F(cl_device_info, CL_DEVICE_ERROR_CORRECTION_SUPPORT, cl_bool) \
+    F(cl_device_info, CL_DEVICE_PROFILING_TIMER_RESOLUTION, size_type) \
+    F(cl_device_info, CL_DEVICE_ENDIAN_LITTLE, cl_bool) \
+    F(cl_device_info, CL_DEVICE_AVAILABLE, cl_bool) \
+    F(cl_device_info, CL_DEVICE_COMPILER_AVAILABLE, cl_bool) \
+    F(cl_device_info, CL_DEVICE_EXECUTION_CAPABILITIES, cl_device_exec_capabilities) \
+    F(cl_device_info, CL_DEVICE_PLATFORM, cl_platform_id) \
+    F(cl_device_info, CL_DEVICE_NAME, string) \
+    F(cl_device_info, CL_DEVICE_VENDOR, string) \
+    F(cl_device_info, CL_DRIVER_VERSION, string) \
+    F(cl_device_info, CL_DEVICE_PROFILE, string) \
+    F(cl_device_info, CL_DEVICE_VERSION, string) \
+    F(cl_device_info, CL_DEVICE_EXTENSIONS, string) \
+    \
+    F(cl_context_info, CL_CONTEXT_REFERENCE_COUNT, cl_uint) \
+    F(cl_context_info, CL_CONTEXT_DEVICES, cl::vector<Device>) \
+    F(cl_context_info, CL_CONTEXT_PROPERTIES, cl::vector<cl_context_properties>) \
+    \
+    F(cl_event_info, CL_EVENT_COMMAND_QUEUE, cl::CommandQueue) \
+    F(cl_event_info, CL_EVENT_COMMAND_TYPE, cl_command_type) \
+    F(cl_event_info, CL_EVENT_REFERENCE_COUNT, cl_uint) \
+    F(cl_event_info, CL_EVENT_COMMAND_EXECUTION_STATUS, cl_int) \
+    \
+    F(cl_profiling_info, CL_PROFILING_COMMAND_QUEUED, cl_ulong) \
+    F(cl_profiling_info, CL_PROFILING_COMMAND_SUBMIT, cl_ulong) \
+    F(cl_profiling_info, CL_PROFILING_COMMAND_START, cl_ulong) \
+    F(cl_profiling_info, CL_PROFILING_COMMAND_END, cl_ulong) \
+    \
+    F(cl_mem_info, CL_MEM_TYPE, cl_mem_object_type) \
+    F(cl_mem_info, CL_MEM_FLAGS, cl_mem_flags) \
+    F(cl_mem_info, CL_MEM_SIZE, size_type) \
+    F(cl_mem_info, CL_MEM_HOST_PTR, void*) \
+    F(cl_mem_info, CL_MEM_MAP_COUNT, cl_uint) \
+    F(cl_mem_info, CL_MEM_REFERENCE_COUNT, cl_uint) \
+    F(cl_mem_info, CL_MEM_CONTEXT, cl::Context) \
+    \
+    F(cl_image_info, CL_IMAGE_FORMAT, cl_image_format) \
+    F(cl_image_info, CL_IMAGE_ELEMENT_SIZE, size_type) \
+    F(cl_image_info, CL_IMAGE_ROW_PITCH, size_type) \
+    F(cl_image_info, CL_IMAGE_SLICE_PITCH, size_type) \
+    F(cl_image_info, CL_IMAGE_WIDTH, size_type) \
+    F(cl_image_info, CL_IMAGE_HEIGHT, size_type) \
+    F(cl_image_info, CL_IMAGE_DEPTH, size_type) \
+    \
+    F(cl_sampler_info, CL_SAMPLER_REFERENCE_COUNT, cl_uint) \
+    F(cl_sampler_info, CL_SAMPLER_CONTEXT, cl::Context) \
+    F(cl_sampler_info, CL_SAMPLER_NORMALIZED_COORDS, cl_bool) \
+    F(cl_sampler_info, CL_SAMPLER_ADDRESSING_MODE, cl_addressing_mode) \
+    F(cl_sampler_info, CL_SAMPLER_FILTER_MODE, cl_filter_mode) \
+    \
+    F(cl_program_info, CL_PROGRAM_REFERENCE_COUNT, cl_uint) \
+    F(cl_program_info, CL_PROGRAM_CONTEXT, cl::Context) \
+    F(cl_program_info, CL_PROGRAM_NUM_DEVICES, cl_uint) \
+    F(cl_program_info, CL_PROGRAM_DEVICES, cl::vector<Device>) \
+    F(cl_program_info, CL_PROGRAM_SOURCE, string) \
+    F(cl_program_info, CL_PROGRAM_BINARY_SIZES, cl::vector<size_type>) \
+    F(cl_program_info, CL_PROGRAM_BINARIES, cl::vector<cl::vector<unsigned char>>) \
+    \
+    F(cl_program_build_info, CL_PROGRAM_BUILD_STATUS, cl_build_status) \
+    F(cl_program_build_info, CL_PROGRAM_BUILD_OPTIONS, string) \
+    F(cl_program_build_info, CL_PROGRAM_BUILD_LOG, string) \
+    \
+    F(cl_kernel_info, CL_KERNEL_FUNCTION_NAME, string) \
+    F(cl_kernel_info, CL_KERNEL_NUM_ARGS, cl_uint) \
+    F(cl_kernel_info, CL_KERNEL_REFERENCE_COUNT, cl_uint) \
+    F(cl_kernel_info, CL_KERNEL_CONTEXT, cl::Context) \
+    F(cl_kernel_info, CL_KERNEL_PROGRAM, cl::Program) \
+    \
+    F(cl_kernel_work_group_info, CL_KERNEL_WORK_GROUP_SIZE, size_type) \
+    F(cl_kernel_work_group_info, CL_KERNEL_COMPILE_WORK_GROUP_SIZE, cl::detail::size_t_array) \
+    F(cl_kernel_work_group_info, CL_KERNEL_LOCAL_MEM_SIZE, cl_ulong) \
+    \
+    F(cl_command_queue_info, CL_QUEUE_CONTEXT, cl::Context) \
+    F(cl_command_queue_info, CL_QUEUE_DEVICE, cl::Device) \
+    F(cl_command_queue_info, CL_QUEUE_REFERENCE_COUNT, cl_uint) \
+    F(cl_command_queue_info, CL_QUEUE_PROPERTIES, cl_command_queue_properties)
+
+
+#define CL_HPP_PARAM_NAME_INFO_1_1_(F) \
+    F(cl_context_info, CL_CONTEXT_NUM_DEVICES, cl_uint)\
+    F(cl_device_info, CL_DEVICE_PREFERRED_VECTOR_WIDTH_HALF, cl_uint) \
+    F(cl_device_info, CL_DEVICE_NATIVE_VECTOR_WIDTH_CHAR, cl_uint) \
+    F(cl_device_info, CL_DEVICE_NATIVE_VECTOR_WIDTH_SHORT, cl_uint) \
+    F(cl_device_info, CL_DEVICE_NATIVE_VECTOR_WIDTH_INT, cl_uint) \
+    F(cl_device_info, CL_DEVICE_NATIVE_VECTOR_WIDTH_LONG, cl_uint) \
+    F(cl_device_info, CL_DEVICE_NATIVE_VECTOR_WIDTH_FLOAT, cl_uint) \
+    F(cl_device_info, CL_DEVICE_NATIVE_VECTOR_WIDTH_DOUBLE, cl_uint) \
+    F(cl_device_info, CL_DEVICE_NATIVE_VECTOR_WIDTH_HALF, cl_uint) \
+    F(cl_device_info, CL_DEVICE_DOUBLE_FP_CONFIG, cl_device_fp_config) \
+    F(cl_device_info, CL_DEVICE_HALF_FP_CONFIG, cl_device_fp_config) \
+    F(cl_device_info, CL_DEVICE_OPENCL_C_VERSION, string) \
+    \
+    F(cl_mem_info, CL_MEM_ASSOCIATED_MEMOBJECT, cl::Memory) \
+    F(cl_mem_info, CL_MEM_OFFSET, size_type) \
+    \
+    F(cl_kernel_work_group_info, CL_KERNEL_PREFERRED_WORK_GROUP_SIZE_MULTIPLE, size_type) \
+    F(cl_kernel_work_group_info, CL_KERNEL_PRIVATE_MEM_SIZE, cl_ulong) \
+    \
+    F(cl_event_info, CL_EVENT_CONTEXT, cl::Context)
+
+#define CL_HPP_PARAM_NAME_INFO_1_2_(F) \
+    F(cl_program_info, CL_PROGRAM_NUM_KERNELS, size_type) \
+    F(cl_program_info, CL_PROGRAM_KERNEL_NAMES, string) \
+    \
+    F(cl_program_build_info, CL_PROGRAM_BINARY_TYPE, cl_program_binary_type) \
+    \
+    F(cl_kernel_info, CL_KERNEL_ATTRIBUTES, string) \
+    \
+    F(cl_kernel_arg_info, CL_KERNEL_ARG_ADDRESS_QUALIFIER, cl_kernel_arg_address_qualifier) \
+    F(cl_kernel_arg_info, CL_KERNEL_ARG_ACCESS_QUALIFIER, cl_kernel_arg_access_qualifier) \
+    F(cl_kernel_arg_info, CL_KERNEL_ARG_TYPE_NAME, string) \
+    F(cl_kernel_arg_info, CL_KERNEL_ARG_NAME, string) \
+    F(cl_kernel_arg_info, CL_KERNEL_ARG_TYPE_QUALIFIER, cl_kernel_arg_type_qualifier) \
+    \
+    F(cl_device_info, CL_DEVICE_PARENT_DEVICE, cl::Device) \
+    F(cl_device_info, CL_DEVICE_PARTITION_PROPERTIES, cl::vector<cl_device_partition_property>) \
+    F(cl_device_info, CL_DEVICE_PARTITION_TYPE, cl::vector<cl_device_partition_property>) \
+    F(cl_device_info, CL_DEVICE_REFERENCE_COUNT, cl_uint) \
+    F(cl_device_info, CL_DEVICE_PREFERRED_INTEROP_USER_SYNC, size_type) \
+    F(cl_device_info, CL_DEVICE_PARTITION_AFFINITY_DOMAIN, cl_device_affinity_domain) \
+    F(cl_device_info, CL_DEVICE_BUILT_IN_KERNELS, string) \
+    \
+    F(cl_image_info, CL_IMAGE_ARRAY_SIZE, size_type) \
+    F(cl_image_info, CL_IMAGE_NUM_MIP_LEVELS, cl_uint) \
+    F(cl_image_info, CL_IMAGE_NUM_SAMPLES, cl_uint)
+
+#define CL_HPP_PARAM_NAME_INFO_2_0_(F) \
+    F(cl_device_info, CL_DEVICE_QUEUE_ON_HOST_PROPERTIES, cl_command_queue_properties) \
+    F(cl_device_info, CL_DEVICE_QUEUE_ON_DEVICE_PROPERTIES, cl_command_queue_properties) \
+    F(cl_device_info, CL_DEVICE_QUEUE_ON_DEVICE_PREFERRED_SIZE, cl_uint) \
+    F(cl_device_info, CL_DEVICE_QUEUE_ON_DEVICE_MAX_SIZE, cl_uint) \
+    F(cl_device_info, CL_DEVICE_MAX_ON_DEVICE_QUEUES, cl_uint) \
+    F(cl_device_info, CL_DEVICE_MAX_ON_DEVICE_EVENTS, cl_uint) \
+    F(cl_device_info, CL_DEVICE_MAX_PIPE_ARGS, cl_uint) \
+    F(cl_device_info, CL_DEVICE_PIPE_MAX_ACTIVE_RESERVATIONS, cl_uint) \
+    F(cl_device_info, CL_DEVICE_PIPE_MAX_PACKET_SIZE, cl_uint) \
+    F(cl_device_info, CL_DEVICE_SVM_CAPABILITIES, cl_device_svm_capabilities) \
+    F(cl_device_info, CL_DEVICE_PREFERRED_PLATFORM_ATOMIC_ALIGNMENT, cl_uint) \
+    F(cl_device_info, CL_DEVICE_PREFERRED_GLOBAL_ATOMIC_ALIGNMENT, cl_uint) \
+    F(cl_device_info, CL_DEVICE_PREFERRED_LOCAL_ATOMIC_ALIGNMENT, cl_uint) \
+    F(cl_command_queue_info, CL_QUEUE_SIZE, cl_uint) \
+    F(cl_mem_info, CL_MEM_USES_SVM_POINTER, cl_bool) \
+    F(cl_program_build_info, CL_PROGRAM_BUILD_GLOBAL_VARIABLE_TOTAL_SIZE, size_type) \
+    F(cl_pipe_info, CL_PIPE_PACKET_SIZE, cl_uint) \
+    F(cl_pipe_info, CL_PIPE_MAX_PACKETS, cl_uint)
+
+#define CL_HPP_PARAM_NAME_DEVICE_FISSION_(F) \
+    F(cl_device_info, CL_DEVICE_PARENT_DEVICE_EXT, cl_device_id) \
+    F(cl_device_info, CL_DEVICE_PARTITION_TYPES_EXT, cl::vector<cl_device_partition_property_ext>) \
+    F(cl_device_info, CL_DEVICE_AFFINITY_DOMAINS_EXT, cl::vector<cl_device_partition_property_ext>) \
+    F(cl_device_info, CL_DEVICE_REFERENCE_COUNT_EXT, cl_uint) \
+    F(cl_device_info, CL_DEVICE_PARTITION_STYLE_EXT, cl::vector<cl_device_partition_property_ext>)
+
+template <typename enum_type, cl_int Name>
+struct param_traits {};
+
+#define CL_HPP_DECLARE_PARAM_TRAITS_(token, param_name, T) \
+struct token; \
+template<> \
+struct param_traits<detail:: token, param_name> \
+{ \
+    enum { value = param_name }; \
+    typedef T param_type; \
+};
+
+CL_HPP_PARAM_NAME_INFO_1_0_(CL_HPP_DECLARE_PARAM_TRAITS_)
+#if CL_HPP_TARGET_OPENCL_VERSION >= 110
+CL_HPP_PARAM_NAME_INFO_1_1_(CL_HPP_DECLARE_PARAM_TRAITS_)
+#endif // CL_HPP_TARGET_OPENCL_VERSION >= 110
+#if CL_HPP_TARGET_OPENCL_VERSION >= 120
+CL_HPP_PARAM_NAME_INFO_1_2_(CL_HPP_DECLARE_PARAM_TRAITS_)
+#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
+#if CL_HPP_TARGET_OPENCL_VERSION >= 200
+CL_HPP_PARAM_NAME_INFO_2_0_(CL_HPP_DECLARE_PARAM_TRAITS_)
+#endif // CL_HPP_TARGET_OPENCL_VERSION >= 200
+
+
+// Flags deprecated in OpenCL 2.0
+#define CL_HPP_PARAM_NAME_INFO_1_0_DEPRECATED_IN_2_0_(F) \
+    F(cl_device_info, CL_DEVICE_QUEUE_PROPERTIES, cl_command_queue_properties)
+
+#define CL_HPP_PARAM_NAME_INFO_1_1_DEPRECATED_IN_2_0_(F) \
+    F(cl_device_info, CL_DEVICE_HOST_UNIFIED_MEMORY, cl_bool)
+
+#define CL_HPP_PARAM_NAME_INFO_1_2_DEPRECATED_IN_2_0_(F) \
+    F(cl_image_info, CL_IMAGE_BUFFER, cl::Buffer)
+
+// Include deprecated query flags based on versions
+// Only include deprecated 1.0 flags if 2.0 not active as there is an enum clash
+#if CL_HPP_TARGET_OPENCL_VERSION > 100 && CL_HPP_MINIMUM_OPENCL_VERSION < 200 && CL_HPP_TARGET_OPENCL_VERSION < 200
+CL_HPP_PARAM_NAME_INFO_1_0_DEPRECATED_IN_2_0_(CL_HPP_DECLARE_PARAM_TRAITS_)
+#endif // deprecated 1.0 flags
+#if CL_HPP_TARGET_OPENCL_VERSION > 110 && CL_HPP_MINIMUM_OPENCL_VERSION < 200
+CL_HPP_PARAM_NAME_INFO_1_1_DEPRECATED_IN_2_0_(CL_HPP_DECLARE_PARAM_TRAITS_)
+#endif // deprecated 1.1 flags
+#if CL_HPP_TARGET_OPENCL_VERSION > 120 && CL_HPP_MINIMUM_OPENCL_VERSION < 200
+CL_HPP_PARAM_NAME_INFO_1_2_DEPRECATED_IN_2_0_(CL_HPP_DECLARE_PARAM_TRAITS_)
+#endif // deprecated 1.2 flags
+
+#if defined(CL_HPP_USE_CL_DEVICE_FISSION)
+CL_HPP_PARAM_NAME_DEVICE_FISSION_(CL_HPP_DECLARE_PARAM_TRAITS_);
+#endif // CL_HPP_USE_CL_DEVICE_FISSION
+
+#ifdef CL_PLATFORM_ICD_SUFFIX_KHR
+CL_HPP_DECLARE_PARAM_TRAITS_(cl_platform_info, CL_PLATFORM_ICD_SUFFIX_KHR, string)
+#endif
+
+#ifdef CL_DEVICE_PROFILING_TIMER_OFFSET_AMD
+CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_PROFILING_TIMER_OFFSET_AMD, cl_ulong)
+#endif
+
+#ifdef CL_DEVICE_GLOBAL_FREE_MEMORY_AMD
+CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_GLOBAL_FREE_MEMORY_AMD, vector<size_type>)
+#endif
+#ifdef CL_DEVICE_SIMD_PER_COMPUTE_UNIT_AMD
+CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_SIMD_PER_COMPUTE_UNIT_AMD, cl_uint)
+#endif
+#ifdef CL_DEVICE_SIMD_WIDTH_AMD
+CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_SIMD_WIDTH_AMD, cl_uint)
+#endif
+#ifdef CL_DEVICE_SIMD_INSTRUCTION_WIDTH_AMD
+CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_SIMD_INSTRUCTION_WIDTH_AMD, cl_uint)
+#endif
+#ifdef CL_DEVICE_WAVEFRONT_WIDTH_AMD
+CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_WAVEFRONT_WIDTH_AMD, cl_uint)
+#endif
+#ifdef CL_DEVICE_GLOBAL_MEM_CHANNELS_AMD
+CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_GLOBAL_MEM_CHANNELS_AMD, cl_uint)
+#endif
+#ifdef CL_DEVICE_GLOBAL_MEM_CHANNEL_BANKS_AMD
+CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_GLOBAL_MEM_CHANNEL_BANKS_AMD, cl_uint)
+#endif
+#ifdef CL_DEVICE_GLOBAL_MEM_CHANNEL_BANK_WIDTH_AMD
+CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_GLOBAL_MEM_CHANNEL_BANK_WIDTH_AMD, cl_uint)
+#endif
+#ifdef CL_DEVICE_LOCAL_MEM_SIZE_PER_COMPUTE_UNIT_AMD
+CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_LOCAL_MEM_SIZE_PER_COMPUTE_UNIT_AMD, cl_uint)
+#endif
+#ifdef CL_DEVICE_LOCAL_MEM_BANKS_AMD
+CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_LOCAL_MEM_BANKS_AMD, cl_uint)
+#endif
+
+#ifdef CL_DEVICE_COMPUTE_CAPABILITY_MAJOR_NV
+CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_COMPUTE_CAPABILITY_MAJOR_NV, cl_uint)
+#endif
+#ifdef CL_DEVICE_COMPUTE_CAPABILITY_MINOR_NV
+CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_COMPUTE_CAPABILITY_MINOR_NV, cl_uint)
+#endif
+#ifdef CL_DEVICE_REGISTERS_PER_BLOCK_NV
+CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_REGISTERS_PER_BLOCK_NV, cl_uint)
+#endif
+#ifdef CL_DEVICE_WARP_SIZE_NV
+CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_WARP_SIZE_NV, cl_uint)
+#endif
+#ifdef CL_DEVICE_GPU_OVERLAP_NV
+CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_GPU_OVERLAP_NV, cl_bool)
+#endif
+#ifdef CL_DEVICE_KERNEL_EXEC_TIMEOUT_NV
+CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_KERNEL_EXEC_TIMEOUT_NV, cl_bool)
+#endif
+#ifdef CL_DEVICE_INTEGRATED_MEMORY_NV
+CL_HPP_DECLARE_PARAM_TRAITS_(cl_device_info, CL_DEVICE_INTEGRATED_MEMORY_NV, cl_bool)
+#endif
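+
+// Example of how the traits above are consumed (a sketch): a query such as
+// device.getInfo<CL_DEVICE_NAME>() deduces its return type from
+// param_traits<detail::cl_device_info, CL_DEVICE_NAME>::param_type,
+// i.e. cl::string for the declaration generated above.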
+
+// Convenience functions
+
+template <typename Func, typename T>
+inline cl_int
+getInfo(Func f, cl_uint name, T* param)
+{
+    return getInfoHelper(f, name, param, 0);
+}
+
+template <typename Func, typename Arg0>
+struct GetInfoFunctor0
+{
+    Func f_; const Arg0& arg0_;
+    cl_int operator ()(
+        cl_uint param, size_type size, void* value, size_type* size_ret)
+    { return f_(arg0_, param, size, value, size_ret); }
+};
+
+template <typename Func, typename Arg0, typename Arg1>
+struct GetInfoFunctor1
+{
+    Func f_; const Arg0& arg0_; const Arg1& arg1_;
+    cl_int operator ()(
+        cl_uint param, size_type size, void* value, size_type* size_ret)
+    { return f_(arg0_, arg1_, param, size, value, size_ret); }
+};
+
+template <typename Func, typename Arg0, typename T>
+inline cl_int
+getInfo(Func f, const Arg0& arg0, cl_uint name, T* param)
+{
+    GetInfoFunctor0<Func, Arg0> f0 = { f, arg0 };
+    return getInfoHelper(f0, name, param, 0);
+}
+
+template <typename Func, typename Arg0, typename Arg1, typename T>
+inline cl_int
+getInfo(Func f, const Arg0& arg0, const Arg1& arg1, cl_uint name, T* param)
+{
+    GetInfoFunctor1<Func, Arg0, Arg1> f0 = { f, arg0, arg1 };
+    return getInfoHelper(f0, name, param, 0);
+}
+
+
+template<typename T>
+struct ReferenceHandler
+{ };
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 120
+/**
+ * OpenCL 1.2 devices do have retain/release.
+ */
+template <>
+struct ReferenceHandler<cl_device_id>
+{
+    /**
+     * Retain the device.
+     * \param device A valid device created using createSubDevices
+     * \return
+     *   CL_SUCCESS if the function executed successfully.
+     *   CL_INVALID_DEVICE if device was not a valid subdevice
+     *   CL_OUT_OF_RESOURCES
+     *   CL_OUT_OF_HOST_MEMORY
+     */
+    static cl_int retain(cl_device_id device)
+    { return ::clRetainDevice(device); }
+    /**
+     * Release the device.
+     * \param device A valid device created using createSubDevices
+     * \return
+     *   CL_SUCCESS if the function executed successfully.
+     *   CL_INVALID_DEVICE if device was not a valid subdevice
+     *   CL_OUT_OF_RESOURCES
+     *   CL_OUT_OF_HOST_MEMORY
+     */
+    static cl_int release(cl_device_id device)
+    { return ::clReleaseDevice(device); }
+};
+#else // CL_HPP_TARGET_OPENCL_VERSION >= 120
+/**
+ * OpenCL 1.1 devices do not have retain/release.
+ */
+template <>
+struct ReferenceHandler<cl_device_id>
+{
+    // cl_device_id does not have retain().
+    static cl_int retain(cl_device_id)
+    { return CL_SUCCESS; }
+    // cl_device_id does not have release().
+    static cl_int release(cl_device_id)
+    { return CL_SUCCESS; }
+};
+#endif // ! (CL_HPP_TARGET_OPENCL_VERSION >= 120)
+
+template <>
+struct ReferenceHandler<cl_platform_id>
+{
+    // cl_platform_id does not have retain().
+    static cl_int retain(cl_platform_id)
+    { return CL_SUCCESS; }
+    // cl_platform_id does not have release().
+    static cl_int release(cl_platform_id)
+    { return CL_SUCCESS; }
+};
+
+template <>
+struct ReferenceHandler<cl_context>
+{
+    static cl_int retain(cl_context context)
+    { return ::clRetainContext(context); }
+    static cl_int release(cl_context context)
+    { return ::clReleaseContext(context); }
+};
+
+template <>
+struct ReferenceHandler<cl_command_queue>
+{
+    static cl_int retain(cl_command_queue queue)
+    { return ::clRetainCommandQueue(queue); }
+    static cl_int release(cl_command_queue queue)
+    { return ::clReleaseCommandQueue(queue); }
+};
+
+template <>
+struct ReferenceHandler<cl_mem>
+{
+    static cl_int retain(cl_mem memory)
+    { return ::clRetainMemObject(memory); }
+    static cl_int release(cl_mem memory)
+    { return ::clReleaseMemObject(memory); }
+};
+
+template <>
+struct ReferenceHandler<cl_sampler>
+{
+    static cl_int retain(cl_sampler sampler)
+    { return ::clRetainSampler(sampler); }
+    static cl_int release(cl_sampler sampler)
+    { return ::clReleaseSampler(sampler); }
+};
+
+template <>
+struct ReferenceHandler<cl_program>
+{
+    static cl_int retain(cl_program program)
+    { return ::clRetainProgram(program); }
+    static cl_int release(cl_program program)
+    { return ::clReleaseProgram(program); }
+};
+
+template <>
+struct ReferenceHandler<cl_kernel>
+{
+    static cl_int retain(cl_kernel kernel)
+    { return ::clRetainKernel(kernel); }
+    static cl_int release(cl_kernel kernel)
+    { return ::clReleaseKernel(kernel); }
+};
+
+template <>
+struct ReferenceHandler<cl_event>
+{
+    static cl_int retain(cl_event event)
+    { return ::clRetainEvent(event); }
+    static cl_int release(cl_event event)
+    { return ::clReleaseEvent(event); }
+};
+
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 120 && CL_HPP_MINIMUM_OPENCL_VERSION < 120
+// Extracts version number with major in the upper 16 bits, minor in the lower 16
+static cl_uint getVersion(const vector<char> &versionInfo)
+{
+    int highVersion = 0;
+    int lowVersion = 0;
+    // Skip the "OpenCL " prefix of the version string
+    int index = 7;
+    while(versionInfo[index] != '.' ) {
+        highVersion *= 10;
+        highVersion += versionInfo[index]-'0';
+        ++index;
+    }
+    ++index;
+    while(versionInfo[index] != ' ' && versionInfo[index] != '\0') {
+        lowVersion *= 10;
+        lowVersion += versionInfo[index]-'0';
+        ++index;
+    }
+    return (highVersion << 16) | lowVersion;
+}
+
+static cl_uint getPlatformVersion(cl_platform_id platform)
+{
+    size_type size = 0;
+    clGetPlatformInfo(platform, CL_PLATFORM_VERSION, 0, NULL, &size);
+
+    vector<char> versionInfo(size);
+    clGetPlatformInfo(platform, CL_PLATFORM_VERSION, size, versionInfo.data(), &size);
+    return getVersion(versionInfo);
+}
+
+static cl_uint getDevicePlatformVersion(cl_device_id device)
+{
+    cl_platform_id platform;
+    clGetDeviceInfo(device, CL_DEVICE_PLATFORM, sizeof(platform), &platform, NULL);
+    return getPlatformVersion(platform);
+}
+
+static cl_uint getContextPlatformVersion(cl_context context)
+{
+    // The platform cannot be queried directly, so we first have to grab a
+    // device and obtain its platform
+    size_type size = 0;
+    clGetContextInfo(context, CL_CONTEXT_DEVICES, 0, NULL, &size);
+    if (size == 0)
+        return 0;
+    vector<cl_device_id> devices(size/sizeof(cl_device_id));
+    clGetContextInfo(context, CL_CONTEXT_DEVICES, size, devices.data(), NULL);
+    return getDevicePlatformVersion(devices[0]);
+}
+#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120 && CL_HPP_MINIMUM_OPENCL_VERSION < 120
+
+template <typename T>
+class Wrapper
+{
+public:
+    typedef T cl_type;
+
+protected:
+    cl_type object_;
+
+public:
+    Wrapper() : object_(NULL) { }
+
+    Wrapper(const cl_type &obj, bool retainObject) : object_(obj)
+    {
+        if (retainObject) {
+            detail::errHandler(retain(), __RETAIN_ERR);
+        }
+    }
+
+    ~Wrapper()
+    {
+        if (object_ != NULL) { release(); }
+    }
+
+    Wrapper(const Wrapper& rhs)
+    {
+        object_ = rhs.object_;
+        detail::errHandler(retain(), __RETAIN_ERR);
+    }
+
+    Wrapper(Wrapper&& rhs) CL_HPP_NOEXCEPT_
+    {
+        object_ = rhs.object_;
+        rhs.object_ = NULL;
+    }
+
+    Wrapper& operator = (const Wrapper& rhs)
+    {
+        if (this != &rhs) {
+            detail::errHandler(release(), __RELEASE_ERR);
+            object_ = rhs.object_;
+            detail::errHandler(retain(), __RETAIN_ERR);
+        }
+        return *this;
+    }
+
+    Wrapper& operator = (Wrapper&& rhs)
+    {
+        if (this != &rhs) {
+            detail::errHandler(release(), __RELEASE_ERR);
+            object_ = rhs.object_;
+            rhs.object_ = NULL;
+        }
+        return *this;
+    }
+
+    Wrapper& operator = (const cl_type &rhs)
+    {
+        detail::errHandler(release(), __RELEASE_ERR);
+        object_ = rhs;
+        return *this;
+    }
+
+    const cl_type& operator ()() const { return object_; }
+
+    cl_type& operator ()() { return object_; }
+
+    const cl_type get() const { return object_; }
+
+    cl_type get() { return object_; }
+
+
+protected:
+    template<typename Func, typename U>
+    friend inline cl_int getInfoHelper(Func, cl_uint, U*, int, typename U::cl_type);
+
+    cl_int retain() const
+    {
+        if (object_ != nullptr) {
+            return ReferenceHandler<cl_type>::retain(object_);
+        }
+        else {
+            return CL_SUCCESS;
+        }
+    }
+
+    cl_int release() const
+    {
+        if (object_ != nullptr) {
+            return ReferenceHandler<cl_type>::release(object_);
+        }
+        else {
+            return CL_SUCCESS;
+        }
+    }
+};
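+
+// The RAII pattern above ties OpenCL reference counts to C++ object
+// lifetime; e.g. (a sketch) copying a wrapped cl_context calls
+// clRetainContext via ReferenceHandler<cl_context>::retain, and the
+// destructor releases it, so handles are not leaked on scope exit.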
+template <>
+class Wrapper<cl_device_id>
+{
+public:
+    typedef cl_device_id cl_type;
+
+protected:
+    cl_type object_;
+    bool referenceCountable_;
+
+    static bool isReferenceCountable(cl_device_id device)
+    {
+        bool retVal = false;
+#if CL_HPP_TARGET_OPENCL_VERSION >= 120
+#if CL_HPP_MINIMUM_OPENCL_VERSION < 120
+        if (device != NULL) {
+            int version = getDevicePlatformVersion(device);
+            if(version > ((1 << 16) + 1)) {
+                retVal = true;
+            }
+        }
+#else // CL_HPP_MINIMUM_OPENCL_VERSION < 120
+        retVal = true;
+#endif // CL_HPP_MINIMUM_OPENCL_VERSION < 120
+#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
+        return retVal;
+    }
+
+public:
+    Wrapper() : object_(NULL), referenceCountable_(false)
+    {
+    }
+
+    Wrapper(const cl_type &obj, bool retainObject) :
+        object_(obj),
+        referenceCountable_(false)
+    {
+        referenceCountable_ = isReferenceCountable(obj);
+
+        if (retainObject) {
+            detail::errHandler(retain(), __RETAIN_ERR);
+        }
+    }
+
+    ~Wrapper()
+    {
+        release();
+    }
+
+    Wrapper(const Wrapper& rhs)
+    {
+        object_ = rhs.object_;
+        referenceCountable_ = isReferenceCountable(object_);
+        detail::errHandler(retain(), __RETAIN_ERR);
+    }
+
+    Wrapper(Wrapper&& rhs) CL_HPP_NOEXCEPT_
+    {
+        object_ = rhs.object_;
+        referenceCountable_ = rhs.referenceCountable_;
+        rhs.object_ = NULL;
+        rhs.referenceCountable_ = false;
+    }
+
+    Wrapper& operator = (const Wrapper& rhs)
+    {
+        if (this != &rhs) {
+            detail::errHandler(release(), __RELEASE_ERR);
+            object_ = rhs.object_;
+            referenceCountable_ = rhs.referenceCountable_;
+            detail::errHandler(retain(), __RETAIN_ERR);
+        }
+        return *this;
+    }
+
+    Wrapper& operator = (Wrapper&& rhs)
+    {
+        if (this != &rhs) {
+            detail::errHandler(release(), __RELEASE_ERR);
+            object_ = rhs.object_;
+            referenceCountable_ = rhs.referenceCountable_;
+            rhs.object_ = NULL;
+            rhs.referenceCountable_ = false;
+        }
+        return *this;
+    }
+
+    Wrapper& operator = (const cl_type &rhs)
+    {
+        detail::errHandler(release(), __RELEASE_ERR);
+        object_ = rhs;
+        referenceCountable_ = isReferenceCountable(object_);
+        return *this;
+    }
+
+    const cl_type& operator ()() const { return object_; }
+
+    cl_type& operator ()() { return object_; }
+
+    const cl_type get() const { return object_; }
+
+    cl_type get() { return object_; }
+
+protected:
+    template<typename Func, typename U>
+    friend inline cl_int getInfoHelper(Func, cl_uint, U*, int, typename U::cl_type);
+
+    template<typename Func, typename U>
+    friend inline cl_int getInfoHelper(Func, cl_uint, vector<U>*, int, typename U::cl_type);
+
+    cl_int retain() const
+    {
+        if( object_ != nullptr && referenceCountable_ ) {
+            return ReferenceHandler<cl_type>::retain(object_);
+        }
+        else {
+            return CL_SUCCESS;
+        }
+    }
+
+    cl_int release() const
+    {
+        if (object_ != nullptr && referenceCountable_) {
+            return ReferenceHandler<cl_type>::release(object_);
+        }
+        else {
+            return CL_SUCCESS;
+        }
+    }
+};
+
+template <typename T>
+inline bool operator==(const Wrapper<T> &lhs, const Wrapper<T> &rhs)
+{
+    return lhs() == rhs();
+}
+
+template <typename T>
+inline bool operator!=(const Wrapper<T> &lhs, const Wrapper<T> &rhs)
+{
+    return !operator==(lhs, rhs);
+}
+
+} // namespace detail
+//! \endcond
+
+
+using BuildLogType = vector<std::pair<cl::Device, typename detail::param_traits<detail::cl_program_build_info, CL_PROGRAM_BUILD_LOG>::param_type>>;
+#if defined(CL_HPP_ENABLE_EXCEPTIONS)
+/**
+* Exception class for build errors to carry build info
+*/
+class BuildError : public Error
+{
+private:
+    BuildLogType buildLogs;
+public:
+    BuildError(cl_int err, const char * errStr, const BuildLogType &vec) : Error(err, errStr), buildLogs(vec)
+    {
+    }
+
+    BuildLogType getBuildLog() const
+    {
+        return buildLogs;
+    }
+};
+namespace detail {
+    static inline cl_int buildErrHandler(
+        cl_int err,
+        const char * errStr,
+        const BuildLogType &buildLogs)
+    {
+        if (err != CL_SUCCESS) {
+            throw BuildError(err, errStr, buildLogs);
+        }
+        return err;
+    }
+} // namespace detail
+
+#else
+namespace detail {
+    static inline cl_int buildErrHandler(
+        cl_int err,
+        const char * errStr,
+        const BuildLogType &buildLogs)
+    {
+        (void)buildLogs; // suppress unused variable warning
+        (void)errStr;
+        return err;
+    }
+} // namespace detail
+#endif // #if defined(CL_HPP_ENABLE_EXCEPTIONS)
+
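+/* Example use of BuildError (a sketch; assumes CL_HPP_ENABLE_EXCEPTIONS is
+ * defined and a cl::Program named program is in scope):
+ *
+ *   try {
+ *       program.build("-cl-std=CL2.0");
+ *   } catch (const cl::BuildError &e) {
+ *       for (auto &log : e.getBuildLog()) {
+ *           std::cerr << log.second << std::endl;
+ *       }
+ *   }
+ */
+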
+/*! \struct ImageFormat
+ *  \brief Adds constructors and member functions for cl_image_format.
+ *
+ *  \see cl_image_format
+ */
+struct ImageFormat : public cl_image_format
+{
+    //! \brief Default constructor - performs no initialization.
+    ImageFormat(){}
+
+    //! \brief Initializing constructor.
+    ImageFormat(cl_channel_order order, cl_channel_type type)
+    {
+        image_channel_order = order;
+        image_channel_data_type = type;
+    }
+
+    //! \brief Assignment operator.
+    ImageFormat& operator = (const ImageFormat& rhs)
+    {
+        if (this != &rhs) {
+            this->image_channel_data_type = rhs.image_channel_data_type;
+            this->image_channel_order = rhs.image_channel_order;
+        }
+        return *this;
+    }
+};
+
+/*! \brief Class interface for cl_device_id.
+ *
+ *  \note Copies of these objects are inexpensive, since they don't 'own'
+ *        any underlying resources or data structures.
+ *
+ *  \see cl_device_id
+ */
+class Device : public detail::Wrapper<cl_device_id>
+{
+private:
+    static std::once_flag default_initialized_;
+    static Device default_;
+    static cl_int default_error_;
+
+    /*! \brief Create the default device.
+     *
+     * This sets @c default_ and @c default_error_. It does not throw
+     * @c cl::Error.
+     */
+    static void makeDefault();
+
+    /*! \brief Create the default device from a provided device.
+     *
+     * This sets @c default_. It does not throw
+     * @c cl::Error.
+     */
+    static void makeDefaultProvided(const Device &p) {
+        default_ = p;
+    }
+
+public:
+#ifdef CL_HPP_UNIT_TEST_ENABLE
+    /*! \brief Reset the default.
+     *
+     * This sets @c default_ to an empty value to support cleanup in
+     * the unit test framework.
+     * This function is not thread safe.
+     */
+    static void unitTestClearDefault() {
+        default_ = Device();
+    }
+#endif // #ifdef CL_HPP_UNIT_TEST_ENABLE
+
+    //! \brief Default constructor - initializes to NULL.
+    Device() : detail::Wrapper<cl_type>() { }
+
+    /*! \brief Constructor from cl_device_id.
+     *
+     *  This simply copies the device ID value, which is an inexpensive operation.
+     */
+    explicit Device(const cl_device_id &device, bool retainObject = false) :
+        detail::Wrapper<cl_type>(device, retainObject) { }
+
+    /*! \brief Returns the first device on the default context.
+     *
+     *  \see Context::getDefault()
+     */
+    static Device getDefault(
+        cl_int *errResult = NULL)
+    {
+        std::call_once(default_initialized_, makeDefault);
+        detail::errHandler(default_error_);
+        if (errResult != NULL) {
+            *errResult = default_error_;
+        }
+        return default_;
+    }
+
+    /**
+     * Modify the default device to be used by
+     * subsequent operations.
+     * Will only set the default if no default was previously created.
+     * @return updated default device.
+     *         Should be compared to the passed value to ensure that it was updated.
+     */
+    static Device setDefault(const Device &default_device)
+    {
+        std::call_once(default_initialized_, makeDefaultProvided, std::cref(default_device));
+        detail::errHandler(default_error_);
+        return default_;
+    }
+
+    /*! \brief Assignment operator from cl_device_id.
+     *
+     *  This simply copies the device ID value, which is an inexpensive operation.
+     */
+    Device& operator = (const cl_device_id& rhs)
+    {
+        detail::Wrapper<cl_type>::operator=(rhs);
+        return *this;
+    }
+
+    /*! \brief Copy constructor to forward copy to the superclass correctly.
+     * Required for MSVC.
+     */
+    Device(const Device& dev) : detail::Wrapper<cl_type>(dev) {}
+
+    /*! \brief Copy assignment to forward copy to the superclass correctly.
+     * Required for MSVC.
+     */
+    Device& operator = (const Device &dev)
+    {
+        detail::Wrapper<cl_type>::operator=(dev);
+        return *this;
+    }
\brief Move constructor to forward move to the superclass correctly. + * Required for MSVC. + */ + Device(Device&& dev) CL_HPP_NOEXCEPT_ : detail::Wrapper(std::move(dev)) {} + + /*! \brief Move assignment to forward move to the superclass correctly. + * Required for MSVC. + */ + Device& operator = (Device &&dev) + { + detail::Wrapper::operator=(std::move(dev)); + return *this; + } + + //! \brief Wrapper for clGetDeviceInfo(). + template + cl_int getInfo(cl_device_info name, T* param) const + { + return detail::errHandler( + detail::getInfo(&::clGetDeviceInfo, object_, name, param), + __GET_DEVICE_INFO_ERR); + } + + //! \brief Wrapper for clGetDeviceInfo() that returns by value. + template typename + detail::param_traits::param_type + getInfo(cl_int* err = NULL) const + { + typename detail::param_traits< + detail::cl_device_info, name>::param_type param; + cl_int result = getInfo(name, ¶m); + if (err != NULL) { + *err = result; + } + return param; + } + + /** + * CL 1.2 version + */ +#if CL_HPP_TARGET_OPENCL_VERSION >= 120 + //! \brief Wrapper for clCreateSubDevices(). + cl_int createSubDevices( + const cl_device_partition_property * properties, + vector* devices) + { + cl_uint n = 0; + cl_int err = clCreateSubDevices(object_, properties, 0, NULL, &n); + if (err != CL_SUCCESS) { + return detail::errHandler(err, __CREATE_SUB_DEVICES_ERR); + } + + vector ids(n); + err = clCreateSubDevices(object_, properties, n, ids.data(), NULL); + if (err != CL_SUCCESS) { + return detail::errHandler(err, __CREATE_SUB_DEVICES_ERR); + } + + // Cannot trivially assign because we need to capture intermediates + // with safe construction + if (devices) { + devices->resize(ids.size()); + + // Assign to param, constructing with retain behaviour + // to correctly capture each underlying CL object + for (size_type i = 0; i < ids.size(); i++) { + // We do not need to retain because this device is being created + // by the runtime + (*devices)[i] = Device(ids[i], false); + } + } + + return CL_SUCCESS; + } +#elif defined(CL_HPP_USE_CL_DEVICE_FISSION) + +/** + * CL 1.1 version that uses device fission extension. 
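+ *
+ * Illustrative sketch (not part of the original header), assuming the
+ * cl_ext_device_fission extension is available and 'dev' is a valid
+ * cl::Device:
+ * \code
+ * cl_device_partition_property_ext props[] = {
+ *     CL_DEVICE_PARTITION_EQUALLY_EXT, 2, CL_PROPERTIES_LIST_END_EXT };
+ * cl::vector<cl::Device> subDevices;
+ * cl_int err = dev.createSubDevices(props, &subDevices);
+ * \endcode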
+ */ + cl_int createSubDevices( + const cl_device_partition_property_ext * properties, + vector* devices) + { + typedef CL_API_ENTRY cl_int + ( CL_API_CALL * PFN_clCreateSubDevicesEXT)( + cl_device_id /*in_device*/, + const cl_device_partition_property_ext * /* properties */, + cl_uint /*num_entries*/, + cl_device_id * /*out_devices*/, + cl_uint * /*num_devices*/ ) CL_EXT_SUFFIX__VERSION_1_1; + + static PFN_clCreateSubDevicesEXT pfn_clCreateSubDevicesEXT = NULL; + CL_HPP_INIT_CL_EXT_FCN_PTR_(clCreateSubDevicesEXT); + + cl_uint n = 0; + cl_int err = pfn_clCreateSubDevicesEXT(object_, properties, 0, NULL, &n); + if (err != CL_SUCCESS) { + return detail::errHandler(err, __CREATE_SUB_DEVICES_ERR); + } + + vector ids(n); + err = pfn_clCreateSubDevicesEXT(object_, properties, n, ids.data(), NULL); + if (err != CL_SUCCESS) { + return detail::errHandler(err, __CREATE_SUB_DEVICES_ERR); + } + // Cannot trivially assign because we need to capture intermediates + // with safe construction + if (devices) { + devices->resize(ids.size()); + + // Assign to param, constructing with retain behaviour + // to correctly capture each underlying CL object + for (size_type i = 0; i < ids.size(); i++) { + // We do not need to retain because this device is being created + // by the runtime + (*devices)[i] = Device(ids[i], false); + } + } + return CL_SUCCESS; + } +#endif // defined(CL_HPP_USE_CL_DEVICE_FISSION) +}; + +CL_HPP_DEFINE_STATIC_MEMBER_ std::once_flag Device::default_initialized_; +CL_HPP_DEFINE_STATIC_MEMBER_ Device Device::default_; +CL_HPP_DEFINE_STATIC_MEMBER_ cl_int Device::default_error_ = CL_SUCCESS; + +/*! \brief Class interface for cl_platform_id. + * + * \note Copies of these objects are inexpensive, since they don't 'own' + * any underlying resources or data structures. + * + * \see cl_platform_id + */ +class Platform : public detail::Wrapper +{ +private: + static std::once_flag default_initialized_; + static Platform default_; + static cl_int default_error_; + + /*! \brief Create the default context. + * + * This sets @c default_ and @c default_error_. It does not throw + * @c cl::Error. + */ + static void makeDefault() { + /* Throwing an exception from a call_once invocation does not do + * what we wish, so we catch it and save the error. + */ +#if defined(CL_HPP_ENABLE_EXCEPTIONS) + try +#endif + { + // If default wasn't passed ,generate one + // Otherwise set it + cl_uint n = 0; + + cl_int err = ::clGetPlatformIDs(0, NULL, &n); + if (err != CL_SUCCESS) { + default_error_ = err; + return; + } + if (n == 0) { + default_error_ = CL_INVALID_PLATFORM; + return; + } + + vector ids(n); + err = ::clGetPlatformIDs(n, ids.data(), NULL); + if (err != CL_SUCCESS) { + default_error_ = err; + return; + } + + default_ = Platform(ids[0]); + } +#if defined(CL_HPP_ENABLE_EXCEPTIONS) + catch (cl::Error &e) { + default_error_ = e.err(); + } +#endif + } + + /*! \brief Create the default platform from a provided platform. + * + * This sets @c default_. It does not throw + * @c cl::Error. + */ + static void makeDefaultProvided(const Platform &p) { + default_ = p; + } + +public: +#ifdef CL_HPP_UNIT_TEST_ENABLE + /*! \brief Reset the default. + * + * This sets @c default_ to an empty value to support cleanup in + * the unit test framework. + * This function is not thread safe. + */ + static void unitTestClearDefault() { + default_ = Platform(); + } +#endif // #ifdef CL_HPP_UNIT_TEST_ENABLE + + //! \brief Default constructor - initializes to NULL. + Platform() : detail::Wrapper() { } + + /*! 
\brief Constructor from cl_platform_id. + * + * \param retainObject will cause the constructor to retain its cl object. + * Defaults to false to maintain compatibility with + * earlier versions. + * This simply copies the platform ID value, which is an inexpensive operation. + */ + explicit Platform(const cl_platform_id &platform, bool retainObject = false) : + detail::Wrapper(platform, retainObject) { } + + /*! \brief Assignment operator from cl_platform_id. + * + * This simply copies the platform ID value, which is an inexpensive operation. + */ + Platform& operator = (const cl_platform_id& rhs) + { + detail::Wrapper::operator=(rhs); + return *this; + } + + static Platform getDefault( + cl_int *errResult = NULL) + { + std::call_once(default_initialized_, makeDefault); + detail::errHandler(default_error_); + if (errResult != NULL) { + *errResult = default_error_; + } + return default_; + } + + /** + * Modify the default platform to be used by + * subsequent operations. + * Will only set the default if no default was previously created. + * @return updated default platform. + * Should be compared to the passed value to ensure that it was updated. + */ + static Platform setDefault(const Platform &default_platform) + { + std::call_once(default_initialized_, makeDefaultProvided, std::cref(default_platform)); + detail::errHandler(default_error_); + return default_; + } + + //! \brief Wrapper for clGetPlatformInfo(). + cl_int getInfo(cl_platform_info name, string* param) const + { + return detail::errHandler( + detail::getInfo(&::clGetPlatformInfo, object_, name, param), + __GET_PLATFORM_INFO_ERR); + } + + //! \brief Wrapper for clGetPlatformInfo() that returns by value. + template typename + detail::param_traits::param_type + getInfo(cl_int* err = NULL) const + { + typename detail::param_traits< + detail::cl_platform_info, name>::param_type param; + cl_int result = getInfo(name, ¶m); + if (err != NULL) { + *err = result; + } + return param; + } + + /*! \brief Gets a list of devices for this platform. + * + * Wraps clGetDeviceIDs(). + */ + cl_int getDevices( + cl_device_type type, + vector* devices) const + { + cl_uint n = 0; + if( devices == NULL ) { + return detail::errHandler(CL_INVALID_ARG_VALUE, __GET_DEVICE_IDS_ERR); + } + cl_int err = ::clGetDeviceIDs(object_, type, 0, NULL, &n); + if (err != CL_SUCCESS) { + return detail::errHandler(err, __GET_DEVICE_IDS_ERR); + } + + vector ids(n); + err = ::clGetDeviceIDs(object_, type, n, ids.data(), NULL); + if (err != CL_SUCCESS) { + return detail::errHandler(err, __GET_DEVICE_IDS_ERR); + } + + // Cannot trivially assign because we need to capture intermediates + // with safe construction + // We must retain things we obtain from the API to avoid releasing + // API-owned objects. + if (devices) { + devices->resize(ids.size()); + + // Assign to param, constructing with retain behaviour + // to correctly capture each underlying CL object + for (size_type i = 0; i < ids.size(); i++) { + (*devices)[i] = Device(ids[i], true); + } + } + return CL_SUCCESS; + } + +#if defined(CL_HPP_USE_DX_INTEROP) + /*! \brief Get the list of available D3D10 devices. + * + * \param d3d_device_source. + * + * \param d3d_object. + * + * \param d3d_device_set. + * + * \param devices returns a vector of OpenCL D3D10 devices found. The cl::Device + * values returned in devices can be used to identify a specific OpenCL + * device. If \a devices argument is NULL, this argument is ignored. 
+ * + * \return One of the following values: + * - CL_SUCCESS if the function is executed successfully. + * + * The application can query specific capabilities of the OpenCL device(s) + * returned by cl::getDevices. This can be used by the application to + * determine which device(s) to use. + * + * \note In the case that exceptions are enabled and a return value + * other than CL_SUCCESS is generated, then cl::Error exception is + * generated. + */ + cl_int getDevices( + cl_d3d10_device_source_khr d3d_device_source, + void * d3d_object, + cl_d3d10_device_set_khr d3d_device_set, + vector* devices) const + { + typedef CL_API_ENTRY cl_int (CL_API_CALL *PFN_clGetDeviceIDsFromD3D10KHR)( + cl_platform_id platform, + cl_d3d10_device_source_khr d3d_device_source, + void * d3d_object, + cl_d3d10_device_set_khr d3d_device_set, + cl_uint num_entries, + cl_device_id * devices, + cl_uint* num_devices); + + if( devices == NULL ) { + return detail::errHandler(CL_INVALID_ARG_VALUE, __GET_DEVICE_IDS_ERR); + } + + static PFN_clGetDeviceIDsFromD3D10KHR pfn_clGetDeviceIDsFromD3D10KHR = NULL; + CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(object_, clGetDeviceIDsFromD3D10KHR); + + cl_uint n = 0; + cl_int err = pfn_clGetDeviceIDsFromD3D10KHR( + object_, + d3d_device_source, + d3d_object, + d3d_device_set, + 0, + NULL, + &n); + if (err != CL_SUCCESS) { + return detail::errHandler(err, __GET_DEVICE_IDS_ERR); + } + + vector ids(n); + err = pfn_clGetDeviceIDsFromD3D10KHR( + object_, + d3d_device_source, + d3d_object, + d3d_device_set, + n, + ids.data(), + NULL); + if (err != CL_SUCCESS) { + return detail::errHandler(err, __GET_DEVICE_IDS_ERR); + } + + // Cannot trivially assign because we need to capture intermediates + // with safe construction + // We must retain things we obtain from the API to avoid releasing + // API-owned objects. + if (devices) { + devices->resize(ids.size()); + + // Assign to param, constructing with retain behaviour + // to correctly capture each underlying CL object + for (size_type i = 0; i < ids.size(); i++) { + (*devices)[i] = Device(ids[i], true); + } + } + return CL_SUCCESS; + } +#endif + + /*! \brief Gets a list of available platforms. + * + * Wraps clGetPlatformIDs(). + */ + static cl_int get( + vector* platforms) + { + cl_uint n = 0; + + if( platforms == NULL ) { + return detail::errHandler(CL_INVALID_ARG_VALUE, __GET_PLATFORM_IDS_ERR); + } + + cl_int err = ::clGetPlatformIDs(0, NULL, &n); + if (err != CL_SUCCESS) { + return detail::errHandler(err, __GET_PLATFORM_IDS_ERR); + } + + vector ids(n); + err = ::clGetPlatformIDs(n, ids.data(), NULL); + if (err != CL_SUCCESS) { + return detail::errHandler(err, __GET_PLATFORM_IDS_ERR); + } + + if (platforms) { + platforms->resize(ids.size()); + + // Platforms don't reference count + for (size_type i = 0; i < ids.size(); i++) { + (*platforms)[i] = Platform(ids[i]); + } + } + return CL_SUCCESS; + } + + /*! \brief Gets the first available platform. + * + * Wraps clGetPlatformIDs(), returning the first result. + */ + static cl_int get( + Platform * platform) + { + cl_int err; + Platform default_platform = Platform::getDefault(&err); + if (platform) { + *platform = default_platform; + } + return err; + } + + /*! \brief Gets the first available platform, returning it by value. + * + * \return Returns a valid platform if one is available. + * If no platform is available will return a null platform. + * Throws an exception if no platforms are available + * or an error condition occurs. + * Wraps clGetPlatformIDs(), returning the first result. 
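+     *
+     * A minimal usage sketch:
+     * \code
+     * cl_int err = CL_SUCCESS;
+     * cl::Platform platform = cl::Platform::get(&err);
+     * \endcode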
+ */ + static Platform get( + cl_int * errResult = NULL) + { + cl_int err; + Platform default_platform = Platform::getDefault(&err); + if (errResult) { + *errResult = err; + } + return default_platform; + } + +#if CL_HPP_TARGET_OPENCL_VERSION >= 120 + //! \brief Wrapper for clUnloadCompiler(). + cl_int + unloadCompiler() + { + return ::clUnloadPlatformCompiler(object_); + } +#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120 +}; // class Platform + +CL_HPP_DEFINE_STATIC_MEMBER_ std::once_flag Platform::default_initialized_; +CL_HPP_DEFINE_STATIC_MEMBER_ Platform Platform::default_; +CL_HPP_DEFINE_STATIC_MEMBER_ cl_int Platform::default_error_ = CL_SUCCESS; + + +/** + * Deprecated APIs for 1.2 + */ +#if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS) +/** + * Unload the OpenCL compiler. + * \note Deprecated for OpenCL 1.2. Use Platform::unloadCompiler instead. + */ +inline CL_EXT_PREFIX__VERSION_1_1_DEPRECATED cl_int +UnloadCompiler() CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED; +inline cl_int +UnloadCompiler() +{ + return ::clUnloadCompiler(); +} +#endif // #if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS) + +/*! \brief Class interface for cl_context. + * + * \note Copies of these objects are shallow, meaning that the copy will refer + * to the same underlying cl_context as the original. For details, see + * clRetainContext() and clReleaseContext(). + * + * \see cl_context + */ +class Context + : public detail::Wrapper +{ +private: + static std::once_flag default_initialized_; + static Context default_; + static cl_int default_error_; + + /*! \brief Create the default context from the default device type in the default platform. + * + * This sets @c default_ and @c default_error_. It does not throw + * @c cl::Error. + */ + static void makeDefault() { + /* Throwing an exception from a call_once invocation does not do + * what we wish, so we catch it and save the error. + */ +#if defined(CL_HPP_ENABLE_EXCEPTIONS) + try +#endif + { +#if !defined(__APPLE__) && !defined(__MACOS) + const Platform &p = Platform::getDefault(); + cl_platform_id defaultPlatform = p(); + cl_context_properties properties[3] = { + CL_CONTEXT_PLATFORM, (cl_context_properties)defaultPlatform, 0 + }; +#else // #if !defined(__APPLE__) && !defined(__MACOS) + cl_context_properties *properties = nullptr; +#endif // #if !defined(__APPLE__) && !defined(__MACOS) + + default_ = Context( + CL_DEVICE_TYPE_DEFAULT, + properties, + NULL, + NULL, + &default_error_); + } +#if defined(CL_HPP_ENABLE_EXCEPTIONS) + catch (cl::Error &e) { + default_error_ = e.err(); + } +#endif + } + + + /*! \brief Create the default context from a provided Context. + * + * This sets @c default_. It does not throw + * @c cl::Error. + */ + static void makeDefaultProvided(const Context &c) { + default_ = c; + } + +public: +#ifdef CL_HPP_UNIT_TEST_ENABLE + /*! \brief Reset the default. + * + * This sets @c default_ to an empty value to support cleanup in + * the unit test framework. + * This function is not thread safe. + */ + static void unitTestClearDefault() { + default_ = Context(); + } +#endif // #ifdef CL_HPP_UNIT_TEST_ENABLE + + /*! \brief Constructs a context including a list of specified devices. + * + * Wraps clCreateContext(). 
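+     *
+     * A minimal sketch (error handling elided; names are illustrative):
+     * \code
+     * cl::vector<cl::Device> devices;
+     * cl::Platform::getDefault().getDevices(CL_DEVICE_TYPE_GPU, &devices);
+     * cl::Context context(devices);
+     * \endcode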
+ */ + Context( + const vector& devices, + cl_context_properties* properties = NULL, + void (CL_CALLBACK * notifyFptr)( + const char *, + const void *, + size_type, + void *) = NULL, + void* data = NULL, + cl_int* err = NULL) + { + cl_int error; + + size_type numDevices = devices.size(); + vector deviceIDs(numDevices); + + for( size_type deviceIndex = 0; deviceIndex < numDevices; ++deviceIndex ) { + deviceIDs[deviceIndex] = (devices[deviceIndex])(); + } + + object_ = ::clCreateContext( + properties, (cl_uint) numDevices, + deviceIDs.data(), + notifyFptr, data, &error); + + detail::errHandler(error, __CREATE_CONTEXT_ERR); + if (err != NULL) { + *err = error; + } + } + + Context( + const Device& device, + cl_context_properties* properties = NULL, + void (CL_CALLBACK * notifyFptr)( + const char *, + const void *, + size_type, + void *) = NULL, + void* data = NULL, + cl_int* err = NULL) + { + cl_int error; + + cl_device_id deviceID = device(); + + object_ = ::clCreateContext( + properties, 1, + &deviceID, + notifyFptr, data, &error); + + detail::errHandler(error, __CREATE_CONTEXT_ERR); + if (err != NULL) { + *err = error; + } + } + + /*! \brief Constructs a context including all or a subset of devices of a specified type. + * + * Wraps clCreateContextFromType(). + */ + Context( + cl_device_type type, + cl_context_properties* properties = NULL, + void (CL_CALLBACK * notifyFptr)( + const char *, + const void *, + size_type, + void *) = NULL, + void* data = NULL, + cl_int* err = NULL) + { + cl_int error; + +#if !defined(__APPLE__) && !defined(__MACOS) + cl_context_properties prop[4] = {CL_CONTEXT_PLATFORM, 0, 0, 0 }; + + if (properties == NULL) { + // Get a valid platform ID as we cannot send in a blank one + vector platforms; + error = Platform::get(&platforms); + if (error != CL_SUCCESS) { + detail::errHandler(error, __CREATE_CONTEXT_FROM_TYPE_ERR); + if (err != NULL) { + *err = error; + } + return; + } + + // Check the platforms we found for a device of our specified type + cl_context_properties platform_id = 0; + for (unsigned int i = 0; i < platforms.size(); i++) { + + vector devices; + +#if defined(CL_HPP_ENABLE_EXCEPTIONS) + try { +#endif + + error = platforms[i].getDevices(type, &devices); + +#if defined(CL_HPP_ENABLE_EXCEPTIONS) + } catch (Error) {} + // Catch if exceptions are enabled as we don't want to exit if first platform has no devices of type + // We do error checking next anyway, and can throw there if needed +#endif + + // Only squash CL_SUCCESS and CL_DEVICE_NOT_FOUND + if (error != CL_SUCCESS && error != CL_DEVICE_NOT_FOUND) { + detail::errHandler(error, __CREATE_CONTEXT_FROM_TYPE_ERR); + if (err != NULL) { + *err = error; + } + } + + if (devices.size() > 0) { + platform_id = (cl_context_properties)platforms[i](); + break; + } + } + + if (platform_id == 0) { + detail::errHandler(CL_DEVICE_NOT_FOUND, __CREATE_CONTEXT_FROM_TYPE_ERR); + if (err != NULL) { + *err = CL_DEVICE_NOT_FOUND; + } + return; + } + + prop[1] = platform_id; + properties = &prop[0]; + } +#endif + object_ = ::clCreateContextFromType( + properties, type, notifyFptr, data, &error); + + detail::errHandler(error, __CREATE_CONTEXT_FROM_TYPE_ERR); + if (err != NULL) { + *err = error; + } + } + + /*! \brief Copy constructor to forward copy to the superclass correctly. + * Required for MSVC. + */ + Context(const Context& ctx) : detail::Wrapper(ctx) {} + + /*! \brief Copy assignment to forward copy to the superclass correctly. + * Required for MSVC. 
+ */ + Context& operator = (const Context &ctx) + { + detail::Wrapper::operator=(ctx); + return *this; + } + + /*! \brief Move constructor to forward move to the superclass correctly. + * Required for MSVC. + */ + Context(Context&& ctx) CL_HPP_NOEXCEPT_ : detail::Wrapper(std::move(ctx)) {} + + /*! \brief Move assignment to forward move to the superclass correctly. + * Required for MSVC. + */ + Context& operator = (Context &&ctx) + { + detail::Wrapper::operator=(std::move(ctx)); + return *this; + } + + + /*! \brief Returns a singleton context including all devices of CL_DEVICE_TYPE_DEFAULT. + * + * \note All calls to this function return the same cl_context as the first. + */ + static Context getDefault(cl_int * err = NULL) + { + std::call_once(default_initialized_, makeDefault); + detail::errHandler(default_error_); + if (err != NULL) { + *err = default_error_; + } + return default_; + } + + /** + * Modify the default context to be used by + * subsequent operations. + * Will only set the default if no default was previously created. + * @return updated default context. + * Should be compared to the passed value to ensure that it was updated. + */ + static Context setDefault(const Context &default_context) + { + std::call_once(default_initialized_, makeDefaultProvided, std::cref(default_context)); + detail::errHandler(default_error_); + return default_; + } + + //! \brief Default constructor - initializes to NULL. + Context() : detail::Wrapper() { } + + /*! \brief Constructor from cl_context - takes ownership. + * + * This effectively transfers ownership of a refcount on the cl_context + * into the new Context object. + */ + explicit Context(const cl_context& context, bool retainObject = false) : + detail::Wrapper(context, retainObject) { } + + /*! \brief Assignment operator from cl_context - takes ownership. + * + * This effectively transfers ownership of a refcount on the rhs and calls + * clReleaseContext() on the value previously held by this instance. + */ + Context& operator = (const cl_context& rhs) + { + detail::Wrapper::operator=(rhs); + return *this; + } + + //! \brief Wrapper for clGetContextInfo(). + template + cl_int getInfo(cl_context_info name, T* param) const + { + return detail::errHandler( + detail::getInfo(&::clGetContextInfo, object_, name, param), + __GET_CONTEXT_INFO_ERR); + } + + //! \brief Wrapper for clGetContextInfo() that returns by value. + template typename + detail::param_traits::param_type + getInfo(cl_int* err = NULL) const + { + typename detail::param_traits< + detail::cl_context_info, name>::param_type param; + cl_int result = getInfo(name, ¶m); + if (err != NULL) { + *err = result; + } + return param; + } + + /*! \brief Gets a list of supported image formats. + * + * Wraps clGetSupportedImageFormats(). 
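+     *
+     * A minimal sketch, assuming 'context' is a valid cl::Context:
+     * \code
+     * cl::vector<cl::ImageFormat> formats;
+     * cl_int err = context.getSupportedImageFormats(
+     *     CL_MEM_READ_ONLY, CL_MEM_OBJECT_IMAGE2D, &formats);
+     * \endcode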
+ */ + cl_int getSupportedImageFormats( + cl_mem_flags flags, + cl_mem_object_type type, + vector* formats) const + { + cl_uint numEntries; + + if (!formats) { + return CL_SUCCESS; + } + + cl_int err = ::clGetSupportedImageFormats( + object_, + flags, + type, + 0, + NULL, + &numEntries); + if (err != CL_SUCCESS) { + return detail::errHandler(err, __GET_SUPPORTED_IMAGE_FORMATS_ERR); + } + + if (numEntries > 0) { + vector value(numEntries); + err = ::clGetSupportedImageFormats( + object_, + flags, + type, + numEntries, + (cl_image_format*)value.data(), + NULL); + if (err != CL_SUCCESS) { + return detail::errHandler(err, __GET_SUPPORTED_IMAGE_FORMATS_ERR); + } + + formats->assign(begin(value), end(value)); + } + else { + // If no values are being returned, ensure an empty vector comes back + formats->clear(); + } + + return CL_SUCCESS; + } +}; + +inline void Device::makeDefault() +{ + /* Throwing an exception from a call_once invocation does not do + * what we wish, so we catch it and save the error. + */ +#if defined(CL_HPP_ENABLE_EXCEPTIONS) + try +#endif + { + cl_int error = 0; + + Context context = Context::getDefault(&error); + detail::errHandler(error, __CREATE_CONTEXT_ERR); + + if (error != CL_SUCCESS) { + default_error_ = error; + } + else { + default_ = context.getInfo()[0]; + default_error_ = CL_SUCCESS; + } + } +#if defined(CL_HPP_ENABLE_EXCEPTIONS) + catch (cl::Error &e) { + default_error_ = e.err(); + } +#endif +} + +CL_HPP_DEFINE_STATIC_MEMBER_ std::once_flag Context::default_initialized_; +CL_HPP_DEFINE_STATIC_MEMBER_ Context Context::default_; +CL_HPP_DEFINE_STATIC_MEMBER_ cl_int Context::default_error_ = CL_SUCCESS; + +/*! \brief Class interface for cl_event. + * + * \note Copies of these objects are shallow, meaning that the copy will refer + * to the same underlying cl_event as the original. For details, see + * clRetainEvent() and clReleaseEvent(). + * + * \see cl_event + */ +class Event : public detail::Wrapper +{ +public: + //! \brief Default constructor - initializes to NULL. + Event() : detail::Wrapper() { } + + /*! \brief Constructor from cl_event - takes ownership. + * + * \param retainObject will cause the constructor to retain its cl object. + * Defaults to false to maintain compatibility with + * earlier versions. + * This effectively transfers ownership of a refcount on the cl_event + * into the new Event object. + */ + explicit Event(const cl_event& event, bool retainObject = false) : + detail::Wrapper(event, retainObject) { } + + /*! \brief Assignment operator from cl_event - takes ownership. + * + * This effectively transfers ownership of a refcount on the rhs and calls + * clReleaseEvent() on the value previously held by this instance. + */ + Event& operator = (const cl_event& rhs) + { + detail::Wrapper::operator=(rhs); + return *this; + } + + //! \brief Wrapper for clGetEventInfo(). + template + cl_int getInfo(cl_event_info name, T* param) const + { + return detail::errHandler( + detail::getInfo(&::clGetEventInfo, object_, name, param), + __GET_EVENT_INFO_ERR); + } + + //! \brief Wrapper for clGetEventInfo() that returns by value. + template typename + detail::param_traits::param_type + getInfo(cl_int* err = NULL) const + { + typename detail::param_traits< + detail::cl_event_info, name>::param_type param; + cl_int result = getInfo(name, ¶m); + if (err != NULL) { + *err = result; + } + return param; + } + + //! \brief Wrapper for clGetEventProfilingInfo(). 
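+    //!
+    //! A minimal sketch, assuming 'ev' is a completed cl::Event from a queue
+    //! created with CL_QUEUE_PROFILING_ENABLE:
+    //! \code
+    //! cl_ulong start = 0, end = 0;
+    //! ev.getProfilingInfo(CL_PROFILING_COMMAND_START, &start);
+    //! ev.getProfilingInfo(CL_PROFILING_COMMAND_END, &end);
+    //! cl_ulong elapsedNs = end - start; // device timestamps are in nanoseconds
+    //! \endcode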
+ template + cl_int getProfilingInfo(cl_profiling_info name, T* param) const + { + return detail::errHandler(detail::getInfo( + &::clGetEventProfilingInfo, object_, name, param), + __GET_EVENT_PROFILE_INFO_ERR); + } + + //! \brief Wrapper for clGetEventProfilingInfo() that returns by value. + template typename + detail::param_traits::param_type + getProfilingInfo(cl_int* err = NULL) const + { + typename detail::param_traits< + detail::cl_profiling_info, name>::param_type param; + cl_int result = getProfilingInfo(name, ¶m); + if (err != NULL) { + *err = result; + } + return param; + } + + /*! \brief Blocks the calling thread until this event completes. + * + * Wraps clWaitForEvents(). + */ + cl_int wait() const + { + return detail::errHandler( + ::clWaitForEvents(1, &object_), + __WAIT_FOR_EVENTS_ERR); + } + +#if CL_HPP_TARGET_OPENCL_VERSION >= 110 + /*! \brief Registers a user callback function for a specific command execution status. + * + * Wraps clSetEventCallback(). + */ + cl_int setCallback( + cl_int type, + void (CL_CALLBACK * pfn_notify)(cl_event, cl_int, void *), + void * user_data = NULL) + { + return detail::errHandler( + ::clSetEventCallback( + object_, + type, + pfn_notify, + user_data), + __SET_EVENT_CALLBACK_ERR); + } +#endif // CL_HPP_TARGET_OPENCL_VERSION >= 110 + + /*! \brief Blocks the calling thread until every event specified is complete. + * + * Wraps clWaitForEvents(). + */ + static cl_int + waitForEvents(const vector& events) + { + return detail::errHandler( + ::clWaitForEvents( + (cl_uint) events.size(), (events.size() > 0) ? (cl_event*)&events.front() : NULL), + __WAIT_FOR_EVENTS_ERR); + } +}; + +#if CL_HPP_TARGET_OPENCL_VERSION >= 110 +/*! \brief Class interface for user events (a subset of cl_event's). + * + * See Event for details about copy semantics, etc. + */ +class UserEvent : public Event +{ +public: + /*! \brief Constructs a user event on a given context. + * + * Wraps clCreateUserEvent(). + */ + UserEvent( + const Context& context, + cl_int * err = NULL) + { + cl_int error; + object_ = ::clCreateUserEvent( + context(), + &error); + + detail::errHandler(error, __CREATE_USER_EVENT_ERR); + if (err != NULL) { + *err = error; + } + } + + //! \brief Default constructor - initializes to NULL. + UserEvent() : Event() { } + + /*! \brief Sets the execution status of a user event object. + * + * Wraps clSetUserEventStatus(). + */ + cl_int setStatus(cl_int status) + { + return detail::errHandler( + ::clSetUserEventStatus(object_,status), + __SET_USER_EVENT_STATUS_ERR); + } +}; +#endif // CL_HPP_TARGET_OPENCL_VERSION >= 110 + +/*! \brief Blocks the calling thread until every event specified is complete. + * + * Wraps clWaitForEvents(). + */ +inline static cl_int +WaitForEvents(const vector& events) +{ + return detail::errHandler( + ::clWaitForEvents( + (cl_uint) events.size(), (events.size() > 0) ? (cl_event*)&events.front() : NULL), + __WAIT_FOR_EVENTS_ERR); +} + +/*! \brief Class interface for cl_mem. + * + * \note Copies of these objects are shallow, meaning that the copy will refer + * to the same underlying cl_mem as the original. For details, see + * clRetainMemObject() and clReleaseMemObject(). + * + * \see cl_mem + */ +class Memory : public detail::Wrapper +{ +public: + //! \brief Default constructor - initializes to NULL. + Memory() : detail::Wrapper() { } + + /*! \brief Constructor from cl_mem - takes ownership. + * + * Optionally transfer ownership of a refcount on the cl_mem + * into the new Memory object. 
+ * + * \param retainObject will cause the constructor to retain its cl object. + * Defaults to false to maintain compatibility with + * earlier versions. + * + * See Memory for further details. + */ + explicit Memory(const cl_mem& memory, bool retainObject) : + detail::Wrapper(memory, retainObject) { } + + /*! \brief Assignment operator from cl_mem - takes ownership. + * + * This effectively transfers ownership of a refcount on the rhs and calls + * clReleaseMemObject() on the value previously held by this instance. + */ + Memory& operator = (const cl_mem& rhs) + { + detail::Wrapper::operator=(rhs); + return *this; + } + + /*! \brief Copy constructor to forward copy to the superclass correctly. + * Required for MSVC. + */ + Memory(const Memory& mem) : detail::Wrapper(mem) {} + + /*! \brief Copy assignment to forward copy to the superclass correctly. + * Required for MSVC. + */ + Memory& operator = (const Memory &mem) + { + detail::Wrapper::operator=(mem); + return *this; + } + + /*! \brief Move constructor to forward move to the superclass correctly. + * Required for MSVC. + */ + Memory(Memory&& mem) CL_HPP_NOEXCEPT_ : detail::Wrapper(std::move(mem)) {} + + /*! \brief Move assignment to forward move to the superclass correctly. + * Required for MSVC. + */ + Memory& operator = (Memory &&mem) + { + detail::Wrapper::operator=(std::move(mem)); + return *this; + } + + + //! \brief Wrapper for clGetMemObjectInfo(). + template + cl_int getInfo(cl_mem_info name, T* param) const + { + return detail::errHandler( + detail::getInfo(&::clGetMemObjectInfo, object_, name, param), + __GET_MEM_OBJECT_INFO_ERR); + } + + //! \brief Wrapper for clGetMemObjectInfo() that returns by value. + template typename + detail::param_traits::param_type + getInfo(cl_int* err = NULL) const + { + typename detail::param_traits< + detail::cl_mem_info, name>::param_type param; + cl_int result = getInfo(name, ¶m); + if (err != NULL) { + *err = result; + } + return param; + } + +#if CL_HPP_TARGET_OPENCL_VERSION >= 110 + /*! \brief Registers a callback function to be called when the memory object + * is no longer needed. + * + * Wraps clSetMemObjectDestructorCallback(). + * + * Repeated calls to this function, for a given cl_mem value, will append + * to the list of functions called (in reverse order) when memory object's + * resources are freed and the memory object is deleted. + * + * \note + * The registered callbacks are associated with the underlying cl_mem + * value - not the Memory class instance. 
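+     *
+     * A minimal sketch (hypothetical callback name; 'mem' is a valid cl::Memory):
+     * \code
+     * void CL_CALLBACK onMemFreed(cl_mem memobj, void *user_data)
+     * {
+     *     // hypothetical cleanup hook
+     * }
+     * // elsewhere:
+     * mem.setDestructorCallback(onMemFreed, nullptr);
+     * \endcode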
+ */ + cl_int setDestructorCallback( + void (CL_CALLBACK * pfn_notify)(cl_mem, void *), + void * user_data = NULL) + { + return detail::errHandler( + ::clSetMemObjectDestructorCallback( + object_, + pfn_notify, + user_data), + __SET_MEM_OBJECT_DESTRUCTOR_CALLBACK_ERR); + } +#endif // CL_HPP_TARGET_OPENCL_VERSION >= 110 + +}; + +// Pre-declare copy functions +class Buffer; +template< typename IteratorType > +cl_int copy( IteratorType startIterator, IteratorType endIterator, cl::Buffer &buffer ); +template< typename IteratorType > +cl_int copy( const cl::Buffer &buffer, IteratorType startIterator, IteratorType endIterator ); +template< typename IteratorType > +cl_int copy( const CommandQueue &queue, IteratorType startIterator, IteratorType endIterator, cl::Buffer &buffer ); +template< typename IteratorType > +cl_int copy( const CommandQueue &queue, const cl::Buffer &buffer, IteratorType startIterator, IteratorType endIterator ); + + +#if CL_HPP_TARGET_OPENCL_VERSION >= 200 +namespace detail +{ + class SVMTraitNull + { + public: + static cl_svm_mem_flags getSVMMemFlags() + { + return 0; + } + }; +} // namespace detail + +template +class SVMTraitReadWrite +{ +public: + static cl_svm_mem_flags getSVMMemFlags() + { + return CL_MEM_READ_WRITE | + Trait::getSVMMemFlags(); + } +}; + +template +class SVMTraitReadOnly +{ +public: + static cl_svm_mem_flags getSVMMemFlags() + { + return CL_MEM_READ_ONLY | + Trait::getSVMMemFlags(); + } +}; + +template +class SVMTraitWriteOnly +{ +public: + static cl_svm_mem_flags getSVMMemFlags() + { + return CL_MEM_WRITE_ONLY | + Trait::getSVMMemFlags(); + } +}; + +template> +class SVMTraitCoarse +{ +public: + static cl_svm_mem_flags getSVMMemFlags() + { + return Trait::getSVMMemFlags(); + } +}; + +template> +class SVMTraitFine +{ +public: + static cl_svm_mem_flags getSVMMemFlags() + { + return CL_MEM_SVM_FINE_GRAIN_BUFFER | + Trait::getSVMMemFlags(); + } +}; + +template> +class SVMTraitAtomic +{ +public: + static cl_svm_mem_flags getSVMMemFlags() + { + return + CL_MEM_SVM_FINE_GRAIN_BUFFER | + CL_MEM_SVM_ATOMICS | + Trait::getSVMMemFlags(); + } +}; + +// Pre-declare SVM map function +template +inline cl_int enqueueMapSVM( + T* ptr, + cl_bool blocking, + cl_map_flags flags, + size_type size, + const vector* events = NULL, + Event* event = NULL); + +/** + * STL-like allocator class for managing SVM objects provided for convenience. + * + * Note that while this behaves like an allocator for the purposes of constructing vectors and similar objects, + * care must be taken when using with smart pointers. + * The allocator should not be used to construct a unique_ptr if we are using coarse-grained SVM mode because + * the coarse-grained management behaviour would behave incorrectly with respect to reference counting. + * + * Instead the allocator embeds a Deleter which may be used with unique_ptr and is used + * with the allocate_shared and allocate_ptr supplied operations. 
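+ *
+ * A minimal sketch, assuming the default context targets an OpenCL 2.0 device:
+ * \code
+ * cl::SVMAllocator<int, cl::SVMTraitCoarse<>> svmAlloc;
+ * int *p = svmAlloc.allocate(1024);
+ * // ... use p on the host and in kernels ...
+ * svmAlloc.deallocate(p, 1024);
+ * \endcode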
+ */ +template +class SVMAllocator { +private: + Context context_; + +public: + typedef T value_type; + typedef value_type* pointer; + typedef const value_type* const_pointer; + typedef value_type& reference; + typedef const value_type& const_reference; + typedef std::size_t size_type; + typedef std::ptrdiff_t difference_type; + + template + struct rebind + { + typedef SVMAllocator other; + }; + + template + friend class SVMAllocator; + + SVMAllocator() : + context_(Context::getDefault()) + { + } + + explicit SVMAllocator(cl::Context context) : + context_(context) + { + } + + + SVMAllocator(const SVMAllocator &other) : + context_(other.context_) + { + } + + template + SVMAllocator(const SVMAllocator &other) : + context_(other.context_) + { + } + + ~SVMAllocator() + { + } + + pointer address(reference r) CL_HPP_NOEXCEPT_ + { + return std::addressof(r); + } + + const_pointer address(const_reference r) CL_HPP_NOEXCEPT_ + { + return std::addressof(r); + } + + /** + * Allocate an SVM pointer. + * + * If the allocator is coarse-grained, this will take ownership to allow + * containers to correctly construct data in place. + */ + pointer allocate( + size_type size, + typename cl::SVMAllocator::const_pointer = 0) + { + // Allocate memory with default alignment matching the size of the type + void* voidPointer = + clSVMAlloc( + context_(), + SVMTrait::getSVMMemFlags(), + size*sizeof(T), + 0); + pointer retValue = reinterpret_cast( + voidPointer); +#if defined(CL_HPP_ENABLE_EXCEPTIONS) + if (!retValue) { + std::bad_alloc excep; + throw excep; + } +#endif // #if defined(CL_HPP_ENABLE_EXCEPTIONS) + + // If allocation was coarse-grained then map it + if (!(SVMTrait::getSVMMemFlags() & CL_MEM_SVM_FINE_GRAIN_BUFFER)) { + cl_int err = enqueueMapSVM(retValue, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE, size*sizeof(T)); + if (err != CL_SUCCESS) { + std::bad_alloc excep; + throw excep; + } + } + + // If exceptions disabled, return null pointer from allocator + return retValue; + } + + void deallocate(pointer p, size_type) + { + clSVMFree(context_(), p); + } + + /** + * Return the maximum possible allocation size. + * This is the minimum of the maximum sizes of all devices in the context. + */ + size_type max_size() const CL_HPP_NOEXCEPT_ + { + size_type maxSize = std::numeric_limits::max() / sizeof(T); + + for (Device &d : context_.getInfo()) { + maxSize = std::min( + maxSize, + static_cast(d.getInfo())); + } + + return maxSize; + } + + template< class U, class... Args > + void construct(U* p, Args&&... args) + { + new(p)T(args...); + } + + template< class U > + void destroy(U* p) + { + p->~U(); + } + + /** + * Returns true if the contexts match. 
+ */
+    inline bool operator==(SVMAllocator const& rhs)
+    {
+        return (context_==rhs.context_);
+    }
+
+    inline bool operator!=(SVMAllocator const& a)
+    {
+        return !operator==(a);
+    }
+}; // class SVMAllocator
+
+
+template
+class SVMAllocator {
+public:
+    typedef void value_type;
+    typedef value_type* pointer;
+    typedef const value_type* const_pointer;
+
+    template
+    struct rebind
+    {
+        typedef SVMAllocator other;
+    };
+
+    template
+    friend class SVMAllocator;
+};
+
+#if !defined(CL_HPP_NO_STD_UNIQUE_PTR)
+namespace detail
+{
+    template
+    class Deleter {
+    private:
+        Alloc alloc_;
+        size_type copies_;
+
+    public:
+        typedef typename std::allocator_traits::pointer pointer;
+
+        Deleter(const Alloc &alloc, size_type copies) : alloc_{ alloc }, copies_{ copies }
+        {
+        }
+
+        void operator()(pointer ptr) const {
+            Alloc tmpAlloc{ alloc_ };
+            std::allocator_traits::destroy(tmpAlloc, std::addressof(*ptr));
+            std::allocator_traits::deallocate(tmpAlloc, ptr, copies_);
+        }
+    };
+} // namespace detail
+
+/**
+ * Allocation operation compatible with std::allocate_ptr.
+ * Creates a unique_ptr by default.
+ * This requirement is to ensure that the control block is not
+ * allocated in memory inaccessible to the host.
+ */
+template
+cl::pointer> allocate_pointer(const Alloc &alloc_, Args&&... args)
+{
+    Alloc alloc(alloc_);
+    static const size_type copies = 1;
+
+    // Ensure that creation of the management block and the
+    // object are dealt with separately such that we only provide a deleter
+
+    T* tmp = std::allocator_traits::allocate(alloc, copies);
+    if (!tmp) {
+        std::bad_alloc excep;
+        throw excep;
+    }
+    try {
+        std::allocator_traits::construct(
+            alloc,
+            std::addressof(*tmp),
+            std::forward(args)...);
+
+        return cl::pointer>(tmp, detail::Deleter{alloc, copies});
+    }
+    catch (std::bad_alloc &b)
+    {
+        std::allocator_traits::deallocate(alloc, tmp, copies);
+        throw;
+    }
+}
+
+template< class T, class SVMTrait, class... Args >
+cl::pointer>> allocate_svm(Args... args)
+{
+    SVMAllocator alloc;
+    return cl::allocate_pointer(alloc, args...);
+}
+
+template< class T, class SVMTrait, class... Args >
+cl::pointer>> allocate_svm(const cl::Context &c, Args... args)
+{
+    SVMAllocator alloc(c);
+    return cl::allocate_pointer(alloc, args...);
+}
+#endif // #if !defined(CL_HPP_NO_STD_UNIQUE_PTR)
+
+/*! \brief Vector alias to simplify construction of coarse-grained SVM containers.
+ *
+ */
+template < class T >
+using coarse_svm_vector = vector>>;
+
+/*! \brief Vector alias to simplify construction of fine-grained SVM containers.
+*
+*/
+template < class T >
+using fine_svm_vector = vector>>;
+
+/*! \brief Vector alias to simplify construction of fine-grained SVM containers that support platform atomics.
+*
+*/
+template < class T >
+using atomic_svm_vector = vector>>;
+
+#endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
+
+
+/*! \brief Class interface for Buffer Memory Objects.
+ *
+ * See Memory for details about copy semantics, etc.
+ *
+ * \see Memory
+ */
+class Buffer : public Memory
+{
+public:
+
+    /*! \brief Constructs a Buffer in a specified context.
+     *
+     * Wraps clCreateBuffer().
+     *
+     * \param host_ptr Storage to be used if the CL_MEM_USE_HOST_PTR flag was
+     *                 specified. Note alignment & exclusivity requirements.
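+     *
+     * A minimal sketch, assuming 'context' is a valid cl::Context:
+     * \code
+     * cl::Buffer buf(context, CL_MEM_READ_WRITE, 1024 * sizeof(float));
+     * \endcode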
+ */ + Buffer( + const Context& context, + cl_mem_flags flags, + size_type size, + void* host_ptr = NULL, + cl_int* err = NULL) + { + cl_int error; + object_ = ::clCreateBuffer(context(), flags, size, host_ptr, &error); + + detail::errHandler(error, __CREATE_BUFFER_ERR); + if (err != NULL) { + *err = error; + } + } + + /*! \brief Constructs a Buffer in the default context. + * + * Wraps clCreateBuffer(). + * + * \param host_ptr Storage to be used if the CL_MEM_USE_HOST_PTR flag was + * specified. Note alignment & exclusivity requirements. + * + * \see Context::getDefault() + */ + Buffer( + cl_mem_flags flags, + size_type size, + void* host_ptr = NULL, + cl_int* err = NULL) + { + cl_int error; + + Context context = Context::getDefault(err); + + object_ = ::clCreateBuffer(context(), flags, size, host_ptr, &error); + + detail::errHandler(error, __CREATE_BUFFER_ERR); + if (err != NULL) { + *err = error; + } + } + + /*! + * \brief Construct a Buffer from a host container via iterators. + * IteratorType must be random access. + * If useHostPtr is specified iterators must represent contiguous data. + */ + template< typename IteratorType > + Buffer( + IteratorType startIterator, + IteratorType endIterator, + bool readOnly, + bool useHostPtr = false, + cl_int* err = NULL) + { + typedef typename std::iterator_traits::value_type DataType; + cl_int error; + + cl_mem_flags flags = 0; + if( readOnly ) { + flags |= CL_MEM_READ_ONLY; + } + else { + flags |= CL_MEM_READ_WRITE; + } + if( useHostPtr ) { + flags |= CL_MEM_USE_HOST_PTR; + } + + size_type size = sizeof(DataType)*(endIterator - startIterator); + + Context context = Context::getDefault(err); + + if( useHostPtr ) { + object_ = ::clCreateBuffer(context(), flags, size, static_cast(&*startIterator), &error); + } else { + object_ = ::clCreateBuffer(context(), flags, size, 0, &error); + } + + detail::errHandler(error, __CREATE_BUFFER_ERR); + if (err != NULL) { + *err = error; + } + + if( !useHostPtr ) { + error = cl::copy(startIterator, endIterator, *this); + detail::errHandler(error, __CREATE_BUFFER_ERR); + if (err != NULL) { + *err = error; + } + } + } + + /*! + * \brief Construct a Buffer from a host container via iterators using a specified context. + * IteratorType must be random access. + * If useHostPtr is specified iterators must represent contiguous data. + */ + template< typename IteratorType > + Buffer(const Context &context, IteratorType startIterator, IteratorType endIterator, + bool readOnly, bool useHostPtr = false, cl_int* err = NULL); + + /*! + * \brief Construct a Buffer from a host container via iterators using a specified queue. + * If useHostPtr is specified iterators must be random access. + */ + template< typename IteratorType > + Buffer(const CommandQueue &queue, IteratorType startIterator, IteratorType endIterator, + bool readOnly, bool useHostPtr = false, cl_int* err = NULL); + + //! \brief Default constructor - initializes to NULL. + Buffer() : Memory() { } + + /*! \brief Constructor from cl_mem - takes ownership. + * + * \param retainObject will cause the constructor to retain its cl object. + * Defaults to false to maintain compatibility with earlier versions. + * + * See Memory for further details. + */ + explicit Buffer(const cl_mem& buffer, bool retainObject = false) : + Memory(buffer, retainObject) { } + + /*! \brief Assignment from cl_mem - performs shallow copy. + * + * See Memory for further details. + */ + Buffer& operator = (const cl_mem& rhs) + { + Memory::operator=(rhs); + return *this; + } + + /*! 
\brief Copy constructor to forward copy to the superclass correctly.
+     * Required for MSVC.
+     */
+    Buffer(const Buffer& buf) : Memory(buf) {}
+
+    /*! \brief Copy assignment to forward copy to the superclass correctly.
+     * Required for MSVC.
+     */
+    Buffer& operator = (const Buffer &buf)
+    {
+        Memory::operator=(buf);
+        return *this;
+    }
+
+    /*! \brief Move constructor to forward move to the superclass correctly.
+     * Required for MSVC.
+     */
+    Buffer(Buffer&& buf) CL_HPP_NOEXCEPT_ : Memory(std::move(buf)) {}
+
+    /*! \brief Move assignment to forward move to the superclass correctly.
+     * Required for MSVC.
+     */
+    Buffer& operator = (Buffer &&buf)
+    {
+        Memory::operator=(std::move(buf));
+        return *this;
+    }
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 110
+    /*! \brief Creates a new buffer object from this.
+     *
+     * Wraps clCreateSubBuffer().
+     */
+    Buffer createSubBuffer(
+        cl_mem_flags flags,
+        cl_buffer_create_type buffer_create_type,
+        const void * buffer_create_info,
+        cl_int * err = NULL)
+    {
+        Buffer result;
+        cl_int error;
+        result.object_ = ::clCreateSubBuffer(
+            object_,
+            flags,
+            buffer_create_type,
+            buffer_create_info,
+            &error);
+
+        detail::errHandler(error, __CREATE_SUBBUFFER_ERR);
+        if (err != NULL) {
+            *err = error;
+        }
+
+        return result;
+    }
+#endif // CL_HPP_TARGET_OPENCL_VERSION >= 110
+};
+
+#if defined (CL_HPP_USE_DX_INTEROP)
+/*! \brief Class interface for creating OpenCL buffers from ID3D10Buffer objects.
+ *
+ * This is provided to facilitate interoperability with Direct3D.
+ *
+ * See Memory for details about copy semantics, etc.
+ *
+ * \see Memory
+ */
+class BufferD3D10 : public Buffer
+{
+public:
+
+
+    /*! \brief Constructs a BufferD3D10, in a specified context, from a
+     * given ID3D10Buffer.
+     *
+     * Wraps clCreateFromD3D10BufferKHR().
+     */
+    BufferD3D10(
+        const Context& context,
+        cl_mem_flags flags,
+        ID3D10Buffer* bufobj,
+        cl_int * err = NULL)
+    {
+        typedef CL_API_ENTRY cl_mem (CL_API_CALL *PFN_clCreateFromD3D10BufferKHR)(
+            cl_context context, cl_mem_flags flags, ID3D10Buffer* buffer,
+            cl_int* errcode_ret);
+        PFN_clCreateFromD3D10BufferKHR pfn_clCreateFromD3D10BufferKHR = nullptr;
+#if CL_HPP_TARGET_OPENCL_VERSION >= 120
+        vector props = context.getInfo();
+        cl_platform_id platform = nullptr;
+        for( int i = 0; i < props.size(); ++i ) {
+            if( props[i] == CL_CONTEXT_PLATFORM ) {
+                platform = (cl_platform_id)props[i+1];
+            }
+        }
+        CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(platform, clCreateFromD3D10BufferKHR);
+#elif CL_HPP_TARGET_OPENCL_VERSION >= 110
+        CL_HPP_INIT_CL_EXT_FCN_PTR_(clCreateFromD3D10BufferKHR);
+#endif
+
+        cl_int error;
+        object_ = pfn_clCreateFromD3D10BufferKHR(
+            context(),
+            flags,
+            bufobj,
+            &error);
+
+        detail::errHandler(error, __CREATE_GL_BUFFER_ERR);
+        if (err != NULL) {
+            *err = error;
+        }
+    }
+
+    //! \brief Default constructor - initializes to NULL.
+    BufferD3D10() : Buffer() { }
+
+    /*! \brief Constructor from cl_mem - takes ownership.
+     *
+     * \param retainObject will cause the constructor to retain its cl object.
+     *                     Defaults to false to maintain compatibility with
+     *                     earlier versions.
+     * See Memory for further details.
+     */
+    explicit BufferD3D10(const cl_mem& buffer, bool retainObject = false) :
+        Buffer(buffer, retainObject) { }
+
+    /*! \brief Assignment from cl_mem - performs shallow copy.
+     *
+     * See Memory for further details.
+     */
+    BufferD3D10& operator = (const cl_mem& rhs)
+    {
+        Buffer::operator=(rhs);
+        return *this;
+    }
+
+    /*! \brief Copy constructor to forward copy to the superclass correctly.
+     * Required for MSVC.
+ */ + BufferD3D10(const BufferD3D10& buf) : + Buffer(buf) {} + + /*! \brief Copy assignment to forward copy to the superclass correctly. + * Required for MSVC. + */ + BufferD3D10& operator = (const BufferD3D10 &buf) + { + Buffer::operator=(buf); + return *this; + } + + /*! \brief Move constructor to forward move to the superclass correctly. + * Required for MSVC. + */ + BufferD3D10(BufferD3D10&& buf) CL_HPP_NOEXCEPT_ : Buffer(std::move(buf)) {} + + /*! \brief Move assignment to forward move to the superclass correctly. + * Required for MSVC. + */ + BufferD3D10& operator = (BufferD3D10 &&buf) + { + Buffer::operator=(std::move(buf)); + return *this; + } +}; +#endif + +/*! \brief Class interface for GL Buffer Memory Objects. + * + * This is provided to facilitate interoperability with OpenGL. + * + * See Memory for details about copy semantics, etc. + * + * \see Memory + */ +class BufferGL : public Buffer +{ +public: + /*! \brief Constructs a BufferGL in a specified context, from a given + * GL buffer. + * + * Wraps clCreateFromGLBuffer(). + */ + BufferGL( + const Context& context, + cl_mem_flags flags, + cl_GLuint bufobj, + cl_int * err = NULL) + { + cl_int error; + object_ = ::clCreateFromGLBuffer( + context(), + flags, + bufobj, + &error); + + detail::errHandler(error, __CREATE_GL_BUFFER_ERR); + if (err != NULL) { + *err = error; + } + } + + //! \brief Default constructor - initializes to NULL. + BufferGL() : Buffer() { } + + /*! \brief Constructor from cl_mem - takes ownership. + * + * \param retainObject will cause the constructor to retain its cl object. + * Defaults to false to maintain compatibility with + * earlier versions. + * See Memory for further details. + */ + explicit BufferGL(const cl_mem& buffer, bool retainObject = false) : + Buffer(buffer, retainObject) { } + + /*! \brief Assignment from cl_mem - performs shallow copy. + * + * See Memory for further details. + */ + BufferGL& operator = (const cl_mem& rhs) + { + Buffer::operator=(rhs); + return *this; + } + + /*! \brief Copy constructor to forward copy to the superclass correctly. + * Required for MSVC. + */ + BufferGL(const BufferGL& buf) : Buffer(buf) {} + + /*! \brief Copy assignment to forward copy to the superclass correctly. + * Required for MSVC. + */ + BufferGL& operator = (const BufferGL &buf) + { + Buffer::operator=(buf); + return *this; + } + + /*! \brief Move constructor to forward move to the superclass correctly. + * Required for MSVC. + */ + BufferGL(BufferGL&& buf) CL_HPP_NOEXCEPT_ : Buffer(std::move(buf)) {} + + /*! \brief Move assignment to forward move to the superclass correctly. + * Required for MSVC. + */ + BufferGL& operator = (BufferGL &&buf) + { + Buffer::operator=(std::move(buf)); + return *this; + } + + //! \brief Wrapper for clGetGLObjectInfo(). + cl_int getObjectInfo( + cl_gl_object_type *type, + cl_GLuint * gl_object_name) + { + return detail::errHandler( + ::clGetGLObjectInfo(object_,type,gl_object_name), + __GET_GL_OBJECT_INFO_ERR); + } +}; + +/*! \brief Class interface for GL Render Buffer Memory Objects. + * + * This is provided to facilitate interoperability with OpenGL. + * + * See Memory for details about copy semantics, etc. + * + * \see Memory + */ +class BufferRenderGL : public Buffer +{ +public: + /*! \brief Constructs a BufferRenderGL in a specified context, from a given + * GL Renderbuffer. + * + * Wraps clCreateFromGLRenderbuffer(). 
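+     *
+     * Illustrative sketch, assuming CL/GL sharing was set up when 'context'
+     * was created and 'rbo' names a valid GL renderbuffer:
+     * \code
+     * cl_int err = CL_SUCCESS;
+     * cl::BufferRenderGL clTarget(context, CL_MEM_WRITE_ONLY, rbo, &err);
+     * \endcode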
+ */ + BufferRenderGL( + const Context& context, + cl_mem_flags flags, + cl_GLuint bufobj, + cl_int * err = NULL) + { + cl_int error; + object_ = ::clCreateFromGLRenderbuffer( + context(), + flags, + bufobj, + &error); + + detail::errHandler(error, __CREATE_GL_RENDER_BUFFER_ERR); + if (err != NULL) { + *err = error; + } + } + + //! \brief Default constructor - initializes to NULL. + BufferRenderGL() : Buffer() { } + + /*! \brief Constructor from cl_mem - takes ownership. + * + * \param retainObject will cause the constructor to retain its cl object. + * Defaults to false to maintain compatibility with + * earlier versions. + * See Memory for further details. + */ + explicit BufferRenderGL(const cl_mem& buffer, bool retainObject = false) : + Buffer(buffer, retainObject) { } + + /*! \brief Assignment from cl_mem - performs shallow copy. + * + * See Memory for further details. + */ + BufferRenderGL& operator = (const cl_mem& rhs) + { + Buffer::operator=(rhs); + return *this; + } + + /*! \brief Copy constructor to forward copy to the superclass correctly. + * Required for MSVC. + */ + BufferRenderGL(const BufferRenderGL& buf) : Buffer(buf) {} + + /*! \brief Copy assignment to forward copy to the superclass correctly. + * Required for MSVC. + */ + BufferRenderGL& operator = (const BufferRenderGL &buf) + { + Buffer::operator=(buf); + return *this; + } + + /*! \brief Move constructor to forward move to the superclass correctly. + * Required for MSVC. + */ + BufferRenderGL(BufferRenderGL&& buf) CL_HPP_NOEXCEPT_ : Buffer(std::move(buf)) {} + + /*! \brief Move assignment to forward move to the superclass correctly. + * Required for MSVC. + */ + BufferRenderGL& operator = (BufferRenderGL &&buf) + { + Buffer::operator=(std::move(buf)); + return *this; + } + + //! \brief Wrapper for clGetGLObjectInfo(). + cl_int getObjectInfo( + cl_gl_object_type *type, + cl_GLuint * gl_object_name) + { + return detail::errHandler( + ::clGetGLObjectInfo(object_,type,gl_object_name), + __GET_GL_OBJECT_INFO_ERR); + } +}; + +/*! \brief C++ base class for Image Memory objects. + * + * See Memory for details about copy semantics, etc. + * + * \see Memory + */ +class Image : public Memory +{ +protected: + //! \brief Default constructor - initializes to NULL. + Image() : Memory() { } + + /*! \brief Constructor from cl_mem - takes ownership. + * + * \param retainObject will cause the constructor to retain its cl object. + * Defaults to false to maintain compatibility with + * earlier versions. + * See Memory for further details. + */ + explicit Image(const cl_mem& image, bool retainObject = false) : + Memory(image, retainObject) { } + + /*! \brief Assignment from cl_mem - performs shallow copy. + * + * See Memory for further details. + */ + Image& operator = (const cl_mem& rhs) + { + Memory::operator=(rhs); + return *this; + } + + /*! \brief Copy constructor to forward copy to the superclass correctly. + * Required for MSVC. + */ + Image(const Image& img) : Memory(img) {} + + /*! \brief Copy assignment to forward copy to the superclass correctly. + * Required for MSVC. + */ + Image& operator = (const Image &img) + { + Memory::operator=(img); + return *this; + } + + /*! \brief Move constructor to forward move to the superclass correctly. + * Required for MSVC. + */ + Image(Image&& img) CL_HPP_NOEXCEPT_ : Memory(std::move(img)) {} + + /*! \brief Move assignment to forward move to the superclass correctly. + * Required for MSVC. 
+ */ + Image& operator = (Image &&img) + { + Memory::operator=(std::move(img)); + return *this; + } + + +public: + //! \brief Wrapper for clGetImageInfo(). + template + cl_int getImageInfo(cl_image_info name, T* param) const + { + return detail::errHandler( + detail::getInfo(&::clGetImageInfo, object_, name, param), + __GET_IMAGE_INFO_ERR); + } + + //! \brief Wrapper for clGetImageInfo() that returns by value. + template typename + detail::param_traits::param_type + getImageInfo(cl_int* err = NULL) const + { + typename detail::param_traits< + detail::cl_image_info, name>::param_type param; + cl_int result = getImageInfo(name, ¶m); + if (err != NULL) { + *err = result; + } + return param; + } +}; + +#if CL_HPP_TARGET_OPENCL_VERSION >= 120 +/*! \brief Class interface for 1D Image Memory objects. + * + * See Memory for details about copy semantics, etc. + * + * \see Memory + */ +class Image1D : public Image +{ +public: + /*! \brief Constructs a 1D Image in a specified context. + * + * Wraps clCreateImage(). + */ + Image1D( + const Context& context, + cl_mem_flags flags, + ImageFormat format, + size_type width, + void* host_ptr = NULL, + cl_int* err = NULL) + { + cl_int error; + cl_image_desc desc = + { + CL_MEM_OBJECT_IMAGE1D, + width, + 0, 0, 0, 0, 0, 0, 0, 0 + }; + object_ = ::clCreateImage( + context(), + flags, + &format, + &desc, + host_ptr, + &error); + + detail::errHandler(error, __CREATE_IMAGE_ERR); + if (err != NULL) { + *err = error; + } + } + + //! \brief Default constructor - initializes to NULL. + Image1D() { } + + /*! \brief Constructor from cl_mem - takes ownership. + * + * \param retainObject will cause the constructor to retain its cl object. + * Defaults to false to maintain compatibility with + * earlier versions. + * See Memory for further details. + */ + explicit Image1D(const cl_mem& image1D, bool retainObject = false) : + Image(image1D, retainObject) { } + + /*! \brief Assignment from cl_mem - performs shallow copy. + * + * See Memory for further details. + */ + Image1D& operator = (const cl_mem& rhs) + { + Image::operator=(rhs); + return *this; + } + + /*! \brief Copy constructor to forward copy to the superclass correctly. + * Required for MSVC. + */ + Image1D(const Image1D& img) : Image(img) {} + + /*! \brief Copy assignment to forward copy to the superclass correctly. + * Required for MSVC. + */ + Image1D& operator = (const Image1D &img) + { + Image::operator=(img); + return *this; + } + + /*! \brief Move constructor to forward move to the superclass correctly. + * Required for MSVC. + */ + Image1D(Image1D&& img) CL_HPP_NOEXCEPT_ : Image(std::move(img)) {} + + /*! \brief Move assignment to forward move to the superclass correctly. + * Required for MSVC. + */ + Image1D& operator = (Image1D &&img) + { + Image::operator=(std::move(img)); + return *this; + } + +}; + +/*! \class Image1DBuffer + * \brief Image interface for 1D buffer images. + */ +class Image1DBuffer : public Image +{ +public: + Image1DBuffer( + const Context& context, + cl_mem_flags flags, + ImageFormat format, + size_type width, + const Buffer &buffer, + cl_int* err = NULL) + { + cl_int error; + cl_image_desc desc = + { + CL_MEM_OBJECT_IMAGE1D_BUFFER, + width, + 0, 0, 0, 0, 0, 0, 0, + buffer() + }; + object_ = ::clCreateImage( + context(), + flags, + &format, + &desc, + NULL, + &error); + + detail::errHandler(error, __CREATE_IMAGE_ERR); + if (err != NULL) { + *err = error; + } + } + + Image1DBuffer() { } + + /*! \brief Constructor from cl_mem - takes ownership. 
+ * + * \param retainObject will cause the constructor to retain its cl object. + * Defaults to false to maintain compatibility with + * earlier versions. + * See Memory for further details. + */ + explicit Image1DBuffer(const cl_mem& image1D, bool retainObject = false) : + Image(image1D, retainObject) { } + + Image1DBuffer& operator = (const cl_mem& rhs) + { + Image::operator=(rhs); + return *this; + } + + /*! \brief Copy constructor to forward copy to the superclass correctly. + * Required for MSVC. + */ + Image1DBuffer(const Image1DBuffer& img) : Image(img) {} + + /*! \brief Copy assignment to forward copy to the superclass correctly. + * Required for MSVC. + */ + Image1DBuffer& operator = (const Image1DBuffer &img) + { + Image::operator=(img); + return *this; + } + + /*! \brief Move constructor to forward move to the superclass correctly. + * Required for MSVC. + */ + Image1DBuffer(Image1DBuffer&& img) CL_HPP_NOEXCEPT_ : Image(std::move(img)) {} + + /*! \brief Move assignment to forward move to the superclass correctly. + * Required for MSVC. + */ + Image1DBuffer& operator = (Image1DBuffer &&img) + { + Image::operator=(std::move(img)); + return *this; + } + +}; + +/*! \class Image1DArray + * \brief Image interface for arrays of 1D images. + */ +class Image1DArray : public Image +{ +public: + Image1DArray( + const Context& context, + cl_mem_flags flags, + ImageFormat format, + size_type arraySize, + size_type width, + size_type rowPitch, + void* host_ptr = NULL, + cl_int* err = NULL) + { + cl_int error; + cl_image_desc desc = + { + CL_MEM_OBJECT_IMAGE1D_ARRAY, + width, + 0, 0, // height, depth (unused) + arraySize, + rowPitch, + 0, 0, 0, 0 + }; + object_ = ::clCreateImage( + context(), + flags, + &format, + &desc, + host_ptr, + &error); + + detail::errHandler(error, __CREATE_IMAGE_ERR); + if (err != NULL) { + *err = error; + } + } + + Image1DArray() { } + + /*! \brief Constructor from cl_mem - takes ownership. + * + * \param retainObject will cause the constructor to retain its cl object. + * Defaults to false to maintain compatibility with + * earlier versions. + * See Memory for further details. + */ + explicit Image1DArray(const cl_mem& imageArray, bool retainObject = false) : + Image(imageArray, retainObject) { } + + + Image1DArray& operator = (const cl_mem& rhs) + { + Image::operator=(rhs); + return *this; + } + + /*! \brief Copy constructor to forward copy to the superclass correctly. + * Required for MSVC. + */ + Image1DArray(const Image1DArray& img) : Image(img) {} + + /*! \brief Copy assignment to forward copy to the superclass correctly. + * Required for MSVC. + */ + Image1DArray& operator = (const Image1DArray &img) + { + Image::operator=(img); + return *this; + } + + /*! \brief Move constructor to forward move to the superclass correctly. + * Required for MSVC. + */ + Image1DArray(Image1DArray&& img) CL_HPP_NOEXCEPT_ : Image(std::move(img)) {} + + /*! \brief Move assignment to forward move to the superclass correctly. + * Required for MSVC. + */ + Image1DArray& operator = (Image1DArray &&img) + { + Image::operator=(std::move(img)); + return *this; + } + +}; +#endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 120 + + +/*! \brief Class interface for 2D Image Memory objects. + * + * See Memory for details about copy semantics, etc. + * + * \see Memory + */ +class Image2D : public Image +{ +public: + /*! \brief Constructs a 2D Image in a specified context. + * + * Wraps clCreateImage(). 
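+     *
+     * A minimal usage sketch (illustrative only: assumes an existing
+     * context \c ctx and a suitably sized host buffer \c pixels):
+     * \code
+     * cl::Image2D img(
+     *     ctx,
+     *     CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR,
+     *     cl::ImageFormat(CL_RGBA, CL_UNORM_INT8),
+     *     640, 480,
+     *     0,          // row pitch inferred from width
+     *     pixels);
+     * \endcode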
+     */
+    Image2D(
+        const Context& context,
+        cl_mem_flags flags,
+        ImageFormat format,
+        size_type width,
+        size_type height,
+        size_type row_pitch = 0,
+        void* host_ptr = NULL,
+        cl_int* err = NULL)
+    {
+        cl_int error;
+        bool useCreateImage;
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 120 && CL_HPP_MINIMUM_OPENCL_VERSION < 120
+        // Run-time decision based on the actual platform
+        {
+            cl_uint version = detail::getContextPlatformVersion(context());
+            useCreateImage = (version >= 0x10002); // OpenCL 1.2 or above
+        }
+#elif CL_HPP_TARGET_OPENCL_VERSION >= 120
+        useCreateImage = true;
+#else
+        useCreateImage = false;
+#endif
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 120
+        if (useCreateImage)
+        {
+            cl_image_desc desc =
+            {
+                CL_MEM_OBJECT_IMAGE2D,
+                width,
+                height,
+                0, 0, // depth, array size (unused)
+                row_pitch,
+                0, 0, 0, 0
+            };
+            object_ = ::clCreateImage(
+                context(),
+                flags,
+                &format,
+                &desc,
+                host_ptr,
+                &error);
+
+            detail::errHandler(error, __CREATE_IMAGE_ERR);
+            if (err != NULL) {
+                *err = error;
+            }
+        }
+#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
+#if CL_HPP_MINIMUM_OPENCL_VERSION < 120
+        if (!useCreateImage)
+        {
+            object_ = ::clCreateImage2D(
+                context(), flags, &format, width, height, row_pitch, host_ptr, &error);
+
+            detail::errHandler(error, __CREATE_IMAGE2D_ERR);
+            if (err != NULL) {
+                *err = error;
+            }
+        }
+#endif // CL_HPP_MINIMUM_OPENCL_VERSION < 120
+    }
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 200
+    /*! \brief Constructs a 2D Image from a buffer.
+     * \note This will share storage with the underlying buffer.
+     *
+     *  Wraps clCreateImage().
+     */
+    Image2D(
+        const Context& context,
+        ImageFormat format,
+        const Buffer &sourceBuffer,
+        size_type width,
+        size_type height,
+        size_type row_pitch = 0,
+        cl_int* err = nullptr)
+    {
+        cl_int error;
+
+        cl_image_desc desc =
+        {
+            CL_MEM_OBJECT_IMAGE2D,
+            width,
+            height,
+            0, 0, // depth, array size (unused)
+            row_pitch,
+            0, 0, 0,
+            // Use buffer as input to image
+            sourceBuffer()
+        };
+        object_ = ::clCreateImage(
+            context(),
+            0, // flags inherited from buffer
+            &format,
+            &desc,
+            nullptr,
+            &error);
+
+        detail::errHandler(error, __CREATE_IMAGE_ERR);
+        if (err != nullptr) {
+            *err = error;
+        }
+    }
+#endif //#if CL_HPP_TARGET_OPENCL_VERSION >= 200
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 200
+    /*! \brief Constructs a 2D Image from an image.
+     * \note This will share storage with the underlying image but may
+     *       reinterpret the channel order and type.
+     *
+     * The image will be created with a descriptor matching the source.
+     *
+     * \param order is the channel order to reinterpret the image data as.
+     *              The channel order may differ as described in the OpenCL
+     *              2.0 API specification.
+     *
+     * Wraps clCreateImage().
+     */
+    Image2D(
+        const Context& context,
+        cl_channel_order order,
+        const Image &sourceImage,
+        cl_int* err = nullptr)
+    {
+        cl_int error;
+
+        // Descriptor fields have to match source image
+        size_type sourceWidth =
+            sourceImage.getImageInfo<CL_IMAGE_WIDTH>();
+        size_type sourceHeight =
+            sourceImage.getImageInfo<CL_IMAGE_HEIGHT>();
+        size_type sourceRowPitch =
+            sourceImage.getImageInfo<CL_IMAGE_ROW_PITCH>();
+        cl_uint sourceNumMIPLevels =
+            sourceImage.getImageInfo<CL_IMAGE_NUM_MIP_LEVELS>();
+        cl_uint sourceNumSamples =
+            sourceImage.getImageInfo<CL_IMAGE_NUM_SAMPLES>();
+        cl_image_format sourceFormat =
+            sourceImage.getImageInfo<CL_IMAGE_FORMAT>();
+
+        // Update only the channel order.
+        // Channel format inherited from source.
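+        // Only image_channel_order is overridden below; the channel data
+        // type and the descriptor dimensions queried above must match the
+        // source image, as required when creating an image view of an image.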
+ sourceFormat.image_channel_order = order; + cl_image_desc desc = + { + CL_MEM_OBJECT_IMAGE2D, + sourceWidth, + sourceHeight, + 0, 0, // depth (unused), array size (unused) + sourceRowPitch, + 0, // slice pitch (unused) + sourceNumMIPLevels, + sourceNumSamples, + // Use buffer as input to image + sourceImage() + }; + object_ = ::clCreateImage( + context(), + 0, // flags should be inherited from mem_object + &sourceFormat, + &desc, + nullptr, + &error); + + detail::errHandler(error, __CREATE_IMAGE_ERR); + if (err != nullptr) { + *err = error; + } + } +#endif //#if CL_HPP_TARGET_OPENCL_VERSION >= 200 + + //! \brief Default constructor - initializes to NULL. + Image2D() { } + + /*! \brief Constructor from cl_mem - takes ownership. + * + * \param retainObject will cause the constructor to retain its cl object. + * Defaults to false to maintain compatibility with + * earlier versions. + * See Memory for further details. + */ + explicit Image2D(const cl_mem& image2D, bool retainObject = false) : + Image(image2D, retainObject) { } + + /*! \brief Assignment from cl_mem - performs shallow copy. + * + * See Memory for further details. + */ + Image2D& operator = (const cl_mem& rhs) + { + Image::operator=(rhs); + return *this; + } + + /*! \brief Copy constructor to forward copy to the superclass correctly. + * Required for MSVC. + */ + Image2D(const Image2D& img) : Image(img) {} + + /*! \brief Copy assignment to forward copy to the superclass correctly. + * Required for MSVC. + */ + Image2D& operator = (const Image2D &img) + { + Image::operator=(img); + return *this; + } + + /*! \brief Move constructor to forward move to the superclass correctly. + * Required for MSVC. + */ + Image2D(Image2D&& img) CL_HPP_NOEXCEPT_ : Image(std::move(img)) {} + + /*! \brief Move assignment to forward move to the superclass correctly. + * Required for MSVC. + */ + Image2D& operator = (Image2D &&img) + { + Image::operator=(std::move(img)); + return *this; + } + +}; + + +#if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS) +/*! \brief Class interface for GL 2D Image Memory objects. + * + * This is provided to facilitate interoperability with OpenGL. + * + * See Memory for details about copy semantics, etc. + * + * \see Memory + * \note Deprecated for OpenCL 1.2. Please use ImageGL instead. + */ +class CL_EXT_PREFIX__VERSION_1_1_DEPRECATED Image2DGL : public Image2D +{ +public: + /*! \brief Constructs an Image2DGL in a specified context, from a given + * GL Texture. + * + * Wraps clCreateFromGLTexture2D(). + */ + Image2DGL( + const Context& context, + cl_mem_flags flags, + cl_GLenum target, + cl_GLint miplevel, + cl_GLuint texobj, + cl_int * err = NULL) + { + cl_int error; + object_ = ::clCreateFromGLTexture2D( + context(), + flags, + target, + miplevel, + texobj, + &error); + + detail::errHandler(error, __CREATE_GL_TEXTURE_2D_ERR); + if (err != NULL) { + *err = error; + } + + } + + //! \brief Default constructor - initializes to NULL. + Image2DGL() : Image2D() { } + + /*! \brief Constructor from cl_mem - takes ownership. + * + * \param retainObject will cause the constructor to retain its cl object. + * Defaults to false to maintain compatibility with + * earlier versions. + * See Memory for further details. + */ + explicit Image2DGL(const cl_mem& image, bool retainObject = false) : + Image2D(image, retainObject) { } + + /*! \brief Assignment from cl_mem - performs shallow copy. + *c + * See Memory for further details. + */ + Image2DGL& operator = (const cl_mem& rhs) + { + Image2D::operator=(rhs); + return *this; + } + + /*! 
\brief Copy constructor to forward copy to the superclass correctly. + * Required for MSVC. + */ + Image2DGL(const Image2DGL& img) : Image2D(img) {} + + /*! \brief Copy assignment to forward copy to the superclass correctly. + * Required for MSVC. + */ + Image2DGL& operator = (const Image2DGL &img) + { + Image2D::operator=(img); + return *this; + } + + /*! \brief Move constructor to forward move to the superclass correctly. + * Required for MSVC. + */ + Image2DGL(Image2DGL&& img) CL_HPP_NOEXCEPT_ : Image2D(std::move(img)) {} + + /*! \brief Move assignment to forward move to the superclass correctly. + * Required for MSVC. + */ + Image2DGL& operator = (Image2DGL &&img) + { + Image2D::operator=(std::move(img)); + return *this; + } + +} CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED; +#endif // CL_USE_DEPRECATED_OPENCL_1_1_APIS + +#if CL_HPP_TARGET_OPENCL_VERSION >= 120 +/*! \class Image2DArray + * \brief Image interface for arrays of 2D images. + */ +class Image2DArray : public Image +{ +public: + Image2DArray( + const Context& context, + cl_mem_flags flags, + ImageFormat format, + size_type arraySize, + size_type width, + size_type height, + size_type rowPitch, + size_type slicePitch, + void* host_ptr = NULL, + cl_int* err = NULL) + { + cl_int error; + cl_image_desc desc = + { + CL_MEM_OBJECT_IMAGE2D_ARRAY, + width, + height, + 0, // depth (unused) + arraySize, + rowPitch, + slicePitch, + 0, 0, 0 + }; + object_ = ::clCreateImage( + context(), + flags, + &format, + &desc, + host_ptr, + &error); + + detail::errHandler(error, __CREATE_IMAGE_ERR); + if (err != NULL) { + *err = error; + } + } + + Image2DArray() { } + + /*! \brief Constructor from cl_mem - takes ownership. + * + * \param retainObject will cause the constructor to retain its cl object. + * Defaults to false to maintain compatibility with + * earlier versions. + * See Memory for further details. + */ + explicit Image2DArray(const cl_mem& imageArray, bool retainObject = false) : Image(imageArray, retainObject) { } + + Image2DArray& operator = (const cl_mem& rhs) + { + Image::operator=(rhs); + return *this; + } + + /*! \brief Copy constructor to forward copy to the superclass correctly. + * Required for MSVC. + */ + Image2DArray(const Image2DArray& img) : Image(img) {} + + /*! \brief Copy assignment to forward copy to the superclass correctly. + * Required for MSVC. + */ + Image2DArray& operator = (const Image2DArray &img) + { + Image::operator=(img); + return *this; + } + + /*! \brief Move constructor to forward move to the superclass correctly. + * Required for MSVC. + */ + Image2DArray(Image2DArray&& img) CL_HPP_NOEXCEPT_ : Image(std::move(img)) {} + + /*! \brief Move assignment to forward move to the superclass correctly. + * Required for MSVC. + */ + Image2DArray& operator = (Image2DArray &&img) + { + Image::operator=(std::move(img)); + return *this; + } +}; +#endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 120 + +/*! \brief Class interface for 3D Image Memory objects. + * + * See Memory for details about copy semantics, etc. + * + * \see Memory + */ +class Image3D : public Image +{ +public: + /*! \brief Constructs a 3D Image in a specified context. + * + * Wraps clCreateImage(). 
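+     *
+     * A minimal usage sketch (illustrative only: assumes an existing
+     * context \c ctx and a tightly packed 64x64x64 RGBA volume \c voxels):
+     * \code
+     * cl::Image3D vol(
+     *     ctx,
+     *     CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR,
+     *     cl::ImageFormat(CL_RGBA, CL_UNORM_INT8),
+     *     64, 64, 64,
+     *     0, 0,       // row and slice pitch inferred from width/height
+     *     voxels);
+     * \endcode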
+ */ + Image3D( + const Context& context, + cl_mem_flags flags, + ImageFormat format, + size_type width, + size_type height, + size_type depth, + size_type row_pitch = 0, + size_type slice_pitch = 0, + void* host_ptr = NULL, + cl_int* err = NULL) + { + cl_int error; + bool useCreateImage; + +#if CL_HPP_TARGET_OPENCL_VERSION >= 120 && CL_HPP_MINIMUM_OPENCL_VERSION < 120 + // Run-time decision based on the actual platform + { + cl_uint version = detail::getContextPlatformVersion(context()); + useCreateImage = (version >= 0x10002); // OpenCL 1.2 or above + } +#elif CL_HPP_TARGET_OPENCL_VERSION >= 120 + useCreateImage = true; +#else + useCreateImage = false; +#endif + +#if CL_HPP_TARGET_OPENCL_VERSION >= 120 + if (useCreateImage) + { + cl_image_desc desc = + { + CL_MEM_OBJECT_IMAGE3D, + width, + height, + depth, + 0, // array size (unused) + row_pitch, + slice_pitch, + 0, 0, 0 + }; + object_ = ::clCreateImage( + context(), + flags, + &format, + &desc, + host_ptr, + &error); + + detail::errHandler(error, __CREATE_IMAGE_ERR); + if (err != NULL) { + *err = error; + } + } +#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120 +#if CL_HPP_MINIMUM_OPENCL_VERSION < 120 + if (!useCreateImage) + { + object_ = ::clCreateImage3D( + context(), flags, &format, width, height, depth, row_pitch, + slice_pitch, host_ptr, &error); + + detail::errHandler(error, __CREATE_IMAGE3D_ERR); + if (err != NULL) { + *err = error; + } + } +#endif // CL_HPP_MINIMUM_OPENCL_VERSION < 120 + } + + //! \brief Default constructor - initializes to NULL. + Image3D() : Image() { } + + /*! \brief Constructor from cl_mem - takes ownership. + * + * \param retainObject will cause the constructor to retain its cl object. + * Defaults to false to maintain compatibility with + * earlier versions. + * See Memory for further details. + */ + explicit Image3D(const cl_mem& image3D, bool retainObject = false) : + Image(image3D, retainObject) { } + + /*! \brief Assignment from cl_mem - performs shallow copy. + * + * See Memory for further details. + */ + Image3D& operator = (const cl_mem& rhs) + { + Image::operator=(rhs); + return *this; + } + + /*! \brief Copy constructor to forward copy to the superclass correctly. + * Required for MSVC. + */ + Image3D(const Image3D& img) : Image(img) {} + + /*! \brief Copy assignment to forward copy to the superclass correctly. + * Required for MSVC. + */ + Image3D& operator = (const Image3D &img) + { + Image::operator=(img); + return *this; + } + + /*! \brief Move constructor to forward move to the superclass correctly. + * Required for MSVC. + */ + Image3D(Image3D&& img) CL_HPP_NOEXCEPT_ : Image(std::move(img)) {} + + /*! \brief Move assignment to forward move to the superclass correctly. + * Required for MSVC. + */ + Image3D& operator = (Image3D &&img) + { + Image::operator=(std::move(img)); + return *this; + } +}; + +#if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS) +/*! \brief Class interface for GL 3D Image Memory objects. + * + * This is provided to facilitate interoperability with OpenGL. + * + * See Memory for details about copy semantics, etc. + * + * \see Memory + */ +class Image3DGL : public Image3D +{ +public: + /*! \brief Constructs an Image3DGL in a specified context, from a given + * GL Texture. + * + * Wraps clCreateFromGLTexture3D(). 
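+     *
+     * Illustrative sketch (\c glTex is a hypothetical GL_TEXTURE_3D object
+     * created by the application, \c ctx a context created with GL sharing
+     * enabled):
+     * \code
+     * cl::Image3DGL img(ctx, CL_MEM_READ_WRITE, GL_TEXTURE_3D, 0, glTex);
+     * \endcode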
+ */ + Image3DGL( + const Context& context, + cl_mem_flags flags, + cl_GLenum target, + cl_GLint miplevel, + cl_GLuint texobj, + cl_int * err = NULL) + { + cl_int error; + object_ = ::clCreateFromGLTexture3D( + context(), + flags, + target, + miplevel, + texobj, + &error); + + detail::errHandler(error, __CREATE_GL_TEXTURE_3D_ERR); + if (err != NULL) { + *err = error; + } + } + + //! \brief Default constructor - initializes to NULL. + Image3DGL() : Image3D() { } + + /*! \brief Constructor from cl_mem - takes ownership. + * + * \param retainObject will cause the constructor to retain its cl object. + * Defaults to false to maintain compatibility with + * earlier versions. + * See Memory for further details. + */ + explicit Image3DGL(const cl_mem& image, bool retainObject = false) : + Image3D(image, retainObject) { } + + /*! \brief Assignment from cl_mem - performs shallow copy. + * + * See Memory for further details. + */ + Image3DGL& operator = (const cl_mem& rhs) + { + Image3D::operator=(rhs); + return *this; + } + + /*! \brief Copy constructor to forward copy to the superclass correctly. + * Required for MSVC. + */ + Image3DGL(const Image3DGL& img) : Image3D(img) {} + + /*! \brief Copy assignment to forward copy to the superclass correctly. + * Required for MSVC. + */ + Image3DGL& operator = (const Image3DGL &img) + { + Image3D::operator=(img); + return *this; + } + + /*! \brief Move constructor to forward move to the superclass correctly. + * Required for MSVC. + */ + Image3DGL(Image3DGL&& img) CL_HPP_NOEXCEPT_ : Image3D(std::move(img)) {} + + /*! \brief Move assignment to forward move to the superclass correctly. + * Required for MSVC. + */ + Image3DGL& operator = (Image3DGL &&img) + { + Image3D::operator=(std::move(img)); + return *this; + } +}; +#endif // CL_USE_DEPRECATED_OPENCL_1_1_APIS + +#if CL_HPP_TARGET_OPENCL_VERSION >= 120 +/*! \class ImageGL + * \brief general image interface for GL interop. + * We abstract the 2D and 3D GL images into a single instance here + * that wraps all GL sourced images on the grounds that setup information + * was performed by OpenCL anyway. + */ +class ImageGL : public Image +{ +public: + ImageGL( + const Context& context, + cl_mem_flags flags, + cl_GLenum target, + cl_GLint miplevel, + cl_GLuint texobj, + cl_int * err = NULL) + { + cl_int error; + object_ = ::clCreateFromGLTexture( + context(), + flags, + target, + miplevel, + texobj, + &error); + + detail::errHandler(error, __CREATE_GL_TEXTURE_ERR); + if (err != NULL) { + *err = error; + } + } + + ImageGL() : Image() { } + + /*! \brief Constructor from cl_mem - takes ownership. + * + * \param retainObject will cause the constructor to retain its cl object. + * Defaults to false to maintain compatibility with + * earlier versions. + * See Memory for further details. + */ + explicit ImageGL(const cl_mem& image, bool retainObject = false) : + Image(image, retainObject) { } + + ImageGL& operator = (const cl_mem& rhs) + { + Image::operator=(rhs); + return *this; + } + + /*! \brief Copy constructor to forward copy to the superclass correctly. + * Required for MSVC. + */ + ImageGL(const ImageGL& img) : Image(img) {} + + /*! \brief Copy assignment to forward copy to the superclass correctly. + * Required for MSVC. + */ + ImageGL& operator = (const ImageGL &img) + { + Image::operator=(img); + return *this; + } + + /*! \brief Move constructor to forward move to the superclass correctly. + * Required for MSVC. + */ + ImageGL(ImageGL&& img) CL_HPP_NOEXCEPT_ : Image(std::move(img)) {} + + /*! 
\brief Move assignment to forward move to the superclass correctly.
+     * Required for MSVC.
+     */
+    ImageGL& operator = (ImageGL &&img)
+    {
+        Image::operator=(std::move(img));
+        return *this;
+    }
+};
+#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
+
+
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 200
+/*! \brief Class interface for Pipe Memory Objects.
+*
+*  See Memory for details about copy semantics, etc.
+*
+*  \see Memory
+*/
+class Pipe : public Memory
+{
+public:
+
+    /*! \brief Constructs a Pipe in a specified context.
+     *
+     * Wraps clCreatePipe().
+     * @param context Context in which to create the pipe.
+     * @param flags Bitfield. Only CL_MEM_READ_WRITE and CL_MEM_HOST_NO_ACCESS are valid.
+     * @param packet_size Size in bytes of a single packet of the pipe.
+     * @param max_packets Number of packets that may be stored in the pipe.
+     *
+     */
+    Pipe(
+        const Context& context,
+        cl_uint packet_size,
+        cl_uint max_packets,
+        cl_int* err = NULL)
+    {
+        cl_int error;
+
+        cl_mem_flags flags = CL_MEM_READ_WRITE | CL_MEM_HOST_NO_ACCESS;
+        object_ = ::clCreatePipe(context(), flags, packet_size, max_packets, nullptr, &error);
+
+        detail::errHandler(error, __CREATE_PIPE_ERR);
+        if (err != NULL) {
+            *err = error;
+        }
+    }
+
+    /*! \brief Constructs a Pipe in the default context.
+     *
+     * Wraps clCreatePipe().
+     * @param flags Bitfield. Only CL_MEM_READ_WRITE and CL_MEM_HOST_NO_ACCESS are valid.
+     * @param packet_size Size in bytes of a single packet of the pipe.
+     * @param max_packets Number of packets that may be stored in the pipe.
+     *
+     */
+    Pipe(
+        cl_uint packet_size,
+        cl_uint max_packets,
+        cl_int* err = NULL)
+    {
+        cl_int error;
+
+        Context context = Context::getDefault(err);
+
+        cl_mem_flags flags = CL_MEM_READ_WRITE | CL_MEM_HOST_NO_ACCESS;
+        object_ = ::clCreatePipe(context(), flags, packet_size, max_packets, nullptr, &error);
+
+        detail::errHandler(error, __CREATE_PIPE_ERR);
+        if (err != NULL) {
+            *err = error;
+        }
+    }
+
+    //! \brief Default constructor - initializes to NULL.
+    Pipe() : Memory() { }
+
+    /*! \brief Constructor from cl_mem - takes ownership.
+     *
+     * \param retainObject will cause the constructor to retain its cl object.
+     *                     Defaults to false to maintain compatibility with earlier versions.
+     *
+     *  See Memory for further details.
+     */
+    explicit Pipe(const cl_mem& pipe, bool retainObject = false) :
+        Memory(pipe, retainObject) { }
+
+    /*! \brief Assignment from cl_mem - performs shallow copy.
+     *
+     *  See Memory for further details.
+     */
+    Pipe& operator = (const cl_mem& rhs)
+    {
+        Memory::operator=(rhs);
+        return *this;
+    }
+
+    /*! \brief Copy constructor to forward copy to the superclass correctly.
+     * Required for MSVC.
+     */
+    Pipe(const Pipe& pipe) : Memory(pipe) {}
+
+    /*! \brief Copy assignment to forward copy to the superclass correctly.
+     * Required for MSVC.
+     */
+    Pipe& operator = (const Pipe &pipe)
+    {
+        Memory::operator=(pipe);
+        return *this;
+    }
+
+    /*! \brief Move constructor to forward move to the superclass correctly.
+     * Required for MSVC.
+     */
+    Pipe(Pipe&& pipe) CL_HPP_NOEXCEPT_ : Memory(std::move(pipe)) {}
+
+    /*! \brief Move assignment to forward move to the superclass correctly.
+     * Required for MSVC.
+     */
+    Pipe& operator = (Pipe &&pipe)
+    {
+        Memory::operator=(std::move(pipe));
+        return *this;
+    }
+
+    //! \brief Wrapper for clGetMemObjectInfo().
+    template <typename T>
+    cl_int getInfo(cl_pipe_info name, T* param) const
+    {
+        return detail::errHandler(
+            detail::getInfo(&::clGetPipeInfo, object_, name, param),
+            __GET_PIPE_INFO_ERR);
+    }
+
+    //! \brief Wrapper for clGetMemObjectInfo() that returns by value.
+    template <cl_int name> typename
+    detail::param_traits<detail::cl_pipe_info, name>::param_type
+    getInfo(cl_int* err = NULL) const
+    {
+        typename detail::param_traits<
+            detail::cl_pipe_info, name>::param_type param;
+        cl_int result = getInfo(name, &param);
+        if (err != NULL) {
+            *err = result;
+        }
+        return param;
+    }
+}; // class Pipe
+#endif // CL_HPP_TARGET_OPENCL_VERSION >= 200
+
+
+/*! \brief Class interface for cl_sampler.
+ *
+ *  \note Copies of these objects are shallow, meaning that the copy will refer
+ *        to the same underlying cl_sampler as the original.  For details, see
+ *        clRetainSampler() and clReleaseSampler().
+ *
+ *  \see cl_sampler
+ */
+class Sampler : public detail::Wrapper<cl_sampler>
+{
+public:
+    //! \brief Default constructor - initializes to NULL.
+    Sampler() { }
+
+    /*! \brief Constructs a Sampler in a specified context.
+     *
+     *  Wraps clCreateSampler().
+     */
+    Sampler(
+        const Context& context,
+        cl_bool normalized_coords,
+        cl_addressing_mode addressing_mode,
+        cl_filter_mode filter_mode,
+        cl_int* err = NULL)
+    {
+        cl_int error;
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 200
+        cl_sampler_properties sampler_properties[] = {
+            CL_SAMPLER_NORMALIZED_COORDS, normalized_coords,
+            CL_SAMPLER_ADDRESSING_MODE, addressing_mode,
+            CL_SAMPLER_FILTER_MODE, filter_mode,
+            0 };
+        object_ = ::clCreateSamplerWithProperties(
+            context(),
+            sampler_properties,
+            &error);
+
+        detail::errHandler(error, __CREATE_SAMPLER_WITH_PROPERTIES_ERR);
+        if (err != NULL) {
+            *err = error;
+        }
+#else
+        object_ = ::clCreateSampler(
+            context(),
+            normalized_coords,
+            addressing_mode,
+            filter_mode,
+            &error);
+
+        detail::errHandler(error, __CREATE_SAMPLER_ERR);
+        if (err != NULL) {
+            *err = error;
+        }
+#endif
+    }
+
+    /*! \brief Constructor from cl_sampler - takes ownership.
+     *
+     * \param retainObject will cause the constructor to retain its cl object.
+     *                     Defaults to false to maintain compatibility with
+     *                     earlier versions.
+     *  This effectively transfers ownership of a refcount on the cl_sampler
+     *  into the new Sampler object.
+     */
+    explicit Sampler(const cl_sampler& sampler, bool retainObject = false) :
+        detail::Wrapper<cl_type>(sampler, retainObject) { }
+
+    /*! \brief Assignment operator from cl_sampler - takes ownership.
+     *
+     *  This effectively transfers ownership of a refcount on the rhs and calls
+     *  clReleaseSampler() on the value previously held by this instance.
+     */
+    Sampler& operator = (const cl_sampler& rhs)
+    {
+        detail::Wrapper<cl_type>::operator=(rhs);
+        return *this;
+    }
+
+    /*! \brief Copy constructor to forward copy to the superclass correctly.
+     * Required for MSVC.
+     */
+    Sampler(const Sampler& sam) : detail::Wrapper<cl_type>(sam) {}
+
+    /*! \brief Copy assignment to forward copy to the superclass correctly.
+     * Required for MSVC.
+     */
+    Sampler& operator = (const Sampler &sam)
+    {
+        detail::Wrapper<cl_type>::operator=(sam);
+        return *this;
+    }
+
+    /*! \brief Move constructor to forward move to the superclass correctly.
+     * Required for MSVC.
+     */
+    Sampler(Sampler&& sam) CL_HPP_NOEXCEPT_ : detail::Wrapper<cl_type>(std::move(sam)) {}
+
+    /*! \brief Move assignment to forward move to the superclass correctly.
+     * Required for MSVC.
+     */
+    Sampler& operator = (Sampler &&sam)
+    {
+        detail::Wrapper<cl_type>::operator=(std::move(sam));
+        return *this;
+    }
+
+    //! \brief Wrapper for clGetSamplerInfo().
+    template <typename T>
+    cl_int getInfo(cl_sampler_info name, T* param) const
+    {
+        return detail::errHandler(
+            detail::getInfo(&::clGetSamplerInfo, object_, name, param),
+            __GET_SAMPLER_INFO_ERR);
+    }
+
+    //! \brief Wrapper for clGetSamplerInfo() that returns by value.
+    template <cl_int name> typename
+    detail::param_traits<detail::cl_sampler_info, name>::param_type
+    getInfo(cl_int* err = NULL) const
+    {
+        typename detail::param_traits<
+            detail::cl_sampler_info, name>::param_type param;
+        cl_int result = getInfo(name, &param);
+        if (err != NULL) {
+            *err = result;
+        }
+        return param;
+    }
+};
+
+class Program;
+class CommandQueue;
+class DeviceCommandQueue;
+class Kernel;
+
+//! \brief Class interface for specifying NDRange values.
+class NDRange
+{
+private:
+    size_type sizes_[3];
+    cl_uint dimensions_;
+
+public:
+    //! \brief Default constructor - resulting range has zero dimensions.
+    NDRange()
+        : dimensions_(0)
+    {
+        sizes_[0] = 0;
+        sizes_[1] = 0;
+        sizes_[2] = 0;
+    }
+
+    //! \brief Constructs one-dimensional range.
+    NDRange(size_type size0)
+        : dimensions_(1)
+    {
+        sizes_[0] = size0;
+        sizes_[1] = 1;
+        sizes_[2] = 1;
+    }
+
+    //! \brief Constructs two-dimensional range.
+    NDRange(size_type size0, size_type size1)
+        : dimensions_(2)
+    {
+        sizes_[0] = size0;
+        sizes_[1] = size1;
+        sizes_[2] = 1;
+    }
+
+    //! \brief Constructs three-dimensional range.
+    NDRange(size_type size0, size_type size1, size_type size2)
+        : dimensions_(3)
+    {
+        sizes_[0] = size0;
+        sizes_[1] = size1;
+        sizes_[2] = size2;
+    }
+
+    /*! \brief Conversion operator to const size_type *.
+     *
+     *  \returns a pointer to the size of the first dimension.
+     */
+    operator const size_type*() const {
+        return sizes_;
+    }
+
+    //! \brief Queries the number of dimensions in the range.
+    size_type dimensions() const
+    {
+        return dimensions_;
+    }
+
+    //! \brief Returns the size of the object in bytes based on the
+    // runtime number of dimensions
+    size_type size() const
+    {
+        return dimensions_*sizeof(size_type);
+    }
+
+    size_type* get()
+    {
+        return sizes_;
+    }
+
+    const size_type* get() const
+    {
+        return sizes_;
+    }
+};
+
+//! \brief A zero-dimensional range.
+static const NDRange NullRange;
+
+//! \brief Local address wrapper for use with Kernel::setArg
+struct LocalSpaceArg
+{
+    size_type size_;
+};
+
+namespace detail {
+
+template <typename T, class Enable = void>
+struct KernelArgumentHandler;
+
+// Enable for objects that are not subclasses of memory
+// Pointers, constants etc
+template <typename T>
+struct KernelArgumentHandler<T, typename std::enable_if<!std::is_base_of<cl::Memory, T>::value>::type>
+{
+    static size_type size(const T&) { return sizeof(T); }
+    static const T* ptr(const T& value) { return &value; }
+};
+
+// Enable for subclasses of memory where we want to get a reference to the cl_mem out
+// and pass that in for safety
+template <typename T>
+struct KernelArgumentHandler<T, typename std::enable_if<std::is_base_of<cl::Memory, T>::value>::type>
+{
+    static size_type size(const T&) { return sizeof(cl_mem); }
+    static const cl_mem* ptr(const T& value) { return &(value()); }
+};
+
+// Specialization for DeviceCommandQueue defined later
+
+template <>
+struct KernelArgumentHandler<LocalSpaceArg, void>
+{
+    static size_type size(const LocalSpaceArg& value) { return value.size_; }
+    static const void* ptr(const LocalSpaceArg&) { return NULL; }
+};
+
+}
+//! \endcond
+
+/*! Local
+ * \brief Helper function for generating LocalSpaceArg objects.
+ */
+inline LocalSpaceArg
+Local(size_type size)
+{
+    LocalSpaceArg ret = { size };
+    return ret;
+}
+
+/*! \brief Class interface for cl_kernel.
+ *
+ *  \note Copies of these objects are shallow, meaning that the copy will refer
+ *        to the same underlying cl_kernel as the original.  For details, see
+ *        clRetainKernel() and clReleaseKernel().
+ *
+ *  \see cl_kernel
+ */
+class Kernel : public detail::Wrapper<cl_kernel>
+{
+public:
+    inline Kernel(const Program& program, const char* name, cl_int* err = NULL);
+
+    //! \brief Default constructor - initializes to NULL.
+    Kernel() { }
+
+    /*! \brief Constructor from cl_kernel - takes ownership.
+     *
+     * \param retainObject will cause the constructor to retain its cl object.
+     *                     Defaults to false to maintain compatibility with
+     *                     earlier versions.
+     *  This effectively transfers ownership of a refcount on the cl_kernel
+     *  into the new Kernel object.
+     */
+    explicit Kernel(const cl_kernel& kernel, bool retainObject = false) :
+        detail::Wrapper<cl_type>(kernel, retainObject) { }
+
+    /*! \brief Assignment operator from cl_kernel - takes ownership.
+     *
+     *  This effectively transfers ownership of a refcount on the rhs and calls
+     *  clReleaseKernel() on the value previously held by this instance.
+     */
+    Kernel& operator = (const cl_kernel& rhs)
+    {
+        detail::Wrapper<cl_type>::operator=(rhs);
+        return *this;
+    }
+
+    /*! \brief Copy constructor to forward copy to the superclass correctly.
+     * Required for MSVC.
+     */
+    Kernel(const Kernel& kernel) : detail::Wrapper<cl_type>(kernel) {}
+
+    /*! \brief Copy assignment to forward copy to the superclass correctly.
+     * Required for MSVC.
+     */
+    Kernel& operator = (const Kernel &kernel)
+    {
+        detail::Wrapper<cl_type>::operator=(kernel);
+        return *this;
+    }
+
+    /*! \brief Move constructor to forward move to the superclass correctly.
+     * Required for MSVC.
+     */
+    Kernel(Kernel&& kernel) CL_HPP_NOEXCEPT_ : detail::Wrapper<cl_type>(std::move(kernel)) {}
+
+    /*! \brief Move assignment to forward move to the superclass correctly.
+     * Required for MSVC.
+     */
+    Kernel& operator = (Kernel &&kernel)
+    {
+        detail::Wrapper<cl_type>::operator=(std::move(kernel));
+        return *this;
+    }
+
+    template <typename T>
+    cl_int getInfo(cl_kernel_info name, T* param) const
+    {
+        return detail::errHandler(
+            detail::getInfo(&::clGetKernelInfo, object_, name, param),
+            __GET_KERNEL_INFO_ERR);
+    }
+
+    template <cl_int name> typename
+    detail::param_traits<detail::cl_kernel_info, name>::param_type
+    getInfo(cl_int* err = NULL) const
+    {
+        typename detail::param_traits<
+            detail::cl_kernel_info, name>::param_type param;
+        cl_int result = getInfo(name, &param);
+        if (err != NULL) {
+            *err = result;
+        }
+        return param;
+    }
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 120
+    template <typename T>
+    cl_int getArgInfo(cl_uint argIndex, cl_kernel_arg_info name, T* param) const
+    {
+        return detail::errHandler(
+            detail::getInfo(&::clGetKernelArgInfo, object_, argIndex, name, param),
+            __GET_KERNEL_ARG_INFO_ERR);
+    }
+
+    template <cl_int name> typename
+    detail::param_traits<detail::cl_kernel_arg_info, name>::param_type
+    getArgInfo(cl_uint argIndex, cl_int* err = NULL) const
+    {
+        typename detail::param_traits<
+            detail::cl_kernel_arg_info, name>::param_type param;
+        cl_int result = getArgInfo(argIndex, name, &param);
+        if (err != NULL) {
+            *err = result;
+        }
+        return param;
+    }
+#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
+
+    template <typename T>
+    cl_int getWorkGroupInfo(
+        const Device& device, cl_kernel_work_group_info name, T* param) const
+    {
+        return detail::errHandler(
+            detail::getInfo(
+                &::clGetKernelWorkGroupInfo, object_, device(), name, param),
+                __GET_KERNEL_WORK_GROUP_INFO_ERR);
+    }
+
+    template <cl_int name> typename
+    detail::param_traits<detail::cl_kernel_work_group_info, name>::param_type
+    getWorkGroupInfo(const Device& device, cl_int* err = NULL) const
+    {
+        typename detail::param_traits<
+            detail::cl_kernel_work_group_info, name>::param_type param;
+        cl_int result = getWorkGroupInfo(device, name, &param);
+        if (err != NULL) {
+            *err = result;
+        }
+        return param;
+    }
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 200
+#if defined(CL_HPP_USE_CL_SUB_GROUPS_KHR)
+    cl_int getSubGroupInfo(const cl::Device &dev, cl_kernel_sub_group_info name, const cl::NDRange &range, size_type* param) const
+    {
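+        // clGetKernelSubGroupInfoKHR is an extension entry point, so it is
+        // resolved at run time via CL_HPP_INIT_CL_EXT_FCN_PTR_ (which caches
+        // the pointer in the static below) rather than linked directly.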
+        typedef clGetKernelSubGroupInfoKHR_fn PFN_clGetKernelSubGroupInfoKHR;
+        static PFN_clGetKernelSubGroupInfoKHR pfn_clGetKernelSubGroupInfoKHR = NULL;
+        CL_HPP_INIT_CL_EXT_FCN_PTR_(clGetKernelSubGroupInfoKHR);
+
+        return detail::errHandler(
+            pfn_clGetKernelSubGroupInfoKHR(object_, dev(), name, range.size(), range.get(), sizeof(size_type), param, nullptr),
+            __GET_KERNEL_ARG_INFO_ERR);
+    }
+
+    template <cl_kernel_sub_group_info name>
+    size_type getSubGroupInfo(const cl::Device &dev, const cl::NDRange &range, cl_int* err = NULL) const
+    {
+        size_type param;
+        cl_int result = getSubGroupInfo(dev, name, range, &param);
+        if (err != NULL) {
+            *err = result;
+        }
+        return param;
+    }
+#endif // #if defined(CL_HPP_USE_CL_SUB_GROUPS_KHR)
+#endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 200
+    /*! \brief setArg overload taking a shared_ptr type
+     */
+    template<typename T, class D>
+    cl_int setArg(cl_uint index, const cl::pointer<T, D> &argPtr)
+    {
+        return detail::errHandler(
+            ::clSetKernelArgSVMPointer(object_, index, argPtr.get()),
+            __SET_KERNEL_ARGS_ERR);
+    }
+
+    /*! \brief setArg overload taking a vector type.
+     */
+    template<typename T, class Alloc>
+    cl_int setArg(cl_uint index, const cl::vector<T, Alloc> &argPtr)
+    {
+        return detail::errHandler(
+            ::clSetKernelArgSVMPointer(object_, index, argPtr.data()),
+            __SET_KERNEL_ARGS_ERR);
+    }
+
+    /*! \brief setArg overload taking a pointer type
+     */
+    template<typename T>
+    typename std::enable_if<std::is_pointer<T>::value, cl_int>::type
+    setArg(cl_uint index, const T argPtr)
+    {
+        return detail::errHandler(
+            ::clSetKernelArgSVMPointer(object_, index, argPtr),
+            __SET_KERNEL_ARGS_ERR);
+    }
+#endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
+
+    /*! \brief setArg overload taking a POD type
+     */
+    template <typename T>
+    typename std::enable_if<!std::is_pointer<T>::value, cl_int>::type
+    setArg(cl_uint index, const T &value)
+    {
+        return detail::errHandler(
+            ::clSetKernelArg(
+                object_,
+                index,
+                detail::KernelArgumentHandler<T>::size(value),
+                detail::KernelArgumentHandler<T>::ptr(value)),
+            __SET_KERNEL_ARGS_ERR);
+    }
+
+    cl_int setArg(cl_uint index, size_type size, const void* argPtr)
+    {
+        return detail::errHandler(
+            ::clSetKernelArg(object_, index, size, argPtr),
+            __SET_KERNEL_ARGS_ERR);
+    }
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 200
+    /*!
+     * Specify a vector of SVM pointers that the kernel may access in
+     * addition to its arguments.
+     */
+    cl_int setSVMPointers(const vector<void*> &pointerList)
+    {
+        return detail::errHandler(
+            ::clSetKernelExecInfo(
+                object_,
+                CL_KERNEL_EXEC_INFO_SVM_PTRS,
+                sizeof(void*)*pointerList.size(),
+                pointerList.data()));
+    }
+
+    /*!
+     * Specify a std::array of SVM pointers that the kernel may access in
+     * addition to its arguments.
+     */
+    template<int ArrayLength>
+    cl_int setSVMPointers(const std::array<void*, ArrayLength> &pointerList)
+    {
+        return detail::errHandler(
+            ::clSetKernelExecInfo(
+                object_,
+                CL_KERNEL_EXEC_INFO_SVM_PTRS,
+                sizeof(void*)*pointerList.size(),
+                pointerList.data()));
+    }
+
+    /*! \brief Enable fine-grained system SVM.
+     *
+     * \note It is only possible to enable fine-grained system SVM if all devices
+     *       in the context associated with kernel support it.
+     *
+     * \param svmEnabled True if fine-grained system SVM is requested. False otherwise.
+     * \return CL_SUCCESS if the function was executed successfully. CL_INVALID_OPERATION
+     *         if no devices in the context support fine-grained system SVM.
+     *
+     * \see clSetKernelExecInfo
+     */
+    cl_int enableFineGrainedSystemSVM(bool svmEnabled)
+    {
+        cl_bool svmEnabled_ = svmEnabled ? CL_TRUE : CL_FALSE;
+        return detail::errHandler(
+            ::clSetKernelExecInfo(
+                object_,
+                CL_KERNEL_EXEC_INFO_SVM_FINE_GRAIN_SYSTEM,
+                sizeof(cl_bool),
+                &svmEnabled_
+                )
+            );
+    }
+
+    template<int index, int ArrayLength, class D, typename T0, typename... Ts>
+    void setSVMPointersHelper(std::array<void*, ArrayLength> &pointerList, const pointer<T0, D> &t0, Ts... ts)
+    {
+        pointerList[index] = static_cast<void*>(t0.get());
+        setSVMPointersHelper<index + 1, ArrayLength>(pointerList, ts...);
+    }
+
+    template<int index, int ArrayLength, typename T0, typename... Ts>
+    typename std::enable_if<std::is_pointer<T0>::value, void>::type
+    setSVMPointersHelper(std::array<void*, ArrayLength> &pointerList, T0 t0, Ts... ts)
+    {
+        pointerList[index] = static_cast<void*>(t0);
+        setSVMPointersHelper<index + 1, ArrayLength>(pointerList, ts...);
+    }
+
+    template<int index, int ArrayLength, typename T0, class D>
+    void setSVMPointersHelper(std::array<void*, ArrayLength> &pointerList, const pointer<T0, D> &t0)
+    {
+        pointerList[index] = static_cast<void*>(t0.get());
+    }
+
+    template<int index, int ArrayLength, typename T0>
+    typename std::enable_if<std::is_pointer<T0>::value, void>::type
+    setSVMPointersHelper(std::array<void*, ArrayLength> &pointerList, T0 t0)
+    {
+        pointerList[index] = static_cast<void*>(t0);
+    }
+
+    template<typename T0, typename... Ts>
+    cl_int setSVMPointers(const T0 &t0, Ts... ts)
+    {
+        std::array<void*, 1 + sizeof...(Ts)> pointerList;
+
+        setSVMPointersHelper<0, 1 + sizeof...(Ts)>(pointerList, t0, ts...);
+        return detail::errHandler(
+            ::clSetKernelExecInfo(
+                object_,
+                CL_KERNEL_EXEC_INFO_SVM_PTRS,
+                sizeof(void*)*(1 + sizeof...(Ts)),
+                pointerList.data()));
+    }
+#endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
+};
+
+/*! \class Program
+ * \brief Program interface that implements cl_program.
+ */
+class Program : public detail::Wrapper<cl_program>
+{
+public:
+#if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
+    typedef vector<vector<unsigned char>> Binaries;
+    typedef vector<string> Sources;
+#else // #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
+    typedef vector<std::pair<const void*, size_type> > Binaries;
+    typedef vector<std::pair<const char*, size_type> > Sources;
+#endif // #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
+
+    Program(
+        const string& source,
+        bool build = false,
+        cl_int* err = NULL)
+    {
+        cl_int error;
+
+        const char * strings = source.c_str();
+        const size_type length = source.size();
+
+        Context context = Context::getDefault(err);
+
+        object_ = ::clCreateProgramWithSource(
+            context(), (cl_uint)1, &strings, &length, &error);
+
+        detail::errHandler(error, __CREATE_PROGRAM_WITH_SOURCE_ERR);
+
+        if (error == CL_SUCCESS && build) {
+
+            error = ::clBuildProgram(
+                object_,
+                0,
+                NULL,
+#if !defined(CL_HPP_CL_1_2_DEFAULT_BUILD)
+                "-cl-std=CL2.0",
+#else
+                "",
+#endif // #if !defined(CL_HPP_CL_1_2_DEFAULT_BUILD)
+                NULL,
+                NULL);
+
+            detail::buildErrHandler(error, __BUILD_PROGRAM_ERR, getBuildInfo<CL_PROGRAM_BUILD_LOG>());
+        }
+
+        if (err != NULL) {
+            *err = error;
+        }
+    }
+
+    Program(
+        const Context& context,
+        const string& source,
+        bool build = false,
+        cl_int* err = NULL)
+    {
+        cl_int error;
+
+        const char * strings = source.c_str();
+        const size_type length = source.size();
+
+        object_ = ::clCreateProgramWithSource(
+            context(), (cl_uint)1, &strings, &length, &error);
+
+        detail::errHandler(error, __CREATE_PROGRAM_WITH_SOURCE_ERR);
+
+        if (error == CL_SUCCESS && build) {
+            error = ::clBuildProgram(
+                object_,
+                0,
+                NULL,
+#if !defined(CL_HPP_CL_1_2_DEFAULT_BUILD)
+                "-cl-std=CL2.0",
+#else
+                "",
+#endif // #if !defined(CL_HPP_CL_1_2_DEFAULT_BUILD)
+                NULL,
+                NULL);
+
+            detail::buildErrHandler(error, __BUILD_PROGRAM_ERR, getBuildInfo<CL_PROGRAM_BUILD_LOG>());
+        }
+
+        if (err != NULL) {
+            *err = error;
+        }
+    }
+
+    /**
+     * Create a program from a vector of source strings and the default context.
+     * Does not compile or link the program.
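+     *
+     * A brief sketch of intended use (the kernel source is illustrative):
+     * \code
+     * cl::Program::Sources src{ "kernel void k(global int *p) { p[0] = 1; }" };
+     * cl_int err = CL_SUCCESS;
+     * cl::Program prog(src, &err);   // uses the default context
+     * \endcode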
+     */
+    Program(
+        const Sources& sources,
+        cl_int* err = NULL)
+    {
+        cl_int error;
+        Context context = Context::getDefault(err);
+
+        const size_type n = (size_type)sources.size();
+
+        vector<size_type> lengths(n);
+        vector<const char*> strings(n);
+
+        for (size_type i = 0; i < n; ++i) {
+#if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
+            strings[i] = sources[(int)i].data();
+            lengths[i] = sources[(int)i].length();
+#else // #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
+            strings[i] = sources[(int)i].first;
+            lengths[i] = sources[(int)i].second;
+#endif // #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
+        }
+
+        object_ = ::clCreateProgramWithSource(
+            context(), (cl_uint)n, strings.data(), lengths.data(), &error);
+
+        detail::errHandler(error, __CREATE_PROGRAM_WITH_SOURCE_ERR);
+        if (err != NULL) {
+            *err = error;
+        }
+    }
+
+    /**
+     * Create a program from a vector of source strings and a provided context.
+     * Does not compile or link the program.
+     */
+    Program(
+        const Context& context,
+        const Sources& sources,
+        cl_int* err = NULL)
+    {
+        cl_int error;
+
+        const size_type n = (size_type)sources.size();
+
+        vector<size_type> lengths(n);
+        vector<const char*> strings(n);
+
+        for (size_type i = 0; i < n; ++i) {
+#if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
+            strings[i] = sources[(int)i].data();
+            lengths[i] = sources[(int)i].length();
+#else // #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
+            strings[i] = sources[(int)i].first;
+            lengths[i] = sources[(int)i].second;
+#endif // #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
+        }
+
+        object_ = ::clCreateProgramWithSource(
+            context(), (cl_uint)n, strings.data(), lengths.data(), &error);
+
+        detail::errHandler(error, __CREATE_PROGRAM_WITH_SOURCE_ERR);
+        if (err != NULL) {
+            *err = error;
+        }
+    }
+
+    /**
+     * Construct a program object from a list of devices and a per-device list of binaries.
+     * \param context A valid OpenCL context in which to construct the program.
+     * \param devices A vector of OpenCL device objects for which the program will be created.
+     * \param binaries A vector of pairs of a pointer to a binary object and its length.
+     * \param binaryStatus An optional vector that on completion will be resized to
+     *                     match the size of binaries and filled with values to specify if each binary
+     *                     was successfully loaded.
+     *                     Set to CL_SUCCESS if the binary was successfully loaded.
+     *                     Set to CL_INVALID_VALUE if the length is 0 or the binary pointer is NULL.
+     *                     Set to CL_INVALID_BINARY if the binary provided is not valid for the matching device.
+     * \param err if non-NULL will be set to CL_SUCCESS on successful operation or one of the following errors:
+     *            CL_INVALID_CONTEXT if context is not a valid context.
+     *            CL_INVALID_VALUE if the length of devices is zero; or if the length of binaries does not match the length of devices;
+     *            or if any entry in binaries is NULL or has length 0.
+     *            CL_INVALID_DEVICE if OpenCL devices listed in devices are not in the list of devices associated with context.
+     *            CL_INVALID_BINARY if an invalid program binary was encountered for any device. binaryStatus will return specific status for each device.
+     *            CL_OUT_OF_HOST_MEMORY if there is a failure to allocate resources required by the OpenCL implementation on the host.
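+     *
+     * Sketch of intended use (assumes \c ctx, a device \c dev, and a
+     * previously saved binary held in \c blob, a vector of unsigned char):
+     * \code
+     * cl::Program::Binaries bins{ blob };
+     * vector<cl_int> status;
+     * cl_int err = CL_SUCCESS;
+     * cl::Program prog(ctx, {dev}, bins, &status, &err);
+     * \endcode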
+     */
+    Program(
+        const Context& context,
+        const vector<Device>& devices,
+        const Binaries& binaries,
+        vector<cl_int>* binaryStatus = NULL,
+        cl_int* err = NULL)
+    {
+        cl_int error;
+
+        const size_type numDevices = devices.size();
+
+        // Catch size mismatch early and return
+        if(binaries.size() != numDevices) {
+            error = CL_INVALID_VALUE;
+            detail::errHandler(error, __CREATE_PROGRAM_WITH_BINARY_ERR);
+            if (err != NULL) {
+                *err = error;
+            }
+            return;
+        }
+
+
+        vector<size_type> lengths(numDevices);
+        vector<const unsigned char*> images(numDevices);
+#if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
+        for (size_type i = 0; i < numDevices; ++i) {
+            images[i] = binaries[i].data();
+            lengths[i] = binaries[(int)i].size();
+        }
+#else // #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
+        for (size_type i = 0; i < numDevices; ++i) {
+            images[i] = (const unsigned char*)binaries[i].first;
+            lengths[i] = binaries[(int)i].second;
+        }
+#endif // #if !defined(CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY)
+
+        vector<cl_device_id> deviceIDs(numDevices);
+        for( size_type deviceIndex = 0; deviceIndex < numDevices; ++deviceIndex ) {
+            deviceIDs[deviceIndex] = (devices[deviceIndex])();
+        }
+
+        if(binaryStatus) {
+            binaryStatus->resize(numDevices);
+        }
+
+        object_ = ::clCreateProgramWithBinary(
+            context(), (cl_uint) devices.size(),
+            deviceIDs.data(),
+            lengths.data(), images.data(), (binaryStatus != NULL && numDevices > 0)
+               ? &binaryStatus->front()
+               : NULL, &error);
+
+        detail::errHandler(error, __CREATE_PROGRAM_WITH_BINARY_ERR);
+        if (err != NULL) {
+            *err = error;
+        }
+    }
+
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 120
+    /**
+     * Create program using builtin kernels.
+     * \param kernelNames Semi-colon separated list of builtin kernel names
+     */
+    Program(
+        const Context& context,
+        const vector<Device>& devices,
+        const string& kernelNames,
+        cl_int* err = NULL)
+    {
+        cl_int error;
+
+
+        size_type numDevices = devices.size();
+        vector<cl_device_id> deviceIDs(numDevices);
+        for( size_type deviceIndex = 0; deviceIndex < numDevices; ++deviceIndex ) {
+            deviceIDs[deviceIndex] = (devices[deviceIndex])();
+        }
+
+        object_ = ::clCreateProgramWithBuiltInKernels(
+            context(),
+            (cl_uint) devices.size(),
+            deviceIDs.data(),
+            kernelNames.c_str(),
+            &error);
+
+        detail::errHandler(error, __CREATE_PROGRAM_WITH_BUILT_IN_KERNELS_ERR);
+        if (err != NULL) {
+            *err = error;
+        }
+    }
+#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
+
+    Program() { }
+
+
+    /*! \brief Constructor from cl_program - takes ownership.
+     *
+     * \param retainObject will cause the constructor to retain its cl object.
+     *                     Defaults to false to maintain compatibility with
+     *                     earlier versions.
+     */
+    explicit Program(const cl_program& program, bool retainObject = false) :
+        detail::Wrapper<cl_type>(program, retainObject) { }
+
+    Program& operator = (const cl_program& rhs)
+    {
+        detail::Wrapper<cl_type>::operator=(rhs);
+        return *this;
+    }
+
+    /*! \brief Copy constructor to forward copy to the superclass correctly.
+     * Required for MSVC.
+     */
+    Program(const Program& program) : detail::Wrapper<cl_type>(program) {}
+
+    /*! \brief Copy assignment to forward copy to the superclass correctly.
+     * Required for MSVC.
+     */
+    Program& operator = (const Program &program)
+    {
+        detail::Wrapper<cl_type>::operator=(program);
+        return *this;
+    }
+
+    /*! \brief Move constructor to forward move to the superclass correctly.
+     * Required for MSVC.
+     */
+    Program(Program&& program) CL_HPP_NOEXCEPT_ : detail::Wrapper<cl_type>(std::move(program)) {}
+
+    /*! \brief Move assignment to forward move to the superclass correctly.
+     * Required for MSVC.
+     */
+    Program& operator = (Program &&program)
+    {
+        detail::Wrapper<cl_type>::operator=(std::move(program));
+        return *this;
+    }
+
+    cl_int build(
+        const vector<Device>& devices,
+        const char* options = NULL,
+        void (CL_CALLBACK * notifyFptr)(cl_program, void *) = NULL,
+        void* data = NULL) const
+    {
+        size_type numDevices = devices.size();
+        vector<cl_device_id> deviceIDs(numDevices);
+
+        for( size_type deviceIndex = 0; deviceIndex < numDevices; ++deviceIndex ) {
+            deviceIDs[deviceIndex] = (devices[deviceIndex])();
+        }
+
+        cl_int buildError = ::clBuildProgram(
+            object_,
+            (cl_uint)
+            devices.size(),
+            deviceIDs.data(),
+            options,
+            notifyFptr,
+            data);
+
+        return detail::buildErrHandler(buildError, __BUILD_PROGRAM_ERR, getBuildInfo<CL_PROGRAM_BUILD_LOG>());
+    }
+
+    cl_int build(
+        const char* options = NULL,
+        void (CL_CALLBACK * notifyFptr)(cl_program, void *) = NULL,
+        void* data = NULL) const
+    {
+        cl_int buildError = ::clBuildProgram(
+            object_,
+            0,
+            NULL,
+            options,
+            notifyFptr,
+            data);
+
+
+        return detail::buildErrHandler(buildError, __BUILD_PROGRAM_ERR, getBuildInfo<CL_PROGRAM_BUILD_LOG>());
+    }
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 120
+    cl_int compile(
+        const char* options = NULL,
+        void (CL_CALLBACK * notifyFptr)(cl_program, void *) = NULL,
+        void* data = NULL) const
+    {
+        cl_int error = ::clCompileProgram(
+            object_,
+            0,
+            NULL,
+            options,
+            0,
+            NULL,
+            NULL,
+            notifyFptr,
+            data);
+        return detail::buildErrHandler(error, __COMPILE_PROGRAM_ERR, getBuildInfo<CL_PROGRAM_BUILD_LOG>());
+    }
+#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
+
+    template <typename T>
+    cl_int getInfo(cl_program_info name, T* param) const
+    {
+        return detail::errHandler(
+            detail::getInfo(&::clGetProgramInfo, object_, name, param),
+            __GET_PROGRAM_INFO_ERR);
+    }
+
+    template <cl_int name> typename
+    detail::param_traits<detail::cl_program_info, name>::param_type
+    getInfo(cl_int* err = NULL) const
+    {
+        typename detail::param_traits<
+            detail::cl_program_info, name>::param_type param;
+        cl_int result = getInfo(name, &param);
+        if (err != NULL) {
+            *err = result;
+        }
+        return param;
+    }
+
+    template <typename T>
+    cl_int getBuildInfo(
+        const Device& device, cl_program_build_info name, T* param) const
+    {
+        return detail::errHandler(
+            detail::getInfo(
+                &::clGetProgramBuildInfo, object_, device(), name, param),
+                __GET_PROGRAM_BUILD_INFO_ERR);
+    }
+
+    template <cl_int name> typename
+    detail::param_traits<detail::cl_program_build_info, name>::param_type
+    getBuildInfo(const Device& device, cl_int* err = NULL) const
+    {
+        typename detail::param_traits<
+            detail::cl_program_build_info, name>::param_type param;
+        cl_int result = getBuildInfo(device, name, &param);
+        if (err != NULL) {
+            *err = result;
+        }
+        return param;
+    }
+
+    /**
+     * Build info function that returns a vector of device/info pairs for the specified
+     * info type and for all devices in the program.
+     * On an error reading the info for any device, an empty vector of info will be returned.
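+     *
+     * For example, to collect the build log for every device (sketch):
+     * \code
+     * cl_int err = CL_SUCCESS;
+     * auto logs = prog.getBuildInfo<CL_PROGRAM_BUILD_LOG>(&err);
+     * for (const auto &devLog : logs) {
+     *     // devLog.first is the cl::Device, devLog.second the log string
+     * }
+     * \endcode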
+     */
+    template <cl_program_build_info name>
+    vector<std::pair<cl::Device, typename detail::param_traits<detail::cl_program_build_info, name>::param_type>>
+    getBuildInfo(cl_int *err = NULL) const
+    {
+        cl_int result = CL_SUCCESS;
+
+        auto devs = getInfo<CL_PROGRAM_DEVICES>(&result);
+        vector<std::pair<cl::Device, typename detail::param_traits<detail::cl_program_build_info, name>::param_type>>
+            devInfo;
+
+        // If there was an initial error from getInfo return the error
+        if (result != CL_SUCCESS) {
+            if (err != NULL) {
+                *err = result;
+            }
+            return devInfo;
+        }
+
+        for (cl::Device d : devs) {
+            typename detail::param_traits<
+                detail::cl_program_build_info, name>::param_type param;
+            result = getBuildInfo(d, name, &param);
+            devInfo.push_back(
+                std::pair<cl::Device, typename detail::param_traits<detail::cl_program_build_info, name>::param_type>
+                (d, param));
+            if (result != CL_SUCCESS) {
+                // On error, leave the loop and return the error code
+                break;
+            }
+        }
+        if (err != NULL) {
+            *err = result;
+        }
+        if (result != CL_SUCCESS) {
+            devInfo.clear();
+        }
+        return devInfo;
+    }
+
+    cl_int createKernels(vector<Kernel>* kernels)
+    {
+        cl_uint numKernels;
+        cl_int err = ::clCreateKernelsInProgram(object_, 0, NULL, &numKernels);
+        if (err != CL_SUCCESS) {
+            return detail::errHandler(err, __CREATE_KERNELS_IN_PROGRAM_ERR);
+        }
+
+        vector<cl_kernel> value(numKernels);
+
+        err = ::clCreateKernelsInProgram(
+            object_, numKernels, value.data(), NULL);
+        if (err != CL_SUCCESS) {
+            return detail::errHandler(err, __CREATE_KERNELS_IN_PROGRAM_ERR);
+        }
+
+        if (kernels) {
+            kernels->resize(value.size());
+
+            // Assign to param, constructing with retain behaviour
+            // to correctly capture each underlying CL object
+            for (size_type i = 0; i < value.size(); i++) {
+                // We do not need to retain because this kernel is being created
+                // by the runtime
+                (*kernels)[i] = Kernel(value[i], false);
+            }
+        }
+        return CL_SUCCESS;
+    }
+};
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 120
+inline Program linkProgram(
+    Program input1,
+    Program input2,
+    const char* options = NULL,
+    void (CL_CALLBACK * notifyFptr)(cl_program, void *) = NULL,
+    void* data = NULL,
+    cl_int* err = NULL)
+{
+    cl_int error_local = CL_SUCCESS;
+
+    cl_program programs[2] = { input1(), input2() };
+
+    Context ctx = input1.getInfo<CL_PROGRAM_CONTEXT>(&error_local);
+    if(error_local!=CL_SUCCESS) {
+        detail::errHandler(error_local, __LINK_PROGRAM_ERR);
+    }
+
+    cl_program prog = ::clLinkProgram(
+        ctx(),
+        0,
+        NULL,
+        options,
+        2,
+        programs,
+        notifyFptr,
+        data,
+        &error_local);
+
+    detail::errHandler(error_local,__COMPILE_PROGRAM_ERR);
+    if (err != NULL) {
+        *err = error_local;
+    }
+
+    return Program(prog);
+}
+
+inline Program linkProgram(
+    vector<Program> inputPrograms,
+    const char* options = NULL,
+    void (CL_CALLBACK * notifyFptr)(cl_program, void *) = NULL,
+    void* data = NULL,
+    cl_int* err = NULL)
+{
+    cl_int error_local = CL_SUCCESS;
+
+    vector<cl_program> programs(inputPrograms.size());
+
+    for (unsigned int i = 0; i < inputPrograms.size(); i++) {
+        programs[i] = inputPrograms[i]();
+    }
+
+    Context ctx;
+    if(inputPrograms.size() > 0) {
+        ctx = inputPrograms[0].getInfo<CL_PROGRAM_CONTEXT>(&error_local);
+        if(error_local!=CL_SUCCESS) {
+            detail::errHandler(error_local, __LINK_PROGRAM_ERR);
+        }
+    }
+    cl_program prog = ::clLinkProgram(
+        ctx(),
+        0,
+        NULL,
+        options,
+        (cl_uint)inputPrograms.size(),
+        programs.data(),
+        notifyFptr,
+        data,
+        &error_local);
+
+    detail::errHandler(error_local,__COMPILE_PROGRAM_ERR);
+    if (err != NULL) {
+        *err = error_local;
+    }
+
+    return Program(prog, false);
+}
+#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120
+
+// Template specialization for CL_PROGRAM_BINARIES
+template <>
+inline cl_int cl::Program::getInfo(cl_program_info name, vector<vector<unsigned char>>* param) const
+{
+    if (name != CL_PROGRAM_BINARIES) {
+        return CL_INVALID_VALUE;
+    }
+    if (param) {
+        // Resize the parameter array appropriately for each allocation
+        // and pass down to the helper
+
+        vector<size_type> sizes = getInfo<CL_PROGRAM_BINARY_SIZES>();
+        size_type numBinaries = sizes.size();
+
+        // Resize the parameter array and constituent arrays
+        param->resize(numBinaries);
+        for (size_type i = 0; i < numBinaries; ++i) {
+            (*param)[i].resize(sizes[i]);
+        }
+
+        return detail::errHandler(
+            detail::getInfo(&::clGetProgramInfo, object_, name, param),
+            __GET_PROGRAM_INFO_ERR);
+    }
+
+    return CL_SUCCESS;
+}
+
+template<>
+inline vector<vector<unsigned char>> cl::Program::getInfo<CL_PROGRAM_BINARIES>(cl_int* err) const
+{
+    vector<vector<unsigned char>> binariesVectors;
+
+    cl_int result = getInfo(CL_PROGRAM_BINARIES, &binariesVectors);
+    if (err != NULL) {
+        *err = result;
+    }
+    return binariesVectors;
+}
+
+inline Kernel::Kernel(const Program& program, const char* name, cl_int* err)
+{
+    cl_int error;
+
+    object_ = ::clCreateKernel(program(), name, &error);
+    detail::errHandler(error, __CREATE_KERNEL_ERR);
+
+    if (err != NULL) {
+        *err = error;
+    }
+
+}
+
+enum class QueueProperties : cl_command_queue_properties
+{
+    None = 0,
+    Profiling = CL_QUEUE_PROFILING_ENABLE,
+    OutOfOrder = CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE,
+};
+
+inline QueueProperties operator|(QueueProperties lhs, QueueProperties rhs)
+{
+    return static_cast<QueueProperties>(static_cast<cl_command_queue_properties>(lhs) | static_cast<cl_command_queue_properties>(rhs));
+}
+
+/*! \class CommandQueue
+ * \brief CommandQueue interface for cl_command_queue.
+ */
+class CommandQueue : public detail::Wrapper<cl_command_queue>
+{
+private:
+    static std::once_flag default_initialized_;
+    static CommandQueue default_;
+    static cl_int default_error_;
+
+    /*! \brief Create the default command queue returned by @ref getDefault.
+     *
+     * It sets default_error_ to indicate success or failure. It does not throw
+     * @c cl::Error.
+     */
+    static void makeDefault()
+    {
+        /* We don't want to throw an error from this function, so we have to
+         * catch and set the error flag.
+         */
+#if defined(CL_HPP_ENABLE_EXCEPTIONS)
+        try
+#endif
+        {
+            int error;
+            Context context = Context::getDefault(&error);
+
+            if (error != CL_SUCCESS) {
+                default_error_ = error;
+            }
+            else {
+                Device device = Device::getDefault();
+                default_ = CommandQueue(context, device, 0, &default_error_);
+            }
+        }
+#if defined(CL_HPP_ENABLE_EXCEPTIONS)
+        catch (cl::Error &e) {
+            default_error_ = e.err();
+        }
+#endif
+    }
+
+    /*! \brief Create the default command queue.
+     *
+     * This sets @c default_. It does not throw
+     * @c cl::Error.
+     */
+    static void makeDefaultProvided(const CommandQueue &c) {
+        default_ = c;
+    }
+
+public:
+#ifdef CL_HPP_UNIT_TEST_ENABLE
+    /*! \brief Reset the default.
+     *
+     * This sets @c default_ to an empty value to support cleanup in
+     * the unit test framework.
+     * This function is not thread safe.
+     */
+    static void unitTestClearDefault() {
+        default_ = CommandQueue();
+    }
+#endif // #ifdef CL_HPP_UNIT_TEST_ENABLE
+
+
+    /*!
+     * \brief Constructs a CommandQueue based on passed properties.
+     * Will return a CL_INVALID_QUEUE_PROPERTIES error if CL_QUEUE_ON_DEVICE is specified.
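+     *
+     * Example (sketch): create a profiling-enabled queue on the default
+     * context and device:
+     * \code
+     * cl_int err = CL_SUCCESS;
+     * cl::CommandQueue q(CL_QUEUE_PROFILING_ENABLE, &err);
+     * \endcode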
+ */ + CommandQueue( + cl_command_queue_properties properties, + cl_int* err = NULL) + { + cl_int error; + + Context context = Context::getDefault(&error); + detail::errHandler(error, __CREATE_CONTEXT_ERR); + + if (error != CL_SUCCESS) { + if (err != NULL) { + *err = error; + } + } + else { + Device device = context.getInfo()[0]; + +#if CL_HPP_TARGET_OPENCL_VERSION >= 200 + cl_queue_properties queue_properties[] = { + CL_QUEUE_PROPERTIES, properties, 0 }; + if ((properties & CL_QUEUE_ON_DEVICE) == 0) { + object_ = ::clCreateCommandQueueWithProperties( + context(), device(), queue_properties, &error); + } + else { + error = CL_INVALID_QUEUE_PROPERTIES; + } + + detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR); + if (err != NULL) { + *err = error; + } +#else + object_ = ::clCreateCommandQueue( + context(), device(), properties, &error); + + detail::errHandler(error, __CREATE_COMMAND_QUEUE_ERR); + if (err != NULL) { + *err = error; + } +#endif + } + } + + /*! + * \brief Constructs a CommandQueue based on passed properties. + * Will return an CL_INVALID_QUEUE_PROPERTIES error if CL_QUEUE_ON_DEVICE is specified. + */ + CommandQueue( + QueueProperties properties, + cl_int* err = NULL) + { + cl_int error; + + Context context = Context::getDefault(&error); + detail::errHandler(error, __CREATE_CONTEXT_ERR); + + if (error != CL_SUCCESS) { + if (err != NULL) { + *err = error; + } + } + else { + Device device = context.getInfo()[0]; + +#if CL_HPP_TARGET_OPENCL_VERSION >= 200 + cl_queue_properties queue_properties[] = { + CL_QUEUE_PROPERTIES, static_cast(properties), 0 }; + + object_ = ::clCreateCommandQueueWithProperties( + context(), device(), queue_properties, &error); + + + detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR); + if (err != NULL) { + *err = error; + } +#else + object_ = ::clCreateCommandQueue( + context(), device(), static_cast(properties), &error); + + detail::errHandler(error, __CREATE_COMMAND_QUEUE_ERR); + if (err != NULL) { + *err = error; + } +#endif + } + } + + /*! + * \brief Constructs a CommandQueue for an implementation defined device in the given context + * Will return an CL_INVALID_QUEUE_PROPERTIES error if CL_QUEUE_ON_DEVICE is specified. + */ + explicit CommandQueue( + const Context& context, + cl_command_queue_properties properties = 0, + cl_int* err = NULL) + { + cl_int error; + vector devices; + error = context.getInfo(CL_CONTEXT_DEVICES, &devices); + + detail::errHandler(error, __CREATE_CONTEXT_ERR); + + if (error != CL_SUCCESS) + { + if (err != NULL) { + *err = error; + } + return; + } + +#if CL_HPP_TARGET_OPENCL_VERSION >= 200 + cl_queue_properties queue_properties[] = { + CL_QUEUE_PROPERTIES, properties, 0 }; + if ((properties & CL_QUEUE_ON_DEVICE) == 0) { + object_ = ::clCreateCommandQueueWithProperties( + context(), devices[0](), queue_properties, &error); + } + else { + error = CL_INVALID_QUEUE_PROPERTIES; + } + + detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR); + if (err != NULL) { + *err = error; + } +#else + object_ = ::clCreateCommandQueue( + context(), devices[0](), properties, &error); + + detail::errHandler(error, __CREATE_COMMAND_QUEUE_ERR); + if (err != NULL) { + *err = error; + } +#endif + + } + + /*! + * \brief Constructs a CommandQueue for an implementation defined device in the given context + * Will return an CL_INVALID_QUEUE_PROPERTIES error if CL_QUEUE_ON_DEVICE is specified. 
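+     *
+     * For example (a sketch; assumes a valid context is created first):
+     * \code
+     *     cl::Context context(CL_DEVICE_TYPE_DEFAULT);
+     *     cl::CommandQueue queue(context, cl::QueueProperties::OutOfOrder);
+     * \endcode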
+ */ + explicit CommandQueue( + const Context& context, + QueueProperties properties, + cl_int* err = NULL) + { + cl_int error; + vector devices; + error = context.getInfo(CL_CONTEXT_DEVICES, &devices); + + detail::errHandler(error, __CREATE_CONTEXT_ERR); + + if (error != CL_SUCCESS) + { + if (err != NULL) { + *err = error; + } + return; + } + +#if CL_HPP_TARGET_OPENCL_VERSION >= 200 + cl_queue_properties queue_properties[] = { + CL_QUEUE_PROPERTIES, static_cast(properties), 0 }; + object_ = ::clCreateCommandQueueWithProperties( + context(), devices[0](), queue_properties, &error); + + detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR); + if (err != NULL) { + *err = error; + } +#else + object_ = ::clCreateCommandQueue( + context(), devices[0](), static_cast(properties), &error); + + detail::errHandler(error, __CREATE_COMMAND_QUEUE_ERR); + if (err != NULL) { + *err = error; + } +#endif + + } + + /*! + * \brief Constructs a CommandQueue for a passed device and context + * Will return an CL_INVALID_QUEUE_PROPERTIES error if CL_QUEUE_ON_DEVICE is specified. + */ + CommandQueue( + const Context& context, + const Device& device, + cl_command_queue_properties properties = 0, + cl_int* err = NULL) + { + cl_int error; + +#if CL_HPP_TARGET_OPENCL_VERSION >= 200 + cl_queue_properties queue_properties[] = { + CL_QUEUE_PROPERTIES, properties, 0 }; + object_ = ::clCreateCommandQueueWithProperties( + context(), device(), queue_properties, &error); + + detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR); + if (err != NULL) { + *err = error; + } +#else + object_ = ::clCreateCommandQueue( + context(), device(), properties, &error); + + detail::errHandler(error, __CREATE_COMMAND_QUEUE_ERR); + if (err != NULL) { + *err = error; + } +#endif + } + + /*! + * \brief Constructs a CommandQueue for a passed device and context + * Will return an CL_INVALID_QUEUE_PROPERTIES error if CL_QUEUE_ON_DEVICE is specified. + */ + CommandQueue( + const Context& context, + const Device& device, + QueueProperties properties, + cl_int* err = NULL) + { + cl_int error; + +#if CL_HPP_TARGET_OPENCL_VERSION >= 200 + cl_queue_properties queue_properties[] = { + CL_QUEUE_PROPERTIES, static_cast(properties), 0 }; + object_ = ::clCreateCommandQueueWithProperties( + context(), device(), queue_properties, &error); + + detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR); + if (err != NULL) { + *err = error; + } +#else + object_ = ::clCreateCommandQueue( + context(), device(), static_cast(properties), &error); + + detail::errHandler(error, __CREATE_COMMAND_QUEUE_ERR); + if (err != NULL) { + *err = error; + } +#endif + } + + static CommandQueue getDefault(cl_int * err = NULL) + { + std::call_once(default_initialized_, makeDefault); +#if CL_HPP_TARGET_OPENCL_VERSION >= 200 + detail::errHandler(default_error_, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR); +#else // CL_HPP_TARGET_OPENCL_VERSION >= 200 + detail::errHandler(default_error_, __CREATE_COMMAND_QUEUE_ERR); +#endif // CL_HPP_TARGET_OPENCL_VERSION >= 200 + if (err != NULL) { + *err = default_error_; + } + return default_; + } + + /** + * Modify the default command queue to be used by + * subsequent operations. + * Will only set the default if no default was previously created. + * @return updated default command queue. + * Should be compared to the passed value to ensure that it was updated. 
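+     *
+     * A usage sketch (illustrative; `context` and `device` are assumed to
+     * exist):
+     * \code
+     *     cl::CommandQueue queue(context, device);
+     *     cl::CommandQueue actual = cl::CommandQueue::setDefault(queue);
+     *     if (actual() != queue()) {
+     *         // another default had already been installed
+     *     }
+     * \endcode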
+ */ + static CommandQueue setDefault(const CommandQueue &default_queue) + { + std::call_once(default_initialized_, makeDefaultProvided, std::cref(default_queue)); + detail::errHandler(default_error_); + return default_; + } + + CommandQueue() { } + + + /*! \brief Constructor from cl_mem - takes ownership. + * + * \param retainObject will cause the constructor to retain its cl object. + * Defaults to false to maintain compatibility with + * earlier versions. + */ + explicit CommandQueue(const cl_command_queue& commandQueue, bool retainObject = false) : + detail::Wrapper(commandQueue, retainObject) { } + + CommandQueue& operator = (const cl_command_queue& rhs) + { + detail::Wrapper::operator=(rhs); + return *this; + } + + /*! \brief Copy constructor to forward copy to the superclass correctly. + * Required for MSVC. + */ + CommandQueue(const CommandQueue& queue) : detail::Wrapper(queue) {} + + /*! \brief Copy assignment to forward copy to the superclass correctly. + * Required for MSVC. + */ + CommandQueue& operator = (const CommandQueue &queue) + { + detail::Wrapper::operator=(queue); + return *this; + } + + /*! \brief Move constructor to forward move to the superclass correctly. + * Required for MSVC. + */ + CommandQueue(CommandQueue&& queue) CL_HPP_NOEXCEPT_ : detail::Wrapper(std::move(queue)) {} + + /*! \brief Move assignment to forward move to the superclass correctly. + * Required for MSVC. + */ + CommandQueue& operator = (CommandQueue &&queue) + { + detail::Wrapper::operator=(std::move(queue)); + return *this; + } + + template + cl_int getInfo(cl_command_queue_info name, T* param) const + { + return detail::errHandler( + detail::getInfo( + &::clGetCommandQueueInfo, object_, name, param), + __GET_COMMAND_QUEUE_INFO_ERR); + } + + template typename + detail::param_traits::param_type + getInfo(cl_int* err = NULL) const + { + typename detail::param_traits< + detail::cl_command_queue_info, name>::param_type param; + cl_int result = getInfo(name, ¶m); + if (err != NULL) { + *err = result; + } + return param; + } + + cl_int enqueueReadBuffer( + const Buffer& buffer, + cl_bool blocking, + size_type offset, + size_type size, + void* ptr, + const vector* events = NULL, + Event* event = NULL) const + { + cl_event tmp; + cl_int err = detail::errHandler( + ::clEnqueueReadBuffer( + object_, buffer(), blocking, offset, size, + ptr, + (events != NULL) ? (cl_uint) events->size() : 0, + (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL, + (event != NULL) ? &tmp : NULL), + __ENQUEUE_READ_BUFFER_ERR); + + if (event != NULL && err == CL_SUCCESS) + *event = tmp; + + return err; + } + + cl_int enqueueWriteBuffer( + const Buffer& buffer, + cl_bool blocking, + size_type offset, + size_type size, + const void* ptr, + const vector* events = NULL, + Event* event = NULL) const + { + cl_event tmp; + cl_int err = detail::errHandler( + ::clEnqueueWriteBuffer( + object_, buffer(), blocking, offset, size, + ptr, + (events != NULL) ? (cl_uint) events->size() : 0, + (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL, + (event != NULL) ? 
&tmp : NULL), + __ENQUEUE_WRITE_BUFFER_ERR); + + if (event != NULL && err == CL_SUCCESS) + *event = tmp; + + return err; + } + + cl_int enqueueCopyBuffer( + const Buffer& src, + const Buffer& dst, + size_type src_offset, + size_type dst_offset, + size_type size, + const vector* events = NULL, + Event* event = NULL) const + { + cl_event tmp; + cl_int err = detail::errHandler( + ::clEnqueueCopyBuffer( + object_, src(), dst(), src_offset, dst_offset, size, + (events != NULL) ? (cl_uint) events->size() : 0, + (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL, + (event != NULL) ? &tmp : NULL), + __ENQEUE_COPY_BUFFER_ERR); + + if (event != NULL && err == CL_SUCCESS) + *event = tmp; + + return err; + } + + cl_int enqueueReadBufferRect( + const Buffer& buffer, + cl_bool blocking, + const array& buffer_offset, + const array& host_offset, + const array& region, + size_type buffer_row_pitch, + size_type buffer_slice_pitch, + size_type host_row_pitch, + size_type host_slice_pitch, + void *ptr, + const vector* events = NULL, + Event* event = NULL) const + { + cl_event tmp; + cl_int err = detail::errHandler( + ::clEnqueueReadBufferRect( + object_, + buffer(), + blocking, + buffer_offset.data(), + host_offset.data(), + region.data(), + buffer_row_pitch, + buffer_slice_pitch, + host_row_pitch, + host_slice_pitch, + ptr, + (events != NULL) ? (cl_uint) events->size() : 0, + (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL, + (event != NULL) ? &tmp : NULL), + __ENQUEUE_READ_BUFFER_RECT_ERR); + + if (event != NULL && err == CL_SUCCESS) + *event = tmp; + + return err; + } + + cl_int enqueueWriteBufferRect( + const Buffer& buffer, + cl_bool blocking, + const array& buffer_offset, + const array& host_offset, + const array& region, + size_type buffer_row_pitch, + size_type buffer_slice_pitch, + size_type host_row_pitch, + size_type host_slice_pitch, + const void *ptr, + const vector* events = NULL, + Event* event = NULL) const + { + cl_event tmp; + cl_int err = detail::errHandler( + ::clEnqueueWriteBufferRect( + object_, + buffer(), + blocking, + buffer_offset.data(), + host_offset.data(), + region.data(), + buffer_row_pitch, + buffer_slice_pitch, + host_row_pitch, + host_slice_pitch, + ptr, + (events != NULL) ? (cl_uint) events->size() : 0, + (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL, + (event != NULL) ? &tmp : NULL), + __ENQUEUE_WRITE_BUFFER_RECT_ERR); + + if (event != NULL && err == CL_SUCCESS) + *event = tmp; + + return err; + } + + cl_int enqueueCopyBufferRect( + const Buffer& src, + const Buffer& dst, + const array& src_origin, + const array& dst_origin, + const array& region, + size_type src_row_pitch, + size_type src_slice_pitch, + size_type dst_row_pitch, + size_type dst_slice_pitch, + const vector* events = NULL, + Event* event = NULL) const + { + cl_event tmp; + cl_int err = detail::errHandler( + ::clEnqueueCopyBufferRect( + object_, + src(), + dst(), + src_origin.data(), + dst_origin.data(), + region.data(), + src_row_pitch, + src_slice_pitch, + dst_row_pitch, + dst_slice_pitch, + (events != NULL) ? (cl_uint) events->size() : 0, + (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL, + (event != NULL) ? &tmp : NULL), + __ENQEUE_COPY_BUFFER_RECT_ERR); + + if (event != NULL && err == CL_SUCCESS) + *event = tmp; + + return err; + } + +#if CL_HPP_TARGET_OPENCL_VERSION >= 120 + /** + * Enqueue a command to fill a buffer object with a pattern + * of a given size. 
The pattern is specified as a vector type. + * \tparam PatternType The datatype of the pattern field. + * The pattern type must be an accepted OpenCL data type. + * \tparam offset Is the offset in bytes into the buffer at + * which to start filling. This must be a multiple of + * the pattern size. + * \tparam size Is the size in bytes of the region to fill. + * This must be a multiple of the pattern size. + */ + template + cl_int enqueueFillBuffer( + const Buffer& buffer, + PatternType pattern, + size_type offset, + size_type size, + const vector* events = NULL, + Event* event = NULL) const + { + cl_event tmp; + cl_int err = detail::errHandler( + ::clEnqueueFillBuffer( + object_, + buffer(), + static_cast(&pattern), + sizeof(PatternType), + offset, + size, + (events != NULL) ? (cl_uint) events->size() : 0, + (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL, + (event != NULL) ? &tmp : NULL), + __ENQUEUE_FILL_BUFFER_ERR); + + if (event != NULL && err == CL_SUCCESS) + *event = tmp; + + return err; + } +#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120 + + cl_int enqueueReadImage( + const Image& image, + cl_bool blocking, + const array& origin, + const array& region, + size_type row_pitch, + size_type slice_pitch, + void* ptr, + const vector* events = NULL, + Event* event = NULL) const + { + cl_event tmp; + cl_int err = detail::errHandler( + ::clEnqueueReadImage( + object_, + image(), + blocking, + origin.data(), + region.data(), + row_pitch, + slice_pitch, + ptr, + (events != NULL) ? (cl_uint) events->size() : 0, + (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL, + (event != NULL) ? &tmp : NULL), + __ENQUEUE_READ_IMAGE_ERR); + + if (event != NULL && err == CL_SUCCESS) + *event = tmp; + + return err; + } + + cl_int enqueueWriteImage( + const Image& image, + cl_bool blocking, + const array& origin, + const array& region, + size_type row_pitch, + size_type slice_pitch, + const void* ptr, + const vector* events = NULL, + Event* event = NULL) const + { + cl_event tmp; + cl_int err = detail::errHandler( + ::clEnqueueWriteImage( + object_, + image(), + blocking, + origin.data(), + region.data(), + row_pitch, + slice_pitch, + ptr, + (events != NULL) ? (cl_uint) events->size() : 0, + (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL, + (event != NULL) ? &tmp : NULL), + __ENQUEUE_WRITE_IMAGE_ERR); + + if (event != NULL && err == CL_SUCCESS) + *event = tmp; + + return err; + } + + cl_int enqueueCopyImage( + const Image& src, + const Image& dst, + const array& src_origin, + const array& dst_origin, + const array& region, + const vector* events = NULL, + Event* event = NULL) const + { + cl_event tmp; + cl_int err = detail::errHandler( + ::clEnqueueCopyImage( + object_, + src(), + dst(), + src_origin.data(), + dst_origin.data(), + region.data(), + (events != NULL) ? (cl_uint) events->size() : 0, + (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL, + (event != NULL) ? &tmp : NULL), + __ENQUEUE_COPY_IMAGE_ERR); + + if (event != NULL && err == CL_SUCCESS) + *event = tmp; + + return err; + } + +#if CL_HPP_TARGET_OPENCL_VERSION >= 120 + /** + * Enqueue a command to fill an image object with a specified color. + * \param fillColor is the color to use to fill the image. + * This is a four component RGBA floating-point color value if + * the image channel data type is not an unnormalized signed or + * unsigned data type. 
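+     *
+     * For instance, clearing a 640x480 RGBA float image to opaque red
+     * (a sketch; `queue` and `image` are assumed to exist):
+     * \code
+     *     cl_float4 red = {{1.0f, 0.0f, 0.0f, 1.0f}};
+     *     queue.enqueueFillImage(image, red, {0, 0, 0}, {640, 480, 1});
+     * \endcode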
+ */ + cl_int enqueueFillImage( + const Image& image, + cl_float4 fillColor, + const array& origin, + const array& region, + const vector* events = NULL, + Event* event = NULL) const + { + cl_event tmp; + cl_int err = detail::errHandler( + ::clEnqueueFillImage( + object_, + image(), + static_cast(&fillColor), + origin.data(), + region.data(), + (events != NULL) ? (cl_uint) events->size() : 0, + (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL, + (event != NULL) ? &tmp : NULL), + __ENQUEUE_FILL_IMAGE_ERR); + + if (event != NULL && err == CL_SUCCESS) + *event = tmp; + + return err; + } + + /** + * Enqueue a command to fill an image object with a specified color. + * \param fillColor is the color to use to fill the image. + * This is a four component RGBA signed integer color value if + * the image channel data type is an unnormalized signed integer + * type. + */ + cl_int enqueueFillImage( + const Image& image, + cl_int4 fillColor, + const array& origin, + const array& region, + const vector* events = NULL, + Event* event = NULL) const + { + cl_event tmp; + cl_int err = detail::errHandler( + ::clEnqueueFillImage( + object_, + image(), + static_cast(&fillColor), + origin.data(), + region.data(), + (events != NULL) ? (cl_uint) events->size() : 0, + (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL, + (event != NULL) ? &tmp : NULL), + __ENQUEUE_FILL_IMAGE_ERR); + + if (event != NULL && err == CL_SUCCESS) + *event = tmp; + + return err; + } + + /** + * Enqueue a command to fill an image object with a specified color. + * \param fillColor is the color to use to fill the image. + * This is a four component RGBA unsigned integer color value if + * the image channel data type is an unnormalized unsigned integer + * type. + */ + cl_int enqueueFillImage( + const Image& image, + cl_uint4 fillColor, + const array& origin, + const array& region, + const vector* events = NULL, + Event* event = NULL) const + { + cl_event tmp; + cl_int err = detail::errHandler( + ::clEnqueueFillImage( + object_, + image(), + static_cast(&fillColor), + origin.data(), + region.data(), + (events != NULL) ? (cl_uint) events->size() : 0, + (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL, + (event != NULL) ? &tmp : NULL), + __ENQUEUE_FILL_IMAGE_ERR); + + if (event != NULL && err == CL_SUCCESS) + *event = tmp; + + return err; + } +#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120 + + cl_int enqueueCopyImageToBuffer( + const Image& src, + const Buffer& dst, + const array& src_origin, + const array& region, + size_type dst_offset, + const vector* events = NULL, + Event* event = NULL) const + { + cl_event tmp; + cl_int err = detail::errHandler( + ::clEnqueueCopyImageToBuffer( + object_, + src(), + dst(), + src_origin.data(), + region.data(), + dst_offset, + (events != NULL) ? (cl_uint) events->size() : 0, + (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL, + (event != NULL) ? &tmp : NULL), + __ENQUEUE_COPY_IMAGE_TO_BUFFER_ERR); + + if (event != NULL && err == CL_SUCCESS) + *event = tmp; + + return err; + } + + cl_int enqueueCopyBufferToImage( + const Buffer& src, + const Image& dst, + size_type src_offset, + const array& dst_origin, + const array& region, + const vector* events = NULL, + Event* event = NULL) const + { + cl_event tmp; + cl_int err = detail::errHandler( + ::clEnqueueCopyBufferToImage( + object_, + src(), + dst(), + src_offset, + dst_origin.data(), + region.data(), + (events != NULL) ? 
(cl_uint) events->size() : 0, + (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL, + (event != NULL) ? &tmp : NULL), + __ENQUEUE_COPY_BUFFER_TO_IMAGE_ERR); + + if (event != NULL && err == CL_SUCCESS) + *event = tmp; + + return err; + } + + void* enqueueMapBuffer( + const Buffer& buffer, + cl_bool blocking, + cl_map_flags flags, + size_type offset, + size_type size, + const vector* events = NULL, + Event* event = NULL, + cl_int* err = NULL) const + { + cl_event tmp; + cl_int error; + void * result = ::clEnqueueMapBuffer( + object_, buffer(), blocking, flags, offset, size, + (events != NULL) ? (cl_uint) events->size() : 0, + (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL, + (event != NULL) ? &tmp : NULL, + &error); + + detail::errHandler(error, __ENQUEUE_MAP_BUFFER_ERR); + if (err != NULL) { + *err = error; + } + if (event != NULL && error == CL_SUCCESS) + *event = tmp; + + return result; + } + + void* enqueueMapImage( + const Image& buffer, + cl_bool blocking, + cl_map_flags flags, + const array& origin, + const array& region, + size_type * row_pitch, + size_type * slice_pitch, + const vector* events = NULL, + Event* event = NULL, + cl_int* err = NULL) const + { + cl_event tmp; + cl_int error; + void * result = ::clEnqueueMapImage( + object_, buffer(), blocking, flags, + origin.data(), + region.data(), + row_pitch, slice_pitch, + (events != NULL) ? (cl_uint) events->size() : 0, + (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL, + (event != NULL) ? &tmp : NULL, + &error); + + detail::errHandler(error, __ENQUEUE_MAP_IMAGE_ERR); + if (err != NULL) { + *err = error; + } + if (event != NULL && error == CL_SUCCESS) + *event = tmp; + return result; + } + +#if CL_HPP_TARGET_OPENCL_VERSION >= 200 + /** + * Enqueues a command that will allow the host to update a region of a coarse-grained SVM buffer. + * This variant takes a raw SVM pointer. + */ + template + cl_int enqueueMapSVM( + T* ptr, + cl_bool blocking, + cl_map_flags flags, + size_type size, + const vector* events = NULL, + Event* event = NULL) const + { + cl_event tmp; + cl_int err = detail::errHandler(::clEnqueueSVMMap( + object_, blocking, flags, static_cast(ptr), size, + (events != NULL) ? (cl_uint)events->size() : 0, + (events != NULL && events->size() > 0) ? (cl_event*)&events->front() : NULL, + (event != NULL) ? &tmp : NULL), + __ENQUEUE_MAP_BUFFER_ERR); + + if (event != NULL && err == CL_SUCCESS) + *event = tmp; + + return err; + } + + + /** + * Enqueues a command that will allow the host to update a region of a coarse-grained SVM buffer. + * This variant takes a cl::pointer instance. + */ + template + cl_int enqueueMapSVM( + cl::pointer &ptr, + cl_bool blocking, + cl_map_flags flags, + size_type size, + const vector* events = NULL, + Event* event = NULL) const + { + cl_event tmp; + cl_int err = detail::errHandler(::clEnqueueSVMMap( + object_, blocking, flags, static_cast(ptr.get()), size, + (events != NULL) ? (cl_uint)events->size() : 0, + (events != NULL && events->size() > 0) ? (cl_event*)&events->front() : NULL, + (event != NULL) ? &tmp : NULL), + __ENQUEUE_MAP_BUFFER_ERR); + + if (event != NULL && err == CL_SUCCESS) + *event = tmp; + + return err; + } + + /** + * Enqueues a command that will allow the host to update a region of a coarse-grained SVM buffer. + * This variant takes a cl::vector instance. 
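+     *
+     * A sketch of the map/update/unmap protocol (assumes an SVM-backed
+     * vector built with the cl::SVMAllocator provided elsewhere in this
+     * header):
+     * \code
+     *     cl::vector<int, cl::SVMAllocator<int, cl::SVMTraitCoarse<>>> data(1024);
+     *     queue.enqueueMapSVM(data, CL_TRUE, CL_MAP_WRITE);
+     *     data[0] = 42;               // host may now update the region
+     *     queue.enqueueUnmapSVM(data);
+     * \endcode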
+ */ + template + cl_int enqueueMapSVM( + cl::vector &container, + cl_bool blocking, + cl_map_flags flags, + const vector* events = NULL, + Event* event = NULL) const + { + cl_event tmp; + cl_int err = detail::errHandler(::clEnqueueSVMMap( + object_, blocking, flags, static_cast(container.data()), container.size(), + (events != NULL) ? (cl_uint)events->size() : 0, + (events != NULL && events->size() > 0) ? (cl_event*)&events->front() : NULL, + (event != NULL) ? &tmp : NULL), + __ENQUEUE_MAP_BUFFER_ERR); + + if (event != NULL && err == CL_SUCCESS) + *event = tmp; + + return err; + } +#endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200 + + cl_int enqueueUnmapMemObject( + const Memory& memory, + void* mapped_ptr, + const vector* events = NULL, + Event* event = NULL) const + { + cl_event tmp; + cl_int err = detail::errHandler( + ::clEnqueueUnmapMemObject( + object_, memory(), mapped_ptr, + (events != NULL) ? (cl_uint) events->size() : 0, + (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL, + (event != NULL) ? &tmp : NULL), + __ENQUEUE_UNMAP_MEM_OBJECT_ERR); + + if (event != NULL && err == CL_SUCCESS) + *event = tmp; + + return err; + } + + +#if CL_HPP_TARGET_OPENCL_VERSION >= 200 + /** + * Enqueues a command that will release a coarse-grained SVM buffer back to the OpenCL runtime. + * This variant takes a raw SVM pointer. + */ + template + cl_int enqueueUnmapSVM( + T* ptr, + const vector* events = NULL, + Event* event = NULL) const + { + cl_event tmp; + cl_int err = detail::errHandler( + ::clEnqueueSVMUnmap( + object_, static_cast(ptr), + (events != NULL) ? (cl_uint)events->size() : 0, + (events != NULL && events->size() > 0) ? (cl_event*)&events->front() : NULL, + (event != NULL) ? &tmp : NULL), + __ENQUEUE_UNMAP_MEM_OBJECT_ERR); + + if (event != NULL && err == CL_SUCCESS) + *event = tmp; + + return err; + } + + /** + * Enqueues a command that will release a coarse-grained SVM buffer back to the OpenCL runtime. + * This variant takes a cl::pointer instance. + */ + template + cl_int enqueueUnmapSVM( + cl::pointer &ptr, + const vector* events = NULL, + Event* event = NULL) const + { + cl_event tmp; + cl_int err = detail::errHandler( + ::clEnqueueSVMUnmap( + object_, static_cast(ptr.get()), + (events != NULL) ? (cl_uint)events->size() : 0, + (events != NULL && events->size() > 0) ? (cl_event*)&events->front() : NULL, + (event != NULL) ? &tmp : NULL), + __ENQUEUE_UNMAP_MEM_OBJECT_ERR); + + if (event != NULL && err == CL_SUCCESS) + *event = tmp; + + return err; + } + + /** + * Enqueues a command that will release a coarse-grained SVM buffer back to the OpenCL runtime. + * This variant takes a cl::vector instance. + */ + template + cl_int enqueueUnmapSVM( + cl::vector &container, + const vector* events = NULL, + Event* event = NULL) const + { + cl_event tmp; + cl_int err = detail::errHandler( + ::clEnqueueSVMUnmap( + object_, static_cast(container.data()), + (events != NULL) ? (cl_uint)events->size() : 0, + (events != NULL && events->size() > 0) ? (cl_event*)&events->front() : NULL, + (event != NULL) ? &tmp : NULL), + __ENQUEUE_UNMAP_MEM_OBJECT_ERR); + + if (event != NULL && err == CL_SUCCESS) + *event = tmp; + + return err; + } +#endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200 + +#if CL_HPP_TARGET_OPENCL_VERSION >= 120 + /** + * Enqueues a marker command which waits for either a list of events to complete, + * or all previously enqueued commands to complete. 
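+     *
+     * A sketch (illustrative): wait for everything enqueued so far without
+     * draining the whole queue with finish():
+     * \code
+     *     cl::Event marker;
+     *     queue.enqueueMarkerWithWaitList(NULL, &marker);
+     *     marker.wait();
+     * \endcode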
+ * + * Enqueues a marker command which waits for either a list of events to complete, + * or if the list is empty it waits for all commands previously enqueued in command_queue + * to complete before it completes. This command returns an event which can be waited on, + * i.e. this event can be waited on to insure that all events either in the event_wait_list + * or all previously enqueued commands, queued before this command to command_queue, + * have completed. + */ + cl_int enqueueMarkerWithWaitList( + const vector *events = 0, + Event *event = 0) + { + cl_event tmp; + cl_int err = detail::errHandler( + ::clEnqueueMarkerWithWaitList( + object_, + (events != NULL) ? (cl_uint) events->size() : 0, + (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL, + (event != NULL) ? &tmp : NULL), + __ENQUEUE_MARKER_WAIT_LIST_ERR); + + if (event != NULL && err == CL_SUCCESS) + *event = tmp; + + return err; + } + + /** + * A synchronization point that enqueues a barrier operation. + * + * Enqueues a barrier command which waits for either a list of events to complete, + * or if the list is empty it waits for all commands previously enqueued in command_queue + * to complete before it completes. This command blocks command execution, that is, any + * following commands enqueued after it do not execute until it completes. This command + * returns an event which can be waited on, i.e. this event can be waited on to insure that + * all events either in the event_wait_list or all previously enqueued commands, queued + * before this command to command_queue, have completed. + */ + cl_int enqueueBarrierWithWaitList( + const vector *events = 0, + Event *event = 0) + { + cl_event tmp; + cl_int err = detail::errHandler( + ::clEnqueueBarrierWithWaitList( + object_, + (events != NULL) ? (cl_uint) events->size() : 0, + (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL, + (event != NULL) ? &tmp : NULL), + __ENQUEUE_BARRIER_WAIT_LIST_ERR); + + if (event != NULL && err == CL_SUCCESS) + *event = tmp; + + return err; + } + + /** + * Enqueues a command to indicate with which device a set of memory objects + * should be associated. + */ + cl_int enqueueMigrateMemObjects( + const vector &memObjects, + cl_mem_migration_flags flags, + const vector* events = NULL, + Event* event = NULL + ) + { + cl_event tmp; + + vector localMemObjects(memObjects.size()); + + for( int i = 0; i < (int)memObjects.size(); ++i ) { + localMemObjects[i] = memObjects[i](); + } + + + cl_int err = detail::errHandler( + ::clEnqueueMigrateMemObjects( + object_, + (cl_uint)memObjects.size(), + localMemObjects.data(), + flags, + (events != NULL) ? (cl_uint) events->size() : 0, + (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL, + (event != NULL) ? &tmp : NULL), + __ENQUEUE_UNMAP_MEM_OBJECT_ERR); + + if (event != NULL && err == CL_SUCCESS) + *event = tmp; + + return err; + } +#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120 + + cl_int enqueueNDRangeKernel( + const Kernel& kernel, + const NDRange& offset, + const NDRange& global, + const NDRange& local = NullRange, + const vector* events = NULL, + Event* event = NULL) const + { + cl_event tmp; + cl_int err = detail::errHandler( + ::clEnqueueNDRangeKernel( + object_, kernel(), (cl_uint) global.dimensions(), + offset.dimensions() != 0 ? (const size_type*) offset : NULL, + (const size_type*) global, + local.dimensions() != 0 ? (const size_type*) local : NULL, + (events != NULL) ? 
(cl_uint) events->size() : 0, + (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL, + (event != NULL) ? &tmp : NULL), + __ENQUEUE_NDRANGE_KERNEL_ERR); + + if (event != NULL && err == CL_SUCCESS) + *event = tmp; + + return err; + } + +#if defined(CL_USE_DEPRECATED_OPENCL_1_2_APIS) + CL_EXT_PREFIX__VERSION_1_2_DEPRECATED cl_int enqueueTask( + const Kernel& kernel, + const vector* events = NULL, + Event* event = NULL) const CL_EXT_SUFFIX__VERSION_1_2_DEPRECATED + { + cl_event tmp; + cl_int err = detail::errHandler( + ::clEnqueueTask( + object_, kernel(), + (events != NULL) ? (cl_uint) events->size() : 0, + (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL, + (event != NULL) ? &tmp : NULL), + __ENQUEUE_TASK_ERR); + + if (event != NULL && err == CL_SUCCESS) + *event = tmp; + + return err; + } +#endif // #if defined(CL_USE_DEPRECATED_OPENCL_1_2_APIS) + + cl_int enqueueNativeKernel( + void (CL_CALLBACK *userFptr)(void *), + std::pair args, + const vector* mem_objects = NULL, + const vector* mem_locs = NULL, + const vector* events = NULL, + Event* event = NULL) const + { + size_type elements = 0; + if (mem_objects != NULL) { + elements = mem_objects->size(); + } + vector mems(elements); + for (unsigned int i = 0; i < elements; i++) { + mems[i] = ((*mem_objects)[i])(); + } + + cl_event tmp; + cl_int err = detail::errHandler( + ::clEnqueueNativeKernel( + object_, userFptr, args.first, args.second, + (mem_objects != NULL) ? (cl_uint) mem_objects->size() : 0, + mems.data(), + (mem_locs != NULL && mem_locs->size() > 0) ? (const void **) &mem_locs->front() : NULL, + (events != NULL) ? (cl_uint) events->size() : 0, + (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL, + (event != NULL) ? &tmp : NULL), + __ENQUEUE_NATIVE_KERNEL); + + if (event != NULL && err == CL_SUCCESS) + *event = tmp; + + return err; + } + +/** + * Deprecated APIs for 1.2 + */ +#if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS) + CL_EXT_PREFIX__VERSION_1_1_DEPRECATED + cl_int enqueueMarker(Event* event = NULL) const CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED + { + cl_event tmp; + cl_int err = detail::errHandler( + ::clEnqueueMarker( + object_, + (event != NULL) ? &tmp : NULL), + __ENQUEUE_MARKER_ERR); + + if (event != NULL && err == CL_SUCCESS) + *event = tmp; + + return err; + } + + CL_EXT_PREFIX__VERSION_1_1_DEPRECATED + cl_int enqueueWaitForEvents(const vector& events) const CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED + { + return detail::errHandler( + ::clEnqueueWaitForEvents( + object_, + (cl_uint) events.size(), + events.size() > 0 ? (const cl_event*) &events.front() : NULL), + __ENQUEUE_WAIT_FOR_EVENTS_ERR); + } +#endif // defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS) + + cl_int enqueueAcquireGLObjects( + const vector* mem_objects = NULL, + const vector* events = NULL, + Event* event = NULL) const + { + cl_event tmp; + cl_int err = detail::errHandler( + ::clEnqueueAcquireGLObjects( + object_, + (mem_objects != NULL) ? (cl_uint) mem_objects->size() : 0, + (mem_objects != NULL && mem_objects->size() > 0) ? (const cl_mem *) &mem_objects->front(): NULL, + (events != NULL) ? (cl_uint) events->size() : 0, + (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL, + (event != NULL) ? 
&tmp : NULL), + __ENQUEUE_ACQUIRE_GL_ERR); + + if (event != NULL && err == CL_SUCCESS) + *event = tmp; + + return err; + } + + cl_int enqueueReleaseGLObjects( + const vector* mem_objects = NULL, + const vector* events = NULL, + Event* event = NULL) const + { + cl_event tmp; + cl_int err = detail::errHandler( + ::clEnqueueReleaseGLObjects( + object_, + (mem_objects != NULL) ? (cl_uint) mem_objects->size() : 0, + (mem_objects != NULL && mem_objects->size() > 0) ? (const cl_mem *) &mem_objects->front(): NULL, + (events != NULL) ? (cl_uint) events->size() : 0, + (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL, + (event != NULL) ? &tmp : NULL), + __ENQUEUE_RELEASE_GL_ERR); + + if (event != NULL && err == CL_SUCCESS) + *event = tmp; + + return err; + } + +#if defined (CL_HPP_USE_DX_INTEROP) +typedef CL_API_ENTRY cl_int (CL_API_CALL *PFN_clEnqueueAcquireD3D10ObjectsKHR)( + cl_command_queue command_queue, cl_uint num_objects, + const cl_mem* mem_objects, cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, cl_event* event); +typedef CL_API_ENTRY cl_int (CL_API_CALL *PFN_clEnqueueReleaseD3D10ObjectsKHR)( + cl_command_queue command_queue, cl_uint num_objects, + const cl_mem* mem_objects, cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, cl_event* event); + + cl_int enqueueAcquireD3D10Objects( + const vector* mem_objects = NULL, + const vector* events = NULL, + Event* event = NULL) const + { + static PFN_clEnqueueAcquireD3D10ObjectsKHR pfn_clEnqueueAcquireD3D10ObjectsKHR = NULL; +#if CL_HPP_TARGET_OPENCL_VERSION >= 120 + cl_context context = getInfo(); + cl::Device device(getInfo()); + cl_platform_id platform = device.getInfo(); + CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(platform, clEnqueueAcquireD3D10ObjectsKHR); +#endif +#if CL_HPP_TARGET_OPENCL_VERSION >= 110 + CL_HPP_INIT_CL_EXT_FCN_PTR_(clEnqueueAcquireD3D10ObjectsKHR); +#endif + + cl_event tmp; + cl_int err = detail::errHandler( + pfn_clEnqueueAcquireD3D10ObjectsKHR( + object_, + (mem_objects != NULL) ? (cl_uint) mem_objects->size() : 0, + (mem_objects != NULL && mem_objects->size() > 0) ? (const cl_mem *) &mem_objects->front(): NULL, + (events != NULL) ? (cl_uint) events->size() : 0, + (events != NULL) ? (cl_event*) &events->front() : NULL, + (event != NULL) ? &tmp : NULL), + __ENQUEUE_ACQUIRE_GL_ERR); + + if (event != NULL && err == CL_SUCCESS) + *event = tmp; + + return err; + } + + cl_int enqueueReleaseD3D10Objects( + const vector* mem_objects = NULL, + const vector* events = NULL, + Event* event = NULL) const + { + static PFN_clEnqueueReleaseD3D10ObjectsKHR pfn_clEnqueueReleaseD3D10ObjectsKHR = NULL; +#if CL_HPP_TARGET_OPENCL_VERSION >= 120 + cl_context context = getInfo(); + cl::Device device(getInfo()); + cl_platform_id platform = device.getInfo(); + CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_(platform, clEnqueueReleaseD3D10ObjectsKHR); +#endif // CL_HPP_TARGET_OPENCL_VERSION >= 120 +#if CL_HPP_TARGET_OPENCL_VERSION >= 110 + CL_HPP_INIT_CL_EXT_FCN_PTR_(clEnqueueReleaseD3D10ObjectsKHR); +#endif // CL_HPP_TARGET_OPENCL_VERSION >= 110 + + cl_event tmp; + cl_int err = detail::errHandler( + pfn_clEnqueueReleaseD3D10ObjectsKHR( + object_, + (mem_objects != NULL) ? (cl_uint) mem_objects->size() : 0, + (mem_objects != NULL && mem_objects->size() > 0) ? (const cl_mem *) &mem_objects->front(): NULL, + (events != NULL) ? (cl_uint) events->size() : 0, + (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL, + (event != NULL) ? 
&tmp : NULL), + __ENQUEUE_RELEASE_GL_ERR); + + if (event != NULL && err == CL_SUCCESS) + *event = tmp; + + return err; + } +#endif + +/** + * Deprecated APIs for 1.2 + */ +#if defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS) + CL_EXT_PREFIX__VERSION_1_1_DEPRECATED + cl_int enqueueBarrier() const CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED + { + return detail::errHandler( + ::clEnqueueBarrier(object_), + __ENQUEUE_BARRIER_ERR); + } +#endif // CL_USE_DEPRECATED_OPENCL_1_1_APIS + + cl_int flush() const + { + return detail::errHandler(::clFlush(object_), __FLUSH_ERR); + } + + cl_int finish() const + { + return detail::errHandler(::clFinish(object_), __FINISH_ERR); + } +}; // CommandQueue + +CL_HPP_DEFINE_STATIC_MEMBER_ std::once_flag CommandQueue::default_initialized_; +CL_HPP_DEFINE_STATIC_MEMBER_ CommandQueue CommandQueue::default_; +CL_HPP_DEFINE_STATIC_MEMBER_ cl_int CommandQueue::default_error_ = CL_SUCCESS; + + +#if CL_HPP_TARGET_OPENCL_VERSION >= 200 +enum class DeviceQueueProperties : cl_command_queue_properties +{ + None = 0, + Profiling = CL_QUEUE_PROFILING_ENABLE, +}; + +inline DeviceQueueProperties operator|(DeviceQueueProperties lhs, DeviceQueueProperties rhs) +{ + return static_cast(static_cast(lhs) | static_cast(rhs)); +} + +/*! \class DeviceCommandQueue + * \brief DeviceCommandQueue interface for device cl_command_queues. + */ +class DeviceCommandQueue : public detail::Wrapper +{ +public: + + /*! + * Trivial empty constructor to create a null queue. + */ + DeviceCommandQueue() { } + + /*! + * Default construct device command queue on default context and device + */ + DeviceCommandQueue(DeviceQueueProperties properties, cl_int* err = NULL) + { + cl_int error; + cl::Context context = cl::Context::getDefault(); + cl::Device device = cl::Device::getDefault(); + + cl_command_queue_properties mergedProperties = + CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE | CL_QUEUE_ON_DEVICE | static_cast(properties); + + cl_queue_properties queue_properties[] = { + CL_QUEUE_PROPERTIES, mergedProperties, 0 }; + object_ = ::clCreateCommandQueueWithProperties( + context(), device(), queue_properties, &error); + + detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR); + if (err != NULL) { + *err = error; + } + } + + /*! + * Create a device command queue for a specified device in the passed context. + */ + DeviceCommandQueue( + const Context& context, + const Device& device, + DeviceQueueProperties properties = DeviceQueueProperties::None, + cl_int* err = NULL) + { + cl_int error; + + cl_command_queue_properties mergedProperties = + CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE | CL_QUEUE_ON_DEVICE | static_cast(properties); + cl_queue_properties queue_properties[] = { + CL_QUEUE_PROPERTIES, mergedProperties, 0 }; + object_ = ::clCreateCommandQueueWithProperties( + context(), device(), queue_properties, &error); + + detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR); + if (err != NULL) { + *err = error; + } + } + + /*! + * Create a device command queue for a specified device in the passed context. 
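+     *
+     * For example (a sketch; assumes an OpenCL 2.0 `context` and `device`):
+     * \code
+     *     cl_int err;
+     *     cl::DeviceCommandQueue devQueue(
+     *         context, device, 16 * 1024,
+     *         cl::DeviceQueueProperties::None, &err);
+     * \endcode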
+ */ + DeviceCommandQueue( + const Context& context, + const Device& device, + cl_uint queueSize, + DeviceQueueProperties properties = DeviceQueueProperties::None, + cl_int* err = NULL) + { + cl_int error; + + cl_command_queue_properties mergedProperties = + CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE | CL_QUEUE_ON_DEVICE | static_cast(properties); + cl_queue_properties queue_properties[] = { + CL_QUEUE_PROPERTIES, mergedProperties, + CL_QUEUE_SIZE, queueSize, + 0 }; + object_ = ::clCreateCommandQueueWithProperties( + context(), device(), queue_properties, &error); + + detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR); + if (err != NULL) { + *err = error; + } + } + + /*! \brief Constructor from cl_command_queue - takes ownership. + * + * \param retainObject will cause the constructor to retain its cl object. + * Defaults to false to maintain compatibility with + * earlier versions. + */ + explicit DeviceCommandQueue(const cl_command_queue& commandQueue, bool retainObject = false) : + detail::Wrapper(commandQueue, retainObject) { } + + DeviceCommandQueue& operator = (const cl_command_queue& rhs) + { + detail::Wrapper::operator=(rhs); + return *this; + } + + /*! \brief Copy constructor to forward copy to the superclass correctly. + * Required for MSVC. + */ + DeviceCommandQueue(const DeviceCommandQueue& queue) : detail::Wrapper(queue) {} + + /*! \brief Copy assignment to forward copy to the superclass correctly. + * Required for MSVC. + */ + DeviceCommandQueue& operator = (const DeviceCommandQueue &queue) + { + detail::Wrapper::operator=(queue); + return *this; + } + + /*! \brief Move constructor to forward move to the superclass correctly. + * Required for MSVC. + */ + DeviceCommandQueue(DeviceCommandQueue&& queue) CL_HPP_NOEXCEPT_ : detail::Wrapper(std::move(queue)) {} + + /*! \brief Move assignment to forward move to the superclass correctly. + * Required for MSVC. + */ + DeviceCommandQueue& operator = (DeviceCommandQueue &&queue) + { + detail::Wrapper::operator=(std::move(queue)); + return *this; + } + + template + cl_int getInfo(cl_command_queue_info name, T* param) const + { + return detail::errHandler( + detail::getInfo( + &::clGetCommandQueueInfo, object_, name, param), + __GET_COMMAND_QUEUE_INFO_ERR); + } + + template typename + detail::param_traits::param_type + getInfo(cl_int* err = NULL) const + { + typename detail::param_traits< + detail::cl_command_queue_info, name>::param_type param; + cl_int result = getInfo(name, ¶m); + if (err != NULL) { + *err = result; + } + return param; + } + + /*! + * Create a new default device command queue for the default device, + * in the default context and of the default size. + * If there is already a default queue for the specified device this + * function will return the pre-existing queue. + */ + static DeviceCommandQueue makeDefault( + cl_int *err = nullptr) + { + cl_int error; + cl::Context context = cl::Context::getDefault(); + cl::Device device = cl::Device::getDefault(); + + cl_command_queue_properties properties = + CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE | CL_QUEUE_ON_DEVICE | CL_QUEUE_ON_DEVICE_DEFAULT; + cl_queue_properties queue_properties[] = { + CL_QUEUE_PROPERTIES, properties, + 0 }; + DeviceCommandQueue deviceQueue( + ::clCreateCommandQueueWithProperties( + context(), device(), queue_properties, &error)); + + detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR); + if (err != NULL) { + *err = error; + } + + return deviceQueue; + } + + /*! 
+ * Create a new default device command queue for the specified device + * and of the default size. + * If there is already a default queue for the specified device this + * function will return the pre-existing queue. + */ + static DeviceCommandQueue makeDefault( + const Context &context, const Device &device, cl_int *err = nullptr) + { + cl_int error; + + cl_command_queue_properties properties = + CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE | CL_QUEUE_ON_DEVICE | CL_QUEUE_ON_DEVICE_DEFAULT; + cl_queue_properties queue_properties[] = { + CL_QUEUE_PROPERTIES, properties, + 0 }; + DeviceCommandQueue deviceQueue( + ::clCreateCommandQueueWithProperties( + context(), device(), queue_properties, &error)); + + detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR); + if (err != NULL) { + *err = error; + } + + return deviceQueue; + } + + /*! + * Create a new default device command queue for the specified device + * and of the requested size in bytes. + * If there is already a default queue for the specified device this + * function will return the pre-existing queue. + */ + static DeviceCommandQueue makeDefault( + const Context &context, const Device &device, cl_uint queueSize, cl_int *err = nullptr) + { + cl_int error; + + cl_command_queue_properties properties = + CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE | CL_QUEUE_ON_DEVICE | CL_QUEUE_ON_DEVICE_DEFAULT; + cl_queue_properties queue_properties[] = { + CL_QUEUE_PROPERTIES, properties, + CL_QUEUE_SIZE, queueSize, + 0 }; + DeviceCommandQueue deviceQueue( + ::clCreateCommandQueueWithProperties( + context(), device(), queue_properties, &error)); + + detail::errHandler(error, __CREATE_COMMAND_QUEUE_WITH_PROPERTIES_ERR); + if (err != NULL) { + *err = error; + } + + return deviceQueue; + } +}; // DeviceCommandQueue + +namespace detail +{ + // Specialization for device command queue + template <> + struct KernelArgumentHandler + { + static size_type size(const cl::DeviceCommandQueue&) { return sizeof(cl_command_queue); } + static const cl_command_queue* ptr(const cl::DeviceCommandQueue& value) { return &(value()); } + }; +} // namespace detail + +#endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200 + + +template< typename IteratorType > +Buffer::Buffer( + const Context &context, + IteratorType startIterator, + IteratorType endIterator, + bool readOnly, + bool useHostPtr, + cl_int* err) +{ + typedef typename std::iterator_traits::value_type DataType; + cl_int error; + + cl_mem_flags flags = 0; + if( readOnly ) { + flags |= CL_MEM_READ_ONLY; + } + else { + flags |= CL_MEM_READ_WRITE; + } + if( useHostPtr ) { + flags |= CL_MEM_USE_HOST_PTR; + } + + size_type size = sizeof(DataType)*(endIterator - startIterator); + + if( useHostPtr ) { + object_ = ::clCreateBuffer(context(), flags, size, static_cast(&*startIterator), &error); + } else { + object_ = ::clCreateBuffer(context(), flags, size, 0, &error); + } + + detail::errHandler(error, __CREATE_BUFFER_ERR); + if (err != NULL) { + *err = error; + } + + if( !useHostPtr ) { + CommandQueue queue(context, 0, &error); + detail::errHandler(error, __CREATE_BUFFER_ERR); + if (err != NULL) { + *err = error; + } + + error = cl::copy(queue, startIterator, endIterator, *this); + detail::errHandler(error, __CREATE_BUFFER_ERR); + if (err != NULL) { + *err = error; + } + } +} + +template< typename IteratorType > +Buffer::Buffer( + const CommandQueue &queue, + IteratorType startIterator, + IteratorType endIterator, + bool readOnly, + bool useHostPtr, + cl_int* err) +{ + typedef typename std::iterator_traits::value_type 
DataType; + cl_int error; + + cl_mem_flags flags = 0; + if (readOnly) { + flags |= CL_MEM_READ_ONLY; + } + else { + flags |= CL_MEM_READ_WRITE; + } + if (useHostPtr) { + flags |= CL_MEM_USE_HOST_PTR; + } + + size_type size = sizeof(DataType)*(endIterator - startIterator); + + Context context = queue.getInfo(); + + if (useHostPtr) { + object_ = ::clCreateBuffer(context(), flags, size, static_cast(&*startIterator), &error); + } + else { + object_ = ::clCreateBuffer(context(), flags, size, 0, &error); + } + + detail::errHandler(error, __CREATE_BUFFER_ERR); + if (err != NULL) { + *err = error; + } + + if (!useHostPtr) { + error = cl::copy(queue, startIterator, endIterator, *this); + detail::errHandler(error, __CREATE_BUFFER_ERR); + if (err != NULL) { + *err = error; + } + } +} + +inline cl_int enqueueReadBuffer( + const Buffer& buffer, + cl_bool blocking, + size_type offset, + size_type size, + void* ptr, + const vector* events = NULL, + Event* event = NULL) +{ + cl_int error; + CommandQueue queue = CommandQueue::getDefault(&error); + + if (error != CL_SUCCESS) { + return error; + } + + return queue.enqueueReadBuffer(buffer, blocking, offset, size, ptr, events, event); +} + +inline cl_int enqueueWriteBuffer( + const Buffer& buffer, + cl_bool blocking, + size_type offset, + size_type size, + const void* ptr, + const vector* events = NULL, + Event* event = NULL) +{ + cl_int error; + CommandQueue queue = CommandQueue::getDefault(&error); + + if (error != CL_SUCCESS) { + return error; + } + + return queue.enqueueWriteBuffer(buffer, blocking, offset, size, ptr, events, event); +} + +inline void* enqueueMapBuffer( + const Buffer& buffer, + cl_bool blocking, + cl_map_flags flags, + size_type offset, + size_type size, + const vector* events = NULL, + Event* event = NULL, + cl_int* err = NULL) +{ + cl_int error; + CommandQueue queue = CommandQueue::getDefault(&error); + detail::errHandler(error, __ENQUEUE_MAP_BUFFER_ERR); + if (err != NULL) { + *err = error; + } + + void * result = ::clEnqueueMapBuffer( + queue(), buffer(), blocking, flags, offset, size, + (events != NULL) ? (cl_uint) events->size() : 0, + (events != NULL && events->size() > 0) ? (cl_event*) &events->front() : NULL, + (cl_event*) event, + &error); + + detail::errHandler(error, __ENQUEUE_MAP_BUFFER_ERR); + if (err != NULL) { + *err = error; + } + return result; +} + + +#if CL_HPP_TARGET_OPENCL_VERSION >= 200 +/** + * Enqueues to the default queue a command that will allow the host to + * update a region of a coarse-grained SVM buffer. + * This variant takes a raw SVM pointer. + */ +template +inline cl_int enqueueMapSVM( + T* ptr, + cl_bool blocking, + cl_map_flags flags, + size_type size, + const vector* events, + Event* event) +{ + cl_int error; + CommandQueue queue = CommandQueue::getDefault(&error); + if (error != CL_SUCCESS) { + return detail::errHandler(error, __ENQUEUE_MAP_BUFFER_ERR); + } + + return queue.enqueueMapSVM( + ptr, blocking, flags, size, events, event); +} + +/** + * Enqueues to the default queue a command that will allow the host to + * update a region of a coarse-grained SVM buffer. + * This variant takes a cl::pointer instance. 
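+ *
+ * A sketch of the protocol, shown with the raw-pointer overload for
+ * brevity (the cl::SVMAllocator used here is assumed from earlier in
+ * this header):
+ * \code
+ *   cl::SVMAllocator<int, cl::SVMTraitCoarse<>> alloc;
+ *   int *svmPtr = alloc.allocate(1);
+ *   cl::enqueueMapSVM(svmPtr, CL_TRUE, CL_MAP_WRITE, sizeof(int), NULL, NULL);
+ *   *svmPtr = 42;                    // host may now update the region
+ *   cl::enqueueUnmapSVM(svmPtr);
+ *   alloc.deallocate(svmPtr, 1);
+ * \endcode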
+ */ +template +inline cl_int enqueueMapSVM( + cl::pointer ptr, + cl_bool blocking, + cl_map_flags flags, + size_type size, + const vector* events = NULL, + Event* event = NULL) +{ + cl_int error; + CommandQueue queue = CommandQueue::getDefault(&error); + if (error != CL_SUCCESS) { + return detail::errHandler(error, __ENQUEUE_MAP_BUFFER_ERR); + } + + return queue.enqueueMapSVM( + ptr, blocking, flags, size, events, event); +} + +/** + * Enqueues to the default queue a command that will allow the host to + * update a region of a coarse-grained SVM buffer. + * This variant takes a cl::vector instance. + */ +template +inline cl_int enqueueMapSVM( + cl::vector container, + cl_bool blocking, + cl_map_flags flags, + const vector* events = NULL, + Event* event = NULL) +{ + cl_int error; + CommandQueue queue = CommandQueue::getDefault(&error); + if (error != CL_SUCCESS) { + return detail::errHandler(error, __ENQUEUE_MAP_BUFFER_ERR); + } + + return queue.enqueueMapSVM( + container, blocking, flags, events, event); +} + +#endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200 + +inline cl_int enqueueUnmapMemObject( + const Memory& memory, + void* mapped_ptr, + const vector* events = NULL, + Event* event = NULL) +{ + cl_int error; + CommandQueue queue = CommandQueue::getDefault(&error); + detail::errHandler(error, __ENQUEUE_MAP_BUFFER_ERR); + if (error != CL_SUCCESS) { + return error; + } + + cl_event tmp; + cl_int err = detail::errHandler( + ::clEnqueueUnmapMemObject( + queue(), memory(), mapped_ptr, + (events != NULL) ? (cl_uint)events->size() : 0, + (events != NULL && events->size() > 0) ? (cl_event*)&events->front() : NULL, + (event != NULL) ? &tmp : NULL), + __ENQUEUE_UNMAP_MEM_OBJECT_ERR); + + if (event != NULL && err == CL_SUCCESS) + *event = tmp; + + return err; +} + +#if CL_HPP_TARGET_OPENCL_VERSION >= 200 +/** + * Enqueues to the default queue a command that will release a coarse-grained + * SVM buffer back to the OpenCL runtime. + * This variant takes a raw SVM pointer. + */ +template +inline cl_int enqueueUnmapSVM( + T* ptr, + const vector* events = NULL, + Event* event = NULL) +{ + cl_int error; + CommandQueue queue = CommandQueue::getDefault(&error); + if (error != CL_SUCCESS) { + return detail::errHandler(error, __ENQUEUE_UNMAP_MEM_OBJECT_ERR); + } + + return detail::errHandler(queue.enqueueUnmapSVM(ptr, events, event), + __ENQUEUE_UNMAP_MEM_OBJECT_ERR); + +} + +/** + * Enqueues to the default queue a command that will release a coarse-grained + * SVM buffer back to the OpenCL runtime. + * This variant takes a cl::pointer instance. + */ +template +inline cl_int enqueueUnmapSVM( + cl::pointer &ptr, + const vector* events = NULL, + Event* event = NULL) +{ + cl_int error; + CommandQueue queue = CommandQueue::getDefault(&error); + if (error != CL_SUCCESS) { + return detail::errHandler(error, __ENQUEUE_UNMAP_MEM_OBJECT_ERR); + } + + return detail::errHandler(queue.enqueueUnmapSVM(ptr, events, event), + __ENQUEUE_UNMAP_MEM_OBJECT_ERR); +} + +/** + * Enqueues to the default queue a command that will release a coarse-grained + * SVM buffer back to the OpenCL runtime. + * This variant takes a cl::vector instance. 
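+ *
+ * For example (a sketch; `data` is an SVM-backed cl::vector that was
+ * mapped earlier with cl::enqueueMapSVM):
+ * \code
+ *   cl::Event done;
+ *   cl::enqueueUnmapSVM(data, NULL, &done);
+ *   done.wait();                     // region is owned by the runtime again
+ * \endcode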
+ */ +template +inline cl_int enqueueUnmapSVM( + cl::vector &container, + const vector* events = NULL, + Event* event = NULL) +{ + cl_int error; + CommandQueue queue = CommandQueue::getDefault(&error); + if (error != CL_SUCCESS) { + return detail::errHandler(error, __ENQUEUE_UNMAP_MEM_OBJECT_ERR); + } + + return detail::errHandler(queue.enqueueUnmapSVM(container, events, event), + __ENQUEUE_UNMAP_MEM_OBJECT_ERR); +} + +#endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200 + +inline cl_int enqueueCopyBuffer( + const Buffer& src, + const Buffer& dst, + size_type src_offset, + size_type dst_offset, + size_type size, + const vector* events = NULL, + Event* event = NULL) +{ + cl_int error; + CommandQueue queue = CommandQueue::getDefault(&error); + + if (error != CL_SUCCESS) { + return error; + } + + return queue.enqueueCopyBuffer(src, dst, src_offset, dst_offset, size, events, event); +} + +/** + * Blocking copy operation between iterators and a buffer. + * Host to Device. + * Uses default command queue. + */ +template< typename IteratorType > +inline cl_int copy( IteratorType startIterator, IteratorType endIterator, cl::Buffer &buffer ) +{ + cl_int error; + CommandQueue queue = CommandQueue::getDefault(&error); + if (error != CL_SUCCESS) + return error; + + return cl::copy(queue, startIterator, endIterator, buffer); +} + +/** + * Blocking copy operation between iterators and a buffer. + * Device to Host. + * Uses default command queue. + */ +template< typename IteratorType > +inline cl_int copy( const cl::Buffer &buffer, IteratorType startIterator, IteratorType endIterator ) +{ + cl_int error; + CommandQueue queue = CommandQueue::getDefault(&error); + if (error != CL_SUCCESS) + return error; + + return cl::copy(queue, buffer, startIterator, endIterator); +} + +/** + * Blocking copy operation between iterators and a buffer. + * Host to Device. + * Uses specified queue. + */ +template< typename IteratorType > +inline cl_int copy( const CommandQueue &queue, IteratorType startIterator, IteratorType endIterator, cl::Buffer &buffer ) +{ + typedef typename std::iterator_traits::value_type DataType; + cl_int error; + + size_type length = endIterator-startIterator; + size_type byteLength = length*sizeof(DataType); + + DataType *pointer = + static_cast(queue.enqueueMapBuffer(buffer, CL_TRUE, CL_MAP_WRITE, 0, byteLength, 0, 0, &error)); + // if exceptions enabled, enqueueMapBuffer will throw + if( error != CL_SUCCESS ) { + return error; + } +#if defined(_MSC_VER) + std::copy( + startIterator, + endIterator, + stdext::checked_array_iterator( + pointer, length)); +#else + std::copy(startIterator, endIterator, pointer); +#endif + Event endEvent; + error = queue.enqueueUnmapMemObject(buffer, pointer, 0, &endEvent); + // if exceptions enabled, enqueueUnmapMemObject will throw + if( error != CL_SUCCESS ) { + return error; + } + endEvent.wait(); + return CL_SUCCESS; +} + +/** + * Blocking copy operation between iterators and a buffer. + * Device to Host. + * Uses specified queue. 
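+ *
+ * For example, reading a buffer of 1024 floats back into host memory
+ * (a sketch; `queue` and `buffer` are assumed to exist):
+ * \code
+ *   std::vector<float> host(1024);
+ *   cl::copy(queue, buffer, host.begin(), host.end());
+ * \endcode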
+/**
+ * Blocking copy operation between iterators and a buffer.
+ * Device to Host.
+ * Uses specified queue.
+ */
+template< typename IteratorType >
+inline cl_int copy( const CommandQueue &queue, const cl::Buffer &buffer, IteratorType startIterator, IteratorType endIterator )
+{
+    typedef typename std::iterator_traits<IteratorType>::value_type DataType;
+    cl_int error;
+
+    size_type length = endIterator-startIterator;
+    size_type byteLength = length*sizeof(DataType);
+
+    DataType *pointer =
+        static_cast<DataType*>(queue.enqueueMapBuffer(buffer, CL_TRUE, CL_MAP_READ, 0, byteLength, 0, 0, &error));
+    // if exceptions enabled, enqueueMapBuffer will throw
+    if( error != CL_SUCCESS ) {
+        return error;
+    }
+    std::copy(pointer, pointer + length, startIterator);
+    Event endEvent;
+    error = queue.enqueueUnmapMemObject(buffer, pointer, 0, &endEvent);
+    // if exceptions enabled, enqueueUnmapMemObject will throw
+    if( error != CL_SUCCESS ) {
+        return error;
+    }
+    endEvent.wait();
+    return CL_SUCCESS;
+}
+
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 200
+/**
+ * Blocking SVM map operation - performs a blocking map underneath.
+ */
+template<typename T, class Alloc>
+inline cl_int mapSVM(cl::vector<T, Alloc> &container)
+{
+    return enqueueMapSVM(container, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE);
+}
+
+/**
+ * Blocking SVM unmap operation - performs a blocking unmap underneath.
+ */
+template<typename T, class Alloc>
+inline cl_int unmapSVM(cl::vector<T, Alloc> &container)
+{
+    return enqueueUnmapSVM(container);
+}
+
+#endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 110
+inline cl_int enqueueReadBufferRect(
+    const Buffer& buffer,
+    cl_bool blocking,
+    const array<size_type, 3>& buffer_offset,
+    const array<size_type, 3>& host_offset,
+    const array<size_type, 3>& region,
+    size_type buffer_row_pitch,
+    size_type buffer_slice_pitch,
+    size_type host_row_pitch,
+    size_type host_slice_pitch,
+    void *ptr,
+    const vector<Event>* events = NULL,
+    Event* event = NULL)
+{
+    cl_int error;
+    CommandQueue queue = CommandQueue::getDefault(&error);
+
+    if (error != CL_SUCCESS) {
+        return error;
+    }
+
+    return queue.enqueueReadBufferRect(
+        buffer,
+        blocking,
+        buffer_offset,
+        host_offset,
+        region,
+        buffer_row_pitch,
+        buffer_slice_pitch,
+        host_row_pitch,
+        host_slice_pitch,
+        ptr,
+        events,
+        event);
+}
+
+inline cl_int enqueueWriteBufferRect(
+    const Buffer& buffer,
+    cl_bool blocking,
+    const array<size_type, 3>& buffer_offset,
+    const array<size_type, 3>& host_offset,
+    const array<size_type, 3>& region,
+    size_type buffer_row_pitch,
+    size_type buffer_slice_pitch,
+    size_type host_row_pitch,
+    size_type host_slice_pitch,
+    const void *ptr,
+    const vector<Event>* events = NULL,
+    Event* event = NULL)
+{
+    cl_int error;
+    CommandQueue queue = CommandQueue::getDefault(&error);
+
+    if (error != CL_SUCCESS) {
+        return error;
+    }
+
+    return queue.enqueueWriteBufferRect(
+        buffer,
+        blocking,
+        buffer_offset,
+        host_offset,
+        region,
+        buffer_row_pitch,
+        buffer_slice_pitch,
+        host_row_pitch,
+        host_slice_pitch,
+        ptr,
+        events,
+        event);
+}
+
+inline cl_int enqueueCopyBufferRect(
+    const Buffer& src,
+    const Buffer& dst,
+    const array<size_type, 3>& src_origin,
+    const array<size_type, 3>& dst_origin,
+    const array<size_type, 3>& region,
+    size_type src_row_pitch,
+    size_type src_slice_pitch,
+    size_type dst_row_pitch,
+    size_type dst_slice_pitch,
+    const vector<Event>* events = NULL,
+    Event* event = NULL)
+{
+    cl_int error;
+    CommandQueue queue = CommandQueue::getDefault(&error);
+
+    if (error != CL_SUCCESS) {
+        return error;
+    }
+
+    return queue.enqueueCopyBufferRect(
+        src,
+        dst,
+        src_origin,
+        dst_origin,
+        region,
+        src_row_pitch,
+        src_slice_pitch,
+        dst_row_pitch,
+        dst_slice_pitch,
+        events,
+        event);
+}
+#endif // CL_HPP_TARGET_OPENCL_VERSION >= 110
+
+inline cl_int enqueueReadImage(
+    const Image& image,
+    cl_bool blocking,
+    const array<size_type, 3>& origin,
+    const array<size_type, 3>& region,
+    size_type row_pitch,
+    size_type slice_pitch,
+    void* ptr,
+    const vector<Event>* events = NULL,
+    Event* event = NULL)
+{
+    cl_int error;
+    CommandQueue queue = CommandQueue::getDefault(&error);
+
+    if (error != CL_SUCCESS) {
+        return error;
+    }
+
+    return queue.enqueueReadImage(
+        image,
+        blocking,
+        origin,
+        region,
+        row_pitch,
+        slice_pitch,
+        ptr,
+        events,
+        event);
+}
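+
+/*
+ * Sketch: blocking read-back of a 2-D image on the default queue. "img",
+ * "width" and "height" are assumed to exist; a row pitch of 0 lets the
+ * runtime compute it.
+ *
+ *     std::vector<unsigned char> pixels(width * height * 4);
+ *     cl::enqueueReadImage(img, CL_TRUE, {0, 0, 0}, {width, height, 1},
+ *                          0, 0, pixels.data());
+ */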
+
+inline cl_int enqueueWriteImage(
+    const Image& image,
+    cl_bool blocking,
+    const array<size_type, 3>& origin,
+    const array<size_type, 3>& region,
+    size_type row_pitch,
+    size_type slice_pitch,
+    const void* ptr,
+    const vector<Event>* events = NULL,
+    Event* event = NULL)
+{
+    cl_int error;
+    CommandQueue queue = CommandQueue::getDefault(&error);
+
+    if (error != CL_SUCCESS) {
+        return error;
+    }
+
+    return queue.enqueueWriteImage(
+        image,
+        blocking,
+        origin,
+        region,
+        row_pitch,
+        slice_pitch,
+        ptr,
+        events,
+        event);
+}
+
+inline cl_int enqueueCopyImage(
+    const Image& src,
+    const Image& dst,
+    const array<size_type, 3>& src_origin,
+    const array<size_type, 3>& dst_origin,
+    const array<size_type, 3>& region,
+    const vector<Event>* events = NULL,
+    Event* event = NULL)
+{
+    cl_int error;
+    CommandQueue queue = CommandQueue::getDefault(&error);
+
+    if (error != CL_SUCCESS) {
+        return error;
+    }
+
+    return queue.enqueueCopyImage(
+        src,
+        dst,
+        src_origin,
+        dst_origin,
+        region,
+        events,
+        event);
+}
+
+inline cl_int enqueueCopyImageToBuffer(
+    const Image& src,
+    const Buffer& dst,
+    const array<size_type, 3>& src_origin,
+    const array<size_type, 3>& region,
+    size_type dst_offset,
+    const vector<Event>* events = NULL,
+    Event* event = NULL)
+{
+    cl_int error;
+    CommandQueue queue = CommandQueue::getDefault(&error);
+
+    if (error != CL_SUCCESS) {
+        return error;
+    }
+
+    return queue.enqueueCopyImageToBuffer(
+        src,
+        dst,
+        src_origin,
+        region,
+        dst_offset,
+        events,
+        event);
+}
+
+inline cl_int enqueueCopyBufferToImage(
+    const Buffer& src,
+    const Image& dst,
+    size_type src_offset,
+    const array<size_type, 3>& dst_origin,
+    const array<size_type, 3>& region,
+    const vector<Event>* events = NULL,
+    Event* event = NULL)
+{
+    cl_int error;
+    CommandQueue queue = CommandQueue::getDefault(&error);
+
+    if (error != CL_SUCCESS) {
+        return error;
+    }
+
+    return queue.enqueueCopyBufferToImage(
+        src,
+        dst,
+        src_offset,
+        dst_origin,
+        region,
+        events,
+        event);
+}
+
+
+inline cl_int flush(void)
+{
+    cl_int error;
+    CommandQueue queue = CommandQueue::getDefault(&error);
+
+    if (error != CL_SUCCESS) {
+        return error;
+    }
+
+    return queue.flush();
+}
+
+inline cl_int finish(void)
+{
+    cl_int error;
+    CommandQueue queue = CommandQueue::getDefault(&error);
+
+    if (error != CL_SUCCESS) {
+        return error;
+    }
+
+    return queue.finish();
+}
+
+class EnqueueArgs
+{
+private:
+    CommandQueue queue_;
+    const NDRange offset_;
+    const NDRange global_;
+    const NDRange local_;
+    vector<Event> events_;
+
+    template<typename... Ts>
+    friend class KernelFunctor;
+
+public:
+    EnqueueArgs(NDRange global) :
+        queue_(CommandQueue::getDefault()),
+        offset_(NullRange),
+        global_(global),
+        local_(NullRange)
+    {
+
+    }
+
+    EnqueueArgs(NDRange global, NDRange local) :
+        queue_(CommandQueue::getDefault()),
+        offset_(NullRange),
+        global_(global),
+        local_(local)
+    {
+
+    }
+
+    EnqueueArgs(NDRange offset, NDRange global, NDRange local) :
+        queue_(CommandQueue::getDefault()),
+        offset_(offset),
+        global_(global),
+        local_(local)
+    {
+
+    }
+
+    EnqueueArgs(Event e, NDRange global) :
+        queue_(CommandQueue::getDefault()),
+        offset_(NullRange),
+        global_(global),
+        local_(NullRange)
+    {
+        events_.push_back(e);
+    }
+
+    EnqueueArgs(Event e, NDRange global, NDRange local) :
+        queue_(CommandQueue::getDefault()),
+        offset_(NullRange),
+        global_(global),
+        local_(local)
+    {
+        events_.push_back(e);
+    }
+
+    EnqueueArgs(Event e, NDRange offset, NDRange global, NDRange local) :
+        queue_(CommandQueue::getDefault()),
+        offset_(offset),
+        global_(global),
+        local_(local)
+    {
+        events_.push_back(e);
+    }
+
+    EnqueueArgs(const vector<Event> &events, NDRange global) :
+        queue_(CommandQueue::getDefault()),
+        offset_(NullRange),
+        global_(global),
+        local_(NullRange),
+        events_(events)
+    {
+
+    }
+
+    EnqueueArgs(const vector<Event> &events, NDRange global, NDRange local) :
+        queue_(CommandQueue::getDefault()),
+        offset_(NullRange),
+        global_(global),
+        local_(local),
+        events_(events)
+    {
+
+    }
+
+    EnqueueArgs(const vector<Event> &events, NDRange offset, NDRange global, NDRange local) :
+        queue_(CommandQueue::getDefault()),
+        offset_(offset),
+        global_(global),
+        local_(local),
+        events_(events)
+    {
+
+    }
+
+    EnqueueArgs(CommandQueue &queue, NDRange global) :
+        queue_(queue),
+        offset_(NullRange),
+        global_(global),
+        local_(NullRange)
+    {
+
+    }
+
+    EnqueueArgs(CommandQueue &queue, NDRange global, NDRange local) :
+        queue_(queue),
+        offset_(NullRange),
+        global_(global),
+        local_(local)
+    {
+
+    }
+
+    EnqueueArgs(CommandQueue &queue, NDRange offset, NDRange global, NDRange local) :
+        queue_(queue),
+        offset_(offset),
+        global_(global),
+        local_(local)
+    {
+
+    }
+
+    EnqueueArgs(CommandQueue &queue, Event e, NDRange global) :
+        queue_(queue),
+        offset_(NullRange),
+        global_(global),
+        local_(NullRange)
+    {
+        events_.push_back(e);
+    }
+
+    EnqueueArgs(CommandQueue &queue, Event e, NDRange global, NDRange local) :
+        queue_(queue),
+        offset_(NullRange),
+        global_(global),
+        local_(local)
+    {
+        events_.push_back(e);
+    }
+
+    EnqueueArgs(CommandQueue &queue, Event e, NDRange offset, NDRange global, NDRange local) :
+        queue_(queue),
+        offset_(offset),
+        global_(global),
+        local_(local)
+    {
+        events_.push_back(e);
+    }
+
+    EnqueueArgs(CommandQueue &queue, const vector<Event> &events, NDRange global) :
+        queue_(queue),
+        offset_(NullRange),
+        global_(global),
+        local_(NullRange),
+        events_(events)
+    {
+
+    }
+
+    EnqueueArgs(CommandQueue &queue, const vector<Event> &events, NDRange global, NDRange local) :
+        queue_(queue),
+        offset_(NullRange),
+        global_(global),
+        local_(local),
+        events_(events)
+    {
+
+    }
+
+    EnqueueArgs(CommandQueue &queue, const vector<Event> &events, NDRange offset, NDRange global, NDRange local) :
+        queue_(queue),
+        offset_(offset),
+        global_(global),
+        local_(local),
+        events_(events)
+    {
+
+    }
+};
+
+
+//----------------------------------------------------------------------------------------------
+
+
+/**
+ * Type safe kernel functor.
+ *
+ */
+template<typename... Ts>
+class KernelFunctor
+{
+private:
+    Kernel kernel_;
+
+    template<int index, typename T0, typename... T1s>
+    void setArgs(T0&& t0, T1s&&... t1s)
+    {
+        kernel_.setArg(index, t0);
+        setArgs<index + 1, T1s...>(std::forward<T1s>(t1s)...);
+    }
+
+    template<int index, typename T0>
+    void setArgs(T0&& t0)
+    {
+        kernel_.setArg(index, t0);
+    }
+
+    template<int index>
+    void setArgs()
+    {
+    }
+
+
+public:
+    KernelFunctor(Kernel kernel) : kernel_(kernel)
+    {}
+
+    KernelFunctor(
+        const Program& program,
+        const string name,
+        cl_int * err = NULL) :
+        kernel_(program, name.c_str(), err)
+    {}
+
+    //! \brief Return type of the functor
+    typedef Event result_type;
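+
+    /*
+     * Typical use, as a sketch. It assumes a built Program "prog" containing
+     * __kernel void vadd(__global const float*, __global const float*,
+     * __global float*), plus an existing "queue" and Buffers "a", "b", "c":
+     *
+     *     auto vadd = cl::KernelFunctor<cl::Buffer, cl::Buffer, cl::Buffer>(prog, "vadd");
+     *     cl::Event e = vadd(cl::EnqueueArgs(queue, cl::NDRange(n)), a, b, c);
+     *     e.wait();
+     */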
+
+    /**
+     * Enqueue kernel.
+     * @param args Launch parameters of the kernel.
+     * @param t0... List of kernel arguments based on the template type of the functor.
+     */
+    Event operator() (
+        const EnqueueArgs& args,
+        Ts... ts)
+    {
+        Event event;
+        setArgs<0>(std::forward<Ts>(ts)...);
+
+        args.queue_.enqueueNDRangeKernel(
+            kernel_,
+            args.offset_,
+            args.global_,
+            args.local_,
+            &args.events_,
+            &event);
+
+        return event;
+    }
+
+    /**
+     * Enqueue kernel with support for error code.
+     * @param args Launch parameters of the kernel.
+     * @param t0... List of kernel arguments based on the template type of the functor.
+     * @param error Out parameter returning the error code from the execution.
+     */
+    Event operator() (
+        const EnqueueArgs& args,
+        Ts... ts,
+        cl_int &error)
+    {
+        Event event;
+        setArgs<0>(std::forward<Ts>(ts)...);
+
+        error = args.queue_.enqueueNDRangeKernel(
+            kernel_,
+            args.offset_,
+            args.global_,
+            args.local_,
+            &args.events_,
+            &event);
+
+        return event;
+    }
+
+#if CL_HPP_TARGET_OPENCL_VERSION >= 200
+    cl_int setSVMPointers(const vector<void*> &pointerList)
+    {
+        return kernel_.setSVMPointers(pointerList);
+    }
+
+    template<typename T0, typename... T1s>
+    cl_int setSVMPointers(const T0 &t0, T1s... ts)
+    {
+        return kernel_.setSVMPointers(t0, ts...);
+    }
+#endif // #if CL_HPP_TARGET_OPENCL_VERSION >= 200
+
+    Kernel getKernel()
+    {
+        return kernel_;
+    }
+};
+
+namespace compatibility {
+    /**
+     * Backward compatibility class to ensure that cl.hpp code works with cl2.hpp.
+     * Please use KernelFunctor directly.
+     */
+    template<typename... Ts>
+    struct make_kernel
+    {
+        typedef KernelFunctor<Ts...> FunctorType;
+
+        FunctorType functor_;
+
+        make_kernel(
+            const Program& program,
+            const string name,
+            cl_int * err = NULL) :
+            functor_(FunctorType(program, name, err))
+        {}
+
+        make_kernel(
+            const Kernel kernel) :
+            functor_(FunctorType(kernel))
+        {}
+
+        //! \brief Return type of the functor
+        typedef Event result_type;
+
+        //! \brief Function signature of kernel functor with no event dependency.
+        typedef Event type_(
+            const EnqueueArgs&,
+            Ts...);
+
+        Event operator()(
+            const EnqueueArgs& enqueueArgs,
+            Ts...
args) + { + return functor_( + enqueueArgs, args...); + } + }; +} // namespace compatibility + + +//---------------------------------------------------------------------------------------------------------------------- + +#undef CL_HPP_ERR_STR_ +#if !defined(CL_HPP_USER_OVERRIDE_ERROR_STRINGS) +#undef __GET_DEVICE_INFO_ERR +#undef __GET_PLATFORM_INFO_ERR +#undef __GET_DEVICE_IDS_ERR +#undef __GET_CONTEXT_INFO_ERR +#undef __GET_EVENT_INFO_ERR +#undef __GET_EVENT_PROFILE_INFO_ERR +#undef __GET_MEM_OBJECT_INFO_ERR +#undef __GET_IMAGE_INFO_ERR +#undef __GET_SAMPLER_INFO_ERR +#undef __GET_KERNEL_INFO_ERR +#undef __GET_KERNEL_ARG_INFO_ERR +#undef __GET_KERNEL_WORK_GROUP_INFO_ERR +#undef __GET_PROGRAM_INFO_ERR +#undef __GET_PROGRAM_BUILD_INFO_ERR +#undef __GET_COMMAND_QUEUE_INFO_ERR + +#undef __CREATE_CONTEXT_ERR +#undef __CREATE_CONTEXT_FROM_TYPE_ERR +#undef __GET_SUPPORTED_IMAGE_FORMATS_ERR + +#undef __CREATE_BUFFER_ERR +#undef __CREATE_SUBBUFFER_ERR +#undef __CREATE_IMAGE2D_ERR +#undef __CREATE_IMAGE3D_ERR +#undef __CREATE_SAMPLER_ERR +#undef __SET_MEM_OBJECT_DESTRUCTOR_CALLBACK_ERR + +#undef __CREATE_USER_EVENT_ERR +#undef __SET_USER_EVENT_STATUS_ERR +#undef __SET_EVENT_CALLBACK_ERR +#undef __SET_PRINTF_CALLBACK_ERR + +#undef __WAIT_FOR_EVENTS_ERR + +#undef __CREATE_KERNEL_ERR +#undef __SET_KERNEL_ARGS_ERR +#undef __CREATE_PROGRAM_WITH_SOURCE_ERR +#undef __CREATE_PROGRAM_WITH_BINARY_ERR +#undef __CREATE_PROGRAM_WITH_BUILT_IN_KERNELS_ERR +#undef __BUILD_PROGRAM_ERR +#undef __CREATE_KERNELS_IN_PROGRAM_ERR + +#undef __CREATE_COMMAND_QUEUE_ERR +#undef __SET_COMMAND_QUEUE_PROPERTY_ERR +#undef __ENQUEUE_READ_BUFFER_ERR +#undef __ENQUEUE_WRITE_BUFFER_ERR +#undef __ENQUEUE_READ_BUFFER_RECT_ERR +#undef __ENQUEUE_WRITE_BUFFER_RECT_ERR +#undef __ENQEUE_COPY_BUFFER_ERR +#undef __ENQEUE_COPY_BUFFER_RECT_ERR +#undef __ENQUEUE_READ_IMAGE_ERR +#undef __ENQUEUE_WRITE_IMAGE_ERR +#undef __ENQUEUE_COPY_IMAGE_ERR +#undef __ENQUEUE_COPY_IMAGE_TO_BUFFER_ERR +#undef __ENQUEUE_COPY_BUFFER_TO_IMAGE_ERR +#undef __ENQUEUE_MAP_BUFFER_ERR +#undef __ENQUEUE_MAP_IMAGE_ERR +#undef __ENQUEUE_UNMAP_MEM_OBJECT_ERR +#undef __ENQUEUE_NDRANGE_KERNEL_ERR +#undef __ENQUEUE_TASK_ERR +#undef __ENQUEUE_NATIVE_KERNEL + +#undef __UNLOAD_COMPILER_ERR +#undef __CREATE_SUB_DEVICES_ERR + +#undef __CREATE_PIPE_ERR +#undef __GET_PIPE_INFO_ERR + +#endif //CL_HPP_USER_OVERRIDE_ERROR_STRINGS + +// Extensions +#undef CL_HPP_INIT_CL_EXT_FCN_PTR_ +#undef CL_HPP_INIT_CL_EXT_FCN_PTR_PLATFORM_ + +#if defined(CL_HPP_USE_CL_DEVICE_FISSION) +#undef CL_HPP_PARAM_NAME_DEVICE_FISSION_ +#endif // CL_HPP_USE_CL_DEVICE_FISSION + +#undef CL_HPP_NOEXCEPT_ +#undef CL_HPP_DEFINE_STATIC_MEMBER_ + +} // namespace cl + +#endif // CL_HPP_ diff --git a/sources/pvrsdk/Include/CL/cl_d3d10.h b/sources/pvrsdk/Include/CL/cl_d3d10.h new file mode 100755 index 00000000..d5960a43 --- /dev/null +++ b/sources/pvrsdk/Include/CL/cl_d3d10.h @@ -0,0 +1,131 @@ +/********************************************************************************** + * Copyright (c) 2008-2015 The Khronos Group Inc. 
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Materials.
+ *
+ * MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS
+ * KHRONOS STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS
+ * SPECIFICATIONS AND HEADER INFORMATION ARE LOCATED AT
+ *    https://www.khronos.org/registry/
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ **********************************************************************************/
+
+/* $Revision: 11708 $ on $Date: 2010-06-13 23:36:24 -0700 (Sun, 13 Jun 2010) $ */
+
+#ifndef __OPENCL_CL_D3D10_H
+#define __OPENCL_CL_D3D10_H
+
+#include <d3d10.h>
+#include <CL/cl.h>
+#include <CL/cl_platform.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/******************************************************************************
+ * cl_khr_d3d10_sharing                                                       */
+#define cl_khr_d3d10_sharing 1
+
+typedef cl_uint cl_d3d10_device_source_khr;
+typedef cl_uint cl_d3d10_device_set_khr;
+
+/******************************************************************************/
+
+/* Error Codes */
+#define CL_INVALID_D3D10_DEVICE_KHR                  -1002
+#define CL_INVALID_D3D10_RESOURCE_KHR                -1003
+#define CL_D3D10_RESOURCE_ALREADY_ACQUIRED_KHR       -1004
+#define CL_D3D10_RESOURCE_NOT_ACQUIRED_KHR           -1005
+
+/* cl_d3d10_device_source_khr */
+#define CL_D3D10_DEVICE_KHR                          0x4010
+#define CL_D3D10_DXGI_ADAPTER_KHR                    0x4011
+
+/* cl_d3d10_device_set_khr */
+#define CL_PREFERRED_DEVICES_FOR_D3D10_KHR           0x4012
+#define CL_ALL_DEVICES_FOR_D3D10_KHR                 0x4013
+
+/* cl_context_info */
+#define CL_CONTEXT_D3D10_DEVICE_KHR                  0x4014
+#define CL_CONTEXT_D3D10_PREFER_SHARED_RESOURCES_KHR 0x402C
+
+/* cl_mem_info */
+#define CL_MEM_D3D10_RESOURCE_KHR                    0x4015
+
+/* cl_image_info */
+#define CL_IMAGE_D3D10_SUBRESOURCE_KHR               0x4016
+
+/* cl_command_type */
+#define CL_COMMAND_ACQUIRE_D3D10_OBJECTS_KHR         0x4017
+#define CL_COMMAND_RELEASE_D3D10_OBJECTS_KHR         0x4018
+
+/******************************************************************************/
+
+typedef CL_API_ENTRY cl_int (CL_API_CALL *clGetDeviceIDsFromD3D10KHR_fn)(
+    cl_platform_id             platform,
+    cl_d3d10_device_source_khr d3d_device_source,
+    void *                     d3d_object,
+    cl_d3d10_device_set_khr    d3d_device_set,
+    cl_uint                    num_entries,
+    cl_device_id *             devices,
+    cl_uint *                  num_devices) CL_API_SUFFIX__VERSION_1_0;
+
+typedef CL_API_ENTRY cl_mem (CL_API_CALL *clCreateFromD3D10BufferKHR_fn)(
+    cl_context     context,
+    cl_mem_flags   flags,
+    ID3D10Buffer * resource,
+    cl_int *       errcode_ret) CL_API_SUFFIX__VERSION_1_0;
+
+typedef CL_API_ENTRY cl_mem (CL_API_CALL *clCreateFromD3D10Texture2DKHR_fn)(
+    cl_context        context,
+    cl_mem_flags      flags,
ID3D10Texture2D * resource, + UINT subresource, + cl_int * errcode_ret) CL_API_SUFFIX__VERSION_1_0; + +typedef CL_API_ENTRY cl_mem (CL_API_CALL *clCreateFromD3D10Texture3DKHR_fn)( + cl_context context, + cl_mem_flags flags, + ID3D10Texture3D * resource, + UINT subresource, + cl_int * errcode_ret) CL_API_SUFFIX__VERSION_1_0; + +typedef CL_API_ENTRY cl_int (CL_API_CALL *clEnqueueAcquireD3D10ObjectsKHR_fn)( + cl_command_queue command_queue, + cl_uint num_objects, + const cl_mem * mem_objects, + cl_uint num_events_in_wait_list, + const cl_event * event_wait_list, + cl_event * event) CL_API_SUFFIX__VERSION_1_0; + +typedef CL_API_ENTRY cl_int (CL_API_CALL *clEnqueueReleaseD3D10ObjectsKHR_fn)( + cl_command_queue command_queue, + cl_uint num_objects, + const cl_mem * mem_objects, + cl_uint num_events_in_wait_list, + const cl_event * event_wait_list, + cl_event * event) CL_API_SUFFIX__VERSION_1_0; + +#ifdef __cplusplus +} +#endif + +#endif /* __OPENCL_CL_D3D10_H */ + diff --git a/sources/pvrsdk/Include/CL/cl_d3d11.h b/sources/pvrsdk/Include/CL/cl_d3d11.h new file mode 100755 index 00000000..39f90723 --- /dev/null +++ b/sources/pvrsdk/Include/CL/cl_d3d11.h @@ -0,0 +1,131 @@ +/********************************************************************************** + * Copyright (c) 2008-2015 The Khronos Group Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and/or associated documentation files (the + * "Materials"), to deal in the Materials without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Materials, and to + * permit persons to whom the Materials are furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Materials. + * + * MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS + * KHRONOS STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS + * SPECIFICATIONS AND HEADER INFORMATION ARE LOCATED AT + * https://www.khronos.org/registry/ + * + * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS. 
+ **********************************************************************************/ + +/* $Revision: 11708 $ on $Date: 2010-06-13 23:36:24 -0700 (Sun, 13 Jun 2010) $ */ + +#ifndef __OPENCL_CL_D3D11_H +#define __OPENCL_CL_D3D11_H + +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/****************************************************************************** + * cl_khr_d3d11_sharing */ +#define cl_khr_d3d11_sharing 1 + +typedef cl_uint cl_d3d11_device_source_khr; +typedef cl_uint cl_d3d11_device_set_khr; + +/******************************************************************************/ + +/* Error Codes */ +#define CL_INVALID_D3D11_DEVICE_KHR -1006 +#define CL_INVALID_D3D11_RESOURCE_KHR -1007 +#define CL_D3D11_RESOURCE_ALREADY_ACQUIRED_KHR -1008 +#define CL_D3D11_RESOURCE_NOT_ACQUIRED_KHR -1009 + +/* cl_d3d11_device_source */ +#define CL_D3D11_DEVICE_KHR 0x4019 +#define CL_D3D11_DXGI_ADAPTER_KHR 0x401A + +/* cl_d3d11_device_set */ +#define CL_PREFERRED_DEVICES_FOR_D3D11_KHR 0x401B +#define CL_ALL_DEVICES_FOR_D3D11_KHR 0x401C + +/* cl_context_info */ +#define CL_CONTEXT_D3D11_DEVICE_KHR 0x401D +#define CL_CONTEXT_D3D11_PREFER_SHARED_RESOURCES_KHR 0x402D + +/* cl_mem_info */ +#define CL_MEM_D3D11_RESOURCE_KHR 0x401E + +/* cl_image_info */ +#define CL_IMAGE_D3D11_SUBRESOURCE_KHR 0x401F + +/* cl_command_type */ +#define CL_COMMAND_ACQUIRE_D3D11_OBJECTS_KHR 0x4020 +#define CL_COMMAND_RELEASE_D3D11_OBJECTS_KHR 0x4021 + +/******************************************************************************/ + +typedef CL_API_ENTRY cl_int (CL_API_CALL *clGetDeviceIDsFromD3D11KHR_fn)( + cl_platform_id platform, + cl_d3d11_device_source_khr d3d_device_source, + void * d3d_object, + cl_d3d11_device_set_khr d3d_device_set, + cl_uint num_entries, + cl_device_id * devices, + cl_uint * num_devices) CL_API_SUFFIX__VERSION_1_2; + +typedef CL_API_ENTRY cl_mem (CL_API_CALL *clCreateFromD3D11BufferKHR_fn)( + cl_context context, + cl_mem_flags flags, + ID3D11Buffer * resource, + cl_int * errcode_ret) CL_API_SUFFIX__VERSION_1_2; + +typedef CL_API_ENTRY cl_mem (CL_API_CALL *clCreateFromD3D11Texture2DKHR_fn)( + cl_context context, + cl_mem_flags flags, + ID3D11Texture2D * resource, + UINT subresource, + cl_int * errcode_ret) CL_API_SUFFIX__VERSION_1_2; + +typedef CL_API_ENTRY cl_mem (CL_API_CALL *clCreateFromD3D11Texture3DKHR_fn)( + cl_context context, + cl_mem_flags flags, + ID3D11Texture3D * resource, + UINT subresource, + cl_int * errcode_ret) CL_API_SUFFIX__VERSION_1_2; + +typedef CL_API_ENTRY cl_int (CL_API_CALL *clEnqueueAcquireD3D11ObjectsKHR_fn)( + cl_command_queue command_queue, + cl_uint num_objects, + const cl_mem * mem_objects, + cl_uint num_events_in_wait_list, + const cl_event * event_wait_list, + cl_event * event) CL_API_SUFFIX__VERSION_1_2; + +typedef CL_API_ENTRY cl_int (CL_API_CALL *clEnqueueReleaseD3D11ObjectsKHR_fn)( + cl_command_queue command_queue, + cl_uint num_objects, + const cl_mem * mem_objects, + cl_uint num_events_in_wait_list, + const cl_event * event_wait_list, + cl_event * event) CL_API_SUFFIX__VERSION_1_2; + +#ifdef __cplusplus +} +#endif + +#endif /* __OPENCL_CL_D3D11_H */ + diff --git a/sources/pvrsdk/Include/CL/cl_dx9_media_sharing.h b/sources/pvrsdk/Include/CL/cl_dx9_media_sharing.h new file mode 100755 index 00000000..2729e8b9 --- /dev/null +++ b/sources/pvrsdk/Include/CL/cl_dx9_media_sharing.h @@ -0,0 +1,132 @@ +/********************************************************************************** + * Copyright (c) 2008-2015 The Khronos Group Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and/or associated documentation files (the + * "Materials"), to deal in the Materials without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Materials, and to + * permit persons to whom the Materials are furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Materials. + * + * MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS + * KHRONOS STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS + * SPECIFICATIONS AND HEADER INFORMATION ARE LOCATED AT + * https://www.khronos.org/registry/ + * + * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS. + **********************************************************************************/ + +/* $Revision: 11708 $ on $Date: 2010-06-13 23:36:24 -0700 (Sun, 13 Jun 2010) $ */ + +#ifndef __OPENCL_CL_DX9_MEDIA_SHARING_H +#define __OPENCL_CL_DX9_MEDIA_SHARING_H + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/******************************************************************************/ +/* cl_khr_dx9_media_sharing */ +#define cl_khr_dx9_media_sharing 1 + +typedef cl_uint cl_dx9_media_adapter_type_khr; +typedef cl_uint cl_dx9_media_adapter_set_khr; + +#if defined(_WIN32) +#include +typedef struct _cl_dx9_surface_info_khr +{ + IDirect3DSurface9 *resource; + HANDLE shared_handle; +} cl_dx9_surface_info_khr; +#endif + + +/******************************************************************************/ + +/* Error Codes */ +#define CL_INVALID_DX9_MEDIA_ADAPTER_KHR -1010 +#define CL_INVALID_DX9_MEDIA_SURFACE_KHR -1011 +#define CL_DX9_MEDIA_SURFACE_ALREADY_ACQUIRED_KHR -1012 +#define CL_DX9_MEDIA_SURFACE_NOT_ACQUIRED_KHR -1013 + +/* cl_media_adapter_type_khr */ +#define CL_ADAPTER_D3D9_KHR 0x2020 +#define CL_ADAPTER_D3D9EX_KHR 0x2021 +#define CL_ADAPTER_DXVA_KHR 0x2022 + +/* cl_media_adapter_set_khr */ +#define CL_PREFERRED_DEVICES_FOR_DX9_MEDIA_ADAPTER_KHR 0x2023 +#define CL_ALL_DEVICES_FOR_DX9_MEDIA_ADAPTER_KHR 0x2024 + +/* cl_context_info */ +#define CL_CONTEXT_ADAPTER_D3D9_KHR 0x2025 +#define CL_CONTEXT_ADAPTER_D3D9EX_KHR 0x2026 +#define CL_CONTEXT_ADAPTER_DXVA_KHR 0x2027 + +/* cl_mem_info */ +#define CL_MEM_DX9_MEDIA_ADAPTER_TYPE_KHR 0x2028 +#define CL_MEM_DX9_MEDIA_SURFACE_INFO_KHR 0x2029 + +/* cl_image_info */ +#define CL_IMAGE_DX9_MEDIA_PLANE_KHR 0x202A + +/* cl_command_type */ +#define CL_COMMAND_ACQUIRE_DX9_MEDIA_SURFACES_KHR 0x202B +#define CL_COMMAND_RELEASE_DX9_MEDIA_SURFACES_KHR 0x202C + +/******************************************************************************/ + +typedef CL_API_ENTRY cl_int (CL_API_CALL *clGetDeviceIDsFromDX9MediaAdapterKHR_fn)( + cl_platform_id platform, + cl_uint num_media_adapters, + cl_dx9_media_adapter_type_khr * media_adapter_type, + void * media_adapters, + cl_dx9_media_adapter_set_khr media_adapter_set, + 
cl_uint num_entries, + cl_device_id * devices, + cl_uint * num_devices) CL_API_SUFFIX__VERSION_1_2; + +typedef CL_API_ENTRY cl_mem (CL_API_CALL *clCreateFromDX9MediaSurfaceKHR_fn)( + cl_context context, + cl_mem_flags flags, + cl_dx9_media_adapter_type_khr adapter_type, + void * surface_info, + cl_uint plane, + cl_int * errcode_ret) CL_API_SUFFIX__VERSION_1_2; + +typedef CL_API_ENTRY cl_int (CL_API_CALL *clEnqueueAcquireDX9MediaSurfacesKHR_fn)( + cl_command_queue command_queue, + cl_uint num_objects, + const cl_mem * mem_objects, + cl_uint num_events_in_wait_list, + const cl_event * event_wait_list, + cl_event * event) CL_API_SUFFIX__VERSION_1_2; + +typedef CL_API_ENTRY cl_int (CL_API_CALL *clEnqueueReleaseDX9MediaSurfacesKHR_fn)( + cl_command_queue command_queue, + cl_uint num_objects, + const cl_mem * mem_objects, + cl_uint num_events_in_wait_list, + const cl_event * event_wait_list, + cl_event * event) CL_API_SUFFIX__VERSION_1_2; + +#ifdef __cplusplus +} +#endif + +#endif /* __OPENCL_CL_DX9_MEDIA_SHARING_H */ + diff --git a/sources/pvrsdk/Include/CL/cl_egl.h b/sources/pvrsdk/Include/CL/cl_egl.h new file mode 100755 index 00000000..a58b562f --- /dev/null +++ b/sources/pvrsdk/Include/CL/cl_egl.h @@ -0,0 +1,136 @@ +/******************************************************************************* + * Copyright (c) 2008-2015 The Khronos Group Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and/or associated documentation files (the + * "Materials"), to deal in the Materials without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Materials, and to + * permit persons to whom the Materials are furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Materials. + * + * MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS + * KHRONOS STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS + * SPECIFICATIONS AND HEADER INFORMATION ARE LOCATED AT + * https://www.khronos.org/registry/ + * + * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS. 
+ ******************************************************************************/ + +#ifndef __OPENCL_CL_EGL_H +#define __OPENCL_CL_EGL_H + +#ifdef __APPLE__ + +#else +#include +#endif + +#ifdef __cplusplus +extern "C" { +#endif + + +/* Command type for events created with clEnqueueAcquireEGLObjectsKHR */ +#define CL_COMMAND_EGL_FENCE_SYNC_OBJECT_KHR 0x202F +#define CL_COMMAND_ACQUIRE_EGL_OBJECTS_KHR 0x202D +#define CL_COMMAND_RELEASE_EGL_OBJECTS_KHR 0x202E + +/* Error type for clCreateFromEGLImageKHR */ +#define CL_INVALID_EGL_OBJECT_KHR -1093 +#define CL_EGL_RESOURCE_NOT_ACQUIRED_KHR -1092 + +/* CLeglImageKHR is an opaque handle to an EGLImage */ +typedef void* CLeglImageKHR; + +/* CLeglDisplayKHR is an opaque handle to an EGLDisplay */ +typedef void* CLeglDisplayKHR; + +/* CLeglSyncKHR is an opaque handle to an EGLSync object */ +typedef void* CLeglSyncKHR; + +/* properties passed to clCreateFromEGLImageKHR */ +typedef intptr_t cl_egl_image_properties_khr; + + +#define cl_khr_egl_image 1 +#ifndef CL_NO_PROTOTYPES +extern CL_API_ENTRY cl_mem CL_API_CALL +clCreateFromEGLImageKHR(cl_context /* context */, + CLeglDisplayKHR /* egldisplay */, + CLeglImageKHR /* eglimage */, + cl_mem_flags /* flags */, + const cl_egl_image_properties_khr* /* properties */, + cl_int* /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0; +#endif +typedef CL_API_ENTRY cl_mem(CL_API_CALL* clCreateFromEGLImageKHR_fn)( + cl_context context, + CLeglDisplayKHR egldisplay, + CLeglImageKHR eglimage, + cl_mem_flags flags, + const cl_egl_image_properties_khr* properties, + cl_int* errcode_ret); + +#ifndef CL_NO_PROTOTYPES +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueAcquireEGLObjectsKHR(cl_command_queue /* command_queue */, + cl_uint /* num_objects */, + const cl_mem* /* mem_objects */, + cl_uint /* num_events_in_wait_list */, + const cl_event* /* event_wait_list */, + cl_event* /* event */) CL_API_SUFFIX__VERSION_1_0; +#endif +typedef CL_API_ENTRY cl_int(CL_API_CALL* clEnqueueAcquireEGLObjectsKHR_fn)( + cl_command_queue command_queue, + cl_uint num_objects, + const cl_mem* mem_objects, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + +#ifndef CL_NO_PROTOTYPES +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueReleaseEGLObjectsKHR(cl_command_queue /* command_queue */, + cl_uint /* num_objects */, + const cl_mem* /* mem_objects */, + cl_uint /* num_events_in_wait_list */, + const cl_event* /* event_wait_list */, + cl_event* /* event */) CL_API_SUFFIX__VERSION_1_0; +#endif +typedef CL_API_ENTRY cl_int(CL_API_CALL* clEnqueueReleaseEGLObjectsKHR_fn)( + cl_command_queue command_queue, + cl_uint num_objects, + const cl_mem* mem_objects, + cl_uint num_events_in_wait_list, + const cl_event* event_wait_list, + cl_event* event); + + +#define cl_khr_egl_event 1 +#ifndef CL_NO_PROTOTYPES +extern CL_API_ENTRY cl_event CL_API_CALL +clCreateEventFromEGLSyncKHR(cl_context /* context */, + CLeglSyncKHR /* sync */, + CLeglDisplayKHR /* display */, + cl_int* /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0; +#endif +typedef CL_API_ENTRY cl_event(CL_API_CALL* clCreateEventFromEGLSyncKHR_fn)( + cl_context context, + CLeglSyncKHR sync, + CLeglDisplayKHR display, + cl_int* errcode_ret); + +#ifdef __cplusplus +} +#endif + +#endif /* __OPENCL_CL_EGL_H */ diff --git a/sources/pvrsdk/Include/CL/cl_ext.h b/sources/pvrsdk/Include/CL/cl_ext.h new file mode 100755 index 00000000..5078e8f4 --- /dev/null +++ b/sources/pvrsdk/Include/CL/cl_ext.h @@ -0,0 +1,607 @@ 
+/*******************************************************************************
+ * Copyright (c) 2008-2015 The Khronos Group Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Materials.
+ *
+ * MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS
+ * KHRONOS STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS
+ * SPECIFICATIONS AND HEADER INFORMATION ARE LOCATED AT
+ *    https://www.khronos.org/registry/
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ ******************************************************************************/
+
+/* $Revision: 11928 $ on $Date: 2010-07-13 09:04:56 -0700 (Tue, 13 Jul 2010) $ */
+
+/* cl_ext.h contains OpenCL extensions which don't have external */
+/* (OpenGL, D3D) dependencies.                                   */
+
+#ifndef __CL_EXT_H
+#define __CL_EXT_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef __APPLE__
+    #include <OpenCL/cl.h>
+    #include <AvailabilityMacros.h>
+#else
+    #include <CL/cl.h>
+#endif
+
+/* cl_khr_fp16 extension - no extension #define since it has no functions  */
+#define CL_DEVICE_HALF_FP_CONFIG                    0x1033
+
+/* Memory object destruction
+ *
+ * Apple extension for use to manage externally allocated buffers used with cl_mem objects with CL_MEM_USE_HOST_PTR
+ *
+ * Registers a user callback function that will be called when the memory object is deleted and its resources
+ * freed. Each call to clSetMemObjectCallbackFn registers the specified user callback function on a callback
+ * stack associated with memobj. The registered user callback functions are called in the reverse order in
+ * which they were registered. The user callback functions are called and then the memory object is deleted
+ * and its resources freed. This provides a mechanism for the application (and libraries) using memobj to be
+ * notified when the memory referenced by host_ptr, specified when the memory object is created and used as
+ * the storage bits for the memory object, can be reused or freed.
+ *
+ * The application may not call CL api's with the cl_mem object passed to the pfn_notify.
+ *
+ * Please check for the "cl_APPLE_SetMemObjectDestructor" extension using clGetDeviceInfo(CL_DEVICE_EXTENSIONS)
+ * before using.
+ */
+#define cl_APPLE_SetMemObjectDestructor 1
+cl_int  CL_API_ENTRY clSetMemObjectDestructorAPPLE(  cl_mem /* memobj */,
+                                        void (* /*pfn_notify*/)( cl_mem /* memobj */, void* /*user_data*/),
+                                        void * /*user_data */ )             CL_EXT_SUFFIX__VERSION_1_0;
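+
+/*
+ * Sketch of registering a destructor callback, assuming the extension has
+ * been confirmed via CL_DEVICE_EXTENSIONS; "my_pool_release", "buf" and
+ * "host_ptr" are illustrative names:
+ *
+ *     void my_pool_release(cl_mem memobj, void *user_data)
+ *     {
+ *         free(user_data);  // host_ptr storage may be reused or freed now
+ *     }
+ *
+ *     clSetMemObjectDestructorAPPLE(buf, my_pool_release, host_ptr);
+ */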
+
+/* Context Logging Functions
+ *
+ * The next three convenience functions are intended to be used as the pfn_notify parameter to clCreateContext().
+ * Please check for the "cl_APPLE_ContextLoggingFunctions" extension using clGetDeviceInfo(CL_DEVICE_EXTENSIONS)
+ * before using.
+ *
+ * clLogMessagesToSystemLog forwards all log messages to the Apple System Logger
+ */
+#define cl_APPLE_ContextLoggingFunctions 1
+extern void CL_API_ENTRY clLogMessagesToSystemLogAPPLE(  const char * /* errstr */,
+                                            const void * /* private_info */,
+                                            size_t       /* cb */,
+                                            void *       /* user_data */ )  CL_EXT_SUFFIX__VERSION_1_0;
+
+/* clLogMessagesToStdout sends all log messages to the file descriptor stdout */
+extern void CL_API_ENTRY clLogMessagesToStdoutAPPLE(   const char * /* errstr */,
+                                          const void * /* private_info */,
+                                          size_t       /* cb */,
+                                          void *       /* user_data */ )    CL_EXT_SUFFIX__VERSION_1_0;
+
+/* clLogMessagesToStderr sends all log messages to the file descriptor stderr */
+extern void CL_API_ENTRY clLogMessagesToStderrAPPLE(   const char * /* errstr */,
+                                          const void * /* private_info */,
+                                          size_t       /* cb */,
+                                          void *       /* user_data */ )    CL_EXT_SUFFIX__VERSION_1_0;
+
+
+/************************
+* cl_khr_icd extension *
+************************/
+#define cl_khr_icd 1
+
+/* cl_platform_info                                                        */
+#define CL_PLATFORM_ICD_SUFFIX_KHR                  0x0920
+
+/* Additional Error Codes                                                  */
+#define CL_PLATFORM_NOT_FOUND_KHR                   -1001
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clIcdGetPlatformIDsKHR(cl_uint          /* num_entries */,
+                       cl_platform_id * /* platforms */,
+                       cl_uint *        /* num_platforms */);
+
+typedef CL_API_ENTRY cl_int (CL_API_CALL *clIcdGetPlatformIDsKHR_fn)(
+    cl_uint          /* num_entries */,
+    cl_platform_id * /* platforms */,
+    cl_uint *        /* num_platforms */);
+
+
+/* Extension: cl_khr_image2D_buffer
+ *
+ * This extension allows a 2D image to be created from a cl_mem buffer without a copy.
+ * The type associated with a 2D image created from a buffer in an OpenCL program is image2d_t.
+ * Both the sampler and sampler-less read_image built-in functions are supported for 2D images
+ * and 2D images created from a buffer.  Similarly, the write_image built-ins are also supported
+ * for 2D images created from a buffer.
+ *
+ * When the 2D image from buffer is created, the client must specify the width,
+ * height, image format (i.e. channel order and channel data type) and optionally the row pitch.
+ *
+ * The pitch specified must be a multiple of CL_DEVICE_IMAGE_PITCH_ALIGNMENT pixels.
+ * The base address of the buffer must be aligned to CL_DEVICE_IMAGE_BASE_ADDRESS_ALIGNMENT pixels.
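+ *
+ * A sketch of the flow with the OpenCL 1.2 core API ("ctx", "existing_buffer",
+ * sizes and pitch assumed valid and suitably aligned):
+ *
+ *     cl_image_format fmt  = { CL_RGBA, CL_UNORM_INT8 };
+ *     cl_image_desc   desc = {0};
+ *     desc.image_type      = CL_MEM_OBJECT_IMAGE2D;
+ *     desc.image_width     = width;
+ *     desc.image_height    = height;
+ *     desc.image_row_pitch = row_pitch;       // multiple of the pitch alignment
+ *     desc.buffer          = existing_buffer; // image aliases the buffer, no copy
+ *     cl_int err;
+ *     cl_mem image = clCreateImage(ctx, CL_MEM_READ_WRITE, &fmt, &desc, NULL, &err);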
+ */ + +/************************************* + * cl_khr_initalize_memory extension * + *************************************/ + +#define CL_CONTEXT_MEMORY_INITIALIZE_KHR 0x2030 + + +/************************************** + * cl_khr_terminate_context extension * + **************************************/ + +#define CL_DEVICE_TERMINATE_CAPABILITY_KHR 0x2031 +#define CL_CONTEXT_TERMINATE_KHR 0x2032 + +#define cl_khr_terminate_context 1 +extern CL_API_ENTRY cl_int CL_API_CALL clTerminateContextKHR(cl_context /* context */) CL_EXT_SUFFIX__VERSION_1_2; + +typedef CL_API_ENTRY cl_int (CL_API_CALL *clTerminateContextKHR_fn)(cl_context /* context */) CL_EXT_SUFFIX__VERSION_1_2; + + +/* + * Extension: cl_khr_spir + * + * This extension adds support to create an OpenCL program object from a + * Standard Portable Intermediate Representation (SPIR) instance + */ + +#define CL_DEVICE_SPIR_VERSIONS 0x40E0 +#define CL_PROGRAM_BINARY_TYPE_INTERMEDIATE 0x40E1 + + +/****************************************** +* cl_nv_device_attribute_query extension * +******************************************/ +/* cl_nv_device_attribute_query extension - no extension #define since it has no functions */ +#define CL_DEVICE_COMPUTE_CAPABILITY_MAJOR_NV 0x4000 +#define CL_DEVICE_COMPUTE_CAPABILITY_MINOR_NV 0x4001 +#define CL_DEVICE_REGISTERS_PER_BLOCK_NV 0x4002 +#define CL_DEVICE_WARP_SIZE_NV 0x4003 +#define CL_DEVICE_GPU_OVERLAP_NV 0x4004 +#define CL_DEVICE_KERNEL_EXEC_TIMEOUT_NV 0x4005 +#define CL_DEVICE_INTEGRATED_MEMORY_NV 0x4006 + +/********************************* +* cl_amd_device_attribute_query * +*********************************/ +#define CL_DEVICE_PROFILING_TIMER_OFFSET_AMD 0x4036 + +/********************************* +* cl_arm_printf extension +*********************************/ +#define CL_PRINTF_CALLBACK_ARM 0x40B0 +#define CL_PRINTF_BUFFERSIZE_ARM 0x40B1 + +#ifdef CL_VERSION_1_1 + /*********************************** + * cl_ext_device_fission extension * + ***********************************/ + #define cl_ext_device_fission 1 + + extern CL_API_ENTRY cl_int CL_API_CALL + clReleaseDeviceEXT( cl_device_id /*device*/ ) CL_EXT_SUFFIX__VERSION_1_1; + + typedef CL_API_ENTRY cl_int + (CL_API_CALL *clReleaseDeviceEXT_fn)( cl_device_id /*device*/ ) CL_EXT_SUFFIX__VERSION_1_1; + + extern CL_API_ENTRY cl_int CL_API_CALL + clRetainDeviceEXT( cl_device_id /*device*/ ) CL_EXT_SUFFIX__VERSION_1_1; + + typedef CL_API_ENTRY cl_int + (CL_API_CALL *clRetainDeviceEXT_fn)( cl_device_id /*device*/ ) CL_EXT_SUFFIX__VERSION_1_1; + + typedef cl_ulong cl_device_partition_property_ext; + extern CL_API_ENTRY cl_int CL_API_CALL + clCreateSubDevicesEXT( cl_device_id /*in_device*/, + const cl_device_partition_property_ext * /* properties */, + cl_uint /*num_entries*/, + cl_device_id * /*out_devices*/, + cl_uint * /*num_devices*/ ) CL_EXT_SUFFIX__VERSION_1_1; + + typedef CL_API_ENTRY cl_int + ( CL_API_CALL * clCreateSubDevicesEXT_fn)( cl_device_id /*in_device*/, + const cl_device_partition_property_ext * /* properties */, + cl_uint /*num_entries*/, + cl_device_id * /*out_devices*/, + cl_uint * /*num_devices*/ ) CL_EXT_SUFFIX__VERSION_1_1; + + /* cl_device_partition_property_ext */ + #define CL_DEVICE_PARTITION_EQUALLY_EXT 0x4050 + #define CL_DEVICE_PARTITION_BY_COUNTS_EXT 0x4051 + #define CL_DEVICE_PARTITION_BY_NAMES_EXT 0x4052 + #define CL_DEVICE_PARTITION_BY_AFFINITY_DOMAIN_EXT 0x4053 + + /* clDeviceGetInfo selectors */ + #define CL_DEVICE_PARENT_DEVICE_EXT 0x4054 + #define CL_DEVICE_PARTITION_TYPES_EXT 0x4055 + #define 
CL_DEVICE_AFFINITY_DOMAINS_EXT 0x4056 + #define CL_DEVICE_REFERENCE_COUNT_EXT 0x4057 + #define CL_DEVICE_PARTITION_STYLE_EXT 0x4058 + + /* error codes */ + #define CL_DEVICE_PARTITION_FAILED_EXT -1057 + #define CL_INVALID_PARTITION_COUNT_EXT -1058 + #define CL_INVALID_PARTITION_NAME_EXT -1059 + + /* CL_AFFINITY_DOMAINs */ + #define CL_AFFINITY_DOMAIN_L1_CACHE_EXT 0x1 + #define CL_AFFINITY_DOMAIN_L2_CACHE_EXT 0x2 + #define CL_AFFINITY_DOMAIN_L3_CACHE_EXT 0x3 + #define CL_AFFINITY_DOMAIN_L4_CACHE_EXT 0x4 + #define CL_AFFINITY_DOMAIN_NUMA_EXT 0x10 + #define CL_AFFINITY_DOMAIN_NEXT_FISSIONABLE_EXT 0x100 + + /* cl_device_partition_property_ext list terminators */ + #define CL_PROPERTIES_LIST_END_EXT ((cl_device_partition_property_ext) 0) + #define CL_PARTITION_BY_COUNTS_LIST_END_EXT ((cl_device_partition_property_ext) 0) + #define CL_PARTITION_BY_NAMES_LIST_END_EXT ((cl_device_partition_property_ext) 0 - 1) + +/********************************* +* cl_qcom_ext_host_ptr extension +*********************************/ + +#define CL_MEM_EXT_HOST_PTR_QCOM (1 << 29) + +#define CL_DEVICE_EXT_MEM_PADDING_IN_BYTES_QCOM 0x40A0 +#define CL_DEVICE_PAGE_SIZE_QCOM 0x40A1 +#define CL_IMAGE_ROW_ALIGNMENT_QCOM 0x40A2 +#define CL_IMAGE_SLICE_ALIGNMENT_QCOM 0x40A3 +#define CL_MEM_HOST_UNCACHED_QCOM 0x40A4 +#define CL_MEM_HOST_WRITEBACK_QCOM 0x40A5 +#define CL_MEM_HOST_WRITETHROUGH_QCOM 0x40A6 +#define CL_MEM_HOST_WRITE_COMBINING_QCOM 0x40A7 + +typedef cl_uint cl_image_pitch_info_qcom; + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetDeviceImageInfoQCOM(cl_device_id device, + size_t image_width, + size_t image_height, + const cl_image_format *image_format, + cl_image_pitch_info_qcom param_name, + size_t param_value_size, + void *param_value, + size_t *param_value_size_ret); + +typedef struct _cl_mem_ext_host_ptr +{ + /* Type of external memory allocation. */ + /* Legal values will be defined in layered extensions. */ + cl_uint allocation_type; + + /* Host cache policy for this external memory allocation. */ + cl_uint host_cache_policy; + +} cl_mem_ext_host_ptr; + +/********************************* +* cl_qcom_ion_host_ptr extension +*********************************/ + +#define CL_MEM_ION_HOST_PTR_QCOM 0x40A8 + +typedef struct _cl_mem_ion_host_ptr +{ + /* Type of external memory allocation. */ + /* Must be CL_MEM_ION_HOST_PTR_QCOM for ION allocations. 
*/ + cl_mem_ext_host_ptr ext_host_ptr; + + /* ION file descriptor */ + int ion_filedesc; + + /* Host pointer to the ION allocated memory */ + void* ion_hostptr; + +} cl_mem_ion_host_ptr; + +#endif /* CL_VERSION_1_1 */ + +#if defined(CL_VERSION_1_2) + +/****************************************** + * cl_img_yuv_image extension * + ******************************************/ + +/* Image formats used in clCreateImage */ +#define CL_NV21_IMG 0x40D0 +#define CL_YV12_IMG 0x40D1 + +/****************************************** + * cl_img_cached_allocations extension * + ******************************************/ + +/* Flag values used by clCreteBuffer */ +#define CL_MEM_USE_UNCACHED_CPU_MEMORY_IMG (1 << 26) +#define CL_MEM_USE_CACHED_CPU_MEMORY_IMG (1 << 27) + +/****************************************** + * cl_img_use_gralloc_ptr extension * + ******************************************/ + +/* Flag values used by clCreteBuffer */ +#define CL_MEM_USE_GRALLOC_PTR_IMG (1 << 28) + +/* To be used by clGetEventInfo: */ +#define CL_COMMAND_ACQUIRE_GRALLOC_OBJECTS_IMG 0x40D2 +#define CL_COMMAND_RELEASE_GRALLOC_OBJECTS_IMG 0x40D3 + +/* Error code from clEnqueueReleaseGrallocObjectsIMG */ +#define CL_GRALLOC_RESOURCE_NOT_ACQUIRED_IMG 0x40D4 + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueAcquireGrallocObjectsIMG(cl_command_queue /* command_queue */, + cl_uint /* num_objects */, + const cl_mem * /* mem_objects */, + cl_uint /* num_events_in_wait_list */, + const cl_event * /* event_wait_list */, + cl_event * /* event */) CL_EXT_SUFFIX__VERSION_1_2; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueReleaseGrallocObjectsIMG(cl_command_queue /* command_queue */, + cl_uint /* num_objects */, + const cl_mem * /* mem_objects */, + cl_uint /* num_events_in_wait_list */, + const cl_event * /* event_wait_list */, + cl_event * /* event */) CL_EXT_SUFFIX__VERSION_1_2; + +#endif /* CL_VERSION_1_2 */ + +#ifdef CL_VERSION_2_0 +/********************************* +* cl_khr_subgroups extension +*********************************/ +#define cl_khr_subgroups 1 + +/* cl_kernel_sub_group_info is declared in CL.h. 
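+ *
+ * Illustrative query (sketch): given "kernel", "device" and a candidate 1-D
+ * local size, ask for the maximum sub-group size; in a real application the
+ * function would be fetched via clGetExtensionFunctionAddressForPlatform.
+ *
+ *     size_t local_size = 64, max_sub_group_size = 0;
+ *     clGetKernelSubGroupInfoKHR(kernel, device,
+ *         CL_KERNEL_MAX_SUB_GROUP_SIZE_FOR_NDRANGE_KHR,
+ *         sizeof(local_size), &local_size,
+ *         sizeof(max_sub_group_size), &max_sub_group_size, NULL);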
*/ + +/* cl_kernel_sub_group_info */ +#define CL_KERNEL_MAX_SUB_GROUP_SIZE_FOR_NDRANGE_KHR 0x2033 +#define CL_KERNEL_SUB_GROUP_COUNT_FOR_NDRANGE_KHR 0x2034 + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetKernelSubGroupInfoKHR(cl_kernel /* in_kernel */, + cl_device_id /*in_device*/, + cl_kernel_sub_group_info /* param_name */, + size_t /*input_value_size*/, + const void * /*input_value*/, + size_t /*param_value_size*/, + void* /*param_value*/, + size_t* /*param_value_size_ret*/ ) CL_EXT_SUFFIX__VERSION_2_0_DEPRECATED; + +typedef CL_API_ENTRY cl_int + ( CL_API_CALL * clGetKernelSubGroupInfoKHR_fn)(cl_kernel /* in_kernel */, + cl_device_id /*in_device*/, + cl_kernel_sub_group_info /* param_name */, + size_t /*input_value_size*/, + const void * /*input_value*/, + size_t /*param_value_size*/, + void* /*param_value*/, + size_t* /*param_value_size_ret*/ ) CL_EXT_SUFFIX__VERSION_2_0_DEPRECATED; +#endif /* CL_VERSION_2_0 */ + +#ifdef CL_VERSION_2_1 +/********************************* +* cl_khr_priority_hints extension +*********************************/ +#define cl_khr_priority_hints 1 + +typedef cl_uint cl_queue_priority_khr; + +/* cl_command_queue_properties */ +#define CL_QUEUE_PRIORITY_KHR 0x1096 + +/* cl_queue_priority_khr */ +#define CL_QUEUE_PRIORITY_HIGH_KHR (1<<0) +#define CL_QUEUE_PRIORITY_MED_KHR (1<<1) +#define CL_QUEUE_PRIORITY_LOW_KHR (1<<2) + +#endif /* CL_VERSION_2_1 */ + +#ifdef CL_VERSION_2_1 +/********************************* +* cl_khr_throttle_hints extension +*********************************/ +#define cl_khr_throttle_hints 1 + +typedef cl_uint cl_queue_throttle_khr; + +/* cl_command_queue_properties */ +#define CL_QUEUE_THROTTLE_KHR 0x1097 + +/* cl_queue_throttle_khr */ +#define CL_QUEUE_THROTTLE_HIGH_KHR (1<<0) +#define CL_QUEUE_THROTTLE_MED_KHR (1<<1) +#define CL_QUEUE_THROTTLE_LOW_KHR (1<<2) + +#endif /* CL_VERSION_2_1 */ + +#ifdef CL_VERSION_2_2 +/********************************* +* cl_khr_subgroup_named_barrier +*********************************/ +#define cl_khr_subgroup_named_barrier 1 + +/* cl_device_info */ +#define CL_DEVICE_MAX_NAMED_BARRIER_COUNT_KHR 0x2035 + +#endif /* CL_VERSION_2_2 */ + +/********************************** + * cl_arm_import_memory extension * + **********************************/ + +#ifdef CL_VERSION_1_0 + +typedef intptr_t cl_import_properties_arm; + +/* Default and valid proporties name for cl_arm_import_memory */ +#define CL_IMPORT_TYPE_ARM 0x40B2 + +/* Host process memory type default value for CL_IMPORT_TYPE_ARM property */ +#define CL_IMPORT_TYPE_HOST_ARM 0x40B3 + +/* DMA BUF memory type value for CL_IMPORT_TYPE_ARM property */ +#define CL_IMPORT_TYPE_DMA_BUF_ARM 0x40B4 + +/* Secure DMA BUF memory type value for CL_IMPORT_TYPE_ARM property */ +#define CL_IMPORT_TYPE_SECURE_ARM 0x40B5 + +/* This extension adds a new function that allows for direct memory import into + * OpenCL via the clImportMemoryARM function. + * + * Memory imported through this interface will be mapped into the device's page + * tables directly, providing zero copy access. It will never fall back to copy + * operations and aliased buffers. + * + * Types of memory supported for import are specified as additional extension + * strings. + * + * This extension produces cl_mem allocations which are compatible with all other + * users of cl_mem in the standard API. + * + * This extension maps pages with the same properties as the normal buffer creation + * function clCreateBuffer. 
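+ *
+ * Illustrative host-memory import (sketch; "context" and "host_buf" are
+ * assumed to exist and to satisfy the implementation's requirements, and
+ * the zero-terminated property list mirrors other CL property arrays):
+ *
+ *     cl_import_properties_arm props[] = {
+ *         CL_IMPORT_TYPE_ARM, CL_IMPORT_TYPE_HOST_ARM, 0
+ *     };
+ *     cl_int err;
+ *     cl_mem mem = clImportMemoryARM(context, CL_MEM_READ_WRITE,
+ *                                    props, host_buf, size, &err);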
+ */ +extern CL_API_ENTRY cl_mem CL_API_CALL +clImportMemoryARM( cl_context context, + cl_mem_flags flags, + const cl_import_properties_arm *properties, + void *memory, + size_t size, + cl_int *errcode_ret) CL_EXT_SUFFIX__VERSION_1_0; + + +#endif /* CL_VERSION_1_0 */ + +/****************************************** + * cl_arm_shared_virtual_memory extension * + ******************************************/ + +#ifdef CL_VERSION_1_2 + +/* Used by clGetDeviceInfo */ +#define CL_DEVICE_SVM_CAPABILITIES_ARM 0x40B6 + +/* Used by clGetMemObjectInfo */ +#define CL_MEM_USES_SVM_POINTER_ARM 0x40B7 + +/* Used by clSetKernelExecInfoARM: */ +#define CL_KERNEL_EXEC_INFO_SVM_PTRS_ARM 0x40B8 +#define CL_KERNEL_EXEC_INFO_SVM_FINE_GRAIN_SYSTEM_ARM 0x40B9 + +/* To be used by clGetEventInfo: */ +#define CL_COMMAND_SVM_FREE_ARM 0x40BA +#define CL_COMMAND_SVM_MEMCPY_ARM 0x40BB +#define CL_COMMAND_SVM_MEMFILL_ARM 0x40BC +#define CL_COMMAND_SVM_MAP_ARM 0x40BD +#define CL_COMMAND_SVM_UNMAP_ARM 0x40BE + +/* Flag values returned by clGetDeviceInfo with CL_DEVICE_SVM_CAPABILITIES_ARM as the param_name. */ +#define CL_DEVICE_SVM_COARSE_GRAIN_BUFFER_ARM (1 << 0) +#define CL_DEVICE_SVM_FINE_GRAIN_BUFFER_ARM (1 << 1) +#define CL_DEVICE_SVM_FINE_GRAIN_SYSTEM_ARM (1 << 2) +#define CL_DEVICE_SVM_ATOMICS_ARM (1 << 3) + +/* Flag values used by clSVMAllocARM: */ +#define CL_MEM_SVM_FINE_GRAIN_BUFFER_ARM (1 << 10) +#define CL_MEM_SVM_ATOMICS_ARM (1 << 11) + +typedef cl_bitfield cl_svm_mem_flags_arm; +typedef cl_uint cl_kernel_exec_info_arm; +typedef cl_bitfield cl_device_svm_capabilities_arm; + +extern CL_API_ENTRY void * CL_API_CALL +clSVMAllocARM(cl_context /* context */, + cl_svm_mem_flags_arm /* flags */, + size_t /* size */, + cl_uint /* alignment */) CL_EXT_SUFFIX__VERSION_1_2; + +extern CL_API_ENTRY void CL_API_CALL +clSVMFreeARM(cl_context /* context */, + void * /* svm_pointer */) CL_EXT_SUFFIX__VERSION_1_2; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueSVMFreeARM(cl_command_queue /* command_queue */, + cl_uint /* num_svm_pointers */, + void *[] /* svm_pointers[] */, + void (CL_CALLBACK * /*pfn_free_func*/)(cl_command_queue /* queue */, + cl_uint /* num_svm_pointers */, + void *[] /* svm_pointers[] */, + void * /* user_data */), + void * /* user_data */, + cl_uint /* num_events_in_wait_list */, + const cl_event * /* event_wait_list */, + cl_event * /* event */) CL_EXT_SUFFIX__VERSION_1_2; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueSVMMemcpyARM(cl_command_queue /* command_queue */, + cl_bool /* blocking_copy */, + void * /* dst_ptr */, + const void * /* src_ptr */, + size_t /* size */, + cl_uint /* num_events_in_wait_list */, + const cl_event * /* event_wait_list */, + cl_event * /* event */) CL_EXT_SUFFIX__VERSION_1_2; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueSVMMemFillARM(cl_command_queue /* command_queue */, + void * /* svm_ptr */, + const void * /* pattern */, + size_t /* pattern_size */, + size_t /* size */, + cl_uint /* num_events_in_wait_list */, + const cl_event * /* event_wait_list */, + cl_event * /* event */) CL_EXT_SUFFIX__VERSION_1_2; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueSVMMapARM(cl_command_queue /* command_queue */, + cl_bool /* blocking_map */, + cl_map_flags /* flags */, + void * /* svm_ptr */, + size_t /* size */, + cl_uint /* num_events_in_wait_list */, + const cl_event * /* event_wait_list */, + cl_event * /* event */) CL_EXT_SUFFIX__VERSION_1_2; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueSVMUnmapARM(cl_command_queue /* command_queue */, + void * /* 
svm_ptr */, + cl_uint /* num_events_in_wait_list */, + const cl_event * /* event_wait_list */, + cl_event * /* event */) CL_EXT_SUFFIX__VERSION_1_2; + +extern CL_API_ENTRY cl_int CL_API_CALL +clSetKernelArgSVMPointerARM(cl_kernel /* kernel */, + cl_uint /* arg_index */, + const void * /* arg_value */) CL_EXT_SUFFIX__VERSION_1_2; +extern CL_API_ENTRY cl_int CL_API_CALL +clSetKernelExecInfoARM(cl_kernel /* kernel */, + cl_kernel_exec_info_arm /* param_name */, + size_t /* param_value_size */, + const void * /* param_value */) CL_EXT_SUFFIX__VERSION_1_2; + +#endif /* CL_VERSION_1_2 */ + +#ifdef __cplusplus +} +#endif + + +#endif /* __CL_EXT_H */ diff --git a/sources/pvrsdk/Include/CL/cl_gl.h b/sources/pvrsdk/Include/CL/cl_gl.h new file mode 100755 index 00000000..4fbac655 --- /dev/null +++ b/sources/pvrsdk/Include/CL/cl_gl.h @@ -0,0 +1,167 @@ +/********************************************************************************** + * Copyright (c) 2008-2015 The Khronos Group Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and/or associated documentation files (the + * "Materials"), to deal in the Materials without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Materials, and to + * permit persons to whom the Materials are furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Materials. + * + * MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS + * KHRONOS STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS + * SPECIFICATIONS AND HEADER INFORMATION ARE LOCATED AT + * https://www.khronos.org/registry/ + * + * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS. 
 + **********************************************************************************/ + +#ifndef __OPENCL_CL_GL_H +#define __OPENCL_CL_GL_H + +#ifdef __APPLE__ +#include <OpenCL/cl.h> +#else +#include <CL/cl.h> +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +typedef cl_uint cl_gl_object_type; +typedef cl_uint cl_gl_texture_info; +typedef cl_uint cl_gl_platform_info; +typedef struct __GLsync *cl_GLsync; + +/* cl_gl_object_type = 0x2000 - 0x200F enum values are currently taken */ +#define CL_GL_OBJECT_BUFFER 0x2000 +#define CL_GL_OBJECT_TEXTURE2D 0x2001 +#define CL_GL_OBJECT_TEXTURE3D 0x2002 +#define CL_GL_OBJECT_RENDERBUFFER 0x2003 +#define CL_GL_OBJECT_TEXTURE2D_ARRAY 0x200E +#define CL_GL_OBJECT_TEXTURE1D 0x200F +#define CL_GL_OBJECT_TEXTURE1D_ARRAY 0x2010 +#define CL_GL_OBJECT_TEXTURE_BUFFER 0x2011 + +/* cl_gl_texture_info */ +#define CL_GL_TEXTURE_TARGET 0x2004 +#define CL_GL_MIPMAP_LEVEL 0x2005 +#define CL_GL_NUM_SAMPLES 0x2012 + +#ifndef CL_NO_PROTOTYPES +extern CL_API_ENTRY cl_mem CL_API_CALL +clCreateFromGLBuffer(cl_context /* context */, + cl_mem_flags /* flags */, + cl_GLuint /* bufobj */, + int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_mem CL_API_CALL +clCreateFromGLTexture(cl_context /* context */, + cl_mem_flags /* flags */, + cl_GLenum /* target */, + cl_GLint /* miplevel */, + cl_GLuint /* texture */, + cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_2; + +extern CL_API_ENTRY cl_mem CL_API_CALL +clCreateFromGLRenderbuffer(cl_context /* context */, + cl_mem_flags /* flags */, + cl_GLuint /* renderbuffer */, + cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetGLObjectInfo(cl_mem /* memobj */, + cl_gl_object_type * /* gl_object_type */, + cl_GLuint * /* gl_object_name */) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetGLTextureInfo(cl_mem /* memobj */, + cl_gl_texture_info /* param_name */, + size_t /* param_value_size */, + void * /* param_value */, + size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueAcquireGLObjects(cl_command_queue /* command_queue */, + cl_uint /* num_objects */, + const cl_mem * /* mem_objects */, + cl_uint /* num_events_in_wait_list */, + const cl_event * /* event_wait_list */, + cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0; + +extern CL_API_ENTRY cl_int CL_API_CALL +clEnqueueReleaseGLObjects(cl_command_queue /* command_queue */, + cl_uint /* num_objects */, + const cl_mem * /* mem_objects */, + cl_uint /* num_events_in_wait_list */, + const cl_event * /* event_wait_list */, + cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0; + + +/* Deprecated OpenCL 1.1 APIs */ +extern CL_API_ENTRY CL_EXT_PREFIX__VERSION_1_1_DEPRECATED cl_mem CL_API_CALL +clCreateFromGLTexture2D(cl_context /* context */, + cl_mem_flags /* flags */, + cl_GLenum /* target */, + cl_GLint /* miplevel */, + cl_GLuint /* texture */, + cl_int * /* errcode_ret */) CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED; + +extern CL_API_ENTRY CL_EXT_PREFIX__VERSION_1_1_DEPRECATED cl_mem CL_API_CALL +clCreateFromGLTexture3D(cl_context /* context */, + cl_mem_flags /* flags */, + cl_GLenum /* target */, + cl_GLint /* miplevel */, + cl_GLuint /* texture */, + cl_int * /* errcode_ret */) CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED; +#endif +/* cl_khr_gl_sharing extension */ + +#define cl_khr_gl_sharing 1 + +typedef cl_uint cl_gl_context_info; + +/* Additional Error Codes */ +#define CL_INVALID_GL_SHAREGROUP_REFERENCE_KHR -1000 + +/*
cl_gl_context_info */ +#define CL_CURRENT_DEVICE_FOR_GL_CONTEXT_KHR 0x2006 +#define CL_DEVICES_FOR_GL_CONTEXT_KHR 0x2007 + +/* Additional cl_context_properties */ +#define CL_GL_CONTEXT_KHR 0x2008 +#define CL_EGL_DISPLAY_KHR 0x2009 +#define CL_GLX_DISPLAY_KHR 0x200A +#define CL_WGL_HDC_KHR 0x200B +#define CL_CGL_SHAREGROUP_KHR 0x200C + +extern CL_API_ENTRY cl_int CL_API_CALL +clGetGLContextInfoKHR(const cl_context_properties * /* properties */, + cl_gl_context_info /* param_name */, + size_t /* param_value_size */, + void * /* param_value */, + size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0; + +typedef CL_API_ENTRY cl_int (CL_API_CALL *clGetGLContextInfoKHR_fn)( + const cl_context_properties * properties, + cl_gl_context_info param_name, + size_t param_value_size, + void * param_value, + size_t * param_value_size_ret); + +#ifdef __cplusplus +} +#endif + +#endif /* __OPENCL_CL_GL_H */ diff --git a/sources/pvrsdk/Include/CL/cl_gl_ext.h b/sources/pvrsdk/Include/CL/cl_gl_ext.h new file mode 100755 index 00000000..e3c14c64 --- /dev/null +++ b/sources/pvrsdk/Include/CL/cl_gl_ext.h @@ -0,0 +1,74 @@ +/********************************************************************************** + * Copyright (c) 2008-2015 The Khronos Group Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and/or associated documentation files (the + * "Materials"), to deal in the Materials without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Materials, and to + * permit persons to whom the Materials are furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Materials. + * + * MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS + * KHRONOS STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS + * SPECIFICATIONS AND HEADER INFORMATION ARE LOCATED AT + * https://www.khronos.org/registry/ + * + * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS. + **********************************************************************************/ + +/* $Revision: 11708 $ on $Date: 2010-06-13 23:36:24 -0700 (Sun, 13 Jun 2010) $ */ + +/* cl_gl_ext.h contains vendor (non-KHR) OpenCL extensions which have */ +/* OpenGL dependencies. */ + +#ifndef __OPENCL_CL_GL_EXT_H +#define __OPENCL_CL_GL_EXT_H + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef __APPLE__ + #include <OpenCL/cl_gl.h> +#else + #include <CL/cl_gl.h> +#endif + +/* + * For each extension, follow this template + * cl_VEN_extname extension */ /* #define cl_VEN_extname 1 + * ... define new types, if any + * ... define new tokens, if any + * ... define new APIs, if any + * + * If you need GLtypes here, mirror them with a cl_GLtype, rather than including a GL header + * This allows us to avoid having to decide whether to include GL headers or GLES here.
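 + * + * For instance (an illustrative sketch only; these exact typedefs actually live in cl_platform.h in this SDK, not in this header): + * typedef unsigned int cl_GLuint; + * typedef int cl_GLint;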
 + */ + +/* + * cl_khr_gl_event extension + * See section 9.9 in the OpenCL 1.1 spec for more information + */ +#define CL_COMMAND_GL_FENCE_SYNC_OBJECT_KHR 0x200D + +extern CL_API_ENTRY cl_event CL_API_CALL +clCreateEventFromGLsyncKHR(cl_context /* context */, + cl_GLsync /* cl_GLsync */, + cl_int * /* errcode_ret */) CL_EXT_SUFFIX__VERSION_1_1; + +#ifdef __cplusplus +} +#endif + +#endif /* __OPENCL_CL_GL_EXT_H */ diff --git a/sources/pvrsdk/Include/CL/cl_platform.h b/sources/pvrsdk/Include/CL/cl_platform.h new file mode 100755 index 00000000..10d99edc --- /dev/null +++ b/sources/pvrsdk/Include/CL/cl_platform.h @@ -0,0 +1,1468 @@ +/********************************************************************************** + * Copyright (c) 2008-2015 The Khronos Group Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and/or associated documentation files (the + * "Materials"), to deal in the Materials without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Materials, and to + * permit persons to whom the Materials are furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Materials. + * + * MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS + * KHRONOS STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS + * SPECIFICATIONS AND HEADER INFORMATION ARE LOCATED AT + * https://www.khronos.org/registry/ + * + * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS. + **********************************************************************************/ + +/* $Revision: 11803 $ on $Date: 2010-06-25 10:02:12 -0700 (Fri, 25 Jun 2010) $ */ + +#ifndef __CL_PLATFORM_H +#define __CL_PLATFORM_H + +#ifdef __APPLE__ + /* Contains #defines for AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER below */ + #include <AvailabilityMacros.h> +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +#if defined(_WIN32) + #define CL_API_ENTRY + #define CL_API_CALL __stdcall + #define CL_CALLBACK __stdcall +#else + #define CL_API_ENTRY + #define CL_API_CALL + #define CL_CALLBACK +#endif + +/* + * Deprecation flags refer to the last version of the header in which the + * feature was not deprecated. + * + * E.g. VERSION_1_1_DEPRECATED means the feature is present in 1.1 without + * deprecation but is deprecated in versions later than 1.1.
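 + * + * As an illustrative sketch (clFooEXT is a hypothetical name, not a real entry point), a declaration deprecated after version 1.1 carries both macros: + * + * extern CL_API_ENTRY CL_EXT_PREFIX__VERSION_1_1_DEPRECATED cl_int CL_API_CALL + * clFooEXT(cl_context context) CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED; + * + * (cl_gl.h in this SDK applies the same pattern to clCreateFromGLTexture2D.)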
+ */ + +#ifdef __APPLE__ + #define CL_EXTENSION_WEAK_LINK __attribute__((weak_import)) + #define CL_API_SUFFIX__VERSION_1_0 AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER + #define CL_EXT_SUFFIX__VERSION_1_0 CL_EXTENSION_WEAK_LINK AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER + #define CL_API_SUFFIX__VERSION_1_1 AVAILABLE_MAC_OS_X_VERSION_10_7_AND_LATER + #define GCL_API_SUFFIX__VERSION_1_1 AVAILABLE_MAC_OS_X_VERSION_10_7_AND_LATER + #define CL_EXT_SUFFIX__VERSION_1_1 CL_EXTENSION_WEAK_LINK AVAILABLE_MAC_OS_X_VERSION_10_7_AND_LATER + #define CL_EXT_SUFFIX__VERSION_1_0_DEPRECATED CL_EXTENSION_WEAK_LINK AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER_BUT_DEPRECATED_IN_MAC_OS_X_VERSION_10_7 + + #ifdef AVAILABLE_MAC_OS_X_VERSION_10_8_AND_LATER + #define CL_API_SUFFIX__VERSION_1_2 AVAILABLE_MAC_OS_X_VERSION_10_8_AND_LATER + #define GCL_API_SUFFIX__VERSION_1_2 AVAILABLE_MAC_OS_X_VERSION_10_8_AND_LATER + #define CL_EXT_SUFFIX__VERSION_1_2 CL_EXTENSION_WEAK_LINK AVAILABLE_MAC_OS_X_VERSION_10_8_AND_LATER + #define CL_EXT_PREFIX__VERSION_1_1_DEPRECATED + #define CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED CL_EXTENSION_WEAK_LINK AVAILABLE_MAC_OS_X_VERSION_10_7_AND_LATER_BUT_DEPRECATED_IN_MAC_OS_X_VERSION_10_8 + #else + #warning This path should never happen outside of internal operating system development. AvailabilityMacros do not function correctly here! + #define CL_API_SUFFIX__VERSION_1_2 AVAILABLE_MAC_OS_X_VERSION_10_7_AND_LATER + #define GCL_API_SUFFIX__VERSION_1_2 AVAILABLE_MAC_OS_X_VERSION_10_7_AND_LATER + #define CL_EXT_SUFFIX__VERSION_1_2 CL_EXTENSION_WEAK_LINK AVAILABLE_MAC_OS_X_VERSION_10_7_AND_LATER + #define CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED CL_EXTENSION_WEAK_LINK AVAILABLE_MAC_OS_X_VERSION_10_7_AND_LATER + #endif + + #define CL_API_SUFFIX__VERSION_2_0 + #define CL_EXT_SUFFIX__VERSION_2_0 + #define CL_API_SUFFIX__VERSION_2_1 + #define CL_EXT_SUFFIX__VERSION_2_1 + #define CL_API_SUFFIX__VERSION_2_2 + #define CL_EXT_SUFFIX__VERSION_2_2 + #define CL_EXT_SUFFIX__VERSION_1_2_DEPRECATED + #define CL_EXT_PREFIX__VERSION_1_2_DEPRECATED + #define CL_EXT_SUFFIX__VERSION_2_0_DEPRECATED +#else + #define CL_EXTENSION_WEAK_LINK + #define CL_API_SUFFIX__VERSION_1_0 + #define CL_EXT_SUFFIX__VERSION_1_0 + #define CL_API_SUFFIX__VERSION_1_1 + #define CL_EXT_SUFFIX__VERSION_1_1 + #define CL_API_SUFFIX__VERSION_1_2 + #define CL_EXT_SUFFIX__VERSION_1_2 + #define CL_API_SUFFIX__VERSION_2_0 + #define CL_EXT_SUFFIX__VERSION_2_0 + #define CL_API_SUFFIX__VERSION_2_1 + #define CL_EXT_SUFFIX__VERSION_2_1 + #define CL_API_SUFFIX__VERSION_2_2 + #define CL_EXT_SUFFIX__VERSION_2_2 + + #ifdef __GNUC__ + #ifdef CL_USE_DEPRECATED_OPENCL_1_0_APIS + #define CL_EXT_SUFFIX__VERSION_1_0_DEPRECATED + #define CL_EXT_PREFIX__VERSION_1_0_DEPRECATED + #else + #define CL_EXT_SUFFIX__VERSION_1_0_DEPRECATED __attribute__((deprecated)) + #define CL_EXT_PREFIX__VERSION_1_0_DEPRECATED + #endif + + #ifdef CL_USE_DEPRECATED_OPENCL_1_1_APIS + #define CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED + #define CL_EXT_PREFIX__VERSION_1_1_DEPRECATED + #else + #define CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED __attribute__((deprecated)) + #define CL_EXT_PREFIX__VERSION_1_1_DEPRECATED + #endif + + #ifdef CL_USE_DEPRECATED_OPENCL_1_2_APIS + #define CL_EXT_SUFFIX__VERSION_1_2_DEPRECATED + #define CL_EXT_PREFIX__VERSION_1_2_DEPRECATED + #else + #define CL_EXT_SUFFIX__VERSION_1_2_DEPRECATED __attribute__((deprecated)) + #define CL_EXT_PREFIX__VERSION_1_2_DEPRECATED + #endif + + #ifdef CL_USE_DEPRECATED_OPENCL_2_0_APIS + #define CL_EXT_SUFFIX__VERSION_2_0_DEPRECATED + #define 
CL_EXT_PREFIX__VERSION_2_0_DEPRECATED + #else + #define CL_EXT_SUFFIX__VERSION_2_0_DEPRECATED __attribute__((deprecated)) + #define CL_EXT_PREFIX__VERSION_2_0_DEPRECATED + #endif + + #ifdef CL_USE_DEPRECATED_OPENCL_2_1_APIS + #define CL_EXT_SUFFIX__VERSION_2_1_DEPRECATED + #define CL_EXT_PREFIX__VERSION_2_1_DEPRECATED + #else + #define CL_EXT_SUFFIX__VERSION_2_1_DEPRECATED __attribute__((deprecated)) + #define CL_EXT_PREFIX__VERSION_2_1_DEPRECATED + #endif + #elif defined(_WIN32) + #ifdef CL_USE_DEPRECATED_OPENCL_1_0_APIS + #define CL_EXT_SUFFIX__VERSION_1_0_DEPRECATED + #define CL_EXT_PREFIX__VERSION_1_0_DEPRECATED + #else + #define CL_EXT_SUFFIX__VERSION_1_0_DEPRECATED + #define CL_EXT_PREFIX__VERSION_1_0_DEPRECATED __declspec(deprecated) + #endif + + #ifdef CL_USE_DEPRECATED_OPENCL_1_1_APIS + #define CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED + #define CL_EXT_PREFIX__VERSION_1_1_DEPRECATED + #else + #define CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED + #define CL_EXT_PREFIX__VERSION_1_1_DEPRECATED __declspec(deprecated) + #endif + + #ifdef CL_USE_DEPRECATED_OPENCL_1_2_APIS + #define CL_EXT_SUFFIX__VERSION_1_2_DEPRECATED + #define CL_EXT_PREFIX__VERSION_1_2_DEPRECATED + #else + #define CL_EXT_SUFFIX__VERSION_1_2_DEPRECATED + #define CL_EXT_PREFIX__VERSION_1_2_DEPRECATED __declspec(deprecated) + #endif + + #ifdef CL_USE_DEPRECATED_OPENCL_2_0_APIS + #define CL_EXT_SUFFIX__VERSION_2_0_DEPRECATED + #define CL_EXT_PREFIX__VERSION_2_0_DEPRECATED + #else + #define CL_EXT_SUFFIX__VERSION_2_0_DEPRECATED + #define CL_EXT_PREFIX__VERSION_2_0_DEPRECATED __declspec(deprecated) + #endif + + #ifdef CL_USE_DEPRECATED_OPENCL_2_1_APIS + #define CL_EXT_SUFFIX__VERSION_2_1_DEPRECATED + #define CL_EXT_PREFIX__VERSION_2_1_DEPRECATED + #else + #define CL_EXT_SUFFIX__VERSION_2_1_DEPRECATED + #define CL_EXT_PREFIX__VERSION_2_1_DEPRECATED __declspec(deprecated) + #endif + #else + #define CL_EXT_SUFFIX__VERSION_1_0_DEPRECATED + #define CL_EXT_PREFIX__VERSION_1_0_DEPRECATED + + #define CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED + #define CL_EXT_PREFIX__VERSION_1_1_DEPRECATED + + #define CL_EXT_SUFFIX__VERSION_1_2_DEPRECATED + #define CL_EXT_PREFIX__VERSION_1_2_DEPRECATED + + #define CL_EXT_SUFFIX__VERSION_2_0_DEPRECATED + #define CL_EXT_PREFIX__VERSION_2_0_DEPRECATED + + #define CL_EXT_SUFFIX__VERSION_2_1_DEPRECATED + #define CL_EXT_PREFIX__VERSION_2_1_DEPRECATED + #endif +#endif + +#if (defined (_WIN32) && defined(_MSC_VER)) + +/* scalar types */ +typedef signed __int8 cl_char; +typedef unsigned __int8 cl_uchar; +typedef signed __int16 cl_short; +typedef unsigned __int16 cl_ushort; +typedef signed __int32 cl_int; +typedef unsigned __int32 cl_uint; +typedef signed __int64 cl_long; +typedef unsigned __int64 cl_ulong; + +typedef unsigned __int16 cl_half; +typedef float cl_float; +typedef double cl_double; + +/* Macro names and corresponding values defined by OpenCL */ +#define CL_CHAR_BIT 8 +#define CL_SCHAR_MAX 127 +#define CL_SCHAR_MIN (-127-1) +#define CL_CHAR_MAX CL_SCHAR_MAX +#define CL_CHAR_MIN CL_SCHAR_MIN +#define CL_UCHAR_MAX 255 +#define CL_SHRT_MAX 32767 +#define CL_SHRT_MIN (-32767-1) +#define CL_USHRT_MAX 65535 +#define CL_INT_MAX 2147483647 +#define CL_INT_MIN (-2147483647-1) +#define CL_UINT_MAX 0xffffffffU +#define CL_LONG_MAX ((cl_long) 0x7FFFFFFFFFFFFFFFLL) +#define CL_LONG_MIN ((cl_long) -0x7FFFFFFFFFFFFFFFLL - 1LL) +#define CL_ULONG_MAX ((cl_ulong) 0xFFFFFFFFFFFFFFFFULL) + +#define CL_FLT_DIG 6 +#define CL_FLT_MANT_DIG 24 +#define CL_FLT_MAX_10_EXP +38 +#define CL_FLT_MAX_EXP +128 +#define CL_FLT_MIN_10_EXP -37 
+#define CL_FLT_MIN_EXP -125 +#define CL_FLT_RADIX 2 +#define CL_FLT_MAX 340282346638528859811704183484516925440.0f +#define CL_FLT_MIN 1.175494350822287507969e-38f +#define CL_FLT_EPSILON 1.1920928955078125e-7f + +#define CL_HALF_DIG 3 +#define CL_HALF_MANT_DIG 11 +#define CL_HALF_MAX_10_EXP +4 +#define CL_HALF_MAX_EXP +16 +#define CL_HALF_MIN_10_EXP -4 +#define CL_HALF_MIN_EXP -13 +#define CL_HALF_RADIX 2 +#define CL_HALF_MAX 65504.0f +#define CL_HALF_MIN 6.103515625e-05f +#define CL_HALF_EPSILON 9.765625e-04f + +#define CL_DBL_DIG 15 +#define CL_DBL_MANT_DIG 53 +#define CL_DBL_MAX_10_EXP +308 +#define CL_DBL_MAX_EXP +1024 +#define CL_DBL_MIN_10_EXP -307 +#define CL_DBL_MIN_EXP -1021 +#define CL_DBL_RADIX 2 +#define CL_DBL_MAX 179769313486231570814527423731704356798070567525844996598917476803157260780028538760589558632766878171540458953514382464234321326889464182768467546703537516986049910576551282076245490090389328944075868508455133942304583236903222948165808559332123348274797826204144723168738177180919299881250404026184124858368.0 +#define CL_DBL_MIN 2.225073858507201383090e-308 +#define CL_DBL_EPSILON 2.220446049250313080847e-16 + +#define CL_M_E 2.718281828459045090796 +#define CL_M_LOG2E 1.442695040888963387005 +#define CL_M_LOG10E 0.434294481903251816668 +#define CL_M_LN2 0.693147180559945286227 +#define CL_M_LN10 2.302585092994045901094 +#define CL_M_PI 3.141592653589793115998 +#define CL_M_PI_2 1.570796326794896557999 +#define CL_M_PI_4 0.785398163397448278999 +#define CL_M_1_PI 0.318309886183790691216 +#define CL_M_2_PI 0.636619772367581382433 +#define CL_M_2_SQRTPI 1.128379167095512558561 +#define CL_M_SQRT2 1.414213562373095145475 +#define CL_M_SQRT1_2 0.707106781186547572737 + +#define CL_M_E_F 2.71828174591064f +#define CL_M_LOG2E_F 1.44269502162933f +#define CL_M_LOG10E_F 0.43429449200630f +#define CL_M_LN2_F 0.69314718246460f +#define CL_M_LN10_F 2.30258512496948f +#define CL_M_PI_F 3.14159274101257f +#define CL_M_PI_2_F 1.57079637050629f +#define CL_M_PI_4_F 0.78539818525314f +#define CL_M_1_PI_F 0.31830987334251f +#define CL_M_2_PI_F 0.63661974668503f +#define CL_M_2_SQRTPI_F 1.12837922573090f +#define CL_M_SQRT2_F 1.41421353816986f +#define CL_M_SQRT1_2_F 0.70710676908493f + +#define CL_NAN (CL_INFINITY - CL_INFINITY) +#define CL_HUGE_VALF ((cl_float) 1e50) +#define CL_HUGE_VAL ((cl_double) 1e500) +#define CL_MAXFLOAT CL_FLT_MAX +#define CL_INFINITY CL_HUGE_VALF + +#else + +#include <stdint.h> + +/* scalar types */ +typedef int8_t cl_char; +typedef uint8_t cl_uchar; +typedef int16_t cl_short __attribute__((aligned(2))); +typedef uint16_t cl_ushort __attribute__((aligned(2))); +typedef int32_t cl_int __attribute__((aligned(4))); +typedef uint32_t cl_uint __attribute__((aligned(4))); +typedef int64_t cl_long __attribute__((aligned(8))); +typedef uint64_t cl_ulong __attribute__((aligned(8))); + +typedef uint16_t cl_half __attribute__((aligned(2))); +typedef float cl_float __attribute__((aligned(4))); +typedef double cl_double __attribute__((aligned(8))); + +/* Macro names and corresponding values defined by OpenCL */ +#define CL_CHAR_BIT 8 +#define CL_SCHAR_MAX 127 +#define CL_SCHAR_MIN (-127-1) +#define CL_CHAR_MAX CL_SCHAR_MAX +#define CL_CHAR_MIN CL_SCHAR_MIN +#define CL_UCHAR_MAX 255 +#define CL_SHRT_MAX 32767 +#define CL_SHRT_MIN (-32767-1) +#define CL_USHRT_MAX 65535 +#define CL_INT_MAX 2147483647 +#define CL_INT_MIN (-2147483647-1) +#define CL_UINT_MAX 0xffffffffU +#define CL_LONG_MAX ((cl_long) 0x7FFFFFFFFFFFFFFFLL) +#define CL_LONG_MIN ((cl_long) -0x7FFFFFFFFFFFFFFFLL - 1LL)
+#define CL_ULONG_MAX ((cl_ulong) 0xFFFFFFFFFFFFFFFFULL) + +#define CL_FLT_DIG 6 +#define CL_FLT_MANT_DIG 24 +#define CL_FLT_MAX_10_EXP +38 +#define CL_FLT_MAX_EXP +128 +#define CL_FLT_MIN_10_EXP -37 +#define CL_FLT_MIN_EXP -125 +#define CL_FLT_RADIX 2 +#define CL_FLT_MAX 340282346638528859811704183484516925440.0f +#define CL_FLT_MIN 1.175494350822287507969e-38f +#define CL_FLT_EPSILON 1.1920928955078125e-7f + +#define CL_HALF_DIG 3 +#define CL_HALF_MANT_DIG 11 +#define CL_HALF_MAX_10_EXP +4 +#define CL_HALF_MAX_EXP +16 +#define CL_HALF_MIN_10_EXP -4 +#define CL_HALF_MIN_EXP -13 +#define CL_HALF_RADIX 2 +#define CL_HALF_MAX 65504.0f +#define CL_HALF_MIN 6.103515625e-05f +#define CL_HALF_EPSILON 9.765625e-04f + +#define CL_DBL_DIG 15 +#define CL_DBL_MANT_DIG 53 +#define CL_DBL_MAX_10_EXP +308 +#define CL_DBL_MAX_EXP +1024 +#define CL_DBL_MIN_10_EXP -307 +#define CL_DBL_MIN_EXP -1021 +#define CL_DBL_RADIX 2 +#define CL_DBL_MAX 179769313486231570814527423731704356798070567525844996598917476803157260780028538760589558632766878171540458953514382464234321326889464182768467546703537516986049910576551282076245490090389328944075868508455133942304583236903222948165808559332123348274797826204144723168738177180919299881250404026184124858368.0 +#define CL_DBL_MIN 2.225073858507201383090e-308 +#define CL_DBL_EPSILON 2.220446049250313080847e-16 + +#define CL_M_E 2.718281828459045090796 +#define CL_M_LOG2E 1.442695040888963387005 +#define CL_M_LOG10E 0.434294481903251816668 +#define CL_M_LN2 0.693147180559945286227 +#define CL_M_LN10 2.302585092994045901094 +#define CL_M_PI 3.141592653589793115998 +#define CL_M_PI_2 1.570796326794896557999 +#define CL_M_PI_4 0.785398163397448278999 +#define CL_M_1_PI 0.318309886183790691216 +#define CL_M_2_PI 0.636619772367581382433 +#define CL_M_2_SQRTPI 1.128379167095512558561 +#define CL_M_SQRT2 1.414213562373095145475 +#define CL_M_SQRT1_2 0.707106781186547572737 + +#define CL_M_E_F 2.71828174591064f +#define CL_M_LOG2E_F 1.44269502162933f +#define CL_M_LOG10E_F 0.43429449200630f +#define CL_M_LN2_F 0.69314718246460f +#define CL_M_LN10_F 2.30258512496948f +#define CL_M_PI_F 3.14159274101257f +#define CL_M_PI_2_F 1.57079637050629f +#define CL_M_PI_4_F 0.78539818525314f +#define CL_M_1_PI_F 0.31830987334251f +#define CL_M_2_PI_F 0.63661974668503f +#define CL_M_2_SQRTPI_F 1.12837922573090f +#define CL_M_SQRT2_F 1.41421353816986f +#define CL_M_SQRT1_2_F 0.70710676908493f + +#if defined( __GNUC__ ) + #define CL_HUGE_VALF __builtin_huge_valf() + #define CL_HUGE_VAL __builtin_huge_val() + #define CL_NAN __builtin_nanf( "" ) +#else + #define CL_HUGE_VALF ((cl_float) 1e50) + #define CL_HUGE_VAL ((cl_double) 1e500) + float nanf( const char * ); + #define CL_NAN nanf( "" ) +#endif +#define CL_MAXFLOAT CL_FLT_MAX +#define CL_INFINITY CL_HUGE_VALF + +#endif + +#include <stddef.h> + +/* Mirror types to GL types. Mirror types allow us to avoid deciding which headers to load based on whether we are using GL or GLES here. */ +typedef unsigned int cl_GLuint; +typedef int cl_GLint; +typedef unsigned int cl_GLenum; + +/* + * Vector types + * + * Note: OpenCL requires that all types be naturally aligned. + * This means that vector types must be naturally aligned. + * For example, a vector of four floats must be aligned to + * a 16 byte boundary (calculated as 4 * the natural 4-byte + * alignment of the float). The alignment qualifiers here + * will only function properly if your compiler supports them + * and if you don't actively work to defeat them.
 For example, + * in order for a cl_float4 to be 16 byte aligned in a struct, + * the start of the struct must itself be 16-byte aligned. + * + * Maintaining proper alignment is the user's responsibility. + */ + +/* Define basic vector types */ +#if defined( __VEC__ ) + #include <altivec.h> /* may be omitted depending on compiler. AltiVec spec provides no way to detect whether the header is required. */ + typedef vector unsigned char __cl_uchar16; + typedef vector signed char __cl_char16; + typedef vector unsigned short __cl_ushort8; + typedef vector signed short __cl_short8; + typedef vector unsigned int __cl_uint4; + typedef vector signed int __cl_int4; + typedef vector float __cl_float4; + #define __CL_UCHAR16__ 1 + #define __CL_CHAR16__ 1 + #define __CL_USHORT8__ 1 + #define __CL_SHORT8__ 1 + #define __CL_UINT4__ 1 + #define __CL_INT4__ 1 + #define __CL_FLOAT4__ 1 +#endif + +#if defined( __SSE__ ) + #if defined( __MINGW64__ ) + #include <intrin.h> + #else + #include <xmmintrin.h> + #endif + #if defined( __GNUC__ ) + typedef float __cl_float4 __attribute__((vector_size(16))); + #else + typedef __m128 __cl_float4; + #endif + #define __CL_FLOAT4__ 1 +#endif + +#if defined( __SSE2__ ) + #if defined( __MINGW64__ ) + #include <intrin.h> + #else + #include <emmintrin.h> + #endif + #if defined( __GNUC__ ) + typedef cl_uchar __cl_uchar16 __attribute__((vector_size(16))); + typedef cl_char __cl_char16 __attribute__((vector_size(16))); + typedef cl_ushort __cl_ushort8 __attribute__((vector_size(16))); + typedef cl_short __cl_short8 __attribute__((vector_size(16))); + typedef cl_uint __cl_uint4 __attribute__((vector_size(16))); + typedef cl_int __cl_int4 __attribute__((vector_size(16))); + typedef cl_ulong __cl_ulong2 __attribute__((vector_size(16))); + typedef cl_long __cl_long2 __attribute__((vector_size(16))); + typedef cl_double __cl_double2 __attribute__((vector_size(16))); + #else + typedef __m128i __cl_uchar16; + typedef __m128i __cl_char16; + typedef __m128i __cl_ushort8; + typedef __m128i __cl_short8; + typedef __m128i __cl_uint4; + typedef __m128i __cl_int4; + typedef __m128i __cl_ulong2; + typedef __m128i __cl_long2; + typedef __m128d __cl_double2; + #endif + #define __CL_UCHAR16__ 1 + #define __CL_CHAR16__ 1 + #define __CL_USHORT8__ 1 + #define __CL_SHORT8__ 1 + #define __CL_INT4__ 1 + #define __CL_UINT4__ 1 + #define __CL_ULONG2__ 1 + #define __CL_LONG2__ 1 + #define __CL_DOUBLE2__ 1 +#endif + +#if defined( __MMX__ ) + #include <mmintrin.h> + #if defined( __GNUC__ ) + typedef cl_uchar __cl_uchar8 __attribute__((vector_size(8))); + typedef cl_char __cl_char8 __attribute__((vector_size(8))); + typedef cl_ushort __cl_ushort4 __attribute__((vector_size(8))); + typedef cl_short __cl_short4 __attribute__((vector_size(8))); + typedef cl_uint __cl_uint2 __attribute__((vector_size(8))); + typedef cl_int __cl_int2 __attribute__((vector_size(8))); + typedef cl_ulong __cl_ulong1 __attribute__((vector_size(8))); + typedef cl_long __cl_long1 __attribute__((vector_size(8))); + typedef cl_float __cl_float2 __attribute__((vector_size(8))); + #else + typedef __m64 __cl_uchar8; + typedef __m64 __cl_char8; + typedef __m64 __cl_ushort4; + typedef __m64 __cl_short4; + typedef __m64 __cl_uint2; + typedef __m64 __cl_int2; + typedef __m64 __cl_ulong1; + typedef __m64 __cl_long1; + typedef __m64 __cl_float2; + #endif + #define __CL_UCHAR8__ 1 + #define __CL_CHAR8__ 1 + #define __CL_USHORT4__ 1 + #define __CL_SHORT4__ 1 + #define __CL_INT2__ 1 + #define __CL_UINT2__ 1 + #define __CL_ULONG1__ 1 + #define __CL_LONG1__ 1 + #define __CL_FLOAT2__ 1 +#endif + +#if defined( __AVX__ ) + #if
defined( __MINGW64__ ) + #include <intrin.h> + #else + #include <immintrin.h> + #endif + #if defined( __GNUC__ ) + typedef cl_float __cl_float8 __attribute__((vector_size(32))); + typedef cl_double __cl_double4 __attribute__((vector_size(32))); + #else + typedef __m256 __cl_float8; + typedef __m256d __cl_double4; + #endif + #define __CL_FLOAT8__ 1 + #define __CL_DOUBLE4__ 1 +#endif + +/* Define capabilities for anonymous struct members. */ +#if !defined(__cplusplus) && defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L +#define __CL_HAS_ANON_STRUCT__ 1 +#define __CL_ANON_STRUCT__ +#elif defined( __GNUC__) && ! defined( __STRICT_ANSI__ ) +#define __CL_HAS_ANON_STRUCT__ 1 +#define __CL_ANON_STRUCT__ __extension__ +#elif defined( _WIN32) && defined(_MSC_VER) + #if _MSC_VER >= 1500 + /* Microsoft Developer Studio 2008 supports anonymous structs, but + * complains by default. */ + #define __CL_HAS_ANON_STRUCT__ 1 + #define __CL_ANON_STRUCT__ + /* Disable warning C4201: nonstandard extension used : nameless + * struct/union */ + #pragma warning( push ) + #pragma warning( disable : 4201 ) + #endif +#else +#define __CL_HAS_ANON_STRUCT__ 0 +#define __CL_ANON_STRUCT__ +#endif + +/* Define alignment keys */ +#if defined( __GNUC__ ) + #define CL_ALIGNED(_x) __attribute__ ((aligned(_x))) +#elif defined( _WIN32) && (_MSC_VER) + /* Alignment keys neutered on windows because MSVC can't swallow function arguments with alignment requirements */ + /* http://msdn.microsoft.com/en-us/library/373ak2y1%28VS.71%29.aspx */ + /* #include <crtdefs.h> */ + /* #define CL_ALIGNED(_x) _CRT_ALIGN(_x) */ + #define CL_ALIGNED(_x) +#else + #warning Need to implement some method to align data here + #define CL_ALIGNED(_x) +#endif + +/* Indicate whether .xyzw, .s0123 and .hi.lo are supported */ +#if __CL_HAS_ANON_STRUCT__ + /* .xyzw and .s0123...{f|F} are supported */ + #define CL_HAS_NAMED_VECTOR_FIELDS 1 + /* .hi and .lo are supported */ + #define CL_HAS_HI_LO_VECTOR_FIELDS 1 +#endif + +/* Define cl_vector types */ + +/* ---- cl_charn ---- */ +typedef union +{ + cl_char CL_ALIGNED(2) s[2]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_char x, y; }; + __CL_ANON_STRUCT__ struct{ cl_char s0, s1; }; + __CL_ANON_STRUCT__ struct{ cl_char lo, hi; }; +#endif +#if defined( __CL_CHAR2__) + __cl_char2 v2; +#endif +}cl_char2; + +typedef union +{ + cl_char CL_ALIGNED(4) s[4]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_char x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_char s0, s1, s2, s3; }; + __CL_ANON_STRUCT__ struct{ cl_char2 lo, hi; }; +#endif +#if defined( __CL_CHAR2__) + __cl_char2 v2[2]; +#endif +#if defined( __CL_CHAR4__) + __cl_char4 v4; +#endif +}cl_char4; + +/* cl_char3 is identical in size, alignment and behavior to cl_char4. See section 6.1.5.
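 + * In practice this means sizeof(cl_char3) == sizeof(cl_char4) and the fourth component acts as padding; the same note applies to every 3-component typedef below.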
*/ +typedef cl_char4 cl_char3; + +typedef union +{ + cl_char CL_ALIGNED(8) s[8]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_char x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_char s0, s1, s2, s3, s4, s5, s6, s7; }; + __CL_ANON_STRUCT__ struct{ cl_char4 lo, hi; }; +#endif +#if defined( __CL_CHAR2__) + __cl_char2 v2[4]; +#endif +#if defined( __CL_CHAR4__) + __cl_char4 v4[2]; +#endif +#if defined( __CL_CHAR8__ ) + __cl_char8 v8; +#endif +}cl_char8; + +typedef union +{ + cl_char CL_ALIGNED(16) s[16]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_char x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; }; + __CL_ANON_STRUCT__ struct{ cl_char s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; }; + __CL_ANON_STRUCT__ struct{ cl_char8 lo, hi; }; +#endif +#if defined( __CL_CHAR2__) + __cl_char2 v2[8]; +#endif +#if defined( __CL_CHAR4__) + __cl_char4 v4[4]; +#endif +#if defined( __CL_CHAR8__ ) + __cl_char8 v8[2]; +#endif +#if defined( __CL_CHAR16__ ) + __cl_char16 v16; +#endif +}cl_char16; + + +/* ---- cl_ucharn ---- */ +typedef union +{ + cl_uchar CL_ALIGNED(2) s[2]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_uchar x, y; }; + __CL_ANON_STRUCT__ struct{ cl_uchar s0, s1; }; + __CL_ANON_STRUCT__ struct{ cl_uchar lo, hi; }; +#endif +#if defined( __cl_uchar2__) + __cl_uchar2 v2; +#endif +}cl_uchar2; + +typedef union +{ + cl_uchar CL_ALIGNED(4) s[4]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_uchar x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_uchar s0, s1, s2, s3; }; + __CL_ANON_STRUCT__ struct{ cl_uchar2 lo, hi; }; +#endif +#if defined( __CL_UCHAR2__) + __cl_uchar2 v2[2]; +#endif +#if defined( __CL_UCHAR4__) + __cl_uchar4 v4; +#endif +}cl_uchar4; + +/* cl_uchar3 is identical in size, alignment and behavior to cl_uchar4. See section 6.1.5. 
*/ +typedef cl_uchar4 cl_uchar3; + +typedef union +{ + cl_uchar CL_ALIGNED(8) s[8]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_uchar x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_uchar s0, s1, s2, s3, s4, s5, s6, s7; }; + __CL_ANON_STRUCT__ struct{ cl_uchar4 lo, hi; }; +#endif +#if defined( __CL_UCHAR2__) + __cl_uchar2 v2[4]; +#endif +#if defined( __CL_UCHAR4__) + __cl_uchar4 v4[2]; +#endif +#if defined( __CL_UCHAR8__ ) + __cl_uchar8 v8; +#endif +}cl_uchar8; + +typedef union +{ + cl_uchar CL_ALIGNED(16) s[16]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_uchar x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; }; + __CL_ANON_STRUCT__ struct{ cl_uchar s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; }; + __CL_ANON_STRUCT__ struct{ cl_uchar8 lo, hi; }; +#endif +#if defined( __CL_UCHAR2__) + __cl_uchar2 v2[8]; +#endif +#if defined( __CL_UCHAR4__) + __cl_uchar4 v4[4]; +#endif +#if defined( __CL_UCHAR8__ ) + __cl_uchar8 v8[2]; +#endif +#if defined( __CL_UCHAR16__ ) + __cl_uchar16 v16; +#endif +}cl_uchar16; + + +/* ---- cl_shortn ---- */ +typedef union +{ + cl_short CL_ALIGNED(4) s[2]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_short x, y; }; + __CL_ANON_STRUCT__ struct{ cl_short s0, s1; }; + __CL_ANON_STRUCT__ struct{ cl_short lo, hi; }; +#endif +#if defined( __CL_SHORT2__) + __cl_short2 v2; +#endif +}cl_short2; + +typedef union +{ + cl_short CL_ALIGNED(8) s[4]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_short x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_short s0, s1, s2, s3; }; + __CL_ANON_STRUCT__ struct{ cl_short2 lo, hi; }; +#endif +#if defined( __CL_SHORT2__) + __cl_short2 v2[2]; +#endif +#if defined( __CL_SHORT4__) + __cl_short4 v4; +#endif +}cl_short4; + +/* cl_short3 is identical in size, alignment and behavior to cl_short4. See section 6.1.5. 
*/ +typedef cl_short4 cl_short3; + +typedef union +{ + cl_short CL_ALIGNED(16) s[8]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_short x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_short s0, s1, s2, s3, s4, s5, s6, s7; }; + __CL_ANON_STRUCT__ struct{ cl_short4 lo, hi; }; +#endif +#if defined( __CL_SHORT2__) + __cl_short2 v2[4]; +#endif +#if defined( __CL_SHORT4__) + __cl_short4 v4[2]; +#endif +#if defined( __CL_SHORT8__ ) + __cl_short8 v8; +#endif +}cl_short8; + +typedef union +{ + cl_short CL_ALIGNED(32) s[16]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_short x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; }; + __CL_ANON_STRUCT__ struct{ cl_short s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; }; + __CL_ANON_STRUCT__ struct{ cl_short8 lo, hi; }; +#endif +#if defined( __CL_SHORT2__) + __cl_short2 v2[8]; +#endif +#if defined( __CL_SHORT4__) + __cl_short4 v4[4]; +#endif +#if defined( __CL_SHORT8__ ) + __cl_short8 v8[2]; +#endif +#if defined( __CL_SHORT16__ ) + __cl_short16 v16; +#endif +}cl_short16; + + +/* ---- cl_ushortn ---- */ +typedef union +{ + cl_ushort CL_ALIGNED(4) s[2]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_ushort x, y; }; + __CL_ANON_STRUCT__ struct{ cl_ushort s0, s1; }; + __CL_ANON_STRUCT__ struct{ cl_ushort lo, hi; }; +#endif +#if defined( __CL_USHORT2__) + __cl_ushort2 v2; +#endif +}cl_ushort2; + +typedef union +{ + cl_ushort CL_ALIGNED(8) s[4]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_ushort x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_ushort s0, s1, s2, s3; }; + __CL_ANON_STRUCT__ struct{ cl_ushort2 lo, hi; }; +#endif +#if defined( __CL_USHORT2__) + __cl_ushort2 v2[2]; +#endif +#if defined( __CL_USHORT4__) + __cl_ushort4 v4; +#endif +}cl_ushort4; + +/* cl_ushort3 is identical in size, alignment and behavior to cl_ushort4. See section 6.1.5. 
*/ +typedef cl_ushort4 cl_ushort3; + +typedef union +{ + cl_ushort CL_ALIGNED(16) s[8]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_ushort x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_ushort s0, s1, s2, s3, s4, s5, s6, s7; }; + __CL_ANON_STRUCT__ struct{ cl_ushort4 lo, hi; }; +#endif +#if defined( __CL_USHORT2__) + __cl_ushort2 v2[4]; +#endif +#if defined( __CL_USHORT4__) + __cl_ushort4 v4[2]; +#endif +#if defined( __CL_USHORT8__ ) + __cl_ushort8 v8; +#endif +}cl_ushort8; + +typedef union +{ + cl_ushort CL_ALIGNED(32) s[16]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_ushort x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; }; + __CL_ANON_STRUCT__ struct{ cl_ushort s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; }; + __CL_ANON_STRUCT__ struct{ cl_ushort8 lo, hi; }; +#endif +#if defined( __CL_USHORT2__) + __cl_ushort2 v2[8]; +#endif +#if defined( __CL_USHORT4__) + __cl_ushort4 v4[4]; +#endif +#if defined( __CL_USHORT8__ ) + __cl_ushort8 v8[2]; +#endif +#if defined( __CL_USHORT16__ ) + __cl_ushort16 v16; +#endif +}cl_ushort16; + + +/* ---- cl_halfn ---- */ +typedef union +{ + cl_half CL_ALIGNED(4) s[2]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_half x, y; }; + __CL_ANON_STRUCT__ struct{ cl_half s0, s1; }; + __CL_ANON_STRUCT__ struct{ cl_half lo, hi; }; +#endif +#if defined( __CL_HALF2__) + __cl_half2 v2; +#endif +}cl_half2; + +typedef union +{ + cl_half CL_ALIGNED(8) s[4]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_half x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_half s0, s1, s2, s3; }; + __CL_ANON_STRUCT__ struct{ cl_half2 lo, hi; }; +#endif +#if defined( __CL_HALF2__) + __cl_half2 v2[2]; +#endif +#if defined( __CL_HALF4__) + __cl_half4 v4; +#endif +}cl_half4; + +/* cl_half3 is identical in size, alignment and behavior to cl_half4. See section 6.1.5. 
*/ +typedef cl_half4 cl_half3; + +typedef union +{ + cl_half CL_ALIGNED(16) s[8]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_half x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_half s0, s1, s2, s3, s4, s5, s6, s7; }; + __CL_ANON_STRUCT__ struct{ cl_half4 lo, hi; }; +#endif +#if defined( __CL_HALF2__) + __cl_half2 v2[4]; +#endif +#if defined( __CL_HALF4__) + __cl_half4 v4[2]; +#endif +#if defined( __CL_HALF8__ ) + __cl_half8 v8; +#endif +}cl_half8; + +typedef union +{ + cl_half CL_ALIGNED(32) s[16]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_half x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; }; + __CL_ANON_STRUCT__ struct{ cl_half s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; }; + __CL_ANON_STRUCT__ struct{ cl_half8 lo, hi; }; +#endif +#if defined( __CL_HALF2__) + __cl_half2 v2[8]; +#endif +#if defined( __CL_HALF4__) + __cl_half4 v4[4]; +#endif +#if defined( __CL_HALF8__ ) + __cl_half8 v8[2]; +#endif +#if defined( __CL_HALF16__ ) + __cl_half16 v16; +#endif +}cl_half16; + +/* ---- cl_intn ---- */ +typedef union +{ + cl_int CL_ALIGNED(8) s[2]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_int x, y; }; + __CL_ANON_STRUCT__ struct{ cl_int s0, s1; }; + __CL_ANON_STRUCT__ struct{ cl_int lo, hi; }; +#endif +#if defined( __CL_INT2__) + __cl_int2 v2; +#endif +}cl_int2; + +typedef union +{ + cl_int CL_ALIGNED(16) s[4]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_int x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_int s0, s1, s2, s3; }; + __CL_ANON_STRUCT__ struct{ cl_int2 lo, hi; }; +#endif +#if defined( __CL_INT2__) + __cl_int2 v2[2]; +#endif +#if defined( __CL_INT4__) + __cl_int4 v4; +#endif +}cl_int4; + +/* cl_int3 is identical in size, alignment and behavior to cl_int4. See section 6.1.5. 
*/ +typedef cl_int4 cl_int3; + +typedef union +{ + cl_int CL_ALIGNED(32) s[8]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_int x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_int s0, s1, s2, s3, s4, s5, s6, s7; }; + __CL_ANON_STRUCT__ struct{ cl_int4 lo, hi; }; +#endif +#if defined( __CL_INT2__) + __cl_int2 v2[4]; +#endif +#if defined( __CL_INT4__) + __cl_int4 v4[2]; +#endif +#if defined( __CL_INT8__ ) + __cl_int8 v8; +#endif +}cl_int8; + +typedef union +{ + cl_int CL_ALIGNED(64) s[16]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_int x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; }; + __CL_ANON_STRUCT__ struct{ cl_int s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; }; + __CL_ANON_STRUCT__ struct{ cl_int8 lo, hi; }; +#endif +#if defined( __CL_INT2__) + __cl_int2 v2[8]; +#endif +#if defined( __CL_INT4__) + __cl_int4 v4[4]; +#endif +#if defined( __CL_INT8__ ) + __cl_int8 v8[2]; +#endif +#if defined( __CL_INT16__ ) + __cl_int16 v16; +#endif +}cl_int16; + + +/* ---- cl_uintn ---- */ +typedef union +{ + cl_uint CL_ALIGNED(8) s[2]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_uint x, y; }; + __CL_ANON_STRUCT__ struct{ cl_uint s0, s1; }; + __CL_ANON_STRUCT__ struct{ cl_uint lo, hi; }; +#endif +#if defined( __CL_UINT2__) + __cl_uint2 v2; +#endif +}cl_uint2; + +typedef union +{ + cl_uint CL_ALIGNED(16) s[4]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_uint x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_uint s0, s1, s2, s3; }; + __CL_ANON_STRUCT__ struct{ cl_uint2 lo, hi; }; +#endif +#if defined( __CL_UINT2__) + __cl_uint2 v2[2]; +#endif +#if defined( __CL_UINT4__) + __cl_uint4 v4; +#endif +}cl_uint4; + +/* cl_uint3 is identical in size, alignment and behavior to cl_uint4. See section 6.1.5. 
*/ +typedef cl_uint4 cl_uint3; + +typedef union +{ + cl_uint CL_ALIGNED(32) s[8]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_uint x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_uint s0, s1, s2, s3, s4, s5, s6, s7; }; + __CL_ANON_STRUCT__ struct{ cl_uint4 lo, hi; }; +#endif +#if defined( __CL_UINT2__) + __cl_uint2 v2[4]; +#endif +#if defined( __CL_UINT4__) + __cl_uint4 v4[2]; +#endif +#if defined( __CL_UINT8__ ) + __cl_uint8 v8; +#endif +}cl_uint8; + +typedef union +{ + cl_uint CL_ALIGNED(64) s[16]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_uint x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; }; + __CL_ANON_STRUCT__ struct{ cl_uint s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; }; + __CL_ANON_STRUCT__ struct{ cl_uint8 lo, hi; }; +#endif +#if defined( __CL_UINT2__) + __cl_uint2 v2[8]; +#endif +#if defined( __CL_UINT4__) + __cl_uint4 v4[4]; +#endif +#if defined( __CL_UINT8__ ) + __cl_uint8 v8[2]; +#endif +#if defined( __CL_UINT16__ ) + __cl_uint16 v16; +#endif +}cl_uint16; + +/* ---- cl_longn ---- */ +typedef union +{ + cl_long CL_ALIGNED(16) s[2]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_long x, y; }; + __CL_ANON_STRUCT__ struct{ cl_long s0, s1; }; + __CL_ANON_STRUCT__ struct{ cl_long lo, hi; }; +#endif +#if defined( __CL_LONG2__) + __cl_long2 v2; +#endif +}cl_long2; + +typedef union +{ + cl_long CL_ALIGNED(32) s[4]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_long x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_long s0, s1, s2, s3; }; + __CL_ANON_STRUCT__ struct{ cl_long2 lo, hi; }; +#endif +#if defined( __CL_LONG2__) + __cl_long2 v2[2]; +#endif +#if defined( __CL_LONG4__) + __cl_long4 v4; +#endif +}cl_long4; + +/* cl_long3 is identical in size, alignment and behavior to cl_long4. See section 6.1.5. 
*/ +typedef cl_long4 cl_long3; + +typedef union +{ + cl_long CL_ALIGNED(64) s[8]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_long x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_long s0, s1, s2, s3, s4, s5, s6, s7; }; + __CL_ANON_STRUCT__ struct{ cl_long4 lo, hi; }; +#endif +#if defined( __CL_LONG2__) + __cl_long2 v2[4]; +#endif +#if defined( __CL_LONG4__) + __cl_long4 v4[2]; +#endif +#if defined( __CL_LONG8__ ) + __cl_long8 v8; +#endif +}cl_long8; + +typedef union +{ + cl_long CL_ALIGNED(128) s[16]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_long x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; }; + __CL_ANON_STRUCT__ struct{ cl_long s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; }; + __CL_ANON_STRUCT__ struct{ cl_long8 lo, hi; }; +#endif +#if defined( __CL_LONG2__) + __cl_long2 v2[8]; +#endif +#if defined( __CL_LONG4__) + __cl_long4 v4[4]; +#endif +#if defined( __CL_LONG8__ ) + __cl_long8 v8[2]; +#endif +#if defined( __CL_LONG16__ ) + __cl_long16 v16; +#endif +}cl_long16; + + +/* ---- cl_ulongn ---- */ +typedef union +{ + cl_ulong CL_ALIGNED(16) s[2]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_ulong x, y; }; + __CL_ANON_STRUCT__ struct{ cl_ulong s0, s1; }; + __CL_ANON_STRUCT__ struct{ cl_ulong lo, hi; }; +#endif +#if defined( __CL_ULONG2__) + __cl_ulong2 v2; +#endif +}cl_ulong2; + +typedef union +{ + cl_ulong CL_ALIGNED(32) s[4]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_ulong x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_ulong s0, s1, s2, s3; }; + __CL_ANON_STRUCT__ struct{ cl_ulong2 lo, hi; }; +#endif +#if defined( __CL_ULONG2__) + __cl_ulong2 v2[2]; +#endif +#if defined( __CL_ULONG4__) + __cl_ulong4 v4; +#endif +}cl_ulong4; + +/* cl_ulong3 is identical in size, alignment and behavior to cl_ulong4. See section 6.1.5. 
*/ +typedef cl_ulong4 cl_ulong3; + +typedef union +{ + cl_ulong CL_ALIGNED(64) s[8]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_ulong x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_ulong s0, s1, s2, s3, s4, s5, s6, s7; }; + __CL_ANON_STRUCT__ struct{ cl_ulong4 lo, hi; }; +#endif +#if defined( __CL_ULONG2__) + __cl_ulong2 v2[4]; +#endif +#if defined( __CL_ULONG4__) + __cl_ulong4 v4[2]; +#endif +#if defined( __CL_ULONG8__ ) + __cl_ulong8 v8; +#endif +}cl_ulong8; + +typedef union +{ + cl_ulong CL_ALIGNED(128) s[16]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_ulong x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; }; + __CL_ANON_STRUCT__ struct{ cl_ulong s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; }; + __CL_ANON_STRUCT__ struct{ cl_ulong8 lo, hi; }; +#endif +#if defined( __CL_ULONG2__) + __cl_ulong2 v2[8]; +#endif +#if defined( __CL_ULONG4__) + __cl_ulong4 v4[4]; +#endif +#if defined( __CL_ULONG8__ ) + __cl_ulong8 v8[2]; +#endif +#if defined( __CL_ULONG16__ ) + __cl_ulong16 v16; +#endif +}cl_ulong16; + + +/* --- cl_floatn ---- */ + +typedef union +{ + cl_float CL_ALIGNED(8) s[2]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_float x, y; }; + __CL_ANON_STRUCT__ struct{ cl_float s0, s1; }; + __CL_ANON_STRUCT__ struct{ cl_float lo, hi; }; +#endif +#if defined( __CL_FLOAT2__) + __cl_float2 v2; +#endif +}cl_float2; + +typedef union +{ + cl_float CL_ALIGNED(16) s[4]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_float x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_float s0, s1, s2, s3; }; + __CL_ANON_STRUCT__ struct{ cl_float2 lo, hi; }; +#endif +#if defined( __CL_FLOAT2__) + __cl_float2 v2[2]; +#endif +#if defined( __CL_FLOAT4__) + __cl_float4 v4; +#endif +}cl_float4; + +/* cl_float3 is identical in size, alignment and behavior to cl_float4. See section 6.1.5. 
*/ +typedef cl_float4 cl_float3; + +typedef union +{ + cl_float CL_ALIGNED(32) s[8]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_float x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_float s0, s1, s2, s3, s4, s5, s6, s7; }; + __CL_ANON_STRUCT__ struct{ cl_float4 lo, hi; }; +#endif +#if defined( __CL_FLOAT2__) + __cl_float2 v2[4]; +#endif +#if defined( __CL_FLOAT4__) + __cl_float4 v4[2]; +#endif +#if defined( __CL_FLOAT8__ ) + __cl_float8 v8; +#endif +}cl_float8; + +typedef union +{ + cl_float CL_ALIGNED(64) s[16]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_float x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; }; + __CL_ANON_STRUCT__ struct{ cl_float s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; }; + __CL_ANON_STRUCT__ struct{ cl_float8 lo, hi; }; +#endif +#if defined( __CL_FLOAT2__) + __cl_float2 v2[8]; +#endif +#if defined( __CL_FLOAT4__) + __cl_float4 v4[4]; +#endif +#if defined( __CL_FLOAT8__ ) + __cl_float8 v8[2]; +#endif +#if defined( __CL_FLOAT16__ ) + __cl_float16 v16; +#endif +}cl_float16; + +/* --- cl_doublen ---- */ + +typedef union +{ + cl_double CL_ALIGNED(16) s[2]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_double x, y; }; + __CL_ANON_STRUCT__ struct{ cl_double s0, s1; }; + __CL_ANON_STRUCT__ struct{ cl_double lo, hi; }; +#endif +#if defined( __CL_DOUBLE2__) + __cl_double2 v2; +#endif +}cl_double2; + +typedef union +{ + cl_double CL_ALIGNED(32) s[4]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_double x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_double s0, s1, s2, s3; }; + __CL_ANON_STRUCT__ struct{ cl_double2 lo, hi; }; +#endif +#if defined( __CL_DOUBLE2__) + __cl_double2 v2[2]; +#endif +#if defined( __CL_DOUBLE4__) + __cl_double4 v4; +#endif +}cl_double4; + +/* cl_double3 is identical in size, alignment and behavior to cl_double4. See section 6.1.5. */ +typedef cl_double4 cl_double3; + +typedef union +{ + cl_double CL_ALIGNED(64) s[8]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_double x, y, z, w; }; + __CL_ANON_STRUCT__ struct{ cl_double s0, s1, s2, s3, s4, s5, s6, s7; }; + __CL_ANON_STRUCT__ struct{ cl_double4 lo, hi; }; +#endif +#if defined( __CL_DOUBLE2__) + __cl_double2 v2[4]; +#endif +#if defined( __CL_DOUBLE4__) + __cl_double4 v4[2]; +#endif +#if defined( __CL_DOUBLE8__ ) + __cl_double8 v8; +#endif +}cl_double8; + +typedef union +{ + cl_double CL_ALIGNED(128) s[16]; +#if __CL_HAS_ANON_STRUCT__ + __CL_ANON_STRUCT__ struct{ cl_double x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; }; + __CL_ANON_STRUCT__ struct{ cl_double s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; }; + __CL_ANON_STRUCT__ struct{ cl_double8 lo, hi; }; +#endif +#if defined( __CL_DOUBLE2__) + __cl_double2 v2[8]; +#endif +#if defined( __CL_DOUBLE4__) + __cl_double4 v4[4]; +#endif +#if defined( __CL_DOUBLE8__ ) + __cl_double8 v8[2]; +#endif +#if defined( __CL_DOUBLE16__ ) + __cl_double16 v16; +#endif +}cl_double16; + +/* Macro to facilitate debugging + * Usage: + * Place CL_PROGRAM_STRING_DEBUG_INFO on the line before the first line of your source. 
+ * The first line ends with: CL_PROGRAM_STRING_DEBUG_INFO \" + * Each line thereafter of OpenCL C source must end with: \n\ + * The last line ends in "; + * + * Example: + * + * const char *my_program = CL_PROGRAM_STRING_DEBUG_INFO "\ + * kernel void foo( int a, float * b ) \n\ + * { \n\ + * // my comment \n\ + * *b[ get_global_id(0)] = a; \n\ + * } \n\ + * "; + * + * This should correctly set up the line, (column) and file information for your source + * string so you can do source level debugging. + */ +#define __CL_STRINGIFY( _x ) # _x +#define _CL_STRINGIFY( _x ) __CL_STRINGIFY( _x ) +#define CL_PROGRAM_STRING_DEBUG_INFO "#line " _CL_STRINGIFY(__LINE__) " \"" __FILE__ "\" \n\n" + +#ifdef __cplusplus +} +#endif + +#undef __CL_HAS_ANON_STRUCT__ +#undef __CL_ANON_STRUCT__ +#if defined( _WIN32) && defined(_MSC_VER) + #if _MSC_VER >=1500 + #pragma warning( pop ) + #endif +#endif + +#endif /* __CL_PLATFORM_H */ diff --git a/sources/pvrsdk/Include/CL/opencl.h b/sources/pvrsdk/Include/CL/opencl.h new file mode 100755 index 00000000..bef4d212 --- /dev/null +++ b/sources/pvrsdk/Include/CL/opencl.h @@ -0,0 +1,60 @@ +/******************************************************************************* + * Copyright (c) 2008-2015 The Khronos Group Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and/or associated documentation files (the + * "Materials"), to deal in the Materials without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Materials, and to + * permit persons to whom the Materials are furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Materials. + * + * MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS + * KHRONOS STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS + * SPECIFICATIONS AND HEADER INFORMATION ARE LOCATED AT + * https://www.khronos.org/registry/ + * + * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS. 
 + ******************************************************************************/ + +/* $Revision: 11708 $ on $Date: 2010-06-13 23:36:24 -0700 (Sun, 13 Jun 2010) $ */ + +#ifndef __OPENCL_H +#define __OPENCL_H + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef __APPLE__ +#ifndef TARGET_OS_IPHONE +#include <OpenCL/cl.h> +#include <OpenCL/cl_gl.h> +#include <OpenCL/cl_gl_ext.h> +#include <OpenCL/cl_ext.h> +#include +#endif +#else + +#include <CL/cl.h> +#include <CL/cl_gl.h> +#include <CL/cl_gl_ext.h> +#include <CL/cl_ext.h> + +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* __OPENCL_H */ + diff --git a/sources/pvrsdk/Include/DynamicEgl.h b/sources/pvrsdk/Include/DynamicEgl.h new file mode 100755 index 00000000..f5018145 --- /dev/null +++ b/sources/pvrsdk/Include/DynamicEgl.h @@ -0,0 +1,1190 @@ +#pragma once +#ifdef __APPLE__ +#include "TargetConditionals.h" +#endif +#if TARGET_OS_IPHONE +#ifdef __OBJC__ +#import <OpenGLES/EAGL.h> +#else +class EAGLContext; +#endif +#else +#if defined( _WIN32 ) +#define WIN32_LEAN_AND_MEAN +#define NOMINMAX +#elif defined(WAYLAND) +#include <wayland-egl.h> +#endif +#define EGL_NO_PROTOTYPES +#include "EGL/egl.h" +#include "EGL/eglext.h" +#include +#include +#include "pvr_openlib.h" + +namespace egl { + namespace internal { +#ifdef _WIN32 + static const char* libName = "libEGL.dll"; +#elif defined(__APPLE__) + static const char* libName = "libEGL.dylib"; +#else + static const char* libName = "libEGL.so"; +#endif + } +} + +namespace egl { +namespace internal { +namespace EglFuncName { +enum EglFunctionName +{ + ChooseConfig, CopyBuffers, CreateContext, CreatePbufferSurface, CreatePixmapSurface, CreateWindowSurface, DestroyContext, DestroySurface, GetConfigAttrib, + GetConfigs, GetCurrentDisplay, GetCurrentSurface, GetDisplay, GetError, GetProcAddress, Initialize, MakeCurrent, QueryContext, QueryString, QuerySurface, + SwapBuffers, Terminate, WaitGL, WaitNative, BindTexImage, ReleaseTexImage, SurfaceAttrib, SwapInterval, BindAPI, QueryAPI, CreatePbufferFromClientBuffer, + ReleaseThread, WaitClient, GetCurrentContext, CreateSync, DestroySync, ClientWaitSync, GetSyncAttrib, CreateImage, DestroyImage, GetPlatformDisplay, + CreatePlatformWindowSurface, CreatePlatformPixmapSurface, WaitSync, + NUMBER_OF_EGL_FUNCTIONS +}; +} + +inline void* getEglFunction(egl::internal::EglFuncName::EglFunctionName funcname) +{ + static void* FunctionTable[EglFuncName::NUMBER_OF_EGL_FUNCTIONS]; + + // GET FUNCTION POINTERS --- ONCE!!!!
/// + if (!FunctionTable[0]) + { + pvr::lib::LIBTYPE lib = pvr::lib::openlib(egl::internal::libName); + if (!lib) + { + Log_Error("EGL Bindings: Failed to open library %s\n", egl::internal::libName); + } + else + { + Log_Info("EGL Bindings: Successfully loaded library %s\n", egl::internal::libName); + } + FunctionTable[EglFuncName::ChooseConfig] = pvr::lib::getLibFunctionChecked(lib, "eglChooseConfig"); + FunctionTable[EglFuncName::CopyBuffers] = pvr::lib::getLibFunctionChecked(lib, "eglCopyBuffers"); + FunctionTable[EglFuncName::CreateContext] = pvr::lib::getLibFunctionChecked(lib, "eglCreateContext"); + FunctionTable[EglFuncName::CreatePbufferSurface] = pvr::lib::getLibFunctionChecked(lib, "eglCreatePbufferSurface"); + FunctionTable[EglFuncName::CreatePixmapSurface] = pvr::lib::getLibFunctionChecked(lib, "eglCreatePixmapSurface"); + FunctionTable[EglFuncName::CreateWindowSurface] = pvr::lib::getLibFunctionChecked(lib, "eglCreateWindowSurface"); + FunctionTable[EglFuncName::DestroyContext] = pvr::lib::getLibFunctionChecked(lib, "eglDestroyContext"); + FunctionTable[EglFuncName::DestroySurface] = pvr::lib::getLibFunctionChecked(lib, "eglDestroySurface"); + FunctionTable[EglFuncName::GetConfigAttrib] = pvr::lib::getLibFunctionChecked(lib, "eglGetConfigAttrib"); + FunctionTable[EglFuncName::GetConfigs] = pvr::lib::getLibFunctionChecked(lib, "eglGetConfigs"); + FunctionTable[EglFuncName::GetCurrentDisplay] = pvr::lib::getLibFunctionChecked(lib, "eglGetCurrentDisplay"); + FunctionTable[EglFuncName::GetCurrentSurface] = pvr::lib::getLibFunctionChecked(lib, "eglGetCurrentSurface"); + FunctionTable[EglFuncName::GetDisplay] = pvr::lib::getLibFunctionChecked(lib, "eglGetDisplay"); + FunctionTable[EglFuncName::GetError] = pvr::lib::getLibFunctionChecked(lib, "eglGetError"); + FunctionTable[EglFuncName::GetProcAddress] = pvr::lib::getLibFunctionChecked(lib, "eglGetProcAddress"); + FunctionTable[EglFuncName::Initialize] = pvr::lib::getLibFunctionChecked(lib, "eglInitialize"); + FunctionTable[EglFuncName::MakeCurrent] = pvr::lib::getLibFunctionChecked(lib, "eglMakeCurrent"); + FunctionTable[EglFuncName::QueryContext] = pvr::lib::getLibFunctionChecked(lib, "eglQueryContext"); + FunctionTable[EglFuncName::QueryString] = pvr::lib::getLibFunctionChecked(lib, "eglQueryString"); + FunctionTable[EglFuncName::QuerySurface] = pvr::lib::getLibFunctionChecked(lib, "eglQuerySurface"); + FunctionTable[EglFuncName::SwapBuffers] = pvr::lib::getLibFunctionChecked(lib, "eglSwapBuffers"); + FunctionTable[EglFuncName::Terminate] = pvr::lib::getLibFunctionChecked(lib, "eglTerminate"); + FunctionTable[EglFuncName::WaitGL] = pvr::lib::getLibFunctionChecked(lib, "eglWaitGL"); + FunctionTable[EglFuncName::WaitNative] = pvr::lib::getLibFunctionChecked(lib, "eglWaitNative"); + FunctionTable[EglFuncName::BindTexImage] = pvr::lib::getLibFunctionChecked(lib, "eglBindTexImage"); + FunctionTable[EglFuncName::ReleaseTexImage] = pvr::lib::getLibFunctionChecked(lib, "eglReleaseTexImage"); + FunctionTable[EglFuncName::SurfaceAttrib] = pvr::lib::getLibFunctionChecked(lib, "eglSurfaceAttrib"); + FunctionTable[EglFuncName::SwapInterval] = pvr::lib::getLibFunctionChecked(lib, "eglSwapInterval"); + FunctionTable[EglFuncName::BindAPI] = pvr::lib::getLibFunctionChecked(lib, "eglBindAPI"); + FunctionTable[EglFuncName::QueryAPI] = pvr::lib::getLibFunctionChecked(lib, "eglQueryAPI"); + FunctionTable[EglFuncName::CreatePbufferFromClientBuffer] = pvr::lib::getLibFunctionChecked(lib, "eglCreatePbufferFromClientBuffer"); + 
+		FunctionTable[EglFuncName::ReleaseThread] = pvr::lib::getLibFunctionChecked(lib, "eglReleaseThread");
+		FunctionTable[EglFuncName::WaitClient] = pvr::lib::getLibFunctionChecked(lib, "eglWaitClient");
+		FunctionTable[EglFuncName::GetCurrentContext] = pvr::lib::getLibFunctionChecked(lib, "eglGetCurrentContext");
+		FunctionTable[EglFuncName::CreateSync] = pvr::lib::getLibFunctionChecked(lib, "eglCreateSync");
+		FunctionTable[EglFuncName::DestroySync] = pvr::lib::getLibFunctionChecked(lib, "eglDestroySync");
+		FunctionTable[EglFuncName::ClientWaitSync] = pvr::lib::getLibFunctionChecked(lib, "eglClientWaitSync");
+		FunctionTable[EglFuncName::GetSyncAttrib] = pvr::lib::getLibFunctionChecked(lib, "eglGetSyncAttrib");
+		FunctionTable[EglFuncName::CreateImage] = pvr::lib::getLibFunctionChecked(lib, "eglCreateImage");
+		FunctionTable[EglFuncName::DestroyImage] = pvr::lib::getLibFunctionChecked(lib, "eglDestroyImage");
+		FunctionTable[EglFuncName::GetPlatformDisplay] = pvr::lib::getLibFunctionChecked(lib, "eglGetPlatformDisplay");
+		FunctionTable[EglFuncName::CreatePlatformWindowSurface] = pvr::lib::getLibFunctionChecked(lib, "eglCreatePlatformWindowSurface");
+		FunctionTable[EglFuncName::CreatePlatformPixmapSurface] = pvr::lib::getLibFunctionChecked(lib, "eglCreatePlatformPixmapSurface");
+		FunctionTable[EglFuncName::WaitSync] = pvr::lib::getLibFunctionChecked(lib, "eglWaitSync");
+	}
+	return FunctionTable[funcname];
+}
+}
+}
+
+#ifndef DYNAMICEGL_NO_NAMESPACE
+#define DYNAMICEGL_FUNCTION(name) EGLAPIENTRY name
+#define DYNAMICEGL_CALL_FUNCTION(name) egl::name
+#else
+#define DYNAMICEGL_FUNCTION(name) EGLAPIENTRY egl##name
+#define DYNAMICEGL_CALL_FUNCTION(name) egl##name
+#endif
+
+#ifndef __GNUC__
+#pragma region EGL_CORE
+#endif
+
+#ifndef DYNAMICEGL_NO_NAMESPACE
+namespace egl {
+#endif
+
+#if defined(__QNXNTO__)
+EGLBoolean DYNAMICEGL_FUNCTION(GetConfigAttrib)(EGLDisplay dpy, EGLConfig config, EGLint attribute, EGLint* value);
+#endif
+
+inline EGLBoolean DYNAMICEGL_FUNCTION(ChooseConfig)(EGLDisplay dpy, const EGLint* attrib_list, EGLConfig* configs, EGLint config_size, EGLint* num_config)
+{
+	typedef EGLBoolean(EGLAPIENTRY * PROC_EGL_ChooseConfig)(EGLDisplay dpy, const EGLint * attrib_list, EGLConfig * configs, EGLint config_size, EGLint * num_config);
+	static PROC_EGL_ChooseConfig _ChooseConfig = (PROC_EGL_ChooseConfig)egl::internal::getEglFunction(egl::internal::EglFuncName::ChooseConfig);
+	return _ChooseConfig(dpy, attrib_list, configs, config_size, num_config);
+}
+inline EGLBoolean DYNAMICEGL_FUNCTION(CopyBuffers)(EGLDisplay dpy, EGLSurface surface, EGLNativePixmapType target)
+{
+	typedef EGLBoolean(EGLAPIENTRY * PROC_EGL_CopyBuffers)(EGLDisplay dpy, EGLSurface surface, EGLNativePixmapType target);
+	static PROC_EGL_CopyBuffers _CopyBuffers = (PROC_EGL_CopyBuffers)egl::internal::getEglFunction(egl::internal::EglFuncName::CopyBuffers);
+	return _CopyBuffers(dpy, surface, target);
+}
+inline EGLContext DYNAMICEGL_FUNCTION(CreateContext)(EGLDisplay dpy, EGLConfig config, EGLContext share_context, const EGLint* attrib_list)
+{
+	typedef EGLContext(EGLAPIENTRY * PROC_EGL_CreateContext)(EGLDisplay dpy, EGLConfig config, EGLContext share_context, const EGLint * attrib_list);
+	static PROC_EGL_CreateContext _CreateContext = (PROC_EGL_CreateContext)egl::internal::getEglFunction(egl::internal::EglFuncName::CreateContext);
+
+	EGLContext context = _CreateContext(dpy, config, share_context, attrib_list);
+
+#if defined(__QNXNTO__)
+	/*
+	On QNX NTO the libGLESv2 library needs to be loaded before the first eglMakeCurrent call
+	*/
+	static bool 
once = false; + if (!once) + { + if (context != EGL_NO_CONTEXT) + { + EGLint type = 0; +#ifdef DYNAMICEGL_NO_NAMESPACE + eglGetConfigAttrib(dpy, config, EGL_RENDERABLE_TYPE, &type); +#else + GetConfigAttrib(dpy, config, EGL_RENDERABLE_TYPE, &type); +#endif + + if ((type & EGL_OPENGL_ES2_BIT) == EGL_OPENGL_ES2_BIT) + { + Log_Info("EGL Bindings: Preloading libGLESv2.so\n"); + pvr::lib::openlib("libGLESv2.so"); + once = true; + } + } + } +#endif + return context; +} +inline EGLSurface DYNAMICEGL_FUNCTION(CreatePbufferSurface)(EGLDisplay dpy, EGLConfig config, const EGLint* attrib_list) +{ + typedef EGLSurface(EGLAPIENTRY * PROC_EGL_CreatePbufferSurface)(EGLDisplay dpy, EGLConfig config, const EGLint * attrib_list); + static PROC_EGL_CreatePbufferSurface _CreatePbufferSurface = (PROC_EGL_CreatePbufferSurface)egl::internal::getEglFunction(egl::internal::EglFuncName::CreatePbufferSurface); + return _CreatePbufferSurface(dpy, config, attrib_list); +} +inline EGLSurface DYNAMICEGL_FUNCTION(CreatePixmapSurface)(EGLDisplay dpy, EGLConfig config, EGLNativePixmapType pixmap, const EGLint* attrib_list) +{ + typedef EGLSurface(EGLAPIENTRY * PROC_EGL_CreatePixmapSurface)(EGLDisplay dpy, EGLConfig config, EGLNativePixmapType pixmap, const EGLint * attrib_list); + static PROC_EGL_CreatePixmapSurface _CreatePixmapSurface = (PROC_EGL_CreatePixmapSurface)egl::internal::getEglFunction(egl::internal::EglFuncName::CreatePixmapSurface); + return _CreatePixmapSurface(dpy, config, pixmap, attrib_list); +} +inline EGLSurface DYNAMICEGL_FUNCTION(CreateWindowSurface)(EGLDisplay dpy, EGLConfig config, EGLNativeWindowType win, const EGLint* attrib_list) +{ + typedef EGLSurface(EGLAPIENTRY * PROC_EGL_CreateWindowSurface)(EGLDisplay dpy, EGLConfig config, EGLNativeWindowType win, const EGLint * attrib_list); + static PROC_EGL_CreateWindowSurface _CreateWindowSurface = (PROC_EGL_CreateWindowSurface)egl::internal::getEglFunction(egl::internal::EglFuncName::CreateWindowSurface); + return _CreateWindowSurface(dpy, config, win, attrib_list); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(DestroyContext)(EGLDisplay dpy, EGLContext ctx) +{ + typedef EGLBoolean(EGLAPIENTRY * PROC_EGL_DestroyContext)(EGLDisplay dpy, EGLContext ctx); + static PROC_EGL_DestroyContext _DestroyContext = (PROC_EGL_DestroyContext)egl::internal::getEglFunction(egl::internal::EglFuncName::DestroyContext); + return _DestroyContext(dpy, ctx); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(DestroySurface)(EGLDisplay dpy, EGLSurface surface) +{ + typedef EGLBoolean(EGLAPIENTRY * PROC_EGL_DestroySurface)(EGLDisplay dpy, EGLSurface surface); + static PROC_EGL_DestroySurface _DestroySurface = (PROC_EGL_DestroySurface)egl::internal::getEglFunction(egl::internal::EglFuncName::DestroySurface); + return _DestroySurface(dpy, surface); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(GetConfigAttrib)(EGLDisplay dpy, EGLConfig config, EGLint attribute, EGLint* value) +{ + typedef EGLBoolean(EGLAPIENTRY * PROC_EGL_GetConfigAttrib)(EGLDisplay dpy, EGLConfig config, EGLint attribute, EGLint * value); + static PROC_EGL_GetConfigAttrib _GetConfigAttrib = (PROC_EGL_GetConfigAttrib)egl::internal::getEglFunction(egl::internal::EglFuncName::GetConfigAttrib); + return _GetConfigAttrib(dpy, config, attribute, value); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(GetConfigs)(EGLDisplay dpy, EGLConfig* configs, EGLint config_size, EGLint* num_config) +{ + typedef EGLBoolean(EGLAPIENTRY * PROC_EGL_GetConfigs)(EGLDisplay dpy, EGLConfig * configs, EGLint config_size, EGLint * num_config); + static 
PROC_EGL_GetConfigs _GetConfigs = (PROC_EGL_GetConfigs)egl::internal::getEglFunction(egl::internal::EglFuncName::GetConfigs); + return _GetConfigs(dpy, configs, config_size, num_config); +} +inline EGLDisplay DYNAMICEGL_FUNCTION(GetCurrentDisplay)(void) +{ + typedef EGLDisplay(EGLAPIENTRY * PROC_EGL_GetCurrentDisplay)(void); + static PROC_EGL_GetCurrentDisplay _GetCurrentDisplay = (PROC_EGL_GetCurrentDisplay)egl::internal::getEglFunction(egl::internal::EglFuncName::GetCurrentDisplay); + return _GetCurrentDisplay(); +} +inline EGLSurface DYNAMICEGL_FUNCTION(GetCurrentSurface)(EGLint readdraw) +{ + typedef EGLSurface(EGLAPIENTRY * PROC_EGL_GetCurrentSurface)(EGLint readdraw); + static PROC_EGL_GetCurrentSurface _GetCurrentSurface = (PROC_EGL_GetCurrentSurface)egl::internal::getEglFunction(egl::internal::EglFuncName::GetCurrentSurface); + return _GetCurrentSurface(readdraw); +} +inline EGLDisplay DYNAMICEGL_FUNCTION(GetDisplay)(EGLNativeDisplayType display_id) +{ + typedef EGLDisplay(EGLAPIENTRY * PROC_EGL_GetDisplay)(EGLNativeDisplayType display_id); + static PROC_EGL_GetDisplay _GetDisplay = (PROC_EGL_GetDisplay)egl::internal::getEglFunction(egl::internal::EglFuncName::GetDisplay); + return _GetDisplay(display_id); +} +inline EGLint DYNAMICEGL_FUNCTION(GetError)(void) +{ + typedef EGLint(EGLAPIENTRY * PROC_EGL_GetError)(void); + static PROC_EGL_GetError _GetError = (PROC_EGL_GetError)egl::internal::getEglFunction(egl::internal::EglFuncName::GetError); + return _GetError(); +} +inline __eglMustCastToProperFunctionPointerType DYNAMICEGL_FUNCTION(GetProcAddress)(const char* procname) +{ + typedef __eglMustCastToProperFunctionPointerType(EGLAPIENTRY * PROC_EGL_GetProcAddress)(const char* procname); + static PROC_EGL_GetProcAddress _GetProcAddress = (PROC_EGL_GetProcAddress)egl::internal::getEglFunction(egl::internal::EglFuncName::GetProcAddress); + return _GetProcAddress(procname); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(Initialize)(EGLDisplay dpy, EGLint* major, EGLint* minor) +{ + typedef EGLBoolean(EGLAPIENTRY * PROC_EGL_Initialize)(EGLDisplay dpy, EGLint * major, EGLint * minor); + static PROC_EGL_Initialize _Initialize = (PROC_EGL_Initialize)egl::internal::getEglFunction(egl::internal::EglFuncName::Initialize); + return _Initialize(dpy, major, minor); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(MakeCurrent)(EGLDisplay dpy, EGLSurface draw, EGLSurface read, EGLContext ctx) +{ + typedef EGLBoolean(EGLAPIENTRY * PROC_EGL_MakeCurrent)(EGLDisplay dpy, EGLSurface draw, EGLSurface read, EGLContext ctx); + static PROC_EGL_MakeCurrent _MakeCurrent = (PROC_EGL_MakeCurrent)egl::internal::getEglFunction(egl::internal::EglFuncName::MakeCurrent); + return _MakeCurrent(dpy, draw, read, ctx); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(QueryContext)(EGLDisplay dpy, EGLContext ctx, EGLint attribute, EGLint* value) +{ + typedef EGLBoolean(EGLAPIENTRY * PROC_EGL_QueryContext)(EGLDisplay dpy, EGLContext ctx, EGLint attribute, EGLint * value); + static PROC_EGL_QueryContext _QueryContext = (PROC_EGL_QueryContext)egl::internal::getEglFunction(egl::internal::EglFuncName::QueryContext); + return _QueryContext(dpy, ctx, attribute, value); +} +inline const char* DYNAMICEGL_FUNCTION(QueryString)(EGLDisplay dpy, EGLint name) +{ + typedef const char* (EGLAPIENTRY * PROC_EGL_QueryString)(EGLDisplay dpy, EGLint name); + static PROC_EGL_QueryString _QueryString = (PROC_EGL_QueryString)egl::internal::getEglFunction(egl::internal::EglFuncName::QueryString); + return _QueryString(dpy, name); +} +inline EGLBoolean 
DYNAMICEGL_FUNCTION(QuerySurface)(EGLDisplay dpy, EGLSurface surface, EGLint attribute, EGLint* value) +{ + typedef EGLBoolean(EGLAPIENTRY * PROC_EGL_QuerySurface)(EGLDisplay dpy, EGLSurface surface, EGLint attribute, EGLint * value); + static PROC_EGL_QuerySurface _QuerySurface = (PROC_EGL_QuerySurface)egl::internal::getEglFunction(egl::internal::EglFuncName::QuerySurface); + return _QuerySurface(dpy, surface, attribute, value); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(SwapBuffers)(EGLDisplay dpy, EGLSurface surface) +{ + typedef EGLBoolean(EGLAPIENTRY * PROC_EGL_SwapBuffers)(EGLDisplay dpy, EGLSurface surface); + static PROC_EGL_SwapBuffers _SwapBuffers = (PROC_EGL_SwapBuffers)egl::internal::getEglFunction(egl::internal::EglFuncName::SwapBuffers); + return _SwapBuffers(dpy, surface); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(Terminate)(EGLDisplay dpy) +{ + typedef EGLBoolean(EGLAPIENTRY * PROC_EGL_Terminate)(EGLDisplay dpy); + static PROC_EGL_Terminate _Terminate = (PROC_EGL_Terminate)egl::internal::getEglFunction(egl::internal::EglFuncName::Terminate); + return _Terminate(dpy); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(WaitGL)(void) +{ + typedef EGLBoolean(EGLAPIENTRY * PROC_EGL_WaitGL)(void); + static PROC_EGL_WaitGL _WaitGL = (PROC_EGL_WaitGL)egl::internal::getEglFunction(egl::internal::EglFuncName::WaitGL); + return _WaitGL(); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(WaitNative)(EGLint engine) +{ + typedef EGLBoolean(EGLAPIENTRY * PROC_EGL_WaitNative)(EGLint engine); + static PROC_EGL_WaitNative _WaitNative = (PROC_EGL_WaitNative)egl::internal::getEglFunction(egl::internal::EglFuncName::WaitNative); + return _WaitNative(engine); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(BindTexImage)(EGLDisplay dpy, EGLSurface surface, EGLint buffer) +{ + typedef EGLBoolean(EGLAPIENTRY * PROC_EGL_BindTexImage)(EGLDisplay dpy, EGLSurface surface, EGLint buffer); + static PROC_EGL_BindTexImage _BindTexImage = (PROC_EGL_BindTexImage)egl::internal::getEglFunction(egl::internal::EglFuncName::BindTexImage); + return _BindTexImage(dpy, surface, buffer); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(ReleaseTexImage)(EGLDisplay dpy, EGLSurface surface, EGLint buffer) +{ + typedef EGLBoolean(EGLAPIENTRY * PROC_EGL_ReleaseTexImage)(EGLDisplay dpy, EGLSurface surface, EGLint buffer); + static PROC_EGL_ReleaseTexImage _ReleaseTexImage = (PROC_EGL_ReleaseTexImage)egl::internal::getEglFunction(egl::internal::EglFuncName::ReleaseTexImage); + return _ReleaseTexImage(dpy, surface, buffer); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(SurfaceAttrib)(EGLDisplay dpy, EGLSurface surface, EGLint attribute, EGLint value) +{ + typedef EGLBoolean(EGLAPIENTRY * PROC_EGL_SurfaceAttrib)(EGLDisplay dpy, EGLSurface surface, EGLint attribute, EGLint value); + static PROC_EGL_SurfaceAttrib _SurfaceAttrib = (PROC_EGL_SurfaceAttrib)egl::internal::getEglFunction(egl::internal::EglFuncName::SurfaceAttrib); + return _SurfaceAttrib(dpy, surface, attribute, value); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(SwapInterval)(EGLDisplay dpy, EGLint interval) +{ + typedef EGLBoolean(EGLAPIENTRY * PROC_EGL_SwapInterval)(EGLDisplay dpy, EGLint interval); + static PROC_EGL_SwapInterval _SwapInterval = (PROC_EGL_SwapInterval)egl::internal::getEglFunction(egl::internal::EglFuncName::SwapInterval); + return _SwapInterval(dpy, interval); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(BindAPI)(EGLenum api) +{ + typedef EGLBoolean(EGLAPIENTRY * PROC_EGL_BindAPI)(EGLenum api); + static PROC_EGL_BindAPI _BindAPI = 
(PROC_EGL_BindAPI)egl::internal::getEglFunction(egl::internal::EglFuncName::BindAPI); + return _BindAPI(api); +} +inline EGLenum DYNAMICEGL_FUNCTION(QueryAPI)(void) +{ + typedef EGLenum(EGLAPIENTRY * PROC_EGL_QueryAPI)(void); + static PROC_EGL_QueryAPI _QueryAPI = (PROC_EGL_QueryAPI)egl::internal::getEglFunction(egl::internal::EglFuncName::QueryAPI); + return _QueryAPI(); +} +inline EGLSurface DYNAMICEGL_FUNCTION(CreatePbufferFromClientBuffer)(EGLDisplay dpy, EGLenum buftype, EGLClientBuffer buffer, EGLConfig config, const EGLint* attrib_list) +{ + typedef EGLSurface(EGLAPIENTRY * PROC_EGL_CreatePbufferFromClientBuffer)(EGLDisplay dpy, EGLenum buftype, EGLClientBuffer buffer, EGLConfig config, const EGLint * attrib_list); + static PROC_EGL_CreatePbufferFromClientBuffer _CreatePbufferFromClientBuffer = (PROC_EGL_CreatePbufferFromClientBuffer)egl::internal::getEglFunction(egl::internal::EglFuncName::CreatePbufferFromClientBuffer); + return _CreatePbufferFromClientBuffer(dpy, buftype, buffer, config, attrib_list); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(ReleaseThread)(void) +{ + typedef EGLBoolean(EGLAPIENTRY * PROC_EGL_ReleaseThread)(void); + static PROC_EGL_ReleaseThread _ReleaseThread = (PROC_EGL_ReleaseThread)egl::internal::getEglFunction(egl::internal::EglFuncName::ReleaseThread); + return _ReleaseThread(); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(WaitClient)(void) +{ + typedef EGLBoolean(EGLAPIENTRY * PROC_EGL_WaitClient)(void); + static PROC_EGL_WaitClient _WaitClient = (PROC_EGL_WaitClient)egl::internal::getEglFunction(egl::internal::EglFuncName::WaitClient); + return _WaitClient(); +} +inline EGLContext DYNAMICEGL_FUNCTION(GetCurrentContext)(void) +{ + typedef EGLContext(EGLAPIENTRY * PROC_EGL_GetCurrentContext)(void); + static PROC_EGL_GetCurrentContext _GetCurrentContext = (PROC_EGL_GetCurrentContext)egl::internal::getEglFunction(egl::internal::EglFuncName::GetCurrentContext); + return _GetCurrentContext(); +} +inline EGLSync DYNAMICEGL_FUNCTION(CreateSync)(EGLDisplay dpy, EGLenum type, const EGLAttrib* attrib_list) +{ + typedef EGLSync(EGLAPIENTRY * PROC_EGL_CreateSync)(EGLDisplay dpy, EGLenum type, const EGLAttrib * attrib_list); + static PROC_EGL_CreateSync _CreateSync = (PROC_EGL_CreateSync)egl::internal::getEglFunction(egl::internal::EglFuncName::CreateSync); + return _CreateSync(dpy, type, attrib_list); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(DestroySync)(EGLDisplay dpy, EGLSync sync) +{ + typedef EGLBoolean(EGLAPIENTRY * PROC_EGL_DestroySync)(EGLDisplay dpy, EGLSync sync); + static PROC_EGL_DestroySync _DestroySync = (PROC_EGL_DestroySync)egl::internal::getEglFunction(egl::internal::EglFuncName::DestroySync); + return _DestroySync(dpy, sync); +} +inline EGLint DYNAMICEGL_FUNCTION(ClientWaitSync)(EGLDisplay dpy, EGLSync sync, EGLint flags, EGLTime timeout) +{ + typedef EGLint(EGLAPIENTRY * PROC_EGL_ClientWaitSync)(EGLDisplay dpy, EGLSync sync, EGLint flags, EGLTime timeout); + static PROC_EGL_ClientWaitSync _ClientWaitSync = (PROC_EGL_ClientWaitSync)egl::internal::getEglFunction(egl::internal::EglFuncName::ClientWaitSync); + return _ClientWaitSync(dpy, sync, flags, timeout); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(GetSyncAttrib)(EGLDisplay dpy, EGLSync sync, EGLint attribute, EGLAttrib* value) +{ + typedef EGLBoolean(EGLAPIENTRY * PROC_EGL_GetSyncAttrib)(EGLDisplay dpy, EGLSync sync, EGLint attribute, EGLAttrib * value); + static PROC_EGL_GetSyncAttrib _GetSyncAttrib = (PROC_EGL_GetSyncAttrib)egl::internal::getEglFunction(egl::internal::EglFuncName::GetSyncAttrib); + 
return _GetSyncAttrib(dpy, sync, attribute, value); +} +inline EGLImage DYNAMICEGL_FUNCTION(CreateImage)(EGLDisplay dpy, EGLContext ctx, EGLenum target, EGLClientBuffer buffer, const EGLAttrib* attrib_list) +{ + typedef EGLImage(EGLAPIENTRY * PROC_EGL_CreateImage)(EGLDisplay dpy, EGLContext ctx, EGLenum target, EGLClientBuffer buffer, const EGLAttrib * attrib_list); + static PROC_EGL_CreateImage _CreateImage = (PROC_EGL_CreateImage)egl::internal::getEglFunction(egl::internal::EglFuncName::CreateImage); + return _CreateImage(dpy, ctx, target, buffer, attrib_list); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(DestroyImage)(EGLDisplay dpy, EGLImage image) +{ + typedef EGLBoolean(EGLAPIENTRY * PROC_EGL_DestroyImage)(EGLDisplay dpy, EGLImage image); + static PROC_EGL_DestroyImage _DestroyImage = (PROC_EGL_DestroyImage)egl::internal::getEglFunction(egl::internal::EglFuncName::DestroyImage); + return _DestroyImage(dpy, image); +} +inline EGLDisplay DYNAMICEGL_FUNCTION(GetPlatformDisplay)(EGLenum platform, void* native_display, const EGLAttrib* attrib_list) +{ + typedef EGLDisplay(EGLAPIENTRY * PROC_EGL_GetPlatformDisplay)(EGLenum platform, void* native_display, const EGLAttrib * attrib_list); + static PROC_EGL_GetPlatformDisplay _GetPlatformDisplay = (PROC_EGL_GetPlatformDisplay)egl::internal::getEglFunction(egl::internal::EglFuncName::GetPlatformDisplay); + return _GetPlatformDisplay(platform, native_display, attrib_list); +} +inline EGLSurface DYNAMICEGL_FUNCTION(CreatePlatformWindowSurface)(EGLDisplay dpy, EGLConfig config, void* native_window, const EGLAttrib* attrib_list) +{ + typedef EGLSurface(EGLAPIENTRY * PROC_EGL_CreatePlatformWindowSurface)(EGLDisplay dpy, EGLConfig config, void* native_window, const EGLAttrib * attrib_list); + static PROC_EGL_CreatePlatformWindowSurface _CreatePlatformWindowSurface = (PROC_EGL_CreatePlatformWindowSurface)egl::internal::getEglFunction(egl::internal::EglFuncName::CreatePlatformWindowSurface); + return _CreatePlatformWindowSurface(dpy, config, native_window, attrib_list); +} +inline EGLSurface DYNAMICEGL_FUNCTION(CreatePlatformPixmapSurface)(EGLDisplay dpy, EGLConfig config, void* native_pixmap, const EGLAttrib* attrib_list) +{ + typedef EGLSurface(EGLAPIENTRY * PROC_EGL_CreatePlatformPixmapSurface)(EGLDisplay dpy, EGLConfig config, void* native_pixmap, const EGLAttrib * attrib_list); + static PROC_EGL_CreatePlatformPixmapSurface _CreatePlatformPixmapSurface = (PROC_EGL_CreatePlatformPixmapSurface)egl::internal::getEglFunction(egl::internal::EglFuncName::CreatePlatformPixmapSurface); + return _CreatePlatformPixmapSurface(dpy, config, native_pixmap, attrib_list); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(WaitSync)(EGLDisplay dpy, EGLSync sync, EGLint flags) +{ + typedef EGLBoolean(EGLAPIENTRY * PROC_EGL_WaitSync)(EGLDisplay dpy, EGLSync sync, EGLint flags); + static PROC_EGL_WaitSync _WaitSync = (PROC_EGL_WaitSync)egl::internal::getEglFunction(egl::internal::EglFuncName::WaitSync); + return _WaitSync(dpy, sync, flags); +} +#ifndef DYNAMICEGL_NO_NAMESPACE +} +#endif +#ifndef __GNUC__ +#pragma endregion +#endif + +#ifndef __GNUC__ +#pragma region EGL_EXT +#endif + +namespace egl { +namespace internal { +namespace EglExtFuncName { +enum EglExtFunctionName +{ + CreateSync64KHR, DebugMessageControlKHR, QueryDebugKHR, LabelObjectKHR, QueryDisplayAttribKHR, CreateSyncKHR, DestroySyncKHR, + ClientWaitSyncKHR, GetSyncAttribKHR, CreateImageKHR, DestroyImageKHR, LockSurfaceKHR, UnlockSurfaceKHR, QuerySurface64KHR, + SetDamageRegionKHR, SignalSyncKHR, CreateStreamKHR, 
DestroyStreamKHR, StreamAttribKHR, QueryStreamKHR, QueryStreamu64KHR,
+	CreateStreamAttribKHR, SetStreamAttribKHR, QueryStreamAttribKHR, StreamConsumerAcquireAttribKHR, StreamConsumerReleaseAttribKHR,
+	StreamConsumerGLTextureExternalKHR, StreamConsumerAcquireKHR, StreamConsumerReleaseKHR, GetStreamFileDescriptorKHR,
+	CreateStreamFromFileDescriptorKHR, QueryStreamTimeKHR, CreateStreamProducerSurfaceKHR, SwapBuffersWithDamageKHR, WaitSyncKHR,
+	SetBlobCacheFuncsANDROID, CreateNativeClientBufferANDROID, DupNativeFenceFDANDROID, PresentationTimeANDROID, QuerySurfacePointerANGLE,
+	CompositorSetContextListEXT, CompositorSetContextAttributesEXT, CompositorSetWindowListEXT, CompositorSetWindowAttributesEXT,
+	CompositorBindTexWindowEXT, CompositorSetSizeEXT, CompositorSwapPolicyEXT, QueryDeviceAttribEXT, QueryDeviceStringEXT,
+	QueryDevicesEXT, QueryDisplayAttribEXT, QueryDmaBufFormatsEXT, QueryDmaBufModifiersEXT, GetOutputLayersEXT, GetOutputPortsEXT,
+	OutputLayerAttribEXT, QueryOutputLayerAttribEXT, QueryOutputLayerStringEXT, OutputPortAttribEXT, QueryOutputPortAttribEXT,
+	QueryOutputPortStringEXT, GetPlatformDisplayEXT, CreatePlatformWindowSurfaceEXT, CreatePlatformPixmapSurfaceEXT, StreamConsumerOutputEXT,
+	SwapBuffersWithDamageEXT, CreatePixmapSurfaceHI, CreateDRMImageMESA, ExportDRMImageMESA, ExportDMABUFImageQueryMESA,
+	ExportDMABUFImageMESA, SwapBuffersRegionNOK, SwapBuffersRegion2NOK, QueryNativeDisplayNV, QueryNativeWindowNV, QueryNativePixmapNV,
+	PostSubBufferNV, StreamConsumerGLTextureExternalAttribsNV, QueryDisplayAttribNV, SetStreamMetadataNV, QueryStreamMetadataNV,
+	ResetStreamNV, CreateStreamSyncNV, CreateFenceSyncNV, DestroySyncNV, FenceNV, ClientWaitSyncNV, SignalSyncNV, GetSyncAttribNV,
+	GetSystemTimeFrequencyNV, GetSystemTimeNV,
+	NUMBER_OF_EGL_EXT_FUNCTIONS
+};
+}
+
+static inline void* getEglExtensionFunction(const char* funcName)
+{
+	return (void*)DYNAMICEGL_CALL_FUNCTION(GetProcAddress)(funcName);
+}
+
+static inline bool isExtensionSupported(const char* const extensionString, const char* const extension)
+{
+	if (!extensionString)
+	{
+		return false;
+	}
+
+	// The recommended technique for querying OpenGL extensions,
+	// from http://opengl.org/resources/features/OGLextensions/
+	const char* start = extensionString;
+	char* position, *terminator;
+
+	// Extension names should not have spaces.
+	position = (char*)strchr(extension, ' ');
+
+	if (position || *extension == '\0')
+	{
+		return false;
+	}
+
+	/* It takes a bit of care to be fool-proof about parsing the
+	   OpenGL extensions string. Don't be fooled by sub-strings, etc. */
+	for (;;)
+	{
+		position = (char*)strstr((char*)start, extension);
+
+		if (!position)
+		{
+			break;
+		}
+
+		terminator = position + strlen(extension);
+
+		if (position == start || *(position - 1) == ' ')
+		{
+			if (*terminator == ' ' || *terminator == '\0')
+			{
+				return true;
+			}
+		}
+
+		start = terminator;
+	}
+
+	return false;
+}
+
+inline void* getEglExtFunction(egl::internal::EglExtFuncName::EglExtFunctionName funcname, bool reset = false)
+{
+	static void* FunctionTable[EglExtFuncName::NUMBER_OF_EGL_EXT_FUNCTIONS];
+
+	// Resolve all EGL extension entry points once, or again when a reset is requested.
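+	// The extension entry points come from eglGetProcAddress rather than straight
+	// from the EGL library, so they are only meaningful once EGL itself has been
+	// loaded. Passing reset = true forces every entry to be queried again (for
+	// example after re-initializing EGL); otherwise the first slot, once non-null,
+	// acts as the "already resolved" flag.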
+	if (!FunctionTable[0] || reset)
+	{
+		FunctionTable[EglExtFuncName::CreateSync64KHR] = egl::internal::getEglExtensionFunction("eglCreateSync64KHR");
+		FunctionTable[EglExtFuncName::DebugMessageControlKHR] = egl::internal::getEglExtensionFunction("eglDebugMessageControlKHR");
+		FunctionTable[EglExtFuncName::QueryDebugKHR] = egl::internal::getEglExtensionFunction("eglQueryDebugKHR");
+		FunctionTable[EglExtFuncName::LabelObjectKHR] = egl::internal::getEglExtensionFunction("eglLabelObjectKHR");
+		FunctionTable[EglExtFuncName::QueryDisplayAttribKHR] = egl::internal::getEglExtensionFunction("eglQueryDisplayAttribKHR");
+		FunctionTable[EglExtFuncName::CreateSyncKHR] = egl::internal::getEglExtensionFunction("eglCreateSyncKHR");
+		FunctionTable[EglExtFuncName::DestroySyncKHR] = egl::internal::getEglExtensionFunction("eglDestroySyncKHR");
+		FunctionTable[EglExtFuncName::ClientWaitSyncKHR] = egl::internal::getEglExtensionFunction("eglClientWaitSyncKHR");
+		FunctionTable[EglExtFuncName::GetSyncAttribKHR] = egl::internal::getEglExtensionFunction("eglGetSyncAttribKHR");
+		FunctionTable[EglExtFuncName::CreateImageKHR] = egl::internal::getEglExtensionFunction("eglCreateImageKHR");
+		FunctionTable[EglExtFuncName::DestroyImageKHR] = egl::internal::getEglExtensionFunction("eglDestroyImageKHR");
+		FunctionTable[EglExtFuncName::LockSurfaceKHR] = egl::internal::getEglExtensionFunction("eglLockSurfaceKHR");
+		FunctionTable[EglExtFuncName::UnlockSurfaceKHR] = egl::internal::getEglExtensionFunction("eglUnlockSurfaceKHR");
+		FunctionTable[EglExtFuncName::QuerySurface64KHR] = egl::internal::getEglExtensionFunction("eglQuerySurface64KHR");
+		FunctionTable[EglExtFuncName::SetDamageRegionKHR] = egl::internal::getEglExtensionFunction("eglSetDamageRegionKHR");
+		FunctionTable[EglExtFuncName::SignalSyncKHR] = egl::internal::getEglExtensionFunction("eglSignalSyncKHR");
+		FunctionTable[EglExtFuncName::CreateStreamKHR] = egl::internal::getEglExtensionFunction("eglCreateStreamKHR");
+		FunctionTable[EglExtFuncName::DestroyStreamKHR] = egl::internal::getEglExtensionFunction("eglDestroyStreamKHR");
+		FunctionTable[EglExtFuncName::StreamAttribKHR] = egl::internal::getEglExtensionFunction("eglStreamAttribKHR");
+		FunctionTable[EglExtFuncName::QueryStreamKHR] = egl::internal::getEglExtensionFunction("eglQueryStreamKHR");
+		FunctionTable[EglExtFuncName::QueryStreamu64KHR] = egl::internal::getEglExtensionFunction("eglQueryStreamu64KHR");
+		FunctionTable[EglExtFuncName::CreateStreamAttribKHR] = egl::internal::getEglExtensionFunction("eglCreateStreamAttribKHR");
+		FunctionTable[EglExtFuncName::SetStreamAttribKHR] = egl::internal::getEglExtensionFunction("eglSetStreamAttribKHR");
+		FunctionTable[EglExtFuncName::QueryStreamAttribKHR] = egl::internal::getEglExtensionFunction("eglQueryStreamAttribKHR");
+		FunctionTable[EglExtFuncName::StreamConsumerAcquireAttribKHR] = egl::internal::getEglExtensionFunction("eglStreamConsumerAcquireAttribKHR");
+		FunctionTable[EglExtFuncName::StreamConsumerReleaseAttribKHR] = egl::internal::getEglExtensionFunction("eglStreamConsumerReleaseAttribKHR");
+		FunctionTable[EglExtFuncName::StreamConsumerGLTextureExternalKHR] = egl::internal::getEglExtensionFunction("eglStreamConsumerGLTextureExternalKHR");
+		FunctionTable[EglExtFuncName::StreamConsumerAcquireKHR] = egl::internal::getEglExtensionFunction("eglStreamConsumerAcquireKHR");
+		FunctionTable[EglExtFuncName::StreamConsumerReleaseKHR] = egl::internal::getEglExtensionFunction("eglStreamConsumerReleaseKHR");
+		
FunctionTable[EglExtFuncName::GetStreamFileDescriptorKHR] = egl::internal::getEglExtensionFunction("eglGetStreamFileDescriptorKHR"); + FunctionTable[EglExtFuncName::CreateStreamFromFileDescriptorKHR] = egl::internal::getEglExtensionFunction("eglCreateStreamFromFileDescriptorKHR"); + FunctionTable[EglExtFuncName::QueryStreamTimeKHR] = egl::internal::getEglExtensionFunction("eglQueryStreamTimeKHR"); + FunctionTable[EglExtFuncName::CreateStreamProducerSurfaceKHR] = egl::internal::getEglExtensionFunction("eglCreateStreamProducerSurfaceKHR"); + FunctionTable[EglExtFuncName::SwapBuffersWithDamageKHR] = egl::internal::getEglExtensionFunction("eglSwapBuffersWithDamageKHR"); + FunctionTable[EglExtFuncName::WaitSyncKHR] = egl::internal::getEglExtensionFunction("eglWaitSyncKHR"); + FunctionTable[EglExtFuncName::SetBlobCacheFuncsANDROID] = egl::internal::getEglExtensionFunction("eglSetBlobCacheFuncsANDROID"); + FunctionTable[EglExtFuncName::CreateNativeClientBufferANDROID] = egl::internal::getEglExtensionFunction("eglCreateNativeClientBufferANDROID"); + FunctionTable[EglExtFuncName::DupNativeFenceFDANDROID] = egl::internal::getEglExtensionFunction("eglDupNativeFenceFDANDROID"); + FunctionTable[EglExtFuncName::PresentationTimeANDROID] = egl::internal::getEglExtensionFunction("eglPresentationTimeANDROID"); + FunctionTable[EglExtFuncName::QuerySurfacePointerANGLE] = egl::internal::getEglExtensionFunction("eglQuerySurfacePointerANGLE"); + FunctionTable[EglExtFuncName::CompositorSetContextListEXT] = egl::internal::getEglExtensionFunction("eglCompositorSetContextListEXT"); + FunctionTable[EglExtFuncName::CompositorSetContextAttributesEXT] = egl::internal::getEglExtensionFunction("eglCompositorSetContextAttributesEXT"); + FunctionTable[EglExtFuncName::CompositorSetWindowListEXT] = egl::internal::getEglExtensionFunction("eglCompositorSetWindowListEXT"); + FunctionTable[EglExtFuncName::CompositorSetWindowAttributesEXT] = egl::internal::getEglExtensionFunction("eglCompositorSetWindowAttributesEXT"); + FunctionTable[EglExtFuncName::CompositorBindTexWindowEXT] = egl::internal::getEglExtensionFunction("eglCompositorBindTexWindowEXT"); + FunctionTable[EglExtFuncName::CompositorSetSizeEXT] = egl::internal::getEglExtensionFunction("eglCompositorSetSizeEXT"); + FunctionTable[EglExtFuncName::CompositorSwapPolicyEXT] = egl::internal::getEglExtensionFunction("eglCompositorSwapPolicyEXT"); + FunctionTable[EglExtFuncName::QueryDeviceAttribEXT] = egl::internal::getEglExtensionFunction("eglQueryDeviceAttribEXT"); + FunctionTable[EglExtFuncName::QueryDeviceStringEXT] = egl::internal::getEglExtensionFunction("eglQueryDeviceStringEXT"); + FunctionTable[EglExtFuncName::QueryDevicesEXT] = egl::internal::getEglExtensionFunction("eglQueryDevicesEXT"); + FunctionTable[EglExtFuncName::QueryDisplayAttribEXT] = egl::internal::getEglExtensionFunction("eglQueryDisplayAttribEXT"); + FunctionTable[EglExtFuncName::QueryDmaBufFormatsEXT] = egl::internal::getEglExtensionFunction("eglQueryDmaBufFormatsEXT"); + FunctionTable[EglExtFuncName::QueryDmaBufModifiersEXT] = egl::internal::getEglExtensionFunction("eglQueryDmaBufModifiersEXT"); + FunctionTable[EglExtFuncName::GetOutputLayersEXT] = egl::internal::getEglExtensionFunction("eglGetOutputLayersEXT"); + FunctionTable[EglExtFuncName::GetOutputPortsEXT] = egl::internal::getEglExtensionFunction("eglGetOutputPortsEXT"); + FunctionTable[EglExtFuncName::OutputLayerAttribEXT] = egl::internal::getEglExtensionFunction("eglOutputLayerAttribEXT"); + 
FunctionTable[EglExtFuncName::QueryOutputLayerAttribEXT] = egl::internal::getEglExtensionFunction("eglQueryOutputLayerAttribEXT"); + FunctionTable[EglExtFuncName::QueryOutputLayerStringEXT] = egl::internal::getEglExtensionFunction("eglQueryOutputLayerStringEXT"); + FunctionTable[EglExtFuncName::OutputPortAttribEXT] = egl::internal::getEglExtensionFunction("eglOutputPortAttribEXT"); + FunctionTable[EglExtFuncName::QueryOutputPortAttribEXT] = egl::internal::getEglExtensionFunction("eglQueryOutputPortAttribEXT"); + FunctionTable[EglExtFuncName::QueryOutputPortStringEXT] = egl::internal::getEglExtensionFunction("eglQueryOutputPortStringEXT"); + FunctionTable[EglExtFuncName::GetPlatformDisplayEXT] = egl::internal::getEglExtensionFunction("eglGetPlatformDisplayEXT"); + FunctionTable[EglExtFuncName::CreatePlatformWindowSurfaceEXT] = egl::internal::getEglExtensionFunction("eglCreatePlatformWindowSurfaceEXT"); + FunctionTable[EglExtFuncName::CreatePlatformPixmapSurfaceEXT] = egl::internal::getEglExtensionFunction("eglCreatePlatformPixmapSurfaceEXT"); + FunctionTable[EglExtFuncName::StreamConsumerOutputEXT] = egl::internal::getEglExtensionFunction("eglStreamConsumerOutputEXT"); + FunctionTable[EglExtFuncName::SwapBuffersWithDamageEXT] = egl::internal::getEglExtensionFunction("eglSwapBuffersWithDamageEXT"); + FunctionTable[EglExtFuncName::CreatePixmapSurfaceHI] = egl::internal::getEglExtensionFunction("eglCreatePixmapSurfaceHI"); + FunctionTable[EglExtFuncName::CreateDRMImageMESA] = egl::internal::getEglExtensionFunction("eglCreateDRMImageMESA"); + FunctionTable[EglExtFuncName::ExportDRMImageMESA] = egl::internal::getEglExtensionFunction("eglExportDRMImageMESA"); + FunctionTable[EglExtFuncName::ExportDMABUFImageQueryMESA] = egl::internal::getEglExtensionFunction("eglExportDMABUFImageQueryMESA"); + FunctionTable[EglExtFuncName::ExportDMABUFImageMESA] = egl::internal::getEglExtensionFunction("eglExportDMABUFImageMESA"); + FunctionTable[EglExtFuncName::SwapBuffersRegionNOK] = egl::internal::getEglExtensionFunction("eglSwapBuffersRegionNOK"); + FunctionTable[EglExtFuncName::SwapBuffersRegion2NOK] = egl::internal::getEglExtensionFunction("eglSwapBuffersRegion2NOK"); + FunctionTable[EglExtFuncName::QueryNativeDisplayNV] = egl::internal::getEglExtensionFunction("eglQueryNativeDisplayNV"); + FunctionTable[EglExtFuncName::QueryNativeWindowNV] = egl::internal::getEglExtensionFunction("eglQueryNativeWindowNV"); + FunctionTable[EglExtFuncName::QueryNativePixmapNV] = egl::internal::getEglExtensionFunction("eglQueryNativePixmapNV"); + FunctionTable[EglExtFuncName::PostSubBufferNV] = egl::internal::getEglExtensionFunction("eglPostSubBufferNV"); + FunctionTable[EglExtFuncName::StreamConsumerGLTextureExternalAttribsNV] = egl::internal::getEglExtensionFunction("eglStreamConsumerGLTextureExternalAttribsNV"); + FunctionTable[EglExtFuncName::QueryDisplayAttribNV] = egl::internal::getEglExtensionFunction("eglQueryDisplayAttribNV"); + FunctionTable[EglExtFuncName::SetStreamMetadataNV] = egl::internal::getEglExtensionFunction("eglSetStreamMetadataNV"); + FunctionTable[EglExtFuncName::QueryStreamMetadataNV] = egl::internal::getEglExtensionFunction("eglQueryStreamMetadataNV"); + FunctionTable[EglExtFuncName::ResetStreamNV] = egl::internal::getEglExtensionFunction("eglResetStreamNV"); + FunctionTable[EglExtFuncName::CreateStreamSyncNV] = egl::internal::getEglExtensionFunction("eglCreateStreamSyncNV"); + FunctionTable[EglExtFuncName::CreateFenceSyncNV] = egl::internal::getEglExtensionFunction("eglCreateFenceSyncNV"); + 
FunctionTable[EglExtFuncName::DestroySyncNV] = egl::internal::getEglExtensionFunction("eglDestroySyncNV"); + FunctionTable[EglExtFuncName::FenceNV] = egl::internal::getEglExtensionFunction("eglFenceNV"); + FunctionTable[EglExtFuncName::ClientWaitSyncNV] = egl::internal::getEglExtensionFunction("eglClientWaitSyncNV"); + FunctionTable[EglExtFuncName::SignalSyncNV] = egl::internal::getEglExtensionFunction("eglSignalSyncNV"); + FunctionTable[EglExtFuncName::GetSyncAttribNV] = egl::internal::getEglExtensionFunction("eglGetSyncAttribNV"); + FunctionTable[EglExtFuncName::GetSystemTimeFrequencyNV] = egl::internal::getEglExtensionFunction("eglGetSystemTimeFrequencyNV"); + FunctionTable[EglExtFuncName::GetSystemTimeNV] = egl::internal::getEglExtensionFunction("eglGetSystemTimeNV"); + + } + return FunctionTable[funcname]; +} +} +} +#ifndef __GNUC__ +#pragma endregion +#endif + +#ifndef DYNAMICEGL_NO_NAMESPACE +namespace egl { +namespace ext { +#endif +inline EGLSyncKHR DYNAMICEGL_FUNCTION(CreateSync64KHR)(EGLDisplay dpy, EGLenum type, const EGLAttribKHR* attrib_list) +{ + typedef EGLSyncKHR(EGLAPIENTRY * PFNEGLCreateSync64KHRPROC)(EGLDisplay dpy, EGLenum type, const EGLAttribKHR * attrib_list); + static PFNEGLCreateSync64KHRPROC _CreateSync64KHR = (PFNEGLCreateSync64KHRPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::CreateSync64KHR); + return _CreateSync64KHR(dpy, type, attrib_list); +} +inline EGLint DYNAMICEGL_FUNCTION(DebugMessageControlKHR)(EGLDEBUGPROCKHR callback, const EGLAttrib* attrib_list) +{ + typedef EGLint(EGLAPIENTRY * PFNEGLDebugMessageControlKHRPROC)(EGLDEBUGPROCKHR callback, const EGLAttrib * attrib_list); + static PFNEGLDebugMessageControlKHRPROC _DebugMessageControlKHR = (PFNEGLDebugMessageControlKHRPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::DebugMessageControlKHR); + return _DebugMessageControlKHR(callback, attrib_list); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(QueryDebugKHR)(EGLint attribute, EGLAttrib* value) +{ + typedef EGLBoolean(EGLAPIENTRY * PFNEGLQueryDebugKHRPROC)(EGLint attribute, EGLAttrib * value); + static PFNEGLQueryDebugKHRPROC _QueryDebugKHR = (PFNEGLQueryDebugKHRPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::QueryDebugKHR); + return _QueryDebugKHR(attribute, value); +} +inline EGLint DYNAMICEGL_FUNCTION(LabelObjectKHR)(EGLDisplay display, EGLenum objectType, EGLObjectKHR object, EGLLabelKHR label) +{ + typedef EGLint(EGLAPIENTRY * PFNEGLLabelObjectKHRPROC)(EGLDisplay display, EGLenum objectType, EGLObjectKHR object, EGLLabelKHR label); + static PFNEGLLabelObjectKHRPROC _LabelObjectKHR = (PFNEGLLabelObjectKHRPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::LabelObjectKHR); + return _LabelObjectKHR(display, objectType, object, label); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(QueryDisplayAttribKHR)(EGLDisplay dpy, EGLint name, EGLAttrib* value) +{ + typedef EGLBoolean(EGLAPIENTRY * PFNEGLQueryDisplayAttribKHRPROC)(EGLDisplay dpy, EGLint name, EGLAttrib * value); + static PFNEGLQueryDisplayAttribKHRPROC _QueryDisplayAttribKHR = (PFNEGLQueryDisplayAttribKHRPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::QueryDisplayAttribKHR); + return _QueryDisplayAttribKHR(dpy, name, value); +} +inline EGLSyncKHR DYNAMICEGL_FUNCTION(CreateSyncKHR)(EGLDisplay dpy, EGLenum type, const EGLint* attrib_list) +{ + typedef EGLSyncKHR(EGLAPIENTRY * PFNEGLCreateSyncKHRPROC)(EGLDisplay dpy, EGLenum type, const EGLint * attrib_list); + static PFNEGLCreateSyncKHRPROC _CreateSyncKHR = 
(PFNEGLCreateSyncKHRPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::CreateSyncKHR); + return _CreateSyncKHR(dpy, type, attrib_list); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(DestroySyncKHR)(EGLDisplay dpy, EGLSyncKHR sync) +{ + typedef EGLBoolean(EGLAPIENTRY * PFNEGLDestroySyncKHRPROC)(EGLDisplay dpy, EGLSyncKHR sync); + static PFNEGLDestroySyncKHRPROC _DestroySyncKHR = (PFNEGLDestroySyncKHRPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::DestroySyncKHR); + return _DestroySyncKHR(dpy, sync); +} +inline EGLint DYNAMICEGL_FUNCTION(ClientWaitSyncKHR)(EGLDisplay dpy, EGLSyncKHR sync, EGLint flags, EGLTimeKHR timeout) +{ + typedef EGLint(EGLAPIENTRY * PFNEGLClientWaitSyncKHRPROC)(EGLDisplay dpy, EGLSyncKHR sync, EGLint flags, EGLTimeKHR timeout); + static PFNEGLClientWaitSyncKHRPROC _ClientWaitSyncKHR = (PFNEGLClientWaitSyncKHRPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::ClientWaitSyncKHR); + return _ClientWaitSyncKHR(dpy, sync, flags, timeout); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(GetSyncAttribKHR)(EGLDisplay dpy, EGLSyncKHR sync, EGLint attribute, EGLint* value) +{ + typedef EGLBoolean(EGLAPIENTRY * PFNEGLGetSyncAttribKHRPROC)(EGLDisplay dpy, EGLSyncKHR sync, EGLint attribute, EGLint * value); + static PFNEGLGetSyncAttribKHRPROC _GetSyncAttribKHR = (PFNEGLGetSyncAttribKHRPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::GetSyncAttribKHR); + return _GetSyncAttribKHR(dpy, sync, attribute, value); +} +inline EGLImageKHR DYNAMICEGL_FUNCTION(CreateImageKHR)(EGLDisplay dpy, EGLContext ctx, EGLenum target, EGLClientBuffer buffer, const EGLint* attrib_list) +{ + typedef EGLImageKHR(EGLAPIENTRY * PFNEGLCreateImageKHRPROC)(EGLDisplay dpy, EGLContext ctx, EGLenum target, EGLClientBuffer buffer, const EGLint * attrib_list); + static PFNEGLCreateImageKHRPROC _CreateImageKHR = (PFNEGLCreateImageKHRPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::CreateImageKHR); + return _CreateImageKHR(dpy, ctx, target, buffer, attrib_list); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(DestroyImageKHR)(EGLDisplay dpy, EGLImageKHR image) +{ + typedef EGLBoolean(EGLAPIENTRY * PFNEGLDestroyImageKHRPROC)(EGLDisplay dpy, EGLImageKHR image); + static PFNEGLDestroyImageKHRPROC _DestroyImageKHR = (PFNEGLDestroyImageKHRPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::DestroyImageKHR); + return _DestroyImageKHR(dpy, image); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(LockSurfaceKHR)(EGLDisplay dpy, EGLSurface surface, const EGLint* attrib_list) +{ + typedef EGLBoolean(EGLAPIENTRY * PFNEGLLockSurfaceKHRPROC)(EGLDisplay dpy, EGLSurface surface, const EGLint * attrib_list); + static PFNEGLLockSurfaceKHRPROC _LockSurfaceKHR = (PFNEGLLockSurfaceKHRPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::LockSurfaceKHR); + return _LockSurfaceKHR(dpy, surface, attrib_list); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(UnlockSurfaceKHR)(EGLDisplay dpy, EGLSurface surface) +{ + typedef EGLBoolean(EGLAPIENTRY * PFNEGLUnlockSurfaceKHRPROC)(EGLDisplay dpy, EGLSurface surface); + static PFNEGLUnlockSurfaceKHRPROC _UnlockSurfaceKHR = (PFNEGLUnlockSurfaceKHRPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::UnlockSurfaceKHR); + return _UnlockSurfaceKHR(dpy, surface); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(QuerySurface64KHR)(EGLDisplay dpy, EGLSurface surface, EGLint attribute, EGLAttribKHR* value) +{ + typedef EGLBoolean(EGLAPIENTRY * PFNEGLQuerySurface64KHRPROC)(EGLDisplay dpy, EGLSurface 
surface, EGLint attribute, EGLAttribKHR * value);
+	static PFNEGLQuerySurface64KHRPROC _QuerySurface64KHR = (PFNEGLQuerySurface64KHRPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::QuerySurface64KHR);
+	return _QuerySurface64KHR(dpy, surface, attribute, value);
+}
+inline EGLBoolean DYNAMICEGL_FUNCTION(SetDamageRegionKHR)(EGLDisplay dpy, EGLSurface surface, EGLint* rects, EGLint n_rects)
+{
+	typedef EGLBoolean(EGLAPIENTRY * PFNEGLSetDamageRegionKHRPROC)(EGLDisplay dpy, EGLSurface surface, EGLint * rects, EGLint n_rects);
+	static PFNEGLSetDamageRegionKHRPROC _SetDamageRegionKHR = (PFNEGLSetDamageRegionKHRPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::SetDamageRegionKHR);
+	return _SetDamageRegionKHR(dpy, surface, rects, n_rects);
+}
+inline EGLBoolean DYNAMICEGL_FUNCTION(SignalSyncKHR)(EGLDisplay dpy, EGLSyncKHR sync, EGLenum mode)
+{
+	typedef EGLBoolean(EGLAPIENTRY * PFNEGLSignalSyncKHRPROC)(EGLDisplay dpy, EGLSyncKHR sync, EGLenum mode);
+	static PFNEGLSignalSyncKHRPROC _SignalSyncKHR = (PFNEGLSignalSyncKHRPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::SignalSyncKHR);
+	return _SignalSyncKHR(dpy, sync, mode);
+}
+inline EGLStreamKHR DYNAMICEGL_FUNCTION(CreateStreamKHR)(EGLDisplay dpy, const EGLint* attrib_list)
+{
+	typedef EGLStreamKHR(EGLAPIENTRY * PFNEGLCreateStreamKHRPROC)(EGLDisplay dpy, const EGLint * attrib_list);
+	static PFNEGLCreateStreamKHRPROC _CreateStreamKHR = (PFNEGLCreateStreamKHRPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::CreateStreamKHR);
+	return _CreateStreamKHR(dpy, attrib_list);
+}
+inline EGLBoolean DYNAMICEGL_FUNCTION(DestroyStreamKHR)(EGLDisplay dpy, EGLStreamKHR stream)
+{
+	typedef EGLBoolean(EGLAPIENTRY * PFNEGLDestroyStreamKHRPROC)(EGLDisplay dpy, EGLStreamKHR stream);
+	static PFNEGLDestroyStreamKHRPROC _DestroyStreamKHR = (PFNEGLDestroyStreamKHRPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::DestroyStreamKHR);
+	return _DestroyStreamKHR(dpy, stream);
+}
+inline EGLBoolean DYNAMICEGL_FUNCTION(StreamAttribKHR)(EGLDisplay dpy, EGLStreamKHR stream, EGLenum attribute, EGLint value)
+{
+	typedef EGLBoolean(EGLAPIENTRY * PFNEGLStreamAttribKHRPROC)(EGLDisplay dpy, EGLStreamKHR stream, EGLenum attribute, EGLint value);
+	static PFNEGLStreamAttribKHRPROC _StreamAttribKHR = (PFNEGLStreamAttribKHRPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::StreamAttribKHR);
+	return _StreamAttribKHR(dpy, stream, attribute, value);
+}
+inline EGLBoolean DYNAMICEGL_FUNCTION(QueryStreamKHR)(EGLDisplay dpy, EGLStreamKHR stream, EGLenum attribute, EGLint* value)
+{
+	typedef EGLBoolean(EGLAPIENTRY * PFNEGLQueryStreamKHRPROC)(EGLDisplay dpy, EGLStreamKHR stream, EGLenum attribute, EGLint * value);
+	static PFNEGLQueryStreamKHRPROC _QueryStreamKHR = (PFNEGLQueryStreamKHRPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::QueryStreamKHR);
+	return _QueryStreamKHR(dpy, stream, attribute, value);
+}
+inline EGLBoolean DYNAMICEGL_FUNCTION(QueryStreamu64KHR)(EGLDisplay dpy, EGLStreamKHR stream, EGLenum attribute, EGLuint64KHR* value)
+{
+	typedef EGLBoolean(EGLAPIENTRY * PFNEGLQueryStreamu64KHRPROC)(EGLDisplay dpy, EGLStreamKHR stream, EGLenum attribute, EGLuint64KHR * value);
+	static PFNEGLQueryStreamu64KHRPROC _QueryStreamu64KHR = (PFNEGLQueryStreamu64KHRPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::QueryStreamu64KHR);
+	return _QueryStreamu64KHR(dpy, stream, attribute, value);
+}
+inline EGLStreamKHR 
DYNAMICEGL_FUNCTION(CreateStreamAttribKHR)(EGLDisplay dpy, const EGLAttrib* attrib_list) +{ + typedef EGLStreamKHR(EGLAPIENTRY * PFNEGLCreateStreamAttribKHRPROC)(EGLDisplay dpy, const EGLAttrib * attrib_list); + static PFNEGLCreateStreamAttribKHRPROC _CreateStreamAttribKHR = (PFNEGLCreateStreamAttribKHRPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::CreateStreamAttribKHR); + return _CreateStreamAttribKHR(dpy, attrib_list); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(SetStreamAttribKHR)(EGLDisplay dpy, EGLStreamKHR stream, EGLenum attribute, EGLAttrib value) +{ + typedef EGLBoolean(EGLAPIENTRY * PFNEGLSetStreamAttribKHRPROC)(EGLDisplay dpy, EGLStreamKHR stream, EGLenum attribute, EGLAttrib value); + static PFNEGLSetStreamAttribKHRPROC _SetStreamAttribKHR = (PFNEGLSetStreamAttribKHRPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::SetStreamAttribKHR); + return _SetStreamAttribKHR(dpy, stream, attribute, value); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(QueryStreamAttribKHR)(EGLDisplay dpy, EGLStreamKHR stream, EGLenum attribute, EGLAttrib* value) +{ + typedef EGLBoolean(EGLAPIENTRY * PFNEGLQueryStreamAttribKHRPROC)(EGLDisplay dpy, EGLStreamKHR stream, EGLenum attribute, EGLAttrib * value); + static PFNEGLQueryStreamAttribKHRPROC _QueryStreamAttribKHR = (PFNEGLQueryStreamAttribKHRPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::QueryStreamAttribKHR); + return _QueryStreamAttribKHR(dpy, stream, attribute, value); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(StreamConsumerAcquireAttribKHR)(EGLDisplay dpy, EGLStreamKHR stream, const EGLAttrib* attrib_list) +{ + typedef EGLBoolean(EGLAPIENTRY * PFNEGLStreamConsumerAcquireAttribKHRPROC)(EGLDisplay dpy, EGLStreamKHR stream, const EGLAttrib * attrib_list); + static PFNEGLStreamConsumerAcquireAttribKHRPROC _StreamConsumerAcquireAttribKHR = (PFNEGLStreamConsumerAcquireAttribKHRPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::StreamConsumerAcquireAttribKHR); + return _StreamConsumerAcquireAttribKHR(dpy, stream, attrib_list); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(StreamConsumerReleaseAttribKHR)(EGLDisplay dpy, EGLStreamKHR stream, const EGLAttrib* attrib_list) +{ + typedef EGLBoolean(EGLAPIENTRY * PFNEGLStreamConsumerReleaseAttribKHRPROC)(EGLDisplay dpy, EGLStreamKHR stream, const EGLAttrib * attrib_list); + static PFNEGLStreamConsumerReleaseAttribKHRPROC _StreamConsumerReleaseAttribKHR = (PFNEGLStreamConsumerReleaseAttribKHRPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::StreamConsumerReleaseAttribKHR); + return _StreamConsumerReleaseAttribKHR(dpy, stream, attrib_list); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(StreamConsumerGLTextureExternalKHR)(EGLDisplay dpy, EGLStreamKHR stream) +{ + typedef EGLBoolean(EGLAPIENTRY * PFNEGLStreamConsumerGLTextureExternalKHRPROC)(EGLDisplay dpy, EGLStreamKHR stream); + static PFNEGLStreamConsumerGLTextureExternalKHRPROC _StreamConsumerGLTextureExternalKHR = (PFNEGLStreamConsumerGLTextureExternalKHRPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::StreamConsumerGLTextureExternalKHR); + return _StreamConsumerGLTextureExternalKHR(dpy, stream); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(StreamConsumerAcquireKHR)(EGLDisplay dpy, EGLStreamKHR stream) +{ + typedef EGLBoolean(EGLAPIENTRY * PFNEGLStreamConsumerAcquireKHRPROC)(EGLDisplay dpy, EGLStreamKHR stream); + static PFNEGLStreamConsumerAcquireKHRPROC _StreamConsumerAcquireKHR = 
(PFNEGLStreamConsumerAcquireKHRPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::StreamConsumerAcquireKHR);
+	return _StreamConsumerAcquireKHR(dpy, stream);
+}
+inline EGLBoolean DYNAMICEGL_FUNCTION(StreamConsumerReleaseKHR)(EGLDisplay dpy, EGLStreamKHR stream)
+{
+	typedef EGLBoolean(EGLAPIENTRY * PFNEGLStreamConsumerReleaseKHRPROC)(EGLDisplay dpy, EGLStreamKHR stream);
+	static PFNEGLStreamConsumerReleaseKHRPROC _StreamConsumerReleaseKHR = (PFNEGLStreamConsumerReleaseKHRPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::StreamConsumerReleaseKHR);
+	return _StreamConsumerReleaseKHR(dpy, stream);
+}
+inline EGLNativeFileDescriptorKHR DYNAMICEGL_FUNCTION(GetStreamFileDescriptorKHR)(EGLDisplay dpy, EGLStreamKHR stream)
+{
+	typedef EGLNativeFileDescriptorKHR(EGLAPIENTRY * PFNEGLGetStreamFileDescriptorKHRPROC)(EGLDisplay dpy, EGLStreamKHR stream);
+	static PFNEGLGetStreamFileDescriptorKHRPROC _GetStreamFileDescriptorKHR = (PFNEGLGetStreamFileDescriptorKHRPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::GetStreamFileDescriptorKHR);
+	return _GetStreamFileDescriptorKHR(dpy, stream);
+}
+inline EGLStreamKHR DYNAMICEGL_FUNCTION(CreateStreamFromFileDescriptorKHR)(EGLDisplay dpy, EGLNativeFileDescriptorKHR file_descriptor)
+{
+	typedef EGLStreamKHR(EGLAPIENTRY * PFNEGLCreateStreamFromFileDescriptorKHRPROC)(EGLDisplay dpy, EGLNativeFileDescriptorKHR file_descriptor);
+	static PFNEGLCreateStreamFromFileDescriptorKHRPROC _CreateStreamFromFileDescriptorKHR = (PFNEGLCreateStreamFromFileDescriptorKHRPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::CreateStreamFromFileDescriptorKHR);
+	return _CreateStreamFromFileDescriptorKHR(dpy, file_descriptor);
+}
+inline EGLBoolean DYNAMICEGL_FUNCTION(QueryStreamTimeKHR)(EGLDisplay dpy, EGLStreamKHR stream, EGLenum attribute, EGLTimeKHR* value)
+{
+	typedef EGLBoolean(EGLAPIENTRY * PFNEGLQueryStreamTimeKHRPROC)(EGLDisplay dpy, EGLStreamKHR stream, EGLenum attribute, EGLTimeKHR * value);
+	static PFNEGLQueryStreamTimeKHRPROC _QueryStreamTimeKHR = (PFNEGLQueryStreamTimeKHRPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::QueryStreamTimeKHR);
+	return _QueryStreamTimeKHR(dpy, stream, attribute, value);
+}
+inline EGLSurface DYNAMICEGL_FUNCTION(CreateStreamProducerSurfaceKHR)(EGLDisplay dpy, EGLConfig config, EGLStreamKHR stream, const EGLint* attrib_list)
+{
+	typedef EGLSurface(EGLAPIENTRY * PFNEGLCreateStreamProducerSurfaceKHRPROC)(EGLDisplay dpy, EGLConfig config, EGLStreamKHR stream, const EGLint * attrib_list);
+	static PFNEGLCreateStreamProducerSurfaceKHRPROC _CreateStreamProducerSurfaceKHR = (PFNEGLCreateStreamProducerSurfaceKHRPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::CreateStreamProducerSurfaceKHR);
+	return _CreateStreamProducerSurfaceKHR(dpy, config, stream, attrib_list);
+}
+inline EGLBoolean DYNAMICEGL_FUNCTION(SwapBuffersWithDamageKHR)(EGLDisplay dpy, EGLSurface surface, EGLint* rects, EGLint n_rects)
+{
+	typedef EGLBoolean(EGLAPIENTRY * PFNEGLSwapBuffersWithDamageKHRPROC)(EGLDisplay dpy, EGLSurface surface, EGLint * rects, EGLint n_rects);
+	static PFNEGLSwapBuffersWithDamageKHRPROC _SwapBuffersWithDamageKHR = (PFNEGLSwapBuffersWithDamageKHRPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::SwapBuffersWithDamageKHR);
+	return _SwapBuffersWithDamageKHR(dpy, surface, rects, n_rects);
+}
+inline EGLint DYNAMICEGL_FUNCTION(WaitSyncKHR)(EGLDisplay dpy, EGLSyncKHR sync, EGLint flags)
+{
+	typedef EGLint(EGLAPIENTRY * 
PFNEGLWaitSyncKHRPROC)(EGLDisplay dpy, EGLSyncKHR sync, EGLint flags); + static PFNEGLWaitSyncKHRPROC _WaitSyncKHR = (PFNEGLWaitSyncKHRPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::WaitSyncKHR); + return _WaitSyncKHR(dpy, sync, flags); +} +inline void DYNAMICEGL_FUNCTION(SetBlobCacheFuncsANDROID)(EGLDisplay dpy, EGLSetBlobFuncANDROID set, EGLGetBlobFuncANDROID get) +{ + typedef void (EGLAPIENTRY * PFNEGLSetBlobCacheFuncsANDROIDPROC)(EGLDisplay dpy, EGLSetBlobFuncANDROID set, EGLGetBlobFuncANDROID get); + static PFNEGLSetBlobCacheFuncsANDROIDPROC _SetBlobCacheFuncsANDROID = (PFNEGLSetBlobCacheFuncsANDROIDPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::SetBlobCacheFuncsANDROID); + return _SetBlobCacheFuncsANDROID(dpy, set, get); +} +inline EGLClientBuffer DYNAMICEGL_FUNCTION(CreateNativeClientBufferANDROID)(const EGLint* attrib_list) +{ + typedef EGLClientBuffer(EGLAPIENTRY * PFNEGLCreateNativeClientBufferANDROIDPROC)(const EGLint * attrib_list); + static PFNEGLCreateNativeClientBufferANDROIDPROC _CreateNativeClientBufferANDROID = (PFNEGLCreateNativeClientBufferANDROIDPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::CreateNativeClientBufferANDROID); + return _CreateNativeClientBufferANDROID(attrib_list); +} +inline EGLint DYNAMICEGL_FUNCTION(DupNativeFenceFDANDROID)(EGLDisplay dpy, EGLSyncKHR sync) +{ + typedef EGLint(EGLAPIENTRY * PFNEGLDupNativeFenceFDANDROIDPROC)(EGLDisplay dpy, EGLSyncKHR sync); + static PFNEGLDupNativeFenceFDANDROIDPROC _DupNativeFenceFDANDROID = (PFNEGLDupNativeFenceFDANDROIDPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::DupNativeFenceFDANDROID); + return _DupNativeFenceFDANDROID(dpy, sync); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(PresentationTimeANDROID)(EGLDisplay dpy, EGLSurface surface, EGLnsecsANDROID time) +{ + typedef EGLBoolean(EGLAPIENTRY * PFNEGLPresentationTimeANDROIDPROC)(EGLDisplay dpy, EGLSurface surface, EGLnsecsANDROID time); + static PFNEGLPresentationTimeANDROIDPROC _PresentationTimeANDROID = (PFNEGLPresentationTimeANDROIDPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::PresentationTimeANDROID); + return _PresentationTimeANDROID(dpy, surface, time); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(QuerySurfacePointerANGLE)(EGLDisplay dpy, EGLSurface surface, EGLint attribute, void** value) +{ + typedef EGLBoolean(EGLAPIENTRY * PFNEGLQuerySurfacePointerANGLEPROC)(EGLDisplay dpy, EGLSurface surface, EGLint attribute, void** value); + static PFNEGLQuerySurfacePointerANGLEPROC _QuerySurfacePointerANGLE = (PFNEGLQuerySurfacePointerANGLEPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::QuerySurfacePointerANGLE); + return _QuerySurfacePointerANGLE(dpy, surface, attribute, value); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(CompositorSetContextListEXT)(const EGLint* external_ref_ids, EGLint num_entries) +{ + typedef EGLBoolean(EGLAPIENTRY * PFNEGLCompositorSetContextListEXTPROC)(const EGLint * external_ref_ids, EGLint num_entries); + static PFNEGLCompositorSetContextListEXTPROC _CompositorSetContextListEXT = (PFNEGLCompositorSetContextListEXTPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::CompositorSetContextListEXT); + return _CompositorSetContextListEXT(external_ref_ids, num_entries); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(CompositorSetContextAttributesEXT)(EGLint external_ref_id, const EGLint* context_attributes, EGLint num_entries) +{ + typedef EGLBoolean(EGLAPIENTRY * 
PFNEGLCompositorSetContextAttributesEXTPROC)(EGLint external_ref_id, const EGLint * context_attributes, EGLint num_entries); + static PFNEGLCompositorSetContextAttributesEXTPROC _CompositorSetContextAttributesEXT = (PFNEGLCompositorSetContextAttributesEXTPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::CompositorSetContextAttributesEXT); + return _CompositorSetContextAttributesEXT(external_ref_id, context_attributes, num_entries); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(CompositorSetWindowListEXT)(EGLint external_ref_id, const EGLint* external_win_ids, EGLint num_entries) +{ + typedef EGLBoolean(EGLAPIENTRY * PFNEGLCompositorSetWindowListEXTPROC)(EGLint external_ref_id, const EGLint * external_win_ids, EGLint num_entries); + static PFNEGLCompositorSetWindowListEXTPROC _CompositorSetWindowListEXT = (PFNEGLCompositorSetWindowListEXTPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::CompositorSetWindowListEXT); + return _CompositorSetWindowListEXT(external_ref_id, external_win_ids, num_entries); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(CompositorSetWindowAttributesEXT)(EGLint external_win_id, const EGLint* window_attributes, EGLint num_entries) +{ + typedef EGLBoolean(EGLAPIENTRY * PFNEGLCompositorSetWindowAttributesEXTPROC)(EGLint external_win_id, const EGLint * window_attributes, EGLint num_entries); + static PFNEGLCompositorSetWindowAttributesEXTPROC _CompositorSetWindowAttributesEXT = (PFNEGLCompositorSetWindowAttributesEXTPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::CompositorSetWindowAttributesEXT); + return _CompositorSetWindowAttributesEXT(external_win_id, window_attributes, num_entries); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(CompositorBindTexWindowEXT)(EGLint external_win_id) +{ + typedef EGLBoolean(EGLAPIENTRY * PFNEGLCompositorBindTexWindowEXTPROC)(EGLint external_win_id); + static PFNEGLCompositorBindTexWindowEXTPROC _CompositorBindTexWindowEXT = (PFNEGLCompositorBindTexWindowEXTPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::CompositorBindTexWindowEXT); + return _CompositorBindTexWindowEXT(external_win_id); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(CompositorSetSizeEXT)(EGLint external_win_id, EGLint width, EGLint height) +{ + typedef EGLBoolean(EGLAPIENTRY * PFNEGLCompositorSetSizeEXTPROC)(EGLint external_win_id, EGLint width, EGLint height); + static PFNEGLCompositorSetSizeEXTPROC _CompositorSetSizeEXT = (PFNEGLCompositorSetSizeEXTPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::CompositorSetSizeEXT); + return _CompositorSetSizeEXT(external_win_id, width, height); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(CompositorSwapPolicyEXT)(EGLint external_win_id, EGLint policy) +{ + typedef EGLBoolean(EGLAPIENTRY * PFNEGLCompositorSwapPolicyEXTPROC)(EGLint external_win_id, EGLint policy); + static PFNEGLCompositorSwapPolicyEXTPROC _CompositorSwapPolicyEXT = (PFNEGLCompositorSwapPolicyEXTPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::CompositorSwapPolicyEXT); + return _CompositorSwapPolicyEXT(external_win_id, policy); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(QueryDeviceAttribEXT)(EGLDeviceEXT device, EGLint attribute, EGLAttrib* value) +{ + typedef EGLBoolean(EGLAPIENTRY * PFNEGLQueryDeviceAttribEXTPROC)(EGLDeviceEXT device, EGLint attribute, EGLAttrib * value); + static PFNEGLQueryDeviceAttribEXTPROC _QueryDeviceAttribEXT = (PFNEGLQueryDeviceAttribEXTPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::QueryDeviceAttribEXT); + return 
_QueryDeviceAttribEXT(device, attribute, value); +} +inline const char* DYNAMICEGL_FUNCTION(QueryDeviceStringEXT)(EGLDeviceEXT device, EGLint name) +{ + typedef const char* (EGLAPIENTRY * PFNEGLQueryDeviceStringEXTPROC)(EGLDeviceEXT device, EGLint name); + static PFNEGLQueryDeviceStringEXTPROC _QueryDeviceStringEXT = (PFNEGLQueryDeviceStringEXTPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::QueryDeviceStringEXT); + return _QueryDeviceStringEXT(device, name); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(QueryDevicesEXT)(EGLint max_devices, EGLDeviceEXT* devices, EGLint* num_devices) +{ + typedef EGLBoolean(EGLAPIENTRY * PFNEGLQueryDevicesEXTPROC)(EGLint max_devices, EGLDeviceEXT * devices, EGLint * num_devices); + static PFNEGLQueryDevicesEXTPROC _QueryDevicesEXT = (PFNEGLQueryDevicesEXTPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::QueryDevicesEXT); + return _QueryDevicesEXT(max_devices, devices, num_devices); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(QueryDisplayAttribEXT)(EGLDisplay dpy, EGLint attribute, EGLAttrib* value) +{ + typedef EGLBoolean(EGLAPIENTRY * PFNEGLQueryDisplayAttribEXTPROC)(EGLDisplay dpy, EGLint attribute, EGLAttrib * value); + static PFNEGLQueryDisplayAttribEXTPROC _QueryDisplayAttribEXT = (PFNEGLQueryDisplayAttribEXTPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::QueryDisplayAttribEXT); + return _QueryDisplayAttribEXT(dpy, attribute, value); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(QueryDmaBufFormatsEXT)(EGLDisplay dpy, EGLint max_formats, EGLint* formats, EGLint* num_formats) +{ + typedef EGLBoolean(EGLAPIENTRY * PFNEGLQueryDmaBufFormatsEXTPROC)(EGLDisplay dpy, EGLint max_formats, EGLint * formats, EGLint * num_formats); + static PFNEGLQueryDmaBufFormatsEXTPROC _QueryDmaBufFormatsEXT = (PFNEGLQueryDmaBufFormatsEXTPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::QueryDmaBufFormatsEXT); + return _QueryDmaBufFormatsEXT(dpy, max_formats, formats, num_formats); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(QueryDmaBufModifiersEXT)(EGLDisplay dpy, EGLint format, EGLint max_modifiers, EGLuint64KHR* modifiers, EGLBoolean* external_only, EGLint* num_modifiers) +{ + typedef EGLBoolean(EGLAPIENTRY * PFNEGLQueryDmaBufModifiersEXTPROC)(EGLDisplay dpy, EGLint format, EGLint max_modifiers, EGLuint64KHR * modifiers, EGLBoolean * external_only, EGLint * num_modifiers); + static PFNEGLQueryDmaBufModifiersEXTPROC _QueryDmaBufModifiersEXT = (PFNEGLQueryDmaBufModifiersEXTPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::QueryDmaBufModifiersEXT); + return _QueryDmaBufModifiersEXT(dpy, format, max_modifiers, modifiers, external_only, num_modifiers); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(GetOutputLayersEXT)(EGLDisplay dpy, const EGLAttrib* attrib_list, EGLOutputLayerEXT* layers, EGLint max_layers, EGLint* num_layers) +{ + typedef EGLBoolean(EGLAPIENTRY * PFNEGLGetOutputLayersEXTPROC)(EGLDisplay dpy, const EGLAttrib * attrib_list, EGLOutputLayerEXT * layers, EGLint max_layers, EGLint * num_layers); + static PFNEGLGetOutputLayersEXTPROC _GetOutputLayersEXT = (PFNEGLGetOutputLayersEXTPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::GetOutputLayersEXT); + return _GetOutputLayersEXT(dpy, attrib_list, layers, max_layers, num_layers); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(GetOutputPortsEXT)(EGLDisplay dpy, const EGLAttrib* attrib_list, EGLOutputPortEXT* ports, EGLint max_ports, EGLint* num_ports) +{ + typedef EGLBoolean(EGLAPIENTRY * PFNEGLGetOutputPortsEXTPROC)(EGLDisplay 
dpy, const EGLAttrib * attrib_list, EGLOutputPortEXT * ports, EGLint max_ports, EGLint * num_ports); + static PFNEGLGetOutputPortsEXTPROC _GetOutputPortsEXT = (PFNEGLGetOutputPortsEXTPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::GetOutputPortsEXT); + return _GetOutputPortsEXT(dpy, attrib_list, ports, max_ports, num_ports); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(OutputLayerAttribEXT)(EGLDisplay dpy, EGLOutputLayerEXT layer, EGLint attribute, EGLAttrib value) +{ + typedef EGLBoolean(EGLAPIENTRY * PFNEGLOutputLayerAttribEXTPROC)(EGLDisplay dpy, EGLOutputLayerEXT layer, EGLint attribute, EGLAttrib value); + static PFNEGLOutputLayerAttribEXTPROC _OutputLayerAttribEXT = (PFNEGLOutputLayerAttribEXTPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::OutputLayerAttribEXT); + return _OutputLayerAttribEXT(dpy, layer, attribute, value); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(QueryOutputLayerAttribEXT)(EGLDisplay dpy, EGLOutputLayerEXT layer, EGLint attribute, EGLAttrib* value) +{ + typedef EGLBoolean(EGLAPIENTRY * PFNEGLQueryOutputLayerAttribEXTPROC)(EGLDisplay dpy, EGLOutputLayerEXT layer, EGLint attribute, EGLAttrib * value); + static PFNEGLQueryOutputLayerAttribEXTPROC _QueryOutputLayerAttribEXT = (PFNEGLQueryOutputLayerAttribEXTPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::QueryOutputLayerAttribEXT); + return _QueryOutputLayerAttribEXT(dpy, layer, attribute, value); +} +inline const char* DYNAMICEGL_FUNCTION(QueryOutputLayerStringEXT)(EGLDisplay dpy, EGLOutputLayerEXT layer, EGLint name) +{ + typedef const char* (EGLAPIENTRY * PFNEGLQueryOutputLayerStringEXTPROC)(EGLDisplay dpy, EGLOutputLayerEXT layer, EGLint name); + static PFNEGLQueryOutputLayerStringEXTPROC _QueryOutputLayerStringEXT = (PFNEGLQueryOutputLayerStringEXTPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::QueryOutputLayerStringEXT); + return _QueryOutputLayerStringEXT(dpy, layer, name); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(OutputPortAttribEXT)(EGLDisplay dpy, EGLOutputPortEXT port, EGLint attribute, EGLAttrib value) +{ + typedef EGLBoolean(EGLAPIENTRY * PFNEGLOutputPortAttribEXTPROC)(EGLDisplay dpy, EGLOutputPortEXT port, EGLint attribute, EGLAttrib value); + static PFNEGLOutputPortAttribEXTPROC _OutputPortAttribEXT = (PFNEGLOutputPortAttribEXTPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::OutputPortAttribEXT); + return _OutputPortAttribEXT(dpy, port, attribute, value); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(QueryOutputPortAttribEXT)(EGLDisplay dpy, EGLOutputPortEXT port, EGLint attribute, EGLAttrib* value) +{ + typedef EGLBoolean(EGLAPIENTRY * PFNEGLQueryOutputPortAttribEXTPROC)(EGLDisplay dpy, EGLOutputPortEXT port, EGLint attribute, EGLAttrib * value); + static PFNEGLQueryOutputPortAttribEXTPROC _QueryOutputPortAttribEXT = (PFNEGLQueryOutputPortAttribEXTPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::QueryOutputPortAttribEXT); + return _QueryOutputPortAttribEXT(dpy, port, attribute, value); +} +inline const char* DYNAMICEGL_FUNCTION(QueryOutputPortStringEXT)(EGLDisplay dpy, EGLOutputPortEXT port, EGLint name) +{ + typedef const char* (EGLAPIENTRY * PFNEGLQueryOutputPortStringEXTPROC)(EGLDisplay dpy, EGLOutputPortEXT port, EGLint name); + static PFNEGLQueryOutputPortStringEXTPROC _QueryOutputPortStringEXT = (PFNEGLQueryOutputPortStringEXTPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::QueryOutputPortStringEXT); + return _QueryOutputPortStringEXT(dpy, port, name); +} +inline 
EGLDisplay DYNAMICEGL_FUNCTION(GetPlatformDisplayEXT)(EGLenum platform, void* native_display, const EGLint* attrib_list) +{ + typedef EGLDisplay(EGLAPIENTRY * PFNEGLGetPlatformDisplayEXTPROC)(EGLenum platform, void* native_display, const EGLint * attrib_list); + static PFNEGLGetPlatformDisplayEXTPROC _GetPlatformDisplayEXT = (PFNEGLGetPlatformDisplayEXTPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::GetPlatformDisplayEXT); + return _GetPlatformDisplayEXT(platform, native_display, attrib_list); +} +inline EGLSurface DYNAMICEGL_FUNCTION(CreatePlatformWindowSurfaceEXT)(EGLDisplay dpy, EGLConfig config, void* native_window, const EGLint* attrib_list) +{ + typedef EGLSurface(EGLAPIENTRY * PFNEGLCreatePlatformWindowSurfaceEXTPROC)(EGLDisplay dpy, EGLConfig config, void* native_window, const EGLint * attrib_list); + static PFNEGLCreatePlatformWindowSurfaceEXTPROC _CreatePlatformWindowSurfaceEXT = (PFNEGLCreatePlatformWindowSurfaceEXTPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::CreatePlatformWindowSurfaceEXT); + return _CreatePlatformWindowSurfaceEXT(dpy, config, native_window, attrib_list); +} +inline EGLSurface DYNAMICEGL_FUNCTION(CreatePlatformPixmapSurfaceEXT)(EGLDisplay dpy, EGLConfig config, void* native_pixmap, const EGLint* attrib_list) +{ + typedef EGLSurface(EGLAPIENTRY * PFNEGLCreatePlatformPixmapSurfaceEXTPROC)(EGLDisplay dpy, EGLConfig config, void* native_pixmap, const EGLint * attrib_list); + static PFNEGLCreatePlatformPixmapSurfaceEXTPROC _CreatePlatformPixmapSurfaceEXT = (PFNEGLCreatePlatformPixmapSurfaceEXTPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::CreatePlatformPixmapSurfaceEXT); + return _CreatePlatformPixmapSurfaceEXT(dpy, config, native_pixmap, attrib_list); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(StreamConsumerOutputEXT)(EGLDisplay dpy, EGLStreamKHR stream, EGLOutputLayerEXT layer) +{ + typedef EGLBoolean(EGLAPIENTRY * PFNEGLStreamConsumerOutputEXTPROC)(EGLDisplay dpy, EGLStreamKHR stream, EGLOutputLayerEXT layer); + static PFNEGLStreamConsumerOutputEXTPROC _StreamConsumerOutputEXT = (PFNEGLStreamConsumerOutputEXTPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::StreamConsumerOutputEXT); + return _StreamConsumerOutputEXT(dpy, stream, layer); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(SwapBuffersWithDamageEXT)(EGLDisplay dpy, EGLSurface surface, EGLint* rects, EGLint n_rects) +{ + typedef EGLBoolean(EGLAPIENTRY * PFNEGLSwapBuffersWithDamageEXTPROC)(EGLDisplay dpy, EGLSurface surface, EGLint * rects, EGLint n_rects); + static PFNEGLSwapBuffersWithDamageEXTPROC _SwapBuffersWithDamageEXT = (PFNEGLSwapBuffersWithDamageEXTPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::SwapBuffersWithDamageEXT); + return _SwapBuffersWithDamageEXT(dpy, surface, rects, n_rects); +} +inline EGLSurface DYNAMICEGL_FUNCTION(CreatePixmapSurfaceHI)(EGLDisplay dpy, EGLConfig config, struct EGLClientPixmapHI* pixmap) +{ + typedef EGLSurface(EGLAPIENTRY * PFNEGLCreatePixmapSurfaceHIPROC)(EGLDisplay dpy, EGLConfig config, struct EGLClientPixmapHI * pixmap); + static PFNEGLCreatePixmapSurfaceHIPROC _CreatePixmapSurfaceHI = (PFNEGLCreatePixmapSurfaceHIPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::CreatePixmapSurfaceHI); + return _CreatePixmapSurfaceHI(dpy, config, pixmap); +} +inline EGLImageKHR DYNAMICEGL_FUNCTION(CreateDRMImageMESA)(EGLDisplay dpy, const EGLint* attrib_list) +{ + typedef EGLImageKHR(EGLAPIENTRY * PFNEGLCreateDRMImageMESAPROC)(EGLDisplay dpy, const EGLint * 
attrib_list); + static PFNEGLCreateDRMImageMESAPROC _CreateDRMImageMESA = (PFNEGLCreateDRMImageMESAPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::CreateDRMImageMESA); + return _CreateDRMImageMESA(dpy, attrib_list); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(ExportDRMImageMESA)(EGLDisplay dpy, EGLImageKHR image, EGLint* name, EGLint* handle, EGLint* stride) +{ + typedef EGLBoolean(EGLAPIENTRY * PFNEGLExportDRMImageMESAPROC)(EGLDisplay dpy, EGLImageKHR image, EGLint * name, EGLint * handle, EGLint * stride); + static PFNEGLExportDRMImageMESAPROC _ExportDRMImageMESA = (PFNEGLExportDRMImageMESAPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::ExportDRMImageMESA); + return _ExportDRMImageMESA(dpy, image, name, handle, stride); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(ExportDMABUFImageQueryMESA)(EGLDisplay dpy, EGLImageKHR image, int* fourcc, int* num_planes, EGLuint64KHR* modifiers) +{ + typedef EGLBoolean(EGLAPIENTRY * PFNEGLExportDMABUFImageQueryMESAPROC)(EGLDisplay dpy, EGLImageKHR image, int* fourcc, int* num_planes, EGLuint64KHR * modifiers); + static PFNEGLExportDMABUFImageQueryMESAPROC _ExportDMABUFImageQueryMESA = (PFNEGLExportDMABUFImageQueryMESAPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::ExportDMABUFImageQueryMESA); + return _ExportDMABUFImageQueryMESA(dpy, image, fourcc, num_planes, modifiers); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(ExportDMABUFImageMESA)(EGLDisplay dpy, EGLImageKHR image, int* fds, EGLint* strides, EGLint* offsets) +{ + typedef EGLBoolean(EGLAPIENTRY * PFNEGLExportDMABUFImageMESAPROC)(EGLDisplay dpy, EGLImageKHR image, int* fds, EGLint * strides, EGLint * offsets); + static PFNEGLExportDMABUFImageMESAPROC _ExportDMABUFImageMESA = (PFNEGLExportDMABUFImageMESAPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::ExportDMABUFImageMESA); + return _ExportDMABUFImageMESA(dpy, image, fds, strides, offsets); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(SwapBuffersRegionNOK)(EGLDisplay dpy, EGLSurface surface, EGLint numRects, const EGLint* rects) +{ + typedef EGLBoolean(EGLAPIENTRY * PFNEGLSwapBuffersRegionNOKPROC)(EGLDisplay dpy, EGLSurface surface, EGLint numRects, const EGLint * rects); + static PFNEGLSwapBuffersRegionNOKPROC _SwapBuffersRegionNOK = (PFNEGLSwapBuffersRegionNOKPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::SwapBuffersRegionNOK); + return _SwapBuffersRegionNOK(dpy, surface, numRects, rects); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(SwapBuffersRegion2NOK)(EGLDisplay dpy, EGLSurface surface, EGLint numRects, const EGLint* rects) +{ + typedef EGLBoolean(EGLAPIENTRY * PFNEGLSwapBuffersRegion2NOKPROC)(EGLDisplay dpy, EGLSurface surface, EGLint numRects, const EGLint * rects); + static PFNEGLSwapBuffersRegion2NOKPROC _SwapBuffersRegion2NOK = (PFNEGLSwapBuffersRegion2NOKPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::SwapBuffersRegion2NOK); + return _SwapBuffersRegion2NOK(dpy, surface, numRects, rects); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(QueryNativeDisplayNV)(EGLDisplay dpy, EGLNativeDisplayType* display_id) +{ + typedef EGLBoolean(EGLAPIENTRY * PFNEGLQueryNativeDisplayNVPROC)(EGLDisplay dpy, EGLNativeDisplayType * display_id); + static PFNEGLQueryNativeDisplayNVPROC _QueryNativeDisplayNV = (PFNEGLQueryNativeDisplayNVPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::QueryNativeDisplayNV); + return _QueryNativeDisplayNV(dpy, display_id); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(QueryNativeWindowNV)(EGLDisplay dpy, 
EGLSurface surf, EGLNativeWindowType* window) +{ + typedef EGLBoolean(EGLAPIENTRY * PFNEGLQueryNativeWindowNVPROC)(EGLDisplay dpy, EGLSurface surf, EGLNativeWindowType * window); + static PFNEGLQueryNativeWindowNVPROC _QueryNativeWindowNV = (PFNEGLQueryNativeWindowNVPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::QueryNativeWindowNV); + return _QueryNativeWindowNV(dpy, surf, window); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(QueryNativePixmapNV)(EGLDisplay dpy, EGLSurface surf, EGLNativePixmapType* pixmap) +{ + typedef EGLBoolean(EGLAPIENTRY * PFNEGLQueryNativePixmapNVPROC)(EGLDisplay dpy, EGLSurface surf, EGLNativePixmapType * pixmap); + static PFNEGLQueryNativePixmapNVPROC _QueryNativePixmapNV = (PFNEGLQueryNativePixmapNVPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::QueryNativePixmapNV); + return _QueryNativePixmapNV(dpy, surf, pixmap); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(PostSubBufferNV)(EGLDisplay dpy, EGLSurface surface, EGLint x, EGLint y, EGLint width, EGLint height) +{ + typedef EGLBoolean(EGLAPIENTRY * PFNEGLPostSubBufferNVPROC)(EGLDisplay dpy, EGLSurface surface, EGLint x, EGLint y, EGLint width, EGLint height); + static PFNEGLPostSubBufferNVPROC _PostSubBufferNV = (PFNEGLPostSubBufferNVPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::PostSubBufferNV); + return _PostSubBufferNV(dpy, surface, x, y, width, height); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(StreamConsumerGLTextureExternalAttribsNV)(EGLDisplay dpy, EGLStreamKHR stream, EGLAttrib* attrib_list) +{ + typedef EGLBoolean(EGLAPIENTRY * PFNEGLStreamConsumerGLTextureExternalAttribsNVPROC)(EGLDisplay dpy, EGLStreamKHR stream, EGLAttrib * attrib_list); + static PFNEGLStreamConsumerGLTextureExternalAttribsNVPROC _StreamConsumerGLTextureExternalAttribsNV = (PFNEGLStreamConsumerGLTextureExternalAttribsNVPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::StreamConsumerGLTextureExternalAttribsNV); + return _StreamConsumerGLTextureExternalAttribsNV(dpy, stream, attrib_list); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(QueryDisplayAttribNV)(EGLDisplay dpy, EGLint attribute, EGLAttrib* value) +{ + typedef EGLBoolean(EGLAPIENTRY * PFNEGLQueryDisplayAttribNVPROC)(EGLDisplay dpy, EGLint attribute, EGLAttrib * value); + static PFNEGLQueryDisplayAttribNVPROC _QueryDisplayAttribNV = (PFNEGLQueryDisplayAttribNVPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::QueryDisplayAttribNV); + return _QueryDisplayAttribNV(dpy, attribute, value); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(SetStreamMetadataNV)(EGLDisplay dpy, EGLStreamKHR stream, EGLint n, EGLint offset, EGLint size, const void* data) +{ + typedef EGLBoolean(EGLAPIENTRY * PFNEGLSetStreamMetadataNVPROC)(EGLDisplay dpy, EGLStreamKHR stream, EGLint n, EGLint offset, EGLint size, const void* data); + static PFNEGLSetStreamMetadataNVPROC _SetStreamMetadataNV = (PFNEGLSetStreamMetadataNVPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::SetStreamMetadataNV); + return _SetStreamMetadataNV(dpy, stream, n, offset, size, data); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(QueryStreamMetadataNV)(EGLDisplay dpy, EGLStreamKHR stream, EGLenum name, EGLint n, EGLint offset, EGLint size, void* data) +{ + typedef EGLBoolean(EGLAPIENTRY * PFNEGLQueryStreamMetadataNVPROC)(EGLDisplay dpy, EGLStreamKHR stream, EGLenum name, EGLint n, EGLint offset, EGLint size, void* data); + static PFNEGLQueryStreamMetadataNVPROC _QueryStreamMetadataNV = 
(PFNEGLQueryStreamMetadataNVPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::QueryStreamMetadataNV); + return _QueryStreamMetadataNV(dpy, stream, name, n, offset, size, data); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(ResetStreamNV)(EGLDisplay dpy, EGLStreamKHR stream) +{ + typedef EGLBoolean(EGLAPIENTRY * PFNEGLResetStreamNVPROC)(EGLDisplay dpy, EGLStreamKHR stream); + static PFNEGLResetStreamNVPROC _ResetStreamNV = (PFNEGLResetStreamNVPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::ResetStreamNV); + return _ResetStreamNV(dpy, stream); +} +inline EGLSyncKHR DYNAMICEGL_FUNCTION(CreateStreamSyncNV)(EGLDisplay dpy, EGLStreamKHR stream, EGLenum type, const EGLint* attrib_list) +{ + typedef EGLSyncKHR(EGLAPIENTRY * PFNEGLCreateStreamSyncNVPROC)(EGLDisplay dpy, EGLStreamKHR stream, EGLenum type, const EGLint * attrib_list); + static PFNEGLCreateStreamSyncNVPROC _CreateStreamSyncNV = (PFNEGLCreateStreamSyncNVPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::CreateStreamSyncNV); + return _CreateStreamSyncNV(dpy, stream, type, attrib_list); +} +inline EGLSyncNV DYNAMICEGL_FUNCTION(CreateFenceSyncNV)(EGLDisplay dpy, EGLenum condition, const EGLint* attrib_list) +{ + typedef EGLSyncNV(EGLAPIENTRY * PFNEGLCreateFenceSyncNVPROC)(EGLDisplay dpy, EGLenum condition, const EGLint * attrib_list); + static PFNEGLCreateFenceSyncNVPROC _CreateFenceSyncNV = (PFNEGLCreateFenceSyncNVPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::CreateFenceSyncNV); + return _CreateFenceSyncNV(dpy, condition, attrib_list); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(DestroySyncNV)(EGLSyncNV sync) +{ + typedef EGLBoolean(EGLAPIENTRY * PFNEGLDestroySyncNVPROC)(EGLSyncNV sync); + static PFNEGLDestroySyncNVPROC _DestroySyncNV = (PFNEGLDestroySyncNVPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::DestroySyncNV); + return _DestroySyncNV(sync); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(FenceNV)(EGLSyncNV sync) +{ + typedef EGLBoolean(EGLAPIENTRY * PFNEGLFenceNVPROC)(EGLSyncNV sync); + static PFNEGLFenceNVPROC _FenceNV = (PFNEGLFenceNVPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::FenceNV); + return _FenceNV(sync); +} +inline EGLint DYNAMICEGL_FUNCTION(ClientWaitSyncNV)(EGLSyncNV sync, EGLint flags, EGLTimeNV timeout) +{ + typedef EGLint(EGLAPIENTRY * PFNEGLClientWaitSyncNVPROC)(EGLSyncNV sync, EGLint flags, EGLTimeNV timeout); + static PFNEGLClientWaitSyncNVPROC _ClientWaitSyncNV = (PFNEGLClientWaitSyncNVPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::ClientWaitSyncNV); + return _ClientWaitSyncNV(sync, flags, timeout); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(SignalSyncNV)(EGLSyncNV sync, EGLenum mode) +{ + typedef EGLBoolean(EGLAPIENTRY * PFNEGLSignalSyncNVPROC)(EGLSyncNV sync, EGLenum mode); + static PFNEGLSignalSyncNVPROC _SignalSyncNV = (PFNEGLSignalSyncNVPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::SignalSyncNV); + return _SignalSyncNV(sync, mode); +} +inline EGLBoolean DYNAMICEGL_FUNCTION(GetSyncAttribNV)(EGLSyncNV sync, EGLint attribute, EGLint* value) +{ + typedef EGLBoolean(EGLAPIENTRY * PFNEGLGetSyncAttribNVPROC)(EGLSyncNV sync, EGLint attribute, EGLint * value); + static PFNEGLGetSyncAttribNVPROC _GetSyncAttribNV = (PFNEGLGetSyncAttribNVPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::GetSyncAttribNV); + return _GetSyncAttribNV(sync, attribute, value); +} +inline EGLuint64NV DYNAMICEGL_FUNCTION(GetSystemTimeFrequencyNV)(void) +{ + typedef 
EGLuint64NV(EGLAPIENTRY * PFNEGLGetSystemTimeFrequencyNVPROC)(void); + static PFNEGLGetSystemTimeFrequencyNVPROC _GetSystemTimeFrequencyNV = (PFNEGLGetSystemTimeFrequencyNVPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::GetSystemTimeFrequencyNV); + return _GetSystemTimeFrequencyNV(); +} +inline EGLuint64NV DYNAMICEGL_FUNCTION(GetSystemTimeNV)(void) +{ + typedef EGLuint64NV(EGLAPIENTRY * PFNEGLGetSystemTimeNVPROC)(void); + static PFNEGLGetSystemTimeNVPROC _GetSystemTimeNV = (PFNEGLGetSystemTimeNVPROC)egl::internal::getEglExtFunction(egl::internal::EglExtFuncName::GetSystemTimeNV); + return _GetSystemTimeNV(); +} +#ifndef DYNAMICEGL_NO_NAMESPACE +} +#endif + +inline bool isEglExtensionSupported(const char* extensionName, bool resetExtensionCache = false) +{ + static const char* extensionString = DYNAMICEGL_CALL_FUNCTION(QueryString)(DYNAMICEGL_CALL_FUNCTION(GetCurrentDisplay)(), EGL_EXTENSIONS); + + if (resetExtensionCache) { extensionString = DYNAMICEGL_CALL_FUNCTION(QueryString)(DYNAMICEGL_CALL_FUNCTION(GetCurrentDisplay)(), EGL_EXTENSIONS); } + return egl::internal::isExtensionSupported(extensionString, extensionName); +} +inline bool isEglExtensionSupported(EGLDisplay display, const char* extensionName) +{ + const char* extensionString = DYNAMICEGL_CALL_FUNCTION(QueryString)(display, EGL_EXTENSIONS); + return egl::internal::isExtensionSupported(extensionString, extensionName); +} +#ifndef DYNAMICEGL_NO_NAMESPACE +} +#endif +#undef _Log +#endif diff --git a/sources/pvrsdk/Include/DynamicGles.h b/sources/pvrsdk/Include/DynamicGles.h new file mode 100755 index 00000000..5841c3d8 --- /dev/null +++ b/sources/pvrsdk/Include/DynamicGles.h @@ -0,0 +1,5498 @@ +#pragma once +#include "DynamicEgl.h" +#include "pvr_openlib.h" + +#ifdef GL_PROTOTYPES +#undef GL_PROTOTYPES +#endif +#define GL_NO_PROTOTYPES +#define EGL_NO_PROTOTYPES +#if defined( _WIN32 ) +#define WIN32_LEAN_AND_MEAN +#define NOMINMAX +#include <Windows.h> +#endif +#include <cstring> +#include <cassert> +#include <cstdio> + +// DETERMINE FEATURE LEVEL +#if defined(DYNAMICGLES_GLES31) +#define DYNAMICGLES_ENABLE_GLES31 +#define DYNAMICGLES_ENABLE_GLES3 +#define DYNAMICGLES_ENABLE_GLES2 +#elif defined(DYNAMICGLES_GLES30) +#define DYNAMICGLES_ENABLE_GLES3 +#define DYNAMICGLES_ENABLE_GLES2 +#elif defined(DYNAMICGLES_GLES20) +#define DYNAMICGLES_ENABLE_GLES2 +#else +#if !defined(DYNAMICGLES_GLES32) +#define DYNAMICGLES_GLES32 +#endif +#define DYNAMICGLES_ENABLE_GLES32 +#define DYNAMICGLES_ENABLE_GLES31 +#define DYNAMICGLES_ENABLE_GLES3 +#define DYNAMICGLES_ENABLE_GLES2 +#endif + +// DETERMINE IF FUNCTIONS SHOULD BE PREPENDED WITH gl OR PUT IN gl:: NAMESPACE (DEFAULT IS NAMESPACE) +#ifndef DYNAMICGLES_NO_NAMESPACE +#define DYNAMICGLES_FUNCTION(name) GL_APIENTRY name +#else +#if TARGET_OS_IPHONE +#define DYNAMICGLES_FUNCTION(name) GL_APIENTRY name +#else +#define DYNAMICGLES_FUNCTION(name) GL_APIENTRY gl##name +#endif +#endif + +// DETERMINE THE PLATFORM SPECIFIC LIBRARY NAME +namespace gl { +namespace internals { +#ifdef _WIN32 +static const char* libName = "libGLESv2.dll"; +#elif defined(__APPLE__) +static const char* libName = "libGLESv2.dylib"; +#else +static const char* libName = "libGLESv2.so"; +#endif +} +} + +// LOAD HEADER FILES DEPENDING ON FEATURE LEVEL +#if !TARGET_OS_IPHONE +#include "GLES3/gl32.h" +#include "GLES2/gl2ext.h" +#else +#include <OpenGLES/ES3/gl.h> +#include <OpenGLES/ES3/glext.h> +#include <OpenGLES/ES2/glext.h> +#endif + +namespace gl { +namespace internals { +namespace Gl31FuncName { +// LIST OF INDICES USED TO REFERENCE THE FUNCTIONS IN THE FUNCTION TABLE +enum OpenGLES31FunctionName +{ + //// OPENGL ES 31
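+ // EACH ENUMERATOR BELOW DOUBLES AS THE SLOT OF THE MATCHING gl* ENTRY POINT IN THE FUNCTION TABLE THAT getEs31Function() FILLS ON ITS FIRST CALL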
//// + + DispatchCompute, DispatchComputeIndirect, DrawArraysIndirect, DrawElementsIndirect, FramebufferParameteri, GetFramebufferParameteriv, GetProgramInterfaceiv, + GetProgramResourceIndex, GetProgramResourceName, GetProgramResourceiv, GetProgramResourceLocation, UseProgramStages, ActiveShaderProgram, CreateShaderProgramv, + BindProgramPipeline, DeleteProgramPipelines, GenProgramPipelines, IsProgramPipeline, GetProgramPipelineiv, ProgramUniform1i, ProgramUniform2i, ProgramUniform3i, + ProgramUniform4i, ProgramUniform1ui, ProgramUniform2ui, ProgramUniform3ui, ProgramUniform4ui, ProgramUniform1f, ProgramUniform2f, ProgramUniform3f, + ProgramUniform4f, ProgramUniform1iv, ProgramUniform2iv, ProgramUniform3iv, ProgramUniform4iv, ProgramUniform1uiv, ProgramUniform2uiv, ProgramUniform3uiv, + ProgramUniform4uiv, ProgramUniform1fv, ProgramUniform2fv, ProgramUniform3fv, ProgramUniform4fv, ProgramUniformMatrix2fv, ProgramUniformMatrix3fv, + ProgramUniformMatrix4fv, ProgramUniformMatrix2x3fv, ProgramUniformMatrix3x2fv, ProgramUniformMatrix2x4fv, ProgramUniformMatrix4x2fv, ProgramUniformMatrix3x4fv, + ProgramUniformMatrix4x3fv, ValidateProgramPipeline, GetProgramPipelineInfoLog, BindImageTexture, GetBooleani_v, MemoryBarrier, MemoryBarrierByRegion, + TexStorage2DMultisample, GetMultisamplefv, SampleMaski, GetTexLevelParameteriv, GetTexLevelParameterfv, BindVertexBuffer, VertexAttribFormat, VertexAttribIFormat, + VertexAttribBinding, VertexBindingDivisor, + NUMBER_OF_OPENGLES31_FUNCTIONS +}; +} + +// THIS FUNCTION LOADS ALL OPENGL ES 3.1 POINTERS ON ITS FIRST CALL +inline void* getEs31Function(gl::internals::Gl31FuncName::OpenGLES31FunctionName funcname) +{ + static void* FunctionTable[Gl31FuncName::NUMBER_OF_OPENGLES31_FUNCTIONS]; + + // GET FUNCTION POINTERS --- ONCE!!!!
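+ // NOTE: THE ONE-TIME FILL BELOW IS GUARDED ONLY BY THE FunctionTable[0] NULL CHECK, SO MAKE THE FIRST CALL BEFORE ANY CONCURRENT GL USE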
/// +#if !TARGET_OS_IPHONE + if (!FunctionTable[0]) + { + pvr::lib::LIBTYPE lib = pvr::lib::openlib(libName); + if (!lib) + { + Log_Error("OpenGL ES Bindings: Failed to open library %s\n", libName); + } + else + { + Log_Info("OpenGL ES Bindings: Successfully loaded library %s for OpenGL ES 3.1\n", libName); + } + + FunctionTable[Gl31FuncName::DispatchCompute] = pvr::lib::getLibFunctionChecked(lib, "glDispatchCompute"); + FunctionTable[Gl31FuncName::DispatchComputeIndirect] = pvr::lib::getLibFunctionChecked(lib, "glDispatchComputeIndirect"); + FunctionTable[Gl31FuncName::DrawArraysIndirect] = pvr::lib::getLibFunctionChecked(lib, "glDrawArraysIndirect"); + FunctionTable[Gl31FuncName::DrawElementsIndirect] = pvr::lib::getLibFunctionChecked(lib, "glDrawElementsIndirect"); + FunctionTable[Gl31FuncName::FramebufferParameteri] = pvr::lib::getLibFunctionChecked(lib, "glFramebufferParameteri"); + FunctionTable[Gl31FuncName::GetFramebufferParameteriv] = pvr::lib::getLibFunctionChecked(lib, "glGetFramebufferParameteriv"); + FunctionTable[Gl31FuncName::GetProgramInterfaceiv] = pvr::lib::getLibFunctionChecked(lib, "glGetProgramInterfaceiv"); + FunctionTable[Gl31FuncName::GetProgramResourceIndex] = pvr::lib::getLibFunctionChecked(lib, "glGetProgramResourceIndex"); + FunctionTable[Gl31FuncName::GetProgramResourceName] = pvr::lib::getLibFunctionChecked(lib, "glGetProgramResourceName"); + FunctionTable[Gl31FuncName::GetProgramResourceiv] = pvr::lib::getLibFunctionChecked(lib, "glGetProgramResourceiv"); + FunctionTable[Gl31FuncName::GetProgramResourceLocation] = pvr::lib::getLibFunctionChecked(lib, "glGetProgramResourceLocation"); + FunctionTable[Gl31FuncName::UseProgramStages] = pvr::lib::getLibFunctionChecked(lib, "glUseProgramStages"); + FunctionTable[Gl31FuncName::ActiveShaderProgram] = pvr::lib::getLibFunctionChecked(lib, "glActiveShaderProgram"); + FunctionTable[Gl31FuncName::CreateShaderProgramv] = pvr::lib::getLibFunctionChecked(lib, "glCreateShaderProgramv"); + FunctionTable[Gl31FuncName::BindProgramPipeline] = pvr::lib::getLibFunctionChecked(lib, "glBindProgramPipeline"); + FunctionTable[Gl31FuncName::DeleteProgramPipelines] = pvr::lib::getLibFunctionChecked(lib, "glDeleteProgramPipelines"); + FunctionTable[Gl31FuncName::GenProgramPipelines] = pvr::lib::getLibFunctionChecked(lib, "glGenProgramPipelines"); + FunctionTable[Gl31FuncName::IsProgramPipeline] = pvr::lib::getLibFunctionChecked(lib, "glIsProgramPipeline"); + FunctionTable[Gl31FuncName::GetProgramPipelineiv] = pvr::lib::getLibFunctionChecked(lib, "glGetProgramPipelineiv"); + FunctionTable[Gl31FuncName::ProgramUniform1i] = pvr::lib::getLibFunctionChecked(lib, "glProgramUniform1i"); + FunctionTable[Gl31FuncName::ProgramUniform2i] = pvr::lib::getLibFunctionChecked(lib, "glProgramUniform2i"); + FunctionTable[Gl31FuncName::ProgramUniform3i] = pvr::lib::getLibFunctionChecked(lib, "glProgramUniform3i"); + FunctionTable[Gl31FuncName::ProgramUniform4i] = pvr::lib::getLibFunctionChecked(lib, "glProgramUniform4i"); + FunctionTable[Gl31FuncName::ProgramUniform1ui] = pvr::lib::getLibFunctionChecked(lib, "glProgramUniform1ui"); + FunctionTable[Gl31FuncName::ProgramUniform2ui] = pvr::lib::getLibFunctionChecked(lib, "glProgramUniform2ui"); + FunctionTable[Gl31FuncName::ProgramUniform3ui] = pvr::lib::getLibFunctionChecked(lib, "glProgramUniform3ui"); + FunctionTable[Gl31FuncName::ProgramUniform4ui] = pvr::lib::getLibFunctionChecked(lib, "glProgramUniform4ui"); + FunctionTable[Gl31FuncName::ProgramUniform1f] = pvr::lib::getLibFunctionChecked(lib, 
"glProgramUniform1f"); + FunctionTable[Gl31FuncName::ProgramUniform2f] = pvr::lib::getLibFunctionChecked(lib, "glProgramUniform2f"); + FunctionTable[Gl31FuncName::ProgramUniform3f] = pvr::lib::getLibFunctionChecked(lib, "glProgramUniform3f"); + FunctionTable[Gl31FuncName::ProgramUniform4f] = pvr::lib::getLibFunctionChecked(lib, "glProgramUniform4f"); + FunctionTable[Gl31FuncName::ProgramUniform1iv] = pvr::lib::getLibFunctionChecked(lib, "glProgramUniform1iv"); + FunctionTable[Gl31FuncName::ProgramUniform2iv] = pvr::lib::getLibFunctionChecked(lib, "glProgramUniform2iv"); + FunctionTable[Gl31FuncName::ProgramUniform3iv] = pvr::lib::getLibFunctionChecked(lib, "glProgramUniform3iv"); + FunctionTable[Gl31FuncName::ProgramUniform4iv] = pvr::lib::getLibFunctionChecked(lib, "glProgramUniform4iv"); + FunctionTable[Gl31FuncName::ProgramUniform1uiv] = pvr::lib::getLibFunctionChecked(lib, "glProgramUniform1uiv"); + FunctionTable[Gl31FuncName::ProgramUniform2uiv] = pvr::lib::getLibFunctionChecked(lib, "glProgramUniform2uiv"); + FunctionTable[Gl31FuncName::ProgramUniform3uiv] = pvr::lib::getLibFunctionChecked(lib, "glProgramUniform3uiv"); + FunctionTable[Gl31FuncName::ProgramUniform4uiv] = pvr::lib::getLibFunctionChecked(lib, "glProgramUniform4uiv"); + FunctionTable[Gl31FuncName::ProgramUniform1fv] = pvr::lib::getLibFunctionChecked(lib, "glProgramUniform1fv"); + FunctionTable[Gl31FuncName::ProgramUniform2fv] = pvr::lib::getLibFunctionChecked(lib, "glProgramUniform2fv"); + FunctionTable[Gl31FuncName::ProgramUniform3fv] = pvr::lib::getLibFunctionChecked(lib, "glProgramUniform3fv"); + FunctionTable[Gl31FuncName::ProgramUniform4fv] = pvr::lib::getLibFunctionChecked(lib, "glProgramUniform4fv"); + FunctionTable[Gl31FuncName::ProgramUniformMatrix2fv] = pvr::lib::getLibFunctionChecked(lib, "glProgramUniformMatrix2fv"); + FunctionTable[Gl31FuncName::ProgramUniformMatrix3fv] = pvr::lib::getLibFunctionChecked(lib, "glProgramUniformMatrix3fv"); + FunctionTable[Gl31FuncName::ProgramUniformMatrix4fv] = pvr::lib::getLibFunctionChecked(lib, "glProgramUniformMatrix4fv"); + FunctionTable[Gl31FuncName::ProgramUniformMatrix2x3fv] = pvr::lib::getLibFunctionChecked(lib, "glProgramUniformMatrix2x3fv"); + FunctionTable[Gl31FuncName::ProgramUniformMatrix3x2fv] = pvr::lib::getLibFunctionChecked(lib, "glProgramUniformMatrix3x2fv"); + FunctionTable[Gl31FuncName::ProgramUniformMatrix2x4fv] = pvr::lib::getLibFunctionChecked(lib, "glProgramUniformMatrix2x4fv"); + FunctionTable[Gl31FuncName::ProgramUniformMatrix4x2fv] = pvr::lib::getLibFunctionChecked(lib, "glProgramUniformMatrix4x2fv"); + FunctionTable[Gl31FuncName::ProgramUniformMatrix3x4fv] = pvr::lib::getLibFunctionChecked(lib, "glProgramUniformMatrix3x4fv"); + FunctionTable[Gl31FuncName::ProgramUniformMatrix4x3fv] = pvr::lib::getLibFunctionChecked(lib, "glProgramUniformMatrix4x3fv"); + FunctionTable[Gl31FuncName::ValidateProgramPipeline] = pvr::lib::getLibFunctionChecked(lib, "glValidateProgramPipeline"); + FunctionTable[Gl31FuncName::GetProgramPipelineInfoLog] = pvr::lib::getLibFunctionChecked(lib, "glGetProgramPipelineInfoLog"); + FunctionTable[Gl31FuncName::BindImageTexture] = pvr::lib::getLibFunctionChecked(lib, "glBindImageTexture"); + FunctionTable[Gl31FuncName::GetBooleani_v] = pvr::lib::getLibFunctionChecked(lib, "glGetBooleani_v"); + FunctionTable[Gl31FuncName::MemoryBarrier] = pvr::lib::getLibFunctionChecked(lib, "glMemoryBarrier"); + FunctionTable[Gl31FuncName::MemoryBarrierByRegion] = pvr::lib::getLibFunctionChecked(lib, "glMemoryBarrierByRegion"); + 
FunctionTable[Gl31FuncName::TexStorage2DMultisample] = pvr::lib::getLibFunctionChecked(lib, "glTexStorage2DMultisample"); + FunctionTable[Gl31FuncName::GetMultisamplefv] = pvr::lib::getLibFunctionChecked(lib, "glGetMultisamplefv"); + FunctionTable[Gl31FuncName::SampleMaski] = pvr::lib::getLibFunctionChecked(lib, "glSampleMaski"); + FunctionTable[Gl31FuncName::GetTexLevelParameteriv] = pvr::lib::getLibFunctionChecked(lib, "glGetTexLevelParameteriv"); + FunctionTable[Gl31FuncName::GetTexLevelParameterfv] = pvr::lib::getLibFunctionChecked(lib, "glGetTexLevelParameterfv"); + FunctionTable[Gl31FuncName::BindVertexBuffer] = pvr::lib::getLibFunctionChecked(lib, "glBindVertexBuffer"); + FunctionTable[Gl31FuncName::VertexAttribFormat] = pvr::lib::getLibFunctionChecked(lib, "glVertexAttribFormat"); + FunctionTable[Gl31FuncName::VertexAttribIFormat] = pvr::lib::getLibFunctionChecked(lib, "glVertexAttribIFormat"); + FunctionTable[Gl31FuncName::VertexAttribBinding] = pvr::lib::getLibFunctionChecked(lib, "glVertexAttribBinding"); + FunctionTable[Gl31FuncName::VertexBindingDivisor] = pvr::lib::getLibFunctionChecked(lib, "glVertexBindingDivisor"); + } +#endif + return FunctionTable[funcname]; +} +} +} + +/************ OPENGL ES API ************/ +#ifndef DYNAMICGLES_NO_NAMESPACE +namespace gl { +#elif TARGET_OS_IPHONE +namespace gl { +namespace internals { +#endif +inline void DYNAMICGLES_FUNCTION(DispatchCompute)(GLuint num_groups_x, GLuint num_groups_y, GLuint num_groups_z) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLDISPATCHCOMPUTEPROC _DispatchCompute = (PFNGLDISPATCHCOMPUTEPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::DispatchCompute); + return _DispatchCompute(num_groups_x, num_groups_y, num_groups_z); +#endif +} +inline void DYNAMICGLES_FUNCTION(DispatchComputeIndirect)(GLintptr indirect) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLDISPATCHCOMPUTEINDIRECTPROC _DispatchComputeIndirect = (PFNGLDISPATCHCOMPUTEINDIRECTPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::DispatchComputeIndirect); + return _DispatchComputeIndirect(indirect); +#endif +} +inline void DYNAMICGLES_FUNCTION(DrawArraysIndirect)(GLenum mode, const void* indirect) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLDRAWARRAYSINDIRECTPROC _DrawArraysIndirect = (PFNGLDRAWARRAYSINDIRECTPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::DrawArraysIndirect); + return _DrawArraysIndirect(mode, indirect); +#endif +} +inline void DYNAMICGLES_FUNCTION(DrawElementsIndirect)(GLenum mode, GLenum type, const void* indirect) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLDRAWELEMENTSINDIRECTPROC _DrawElementsIndirect = (PFNGLDRAWELEMENTSINDIRECTPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::DrawElementsIndirect); + return _DrawElementsIndirect(mode, type, indirect); +#endif +} +inline void DYNAMICGLES_FUNCTION(FramebufferParameteri)(GLenum target, GLenum pname, GLint param) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLFRAMEBUFFERPARAMETERIPROC _FramebufferParameteri = (PFNGLFRAMEBUFFERPARAMETERIPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::FramebufferParameteri); + return _FramebufferParameteri(target, pname, param); +#endif +} +inline void DYNAMICGLES_FUNCTION(GetFramebufferParameteriv)(GLenum target, GLenum pname, GLint* params) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLGETFRAMEBUFFERPARAMETERIVPROC _GetFramebufferParameteriv = 
(PFNGLGETFRAMEBUFFERPARAMETERIVPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::GetFramebufferParameteriv); + return _GetFramebufferParameteriv(target, pname, params); +#endif +} +inline void DYNAMICGLES_FUNCTION(GetProgramInterfaceiv)(GLuint program, GLenum programInterface, GLenum pname, GLint* params) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLGETPROGRAMINTERFACEIVPROC _GetProgramInterfaceiv = (PFNGLGETPROGRAMINTERFACEIVPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::GetProgramInterfaceiv); + return _GetProgramInterfaceiv(program, programInterface, pname, params); +#endif +} +inline GLuint DYNAMICGLES_FUNCTION(GetProgramResourceIndex)(GLuint program, GLenum programInterface, const GLchar* name) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLGETPROGRAMRESOURCEINDEXPROC _GetProgramResourceIndex = (PFNGLGETPROGRAMRESOURCEINDEXPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::GetProgramResourceIndex); + return _GetProgramResourceIndex(program, programInterface, name); +#endif +} +inline void DYNAMICGLES_FUNCTION(GetProgramResourceName)(GLuint program, GLenum programInterface, GLuint index, GLsizei bufSize, GLsizei* length, GLchar* name) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLGETPROGRAMRESOURCENAMEPROC _GetProgramResourceName = (PFNGLGETPROGRAMRESOURCENAMEPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::GetProgramResourceName); + return _GetProgramResourceName(program, programInterface, index, bufSize, length, name); +#endif + +} +inline void DYNAMICGLES_FUNCTION(GetProgramResourceiv)(GLuint program, GLenum programInterface, GLuint index, GLsizei propCount, const GLenum* props, GLsizei bufSize, GLsizei* length, GLint* params) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLGETPROGRAMRESOURCEIVPROC _GetProgramResourceiv = (PFNGLGETPROGRAMRESOURCEIVPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::GetProgramResourceiv); + return _GetProgramResourceiv(program, programInterface, index, propCount, props, bufSize, length, params); +#endif +} +inline GLint DYNAMICGLES_FUNCTION(GetProgramResourceLocation)(GLuint program, GLenum programInterface, const GLchar* name) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLGETPROGRAMRESOURCELOCATIONPROC _GetProgramResourceLocation = (PFNGLGETPROGRAMRESOURCELOCATIONPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::GetProgramResourceLocation); + return _GetProgramResourceLocation(program, programInterface, name); +#endif +} +inline void DYNAMICGLES_FUNCTION(UseProgramStages)(GLuint pipeline, GLbitfield stages, GLuint program) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLUSEPROGRAMSTAGESPROC _UseProgramStages = (PFNGLUSEPROGRAMSTAGESPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::UseProgramStages); + return _UseProgramStages(pipeline, stages, program); +#endif +} +inline void DYNAMICGLES_FUNCTION(ActiveShaderProgram)(GLuint pipeline, GLuint program) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLACTIVESHADERPROGRAMPROC _ActiveShaderProgram = (PFNGLACTIVESHADERPROGRAMPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::ActiveShaderProgram); + return _ActiveShaderProgram(pipeline, program); +#endif +} +inline GLuint DYNAMICGLES_FUNCTION(CreateShaderProgramv)(GLenum type, GLsizei count, const GLchar* const* strings) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLCREATESHADERPROGRAMVPROC _CreateShaderProgramv = 
(PFNGLCREATESHADERPROGRAMVPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::CreateShaderProgramv); + return _CreateShaderProgramv(type, count, strings); +#endif +} +inline void DYNAMICGLES_FUNCTION(BindProgramPipeline)(GLuint pipeline) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLBINDPROGRAMPIPELINEPROC _BindProgramPipeline = (PFNGLBINDPROGRAMPIPELINEPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::BindProgramPipeline); + return _BindProgramPipeline(pipeline); +#endif +} +inline void DYNAMICGLES_FUNCTION(DeleteProgramPipelines)(GLsizei n, const GLuint* pipelines) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLDELETEPROGRAMPIPELINESPROC _DeleteProgramPipelines = (PFNGLDELETEPROGRAMPIPELINESPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::DeleteProgramPipelines); + return _DeleteProgramPipelines(n, pipelines); +#endif +} +inline void DYNAMICGLES_FUNCTION(GenProgramPipelines)(GLsizei n, GLuint* pipelines) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLGENPROGRAMPIPELINESPROC _GenProgramPipelines = (PFNGLGENPROGRAMPIPELINESPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::GenProgramPipelines); + return _GenProgramPipelines(n, pipelines); +#endif +} +inline GLboolean DYNAMICGLES_FUNCTION(IsProgramPipeline)(GLuint pipeline) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLISPROGRAMPIPELINEPROC _IsProgramPipeline = (PFNGLISPROGRAMPIPELINEPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::IsProgramPipeline); + return _IsProgramPipeline(pipeline); +#endif +} +inline void DYNAMICGLES_FUNCTION(GetProgramPipelineiv)(GLuint pipeline, GLenum pname, GLint* params) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLGETPROGRAMPIPELINEIVPROC _GetProgramPipelineiv = (PFNGLGETPROGRAMPIPELINEIVPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::GetProgramPipelineiv); + return _GetProgramPipelineiv(pipeline, pname, params); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniform1i)(GLuint program, GLint location, GLint v0) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORM1IPROC _ProgramUniform1i = (PFNGLPROGRAMUNIFORM1IPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::ProgramUniform1i); + return _ProgramUniform1i(program, location, v0); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniform2i)(GLuint program, GLint location, GLint v0, GLint v1) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORM2IPROC _ProgramUniform2i = (PFNGLPROGRAMUNIFORM2IPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::ProgramUniform2i); + return _ProgramUniform2i(program, location, v0, v1); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniform3i)(GLuint program, GLint location, GLint v0, GLint v1, GLint v2) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORM3IPROC _ProgramUniform3i = (PFNGLPROGRAMUNIFORM3IPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::ProgramUniform3i); + return _ProgramUniform3i(program, location, v0, v1, v2); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniform4i)(GLuint program, GLint location, GLint v0, GLint v1, GLint v2, GLint v3) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORM4IPROC _ProgramUniform4i = (PFNGLPROGRAMUNIFORM4IPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::ProgramUniform4i); + return _ProgramUniform4i(program, location, v0, v1, v2, v3); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniform1ui)(GLuint program, GLint location, GLuint v0) 
+{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORM1UIPROC _ProgramUniform1ui = (PFNGLPROGRAMUNIFORM1UIPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::ProgramUniform1ui); + return _ProgramUniform1ui(program, location, v0); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniform2ui)(GLuint program, GLint location, GLuint v0, GLuint v1) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORM2UIPROC _ProgramUniform2ui = (PFNGLPROGRAMUNIFORM2UIPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::ProgramUniform2ui); + return _ProgramUniform2ui(program, location, v0, v1); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniform3ui)(GLuint program, GLint location, GLuint v0, GLuint v1, GLuint v2) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORM3UIPROC _ProgramUniform3ui = (PFNGLPROGRAMUNIFORM3UIPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::ProgramUniform3ui); + return _ProgramUniform3ui(program, location, v0, v1, v2); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniform4ui)(GLuint program, GLint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORM4UIPROC _ProgramUniform4ui = (PFNGLPROGRAMUNIFORM4UIPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::ProgramUniform4ui); + return _ProgramUniform4ui(program, location, v0, v1, v2, v3); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniform1f)(GLuint program, GLint location, GLfloat v0) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORM1FPROC _ProgramUniform1f = (PFNGLPROGRAMUNIFORM1FPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::ProgramUniform1f); + return _ProgramUniform1f(program, location, v0); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniform2f)(GLuint program, GLint location, GLfloat v0, GLfloat v1) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORM2FPROC _ProgramUniform2f = (PFNGLPROGRAMUNIFORM2FPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::ProgramUniform2f); + return _ProgramUniform2f(program, location, v0, v1); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniform3f)(GLuint program, GLint location, GLfloat v0, GLfloat v1, GLfloat v2) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORM3FPROC _ProgramUniform3f = (PFNGLPROGRAMUNIFORM3FPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::ProgramUniform3f); + return _ProgramUniform3f(program, location, v0, v1, v2); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniform4f)(GLuint program, GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORM4FPROC _ProgramUniform4f = (PFNGLPROGRAMUNIFORM4FPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::ProgramUniform4f); + return _ProgramUniform4f(program, location, v0, v1, v2, v3); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniform1iv)(GLuint program, GLint location, GLsizei count, const GLint* value) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORM1IVPROC _ProgramUniform1iv = (PFNGLPROGRAMUNIFORM1IVPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::ProgramUniform1iv); + return _ProgramUniform1iv(program, location, count, value); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniform2iv)(GLuint program, GLint location, GLsizei count, const GLint* value) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + 
PFNGLPROGRAMUNIFORM2IVPROC _ProgramUniform2iv = (PFNGLPROGRAMUNIFORM2IVPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::ProgramUniform2iv); + return _ProgramUniform2iv(program, location, count, value); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniform3iv)(GLuint program, GLint location, GLsizei count, const GLint* value) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORM3IVPROC _ProgramUniform3iv = (PFNGLPROGRAMUNIFORM3IVPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::ProgramUniform3iv); + return _ProgramUniform3iv(program, location, count, value); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniform4iv)(GLuint program, GLint location, GLsizei count, const GLint* value) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORM4IVPROC _ProgramUniform4iv = (PFNGLPROGRAMUNIFORM4IVPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::ProgramUniform4iv); + return _ProgramUniform4iv(program, location, count, value); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniform1uiv)(GLuint program, GLint location, GLsizei count, const GLuint* value) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORM1UIVPROC _ProgramUniform1uiv = (PFNGLPROGRAMUNIFORM1UIVPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::ProgramUniform1uiv); + return _ProgramUniform1uiv(program, location, count, value); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniform2uiv)(GLuint program, GLint location, GLsizei count, const GLuint* value) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORM2UIVPROC _ProgramUniform2uiv = (PFNGLPROGRAMUNIFORM2UIVPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::ProgramUniform2uiv); + return _ProgramUniform2uiv(program, location, count, value); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniform3uiv)(GLuint program, GLint location, GLsizei count, const GLuint* value) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORM3UIVPROC _ProgramUniform3uiv = (PFNGLPROGRAMUNIFORM3UIVPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::ProgramUniform3uiv); + return _ProgramUniform3uiv(program, location, count, value); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniform4uiv)(GLuint program, GLint location, GLsizei count, const GLuint* value) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORM4UIVPROC _ProgramUniform4uiv = (PFNGLPROGRAMUNIFORM4UIVPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::ProgramUniform4uiv); + return _ProgramUniform4uiv(program, location, count, value); + +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniform1fv)(GLuint program, GLint location, GLsizei count, const GLfloat* value) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORM1FVPROC _ProgramUniform1fv = (PFNGLPROGRAMUNIFORM1FVPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::ProgramUniform1fv); + return _ProgramUniform1fv(program, location, count, value); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniform2fv)(GLuint program, GLint location, GLsizei count, const GLfloat* value) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORM2FVPROC _ProgramUniform2fv = (PFNGLPROGRAMUNIFORM2FVPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::ProgramUniform2fv); + return _ProgramUniform2fv(program, location, count, value); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniform3fv)(GLuint program, GLint location, GLsizei count, const GLfloat* value) 
+{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORM3FVPROC _ProgramUniform3fv = (PFNGLPROGRAMUNIFORM3FVPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::ProgramUniform3fv); + return _ProgramUniform3fv(program, location, count, value); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniform4fv)(GLuint program, GLint location, GLsizei count, const GLfloat* value) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORM4FVPROC _ProgramUniform4fv = (PFNGLPROGRAMUNIFORM4FVPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::ProgramUniform4fv); + return _ProgramUniform4fv(program, location, count, value); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniformMatrix2fv)(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat* value) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORMMATRIX2FVPROC _ProgramUniformMatrix2fv = (PFNGLPROGRAMUNIFORMMATRIX2FVPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::ProgramUniformMatrix2fv); + return _ProgramUniformMatrix2fv(program, location, count, transpose, value); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniformMatrix3fv)(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat* value) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORMMATRIX3FVPROC _ProgramUniformMatrix3fv = (PFNGLPROGRAMUNIFORMMATRIX3FVPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::ProgramUniformMatrix3fv); + return _ProgramUniformMatrix3fv(program, location, count, transpose, value); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniformMatrix4fv)(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat* value) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORMMATRIX4FVPROC _ProgramUniformMatrix4fv = (PFNGLPROGRAMUNIFORMMATRIX4FVPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::ProgramUniformMatrix4fv); + return _ProgramUniformMatrix4fv(program, location, count, transpose, value); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniformMatrix2x3fv)(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat* value) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORMMATRIX2X3FVPROC _ProgramUniformMatrix2x3fv = (PFNGLPROGRAMUNIFORMMATRIX2X3FVPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::ProgramUniformMatrix2x3fv); + return _ProgramUniformMatrix2x3fv(program, location, count, transpose, value); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniformMatrix3x2fv)(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat* value) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORMMATRIX3X2FVPROC _ProgramUniformMatrix3x2fv = (PFNGLPROGRAMUNIFORMMATRIX3X2FVPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::ProgramUniformMatrix3x2fv); + return _ProgramUniformMatrix3x2fv(program, location, count, transpose, value); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniformMatrix2x4fv)(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat* value) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORMMATRIX2X4FVPROC _ProgramUniformMatrix2x4fv = (PFNGLPROGRAMUNIFORMMATRIX2X4FVPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::ProgramUniformMatrix2x4fv); + return _ProgramUniformMatrix2x4fv(program, location, count, transpose, value); +#endif +} +inline void 
DYNAMICGLES_FUNCTION(ProgramUniformMatrix4x2fv)(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat* value) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORMMATRIX4X2FVPROC _ProgramUniformMatrix4x2fv = (PFNGLPROGRAMUNIFORMMATRIX4X2FVPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::ProgramUniformMatrix4x2fv); + return _ProgramUniformMatrix4x2fv(program, location, count, transpose, value); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniformMatrix3x4fv)(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat* value) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORMMATRIX3X4FVPROC _ProgramUniformMatrix3x4fv = (PFNGLPROGRAMUNIFORMMATRIX3X4FVPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::ProgramUniformMatrix3x4fv); + return _ProgramUniformMatrix3x4fv(program, location, count, transpose, value); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniformMatrix4x3fv)(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat* value) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORMMATRIX4X3FVPROC _ProgramUniformMatrix4x3fv = (PFNGLPROGRAMUNIFORMMATRIX4X3FVPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::ProgramUniformMatrix4x3fv); + return _ProgramUniformMatrix4x3fv(program, location, count, transpose, value); +#endif +} +inline void DYNAMICGLES_FUNCTION(ValidateProgramPipeline)(GLuint pipeline) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLVALIDATEPROGRAMPIPELINEPROC _ValidateProgramPipeline = (PFNGLVALIDATEPROGRAMPIPELINEPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::ValidateProgramPipeline); + return _ValidateProgramPipeline(pipeline); +#endif +} +inline void DYNAMICGLES_FUNCTION(GetProgramPipelineInfoLog)(GLuint pipeline, GLsizei bufSize, GLsizei* length, GLchar* infoLog) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLGETPROGRAMPIPELINEINFOLOGPROC _GetProgramPipelineInfoLog = (PFNGLGETPROGRAMPIPELINEINFOLOGPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::GetProgramPipelineInfoLog); + return _GetProgramPipelineInfoLog(pipeline, bufSize, length, infoLog); +#endif + +} +inline void DYNAMICGLES_FUNCTION(BindImageTexture)(GLuint unit, GLuint texture, GLint level, GLboolean layered, GLint layer, GLenum access, GLenum format) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLBINDIMAGETEXTUREPROC _BindImageTexture = (PFNGLBINDIMAGETEXTUREPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::BindImageTexture); + return _BindImageTexture(unit, texture, level, layered, layer, access, format); +#endif +} +inline void DYNAMICGLES_FUNCTION(GetBooleani_v)(GLenum target, GLuint index, GLboolean* data) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLGETBOOLEANI_VPROC _GetBooleani_v = (PFNGLGETBOOLEANI_VPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::GetBooleani_v); + return _GetBooleani_v(target, index, data); +#endif +} +inline void DYNAMICGLES_FUNCTION(MemoryBarrier)(GLbitfield barriers) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLMEMORYBARRIERPROC _MemoryBarrier = (PFNGLMEMORYBARRIERPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::MemoryBarrier); + return _MemoryBarrier(barriers); +#endif +} +inline void DYNAMICGLES_FUNCTION(MemoryBarrierByRegion)(GLbitfield barriers) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLMEMORYBARRIERBYREGIONPROC _MemoryBarrierByRegion = 
(PFNGLMEMORYBARRIERBYREGIONPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::MemoryBarrierByRegion); + return _MemoryBarrierByRegion(barriers); +#endif +} +inline void DYNAMICGLES_FUNCTION(TexStorage2DMultisample)(GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLboolean fixedsamplelocations) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLTEXSTORAGE2DMULTISAMPLEPROC _TexStorage2DMultisample = (PFNGLTEXSTORAGE2DMULTISAMPLEPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::TexStorage2DMultisample); + return _TexStorage2DMultisample(target, samples, internalformat, width, height, fixedsamplelocations); +#endif +} +inline void DYNAMICGLES_FUNCTION(GetMultisamplefv)(GLenum pname, GLuint index, GLfloat* val) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLGETMULTISAMPLEFVPROC _GetMultisamplefv = (PFNGLGETMULTISAMPLEFVPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::GetMultisamplefv); + return _GetMultisamplefv(pname, index, val); +#endif +} +inline void DYNAMICGLES_FUNCTION(SampleMaski)(GLuint maskNumber, GLbitfield mask) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLSAMPLEMASKIPROC _SampleMaski = (PFNGLSAMPLEMASKIPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::SampleMaski); + return _SampleMaski(maskNumber, mask); +#endif +} +inline void DYNAMICGLES_FUNCTION(GetTexLevelParameteriv)(GLenum target, GLint level, GLenum pname, GLint* params) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLGETTEXLEVELPARAMETERIVPROC _GetTexLevelParameteriv = (PFNGLGETTEXLEVELPARAMETERIVPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::GetTexLevelParameteriv); + return _GetTexLevelParameteriv(target, level, pname, params); +#endif +} +inline void DYNAMICGLES_FUNCTION(GetTexLevelParameterfv)(GLenum target, GLint level, GLenum pname, GLfloat* params) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLGETTEXLEVELPARAMETERFVPROC _GetTexLevelParameterfv = (PFNGLGETTEXLEVELPARAMETERFVPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::GetTexLevelParameterfv); + return _GetTexLevelParameterfv(target, level, pname, params); +#endif +} +inline void DYNAMICGLES_FUNCTION(BindVertexBuffer)(GLuint bindingindex, GLuint buffer, GLintptr offset, GLsizei stride) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLBINDVERTEXBUFFERPROC _BindVertexBuffer = (PFNGLBINDVERTEXBUFFERPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::BindVertexBuffer); + return _BindVertexBuffer(bindingindex, buffer, offset, stride); +#endif +} +inline void DYNAMICGLES_FUNCTION(VertexAttribFormat)(GLuint attribindex, GLint size, GLenum type, GLboolean normalized, GLuint relativeoffset) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLVERTEXATTRIBFORMATPROC _VertexAttribFormat = (PFNGLVERTEXATTRIBFORMATPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::VertexAttribFormat); + return _VertexAttribFormat(attribindex, size, type, normalized, relativeoffset); +#endif +} +inline void DYNAMICGLES_FUNCTION(VertexAttribIFormat)(GLuint attribindex, GLint size, GLenum type, GLuint relativeoffset) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLVERTEXATTRIBIFORMATPROC _VertexAttribIFormat = (PFNGLVERTEXATTRIBIFORMATPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::VertexAttribIFormat); + return _VertexAttribIFormat(attribindex, size, type, relativeoffset); +#endif +} +inline void DYNAMICGLES_FUNCTION(VertexAttribBinding)(GLuint attribindex, GLuint bindingindex) +{ +#if 
TARGET_OS_IPHONE
+	assert(0);
+#else
+	PFNGLVERTEXATTRIBBINDINGPROC _VertexAttribBinding = (PFNGLVERTEXATTRIBBINDINGPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::VertexAttribBinding);
+	return _VertexAttribBinding(attribindex, bindingindex);
+#endif
+}
+inline void DYNAMICGLES_FUNCTION(VertexBindingDivisor)(GLuint bindingindex, GLuint divisor)
+{
+#if TARGET_OS_IPHONE
+	assert(0);
+#else
+	PFNGLVERTEXBINDINGDIVISORPROC _VertexBindingDivisor = (PFNGLVERTEXBINDINGDIVISORPROC)gl::internals::getEs31Function(gl::internals::Gl31FuncName::VertexBindingDivisor);
+	return _VertexBindingDivisor(bindingindex, divisor);
+#endif
+}
+#ifndef DYNAMICGLES_NO_NAMESPACE
+}
+#elif TARGET_OS_IPHONE
+}
+}
+#endif
+
+
+namespace gl {
+namespace internals {
+namespace Gl3FuncName {
+enum OpenGLES3FunctionName
+{
+	//// OPENGL ES 3 ////
+
+	ReadBuffer, DrawRangeElements, TexImage3D, TexSubImage3D, CopyTexSubImage3D, CompressedTexImage3D, CompressedTexSubImage3D, GenQueries, DeleteQueries, IsQuery,
+	BeginQuery, EndQuery, GetQueryiv, GetQueryObjectuiv, UnmapBuffer, GetBufferPointerv, DrawBuffers, UniformMatrix2x3fv, UniformMatrix3x2fv, UniformMatrix2x4fv,
+	UniformMatrix4x2fv, UniformMatrix3x4fv, UniformMatrix4x3fv, BlitFramebuffer, RenderbufferStorageMultisample, FramebufferTextureLayer, MapBufferRange,
+	FlushMappedBufferRange, BindVertexArray, DeleteVertexArrays, GenVertexArrays, IsVertexArray, GetIntegeri_v, BeginTransformFeedback, EndTransformFeedback,
+	BindBufferRange, BindBufferBase, TransformFeedbackVaryings, GetTransformFeedbackVarying, VertexAttribIPointer, GetVertexAttribIiv, GetVertexAttribIuiv,
+	VertexAttribI4i, VertexAttribI4ui, VertexAttribI4iv, VertexAttribI4uiv, GetUniformuiv, GetFragDataLocation, Uniform1ui, Uniform2ui, Uniform3ui, Uniform4ui,
+	Uniform1uiv, Uniform2uiv, Uniform3uiv, Uniform4uiv, ClearBufferiv, ClearBufferuiv, ClearBufferfv, ClearBufferfi, GetStringi, CopyBufferSubData, GetUniformIndices,
+	GetActiveUniformsiv, GetUniformBlockIndex, GetActiveUniformBlockiv, GetActiveUniformBlockName, UniformBlockBinding, DrawArraysInstanced, DrawElementsInstanced,
+	FenceSync, IsSync, DeleteSync, ClientWaitSync, WaitSync, GetInteger64v, GetSynciv, GetInteger64i_v, GetBufferParameteri64v, GenSamplers, DeleteSamplers, IsSampler,
+	BindSampler, SamplerParameteri, SamplerParameteriv, SamplerParameterf, SamplerParameterfv, GetSamplerParameteriv, GetSamplerParameterfv, VertexAttribDivisor,
+	BindTransformFeedback, DeleteTransformFeedbacks, GenTransformFeedbacks, IsTransformFeedback, PauseTransformFeedback, ResumeTransformFeedback, GetProgramBinary,
+	ProgramBinary, ProgramParameteri, InvalidateFramebuffer, InvalidateSubFramebuffer, TexStorage2D, TexStorage3D, GetInternalformativ,
+	NUMBER_OF_OPENGLES3_FUNCTIONS
+};
+}
+
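+// getEs3Function() below resolves every OpenGL ES 3.0 entry point named in the
+// enum above the first time any of them is needed, and caches the results in a
+// function-local static table; every later lookup is a single array read. A
+// minimal sketch of the same pattern (all names here are illustrative, not
+// part of this header):
+//
+//   inline void* getFunction(int index)
+//   {
+//       static void* table[NUMBER_OF_FUNCTIONS]; // zero-initialized
+//       if (!table[0])        // first call: resolve every symbol in one pass
+//           fillTable(table); // e.g. with dlsym() or GetProcAddress()
+//       return table[index];
+//   }
+//
+// The `!table[0]` guard is not synchronized, so the first GL call should be
+// made from a single thread, normally the one that created the context.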
+inline void* getEs3Function(gl::internals::Gl3FuncName::OpenGLES3FunctionName funcname)
+{
+	static void* FunctionTable[Gl3FuncName::NUMBER_OF_OPENGLES3_FUNCTIONS];
+
+	// Resolve and cache the function pointers once, on the first call.
+	if (!FunctionTable[0])
+	{
+#if !TARGET_OS_IPHONE
+		pvr::lib::LIBTYPE lib = pvr::lib::openlib(gl::internals::libName);
+		if (!lib)
+		{
+			Log_Error("OpenGL ES Bindings: Failed to open library %s\n", gl::internals::libName);
+		}
+		else
+		{
+			Log_Info("OpenGL ES Bindings: Successfully loaded library %s for OpenGL ES 3.0\n", gl::internals::libName);
+		}
+		FunctionTable[Gl3FuncName::ReadBuffer] = pvr::lib::getLibFunctionChecked(lib, "glReadBuffer");
+		FunctionTable[Gl3FuncName::DrawRangeElements] = pvr::lib::getLibFunctionChecked(lib, "glDrawRangeElements");
+		FunctionTable[Gl3FuncName::TexImage3D] = pvr::lib::getLibFunctionChecked(lib, "glTexImage3D");
+		FunctionTable[Gl3FuncName::TexSubImage3D] = pvr::lib::getLibFunctionChecked(lib, "glTexSubImage3D");
+		FunctionTable[Gl3FuncName::CopyTexSubImage3D] = pvr::lib::getLibFunctionChecked(lib, "glCopyTexSubImage3D");
+		FunctionTable[Gl3FuncName::CompressedTexImage3D] = pvr::lib::getLibFunctionChecked(lib, "glCompressedTexImage3D");
+		FunctionTable[Gl3FuncName::CompressedTexSubImage3D] = pvr::lib::getLibFunctionChecked(lib, "glCompressedTexSubImage3D");
+		FunctionTable[Gl3FuncName::GenQueries] = pvr::lib::getLibFunctionChecked(lib, "glGenQueries");
+		FunctionTable[Gl3FuncName::DeleteQueries] = pvr::lib::getLibFunctionChecked(lib, "glDeleteQueries");
+		FunctionTable[Gl3FuncName::IsQuery] = pvr::lib::getLibFunctionChecked(lib, "glIsQuery");
+		FunctionTable[Gl3FuncName::BeginQuery] = pvr::lib::getLibFunctionChecked(lib, "glBeginQuery");
+		FunctionTable[Gl3FuncName::EndQuery] = pvr::lib::getLibFunctionChecked(lib, "glEndQuery");
+		FunctionTable[Gl3FuncName::GetQueryiv] = pvr::lib::getLibFunctionChecked(lib, "glGetQueryiv");
+		FunctionTable[Gl3FuncName::GetQueryObjectuiv] = pvr::lib::getLibFunctionChecked(lib, "glGetQueryObjectuiv");
+		FunctionTable[Gl3FuncName::UnmapBuffer] = pvr::lib::getLibFunctionChecked(lib, "glUnmapBuffer");
+		FunctionTable[Gl3FuncName::GetBufferPointerv] = pvr::lib::getLibFunctionChecked(lib, "glGetBufferPointerv");
+		FunctionTable[Gl3FuncName::DrawBuffers] = pvr::lib::getLibFunctionChecked(lib, "glDrawBuffers");
+		FunctionTable[Gl3FuncName::UniformMatrix2x3fv] = pvr::lib::getLibFunctionChecked(lib, "glUniformMatrix2x3fv");
+		FunctionTable[Gl3FuncName::UniformMatrix3x2fv] = pvr::lib::getLibFunctionChecked(lib, "glUniformMatrix3x2fv");
+		FunctionTable[Gl3FuncName::UniformMatrix2x4fv] = pvr::lib::getLibFunctionChecked(lib, "glUniformMatrix2x4fv");
+		FunctionTable[Gl3FuncName::UniformMatrix4x2fv] = pvr::lib::getLibFunctionChecked(lib, "glUniformMatrix4x2fv");
+		FunctionTable[Gl3FuncName::UniformMatrix3x4fv] = pvr::lib::getLibFunctionChecked(lib, "glUniformMatrix3x4fv");
+		FunctionTable[Gl3FuncName::UniformMatrix4x3fv] = pvr::lib::getLibFunctionChecked(lib, "glUniformMatrix4x3fv");
+		FunctionTable[Gl3FuncName::BlitFramebuffer] = pvr::lib::getLibFunctionChecked(lib, "glBlitFramebuffer");
+		FunctionTable[Gl3FuncName::RenderbufferStorageMultisample] = pvr::lib::getLibFunctionChecked(lib, "glRenderbufferStorageMultisample");
+		FunctionTable[Gl3FuncName::FramebufferTextureLayer] = pvr::lib::getLibFunctionChecked(lib, "glFramebufferTextureLayer");
+		FunctionTable[Gl3FuncName::MapBufferRange] = pvr::lib::getLibFunctionChecked(lib, "glMapBufferRange");
+		FunctionTable[Gl3FuncName::FlushMappedBufferRange] = pvr::lib::getLibFunctionChecked(lib, "glFlushMappedBufferRange");
+		FunctionTable[Gl3FuncName::BindVertexArray] = pvr::lib::getLibFunctionChecked(lib, "glBindVertexArray");
+		FunctionTable[Gl3FuncName::DeleteVertexArrays] =
pvr::lib::getLibFunctionChecked(lib, "glDeleteVertexArrays"); + FunctionTable[Gl3FuncName::GenVertexArrays] = pvr::lib::getLibFunctionChecked(lib, "glGenVertexArrays"); + FunctionTable[Gl3FuncName::IsVertexArray] = pvr::lib::getLibFunctionChecked(lib, "glIsVertexArray"); + FunctionTable[Gl3FuncName::GetIntegeri_v] = pvr::lib::getLibFunctionChecked(lib, "glGetIntegeri_v"); + FunctionTable[Gl3FuncName::BeginTransformFeedback] = pvr::lib::getLibFunctionChecked(lib, "glBeginTransformFeedback"); + FunctionTable[Gl3FuncName::EndTransformFeedback] = pvr::lib::getLibFunctionChecked(lib, "glEndTransformFeedback"); + FunctionTable[Gl3FuncName::BindBufferRange] = pvr::lib::getLibFunctionChecked(lib, "glBindBufferRange"); + FunctionTable[Gl3FuncName::BindBufferBase] = pvr::lib::getLibFunctionChecked(lib, "glBindBufferBase"); + FunctionTable[Gl3FuncName::TransformFeedbackVaryings] = pvr::lib::getLibFunctionChecked(lib, "glTransformFeedbackVaryings"); + FunctionTable[Gl3FuncName::GetTransformFeedbackVarying] = pvr::lib::getLibFunctionChecked(lib, "glGetTransformFeedbackVarying"); + FunctionTable[Gl3FuncName::VertexAttribIPointer] = pvr::lib::getLibFunctionChecked(lib, "glVertexAttribIPointer"); + FunctionTable[Gl3FuncName::GetVertexAttribIiv] = pvr::lib::getLibFunctionChecked(lib, "glGetVertexAttribIiv"); + FunctionTable[Gl3FuncName::GetVertexAttribIuiv] = pvr::lib::getLibFunctionChecked(lib, "glGetVertexAttribIuiv"); + FunctionTable[Gl3FuncName::VertexAttribI4i] = pvr::lib::getLibFunctionChecked(lib, "glVertexAttribI4i"); + FunctionTable[Gl3FuncName::VertexAttribI4ui] = pvr::lib::getLibFunctionChecked(lib, "glVertexAttribI4ui"); + FunctionTable[Gl3FuncName::VertexAttribI4iv] = pvr::lib::getLibFunctionChecked(lib, "glVertexAttribI4iv"); + FunctionTable[Gl3FuncName::VertexAttribI4uiv] = pvr::lib::getLibFunctionChecked(lib, "glVertexAttribI4uiv"); + FunctionTable[Gl3FuncName::GetUniformuiv] = pvr::lib::getLibFunctionChecked(lib, "glGetUniformuiv"); + FunctionTable[Gl3FuncName::GetFragDataLocation] = pvr::lib::getLibFunctionChecked(lib, "glGetFragDataLocation"); + FunctionTable[Gl3FuncName::Uniform1ui] = pvr::lib::getLibFunctionChecked(lib, "glUniform1ui"); + FunctionTable[Gl3FuncName::Uniform2ui] = pvr::lib::getLibFunctionChecked(lib, "glUniform2ui"); + FunctionTable[Gl3FuncName::Uniform3ui] = pvr::lib::getLibFunctionChecked(lib, "glUniform3ui"); + FunctionTable[Gl3FuncName::Uniform4ui] = pvr::lib::getLibFunctionChecked(lib, "glUniform4ui"); + FunctionTable[Gl3FuncName::Uniform1uiv] = pvr::lib::getLibFunctionChecked(lib, "glUniform1uiv"); + FunctionTable[Gl3FuncName::Uniform2uiv] = pvr::lib::getLibFunctionChecked(lib, "glUniform2uiv"); + FunctionTable[Gl3FuncName::Uniform3uiv] = pvr::lib::getLibFunctionChecked(lib, "glUniform3uiv"); + FunctionTable[Gl3FuncName::Uniform4uiv] = pvr::lib::getLibFunctionChecked(lib, "glUniform4uiv"); + FunctionTable[Gl3FuncName::ClearBufferiv] = pvr::lib::getLibFunctionChecked(lib, "glClearBufferiv"); + FunctionTable[Gl3FuncName::ClearBufferuiv] = pvr::lib::getLibFunctionChecked(lib, "glClearBufferuiv"); + FunctionTable[Gl3FuncName::ClearBufferfv] = pvr::lib::getLibFunctionChecked(lib, "glClearBufferfv"); + FunctionTable[Gl3FuncName::ClearBufferfi] = pvr::lib::getLibFunctionChecked(lib, "glClearBufferfi"); + FunctionTable[Gl3FuncName::GetStringi] = pvr::lib::getLibFunctionChecked(lib, "glGetStringi"); + FunctionTable[Gl3FuncName::CopyBufferSubData] = pvr::lib::getLibFunctionChecked(lib, "glCopyBufferSubData"); + FunctionTable[Gl3FuncName::GetUniformIndices] = 
pvr::lib::getLibFunctionChecked(lib, "glGetUniformIndices"); + FunctionTable[Gl3FuncName::GetActiveUniformsiv] = pvr::lib::getLibFunctionChecked(lib, "glGetActiveUniformsiv"); + FunctionTable[Gl3FuncName::GetUniformBlockIndex] = pvr::lib::getLibFunctionChecked(lib, "glGetUniformBlockIndex"); + FunctionTable[Gl3FuncName::GetActiveUniformBlockiv] = pvr::lib::getLibFunctionChecked(lib, "glGetActiveUniformBlockiv"); + FunctionTable[Gl3FuncName::GetActiveUniformBlockName] = pvr::lib::getLibFunctionChecked(lib, "glGetActiveUniformBlockName"); + FunctionTable[Gl3FuncName::UniformBlockBinding] = pvr::lib::getLibFunctionChecked(lib, "glUniformBlockBinding"); + FunctionTable[Gl3FuncName::DrawArraysInstanced] = pvr::lib::getLibFunctionChecked(lib, "glDrawArraysInstanced"); + FunctionTable[Gl3FuncName::DrawElementsInstanced] = pvr::lib::getLibFunctionChecked(lib, "glDrawElementsInstanced"); + FunctionTable[Gl3FuncName::FenceSync] = pvr::lib::getLibFunctionChecked(lib, "glFenceSync"); + FunctionTable[Gl3FuncName::IsSync] = pvr::lib::getLibFunctionChecked(lib, "glIsSync"); + FunctionTable[Gl3FuncName::DeleteSync] = pvr::lib::getLibFunctionChecked(lib, "glDeleteSync"); + FunctionTable[Gl3FuncName::ClientWaitSync] = pvr::lib::getLibFunctionChecked(lib, "glClientWaitSync"); + FunctionTable[Gl3FuncName::WaitSync] = pvr::lib::getLibFunctionChecked(lib, "glWaitSync"); + FunctionTable[Gl3FuncName::GetInteger64v] = pvr::lib::getLibFunctionChecked(lib, "glGetInteger64v"); + FunctionTable[Gl3FuncName::GetSynciv] = pvr::lib::getLibFunctionChecked(lib, "glGetSynciv"); + FunctionTable[Gl3FuncName::GetInteger64i_v] = pvr::lib::getLibFunctionChecked(lib, "glGetInteger64i_v"); + FunctionTable[Gl3FuncName::GetBufferParameteri64v] = pvr::lib::getLibFunctionChecked(lib, "glGetBufferParameteri64v"); + FunctionTable[Gl3FuncName::GenSamplers] = pvr::lib::getLibFunctionChecked(lib, "glGenSamplers"); + FunctionTable[Gl3FuncName::DeleteSamplers] = pvr::lib::getLibFunctionChecked(lib, "glDeleteSamplers"); + FunctionTable[Gl3FuncName::IsSampler] = pvr::lib::getLibFunctionChecked(lib, "glIsSampler"); + FunctionTable[Gl3FuncName::BindSampler] = pvr::lib::getLibFunctionChecked(lib, "glBindSampler"); + FunctionTable[Gl3FuncName::SamplerParameteri] = pvr::lib::getLibFunctionChecked(lib, "glSamplerParameteri"); + FunctionTable[Gl3FuncName::SamplerParameteriv] = pvr::lib::getLibFunctionChecked(lib, "glSamplerParameteriv"); + FunctionTable[Gl3FuncName::SamplerParameterf] = pvr::lib::getLibFunctionChecked(lib, "glSamplerParameterf"); + FunctionTable[Gl3FuncName::SamplerParameterfv] = pvr::lib::getLibFunctionChecked(lib, "glSamplerParameterfv"); + FunctionTable[Gl3FuncName::GetSamplerParameteriv] = pvr::lib::getLibFunctionChecked(lib, "glGetSamplerParameteriv"); + FunctionTable[Gl3FuncName::GetSamplerParameterfv] = pvr::lib::getLibFunctionChecked(lib, "glGetSamplerParameterfv"); + FunctionTable[Gl3FuncName::VertexAttribDivisor] = pvr::lib::getLibFunctionChecked(lib, "glVertexAttribDivisor"); + FunctionTable[Gl3FuncName::BindTransformFeedback] = pvr::lib::getLibFunctionChecked(lib, "glBindTransformFeedback"); + FunctionTable[Gl3FuncName::DeleteTransformFeedbacks] = pvr::lib::getLibFunctionChecked(lib, "glDeleteTransformFeedbacks"); + FunctionTable[Gl3FuncName::GenTransformFeedbacks] = pvr::lib::getLibFunctionChecked(lib, "glGenTransformFeedbacks"); + FunctionTable[Gl3FuncName::IsTransformFeedback] = pvr::lib::getLibFunctionChecked(lib, "glIsTransformFeedback"); + FunctionTable[Gl3FuncName::PauseTransformFeedback] = 
pvr::lib::getLibFunctionChecked(lib, "glPauseTransformFeedback"); + FunctionTable[Gl3FuncName::ResumeTransformFeedback] = pvr::lib::getLibFunctionChecked(lib, "glResumeTransformFeedback"); + FunctionTable[Gl3FuncName::GetProgramBinary] = pvr::lib::getLibFunctionChecked(lib, "glGetProgramBinary"); + FunctionTable[Gl3FuncName::ProgramBinary] = pvr::lib::getLibFunctionChecked(lib, "glProgramBinary"); + FunctionTable[Gl3FuncName::ProgramParameteri] = pvr::lib::getLibFunctionChecked(lib, "glProgramParameteri"); + FunctionTable[Gl3FuncName::InvalidateFramebuffer] = pvr::lib::getLibFunctionChecked(lib, "glInvalidateFramebuffer"); + FunctionTable[Gl3FuncName::InvalidateSubFramebuffer] = pvr::lib::getLibFunctionChecked(lib, "glInvalidateSubFramebuffer"); + FunctionTable[Gl3FuncName::TexStorage2D] = pvr::lib::getLibFunctionChecked(lib, "glTexStorage2D"); + FunctionTable[Gl3FuncName::TexStorage3D] = pvr::lib::getLibFunctionChecked(lib, "glTexStorage3D"); + FunctionTable[Gl3FuncName::GetInternalformativ] = pvr::lib::getLibFunctionChecked(lib, "glGetInternalformativ"); +#else + FunctionTable[Gl3FuncName::ReadBuffer] = (void*)&glReadBuffer; + FunctionTable[Gl3FuncName::DrawRangeElements] = (void*)&glDrawRangeElements; + FunctionTable[Gl3FuncName::TexImage3D] = (void*)&glTexImage3D; + FunctionTable[Gl3FuncName::TexSubImage3D] = (void*)&glTexSubImage3D; + FunctionTable[Gl3FuncName::CopyTexSubImage3D] = (void*)&glCopyTexSubImage3D; + FunctionTable[Gl3FuncName::CompressedTexImage3D] = (void*)&glCompressedTexImage3D; + FunctionTable[Gl3FuncName::CompressedTexSubImage3D] = (void*)&glCompressedTexSubImage3D; + FunctionTable[Gl3FuncName::GenQueries] = (void*)&glGenQueries; + FunctionTable[Gl3FuncName::DeleteQueries] = (void*)&glDeleteQueries; + FunctionTable[Gl3FuncName::IsQuery] = (void*)&glIsQuery; + FunctionTable[Gl3FuncName::BeginQuery] = (void*)&glBeginQuery; + FunctionTable[Gl3FuncName::EndQuery] = (void*)&glEndQuery; + FunctionTable[Gl3FuncName::GetQueryiv] = (void*)&glGetQueryiv; + FunctionTable[Gl3FuncName::GetQueryObjectuiv] = (void*)&glGetQueryObjectuiv; + FunctionTable[Gl3FuncName::UnmapBuffer] = (void*)&glUnmapBuffer; + FunctionTable[Gl3FuncName::GetBufferPointerv] = (void*)&glGetBufferPointerv; + FunctionTable[Gl3FuncName::DrawBuffers] = (void*)&glDrawBuffers; + FunctionTable[Gl3FuncName::UniformMatrix2x3fv] = (void*)&glUniformMatrix2x3fv; + FunctionTable[Gl3FuncName::UniformMatrix3x2fv] = (void*)&glUniformMatrix3x2fv; + FunctionTable[Gl3FuncName::UniformMatrix2x4fv] = (void*)&glUniformMatrix2x4fv; + FunctionTable[Gl3FuncName::UniformMatrix4x2fv] = (void*)&glUniformMatrix4x2fv; + FunctionTable[Gl3FuncName::UniformMatrix3x4fv] = (void*)&glUniformMatrix3x4fv; + FunctionTable[Gl3FuncName::UniformMatrix4x3fv] = (void*)&glUniformMatrix4x3fv; + FunctionTable[Gl3FuncName::BlitFramebuffer] = (void*)&glBlitFramebuffer; + FunctionTable[Gl3FuncName::RenderbufferStorageMultisample] = (void*)&glRenderbufferStorageMultisample; + FunctionTable[Gl3FuncName::FramebufferTextureLayer] = (void*)&glFramebufferTextureLayer; + FunctionTable[Gl3FuncName::MapBufferRange] = (void*)&glMapBufferRange; + FunctionTable[Gl3FuncName::FlushMappedBufferRange] = (void*)&glFlushMappedBufferRange; + FunctionTable[Gl3FuncName::BindVertexArray] = (void*)&glBindVertexArray; + FunctionTable[Gl3FuncName::DeleteVertexArrays] = (void*)&glDeleteVertexArrays; + FunctionTable[Gl3FuncName::GenVertexArrays] = (void*)&glGenVertexArrays; + FunctionTable[Gl3FuncName::IsVertexArray] = (void*)&glIsVertexArray; + 
FunctionTable[Gl3FuncName::GetIntegeri_v] = (void*)&glGetIntegeri_v;
+		FunctionTable[Gl3FuncName::BeginTransformFeedback] = (void*)&glBeginTransformFeedback;
+		FunctionTable[Gl3FuncName::EndTransformFeedback] = (void*)&glEndTransformFeedback;
+		FunctionTable[Gl3FuncName::BindBufferRange] = (void*)&glBindBufferRange;
+		FunctionTable[Gl3FuncName::BindBufferBase] = (void*)&glBindBufferBase;
+		FunctionTable[Gl3FuncName::TransformFeedbackVaryings] = (void*)&glTransformFeedbackVaryings;
+		FunctionTable[Gl3FuncName::GetTransformFeedbackVarying] = (void*)&glGetTransformFeedbackVarying;
+		FunctionTable[Gl3FuncName::VertexAttribIPointer] = (void*)&glVertexAttribIPointer;
+		FunctionTable[Gl3FuncName::GetVertexAttribIiv] = (void*)&glGetVertexAttribIiv;
+		FunctionTable[Gl3FuncName::GetVertexAttribIuiv] = (void*)&glGetVertexAttribIuiv;
+		FunctionTable[Gl3FuncName::VertexAttribI4i] = (void*)&glVertexAttribI4i;
+		FunctionTable[Gl3FuncName::VertexAttribI4ui] = (void*)&glVertexAttribI4ui;
+		FunctionTable[Gl3FuncName::VertexAttribI4iv] = (void*)&glVertexAttribI4iv;
+		FunctionTable[Gl3FuncName::VertexAttribI4uiv] = (void*)&glVertexAttribI4uiv;
+		FunctionTable[Gl3FuncName::GetUniformuiv] = (void*)&glGetUniformuiv;
+		FunctionTable[Gl3FuncName::GetFragDataLocation] = (void*)&glGetFragDataLocation;
+		FunctionTable[Gl3FuncName::Uniform1ui] = (void*)&glUniform1ui;
+		FunctionTable[Gl3FuncName::Uniform2ui] = (void*)&glUniform2ui;
+		FunctionTable[Gl3FuncName::Uniform3ui] = (void*)&glUniform3ui;
+		FunctionTable[Gl3FuncName::Uniform4ui] = (void*)&glUniform4ui;
+		FunctionTable[Gl3FuncName::Uniform1uiv] = (void*)&glUniform1uiv;
+		FunctionTable[Gl3FuncName::Uniform2uiv] = (void*)&glUniform2uiv;
+		FunctionTable[Gl3FuncName::Uniform3uiv] = (void*)&glUniform3uiv;
+		FunctionTable[Gl3FuncName::Uniform4uiv] = (void*)&glUniform4uiv;
+		FunctionTable[Gl3FuncName::ClearBufferiv] = (void*)&glClearBufferiv;
+		FunctionTable[Gl3FuncName::ClearBufferuiv] = (void*)&glClearBufferuiv;
+		FunctionTable[Gl3FuncName::ClearBufferfv] = (void*)&glClearBufferfv;
+		FunctionTable[Gl3FuncName::ClearBufferfi] = (void*)&glClearBufferfi;
+		FunctionTable[Gl3FuncName::GetStringi] = (void*)&glGetStringi;
+		FunctionTable[Gl3FuncName::CopyBufferSubData] = (void*)&glCopyBufferSubData;
+		FunctionTable[Gl3FuncName::GetUniformIndices] = (void*)&glGetUniformIndices;
+		FunctionTable[Gl3FuncName::GetActiveUniformsiv] = (void*)&glGetActiveUniformsiv;
+		FunctionTable[Gl3FuncName::GetUniformBlockIndex] = (void*)&glGetUniformBlockIndex;
+		FunctionTable[Gl3FuncName::GetActiveUniformBlockiv] = (void*)&glGetActiveUniformBlockiv;
+		FunctionTable[Gl3FuncName::GetActiveUniformBlockName] = (void*)&glGetActiveUniformBlockName;
+		FunctionTable[Gl3FuncName::UniformBlockBinding] = (void*)&glUniformBlockBinding;
+		FunctionTable[Gl3FuncName::DrawArraysInstanced] = (void*)&glDrawArraysInstanced;
+		FunctionTable[Gl3FuncName::DrawElementsInstanced] = (void*)&glDrawElementsInstanced;
+		FunctionTable[Gl3FuncName::FenceSync] = (void*)&glFenceSync;
+		FunctionTable[Gl3FuncName::IsSync] = (void*)&glIsSync;
+		FunctionTable[Gl3FuncName::DeleteSync] = (void*)&glDeleteSync;
+		FunctionTable[Gl3FuncName::ClientWaitSync] = (void*)&glClientWaitSync;
+		FunctionTable[Gl3FuncName::WaitSync] = (void*)&glWaitSync;
+		FunctionTable[Gl3FuncName::GetInteger64v] = (void*)&glGetInteger64v;
+		FunctionTable[Gl3FuncName::GetSynciv] = (void*)&glGetSynciv;
+		FunctionTable[Gl3FuncName::GetInteger64i_v] = (void*)&glGetInteger64i_v;
+		FunctionTable[Gl3FuncName::GetBufferParameteri64v] = (void*)&glGetBufferParameteri64v;
+		FunctionTable[Gl3FuncName::GenSamplers] = (void*)&glGenSamplers;
+		FunctionTable[Gl3FuncName::DeleteSamplers] = (void*)&glDeleteSamplers;
+		FunctionTable[Gl3FuncName::IsSampler] = (void*)&glIsSampler;
+		FunctionTable[Gl3FuncName::BindSampler] = (void*)&glBindSampler;
+		FunctionTable[Gl3FuncName::SamplerParameteri] = (void*)&glSamplerParameteri;
+		FunctionTable[Gl3FuncName::SamplerParameteriv] = (void*)&glSamplerParameteriv;
+		FunctionTable[Gl3FuncName::SamplerParameterf] = (void*)&glSamplerParameterf;
+		FunctionTable[Gl3FuncName::SamplerParameterfv] = (void*)&glSamplerParameterfv;
+		FunctionTable[Gl3FuncName::GetSamplerParameteriv] = (void*)&glGetSamplerParameteriv;
+		FunctionTable[Gl3FuncName::GetSamplerParameterfv] = (void*)&glGetSamplerParameterfv;
+		FunctionTable[Gl3FuncName::VertexAttribDivisor] = (void*)&glVertexAttribDivisor;
+		FunctionTable[Gl3FuncName::BindTransformFeedback] = (void*)&glBindTransformFeedback;
+		FunctionTable[Gl3FuncName::DeleteTransformFeedbacks] = (void*)&glDeleteTransformFeedbacks;
+		FunctionTable[Gl3FuncName::GenTransformFeedbacks] = (void*)&glGenTransformFeedbacks;
+		FunctionTable[Gl3FuncName::IsTransformFeedback] = (void*)&glIsTransformFeedback;
+		FunctionTable[Gl3FuncName::PauseTransformFeedback] = (void*)&glPauseTransformFeedback;
+		FunctionTable[Gl3FuncName::ResumeTransformFeedback] = (void*)&glResumeTransformFeedback;
+		FunctionTable[Gl3FuncName::GetProgramBinary] = (void*)&glGetProgramBinary;
+		FunctionTable[Gl3FuncName::ProgramBinary] = (void*)&glProgramBinary;
+		FunctionTable[Gl3FuncName::ProgramParameteri] = (void*)&glProgramParameteri;
+		FunctionTable[Gl3FuncName::InvalidateFramebuffer] = (void*)&glInvalidateFramebuffer;
+		FunctionTable[Gl3FuncName::InvalidateSubFramebuffer] = (void*)&glInvalidateSubFramebuffer;
+		FunctionTable[Gl3FuncName::TexStorage2D] = (void*)&glTexStorage2D;
+		FunctionTable[Gl3FuncName::TexStorage3D] = (void*)&glTexStorage3D;
+		FunctionTable[Gl3FuncName::GetInternalformativ] = (void*)&glGetInternalformativ;
+#endif
+	}
+	return FunctionTable[funcname];
+}
+}
+}
+#ifndef DYNAMICGLES_NO_NAMESPACE
+namespace gl {
+#elif TARGET_OS_IPHONE
+namespace gl {
+namespace internals {
+#endif
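+// The wrappers below each fetch their entry point from the table built by
+// getEs3Function() and forward the arguments unchanged, so dynamically
+// resolved functions are called exactly like statically linked GL. A hedged
+// usage sketch (context and buffer setup omitted; assumes
+// DYNAMICGLES_NO_NAMESPACE is not defined, so the wrappers live in gl::):
+//
+//   GLuint vao = 0;
+//   gl::GenVertexArrays(1, &vao);
+//   gl::BindVertexArray(vao);
+//   void* p = gl::MapBufferRange(GL_ARRAY_BUFFER, 0, 64, GL_MAP_WRITE_BIT);
+//   // ... write 64 bytes of vertex data through p ...
+//   gl::UnmapBuffer(GL_ARRAY_BUFFER);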
+inline void DYNAMICGLES_FUNCTION(ReadBuffer)(GLenum src)
+{
+#if TARGET_OS_IPHONE
+	typedef decltype(&glReadBuffer) PFNGLREADBUFFERPROC;
+#endif
+	PFNGLREADBUFFERPROC _ReadBuffer = (PFNGLREADBUFFERPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::ReadBuffer);
+	return _ReadBuffer(src);
+}
+inline void DYNAMICGLES_FUNCTION(DrawRangeElements)(GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum type, const void* indices)
+{
+#if TARGET_OS_IPHONE
+	typedef decltype(&glDrawRangeElements) PFNGLDRAWRANGEELEMENTSPROC;
+#endif
+	PFNGLDRAWRANGEELEMENTSPROC _DrawRangeElements = (PFNGLDRAWRANGEELEMENTSPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::DrawRangeElements);
+	return _DrawRangeElements(mode, start, end, count, type, indices);
+}
+inline void DYNAMICGLES_FUNCTION(TexImage3D)(GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const void* pixels)
+{
+#if TARGET_OS_IPHONE
+	typedef decltype(&glTexImage3D) PFNGLTEXIMAGE3DPROC;
+#endif
+	PFNGLTEXIMAGE3DPROC _TexImage3D = (PFNGLTEXIMAGE3DPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::TexImage3D);
+	return _TexImage3D(target, level, internalformat, width, height, depth, border, format, type, pixels);
+}
+inline void
DYNAMICGLES_FUNCTION(TexSubImage3D)(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const void* pixels) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glTexSubImage3D) PFNGLTEXSUBIMAGE3DPROC; +#endif + PFNGLTEXSUBIMAGE3DPROC _TexSubImage3D = (PFNGLTEXSUBIMAGE3DPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::TexSubImage3D); + return _TexSubImage3D(target, level, xoffset, yoffset, zoffset, width, height, depth, format, type, pixels); +} +inline void DYNAMICGLES_FUNCTION(CopyTexSubImage3D)(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLint x, GLint y, GLsizei width, GLsizei height) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glCopyTexSubImage3D) PFNGLCOPYTEXSUBIMAGE3DPROC; +#endif + PFNGLCOPYTEXSUBIMAGE3DPROC _CopyTexSubImage3D = (PFNGLCOPYTEXSUBIMAGE3DPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::CopyTexSubImage3D); + return _CopyTexSubImage3D(target, level, xoffset, yoffset, zoffset, x, y, width, height); +} +inline void DYNAMICGLES_FUNCTION(CompressedTexImage3D)(GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLsizei imageSize, const void* data) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glCompressedTexImage3D) PFNGLCOMPRESSEDTEXIMAGE3DPROC; +#endif + PFNGLCOMPRESSEDTEXIMAGE3DPROC _CompressedTexImage3D = (PFNGLCOMPRESSEDTEXIMAGE3DPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::CompressedTexImage3D); + return _CompressedTexImage3D(target, level, internalformat, width, height, depth, border, imageSize, data); +} +inline void DYNAMICGLES_FUNCTION(CompressedTexSubImage3D)(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLsizei imageSize, const void* data) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glCompressedTexSubImage3D) PFNGLCOMPRESSEDTEXSUBIMAGE3DPROC; +#endif + PFNGLCOMPRESSEDTEXSUBIMAGE3DPROC _CompressedTexSubImage3D = (PFNGLCOMPRESSEDTEXSUBIMAGE3DPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::CompressedTexSubImage3D); + return _CompressedTexSubImage3D(target, level, xoffset, yoffset, zoffset, width, height, depth, format, imageSize, data); +} +inline void DYNAMICGLES_FUNCTION(GenQueries)(GLsizei n, GLuint* ids) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGenQueries) PFNGLGENQUERIESPROC; +#endif + PFNGLGENQUERIESPROC _GenQueries = (PFNGLGENQUERIESPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::GenQueries); + return _GenQueries(n, ids); +} +inline void DYNAMICGLES_FUNCTION(DeleteQueries)(GLsizei n, const GLuint* ids) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glDeleteQueries) PFNGLDELETEQUERIESPROC; +#endif + PFNGLDELETEQUERIESPROC _DeleteQueries = (PFNGLDELETEQUERIESPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::DeleteQueries); + return _DeleteQueries(n, ids); +} +inline GLboolean DYNAMICGLES_FUNCTION(IsQuery)(GLuint id) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glIsQuery) PFNGLISQUERYPROC; +#endif + PFNGLISQUERYPROC _IsQuery = (PFNGLISQUERYPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::IsQuery); + return _IsQuery(id); +} +inline void DYNAMICGLES_FUNCTION(BeginQuery)(GLenum target, GLuint id) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glBeginQuery) PFNGLBEGINQUERYPROC; +#endif + PFNGLBEGINQUERYPROC _BeginQuery = 
(PFNGLBEGINQUERYPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::BeginQuery); + return _BeginQuery(target, id); +} +inline void DYNAMICGLES_FUNCTION(EndQuery)(GLenum target) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glEndQuery) PFNGLENDQUERYPROC; +#endif + PFNGLENDQUERYPROC _EndQuery = (PFNGLENDQUERYPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::EndQuery); + return _EndQuery(target); +} +inline void DYNAMICGLES_FUNCTION(GetQueryiv)(GLenum target, GLenum pname, GLint* params) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGetQueryiv) PFNGLGETQUERYIVPROC; +#endif + PFNGLGETQUERYIVPROC _GetQueryiv = (PFNGLGETQUERYIVPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::GetQueryiv); + return _GetQueryiv(target, pname, params); +} +inline void DYNAMICGLES_FUNCTION(GetQueryObjectuiv)(GLuint id, GLenum pname, GLuint* params) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGetQueryObjectuiv) PFNGLGETQUERYOBJECTUIVPROC; +#endif + PFNGLGETQUERYOBJECTUIVPROC _GetQueryObjectuiv = (PFNGLGETQUERYOBJECTUIVPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::GetQueryObjectuiv); + return _GetQueryObjectuiv(id, pname, params); +} +inline GLboolean DYNAMICGLES_FUNCTION(UnmapBuffer)(GLenum target) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glUnmapBuffer) PFNGLUNMAPBUFFERPROC; +#endif + PFNGLUNMAPBUFFERPROC _UnmapBuffer = (PFNGLUNMAPBUFFERPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::UnmapBuffer); + return _UnmapBuffer(target); +} +inline void DYNAMICGLES_FUNCTION(GetBufferPointerv)(GLenum target, GLenum pname, void** params) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGetBufferPointerv) PFNGLGETBUFFERPOINTERVPROC; +#endif + PFNGLGETBUFFERPOINTERVPROC _GetBufferPointerv = (PFNGLGETBUFFERPOINTERVPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::GetBufferPointerv); + return _GetBufferPointerv(target, pname, params); +} +inline void DYNAMICGLES_FUNCTION(DrawBuffers)(GLsizei n, const GLenum* bufs) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glDrawBuffers) PFNGLDRAWBUFFERSPROC; +#endif + PFNGLDRAWBUFFERSPROC _DrawBuffers = (PFNGLDRAWBUFFERSPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::DrawBuffers); + return _DrawBuffers(n, bufs); +} +inline void DYNAMICGLES_FUNCTION(UniformMatrix2x3fv)(GLint location, GLsizei count, GLboolean transpose, const GLfloat* value) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glUniformMatrix2x3fv) PFNGLUNIFORMMATRIX2X3FVPROC; +#endif + PFNGLUNIFORMMATRIX2X3FVPROC _UniformMatrix2x3fv = (PFNGLUNIFORMMATRIX2X3FVPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::UniformMatrix2x3fv); + return _UniformMatrix2x3fv(location, count, transpose, value); +} +inline void DYNAMICGLES_FUNCTION(UniformMatrix3x2fv)(GLint location, GLsizei count, GLboolean transpose, const GLfloat* value) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glUniformMatrix3x2fv) PFNGLUNIFORMMATRIX3X2FVPROC; +#endif + PFNGLUNIFORMMATRIX3X2FVPROC _UniformMatrix3x2fv = (PFNGLUNIFORMMATRIX3X2FVPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::UniformMatrix3x2fv); + return _UniformMatrix3x2fv(location, count, transpose, value); +} +inline void DYNAMICGLES_FUNCTION(UniformMatrix2x4fv)(GLint location, GLsizei count, GLboolean transpose, const GLfloat* value) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glUniformMatrix2x4fv) PFNGLUNIFORMMATRIX2X4FVPROC; +#endif + PFNGLUNIFORMMATRIX2X4FVPROC _UniformMatrix2x4fv = 
(PFNGLUNIFORMMATRIX2X4FVPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::UniformMatrix2x4fv); + return _UniformMatrix2x4fv(location, count, transpose, value); +} +inline void DYNAMICGLES_FUNCTION(UniformMatrix4x2fv)(GLint location, GLsizei count, GLboolean transpose, const GLfloat* value) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glUniformMatrix4x2fv) PFNGLUNIFORMMATRIX4X2FVPROC; +#endif + PFNGLUNIFORMMATRIX4X2FVPROC _UniformMatrix4x2fv = (PFNGLUNIFORMMATRIX4X2FVPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::UniformMatrix4x2fv); + return _UniformMatrix4x2fv(location, count, transpose, value); +} +inline void DYNAMICGLES_FUNCTION(UniformMatrix3x4fv)(GLint location, GLsizei count, GLboolean transpose, const GLfloat* value) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glUniformMatrix3x4fv) PFNGLUNIFORMMATRIX3X4FVPROC; +#endif + PFNGLUNIFORMMATRIX3X4FVPROC _UniformMatrix3x4fv = (PFNGLUNIFORMMATRIX3X4FVPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::UniformMatrix3x4fv); + return _UniformMatrix3x4fv(location, count, transpose, value); +} +inline void DYNAMICGLES_FUNCTION(UniformMatrix4x3fv)(GLint location, GLsizei count, GLboolean transpose, const GLfloat* value) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glUniformMatrix4x3fv) PFNGLUNIFORMMATRIX4X3FVPROC; +#endif + PFNGLUNIFORMMATRIX4X3FVPROC _UniformMatrix4x3fv = (PFNGLUNIFORMMATRIX4X3FVPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::UniformMatrix4x3fv); + return _UniformMatrix4x3fv(location, count, transpose, value); +} +inline void DYNAMICGLES_FUNCTION(BlitFramebuffer)(GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glBlitFramebuffer) PFNGLBLITFRAMEBUFFERPROC; +#endif + PFNGLBLITFRAMEBUFFERPROC _BlitFramebuffer = (PFNGLBLITFRAMEBUFFERPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::BlitFramebuffer); + return _BlitFramebuffer(srcX0, srcY0, srcX1, srcY1, dstX0, dstY0, dstX1, dstY1, mask, filter); +} +inline void DYNAMICGLES_FUNCTION(RenderbufferStorageMultisample)(GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glRenderbufferStorageMultisample) PFNGLRENDERBUFFERSTORAGEMULTISAMPLEPROC; +#endif + PFNGLRENDERBUFFERSTORAGEMULTISAMPLEPROC _RenderbufferStorageMultisample = (PFNGLRENDERBUFFERSTORAGEMULTISAMPLEPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::RenderbufferStorageMultisample); + return _RenderbufferStorageMultisample(target, samples, internalformat, width, height); +} +inline void DYNAMICGLES_FUNCTION(FramebufferTextureLayer)(GLenum target, GLenum attachment, GLuint texture, GLint level, GLint layer) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glFramebufferTextureLayer) PFNGLFRAMEBUFFERTEXTURELAYERPROC; +#endif + PFNGLFRAMEBUFFERTEXTURELAYERPROC _FramebufferTextureLayer = (PFNGLFRAMEBUFFERTEXTURELAYERPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::FramebufferTextureLayer); + return _FramebufferTextureLayer(target, attachment, texture, level, layer); +} +inline void* DYNAMICGLES_FUNCTION(MapBufferRange)(GLenum target, GLintptr offset, GLsizeiptr length, GLbitfield access) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glMapBufferRange) PFNGLMAPBUFFERRANGEPROC; +#endif + PFNGLMAPBUFFERRANGEPROC _MapBufferRange = (PFNGLMAPBUFFERRANGEPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::MapBufferRange); + return 
_MapBufferRange(target, offset, length, access); +} +inline void DYNAMICGLES_FUNCTION(FlushMappedBufferRange)(GLenum target, GLintptr offset, GLsizeiptr length) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glFlushMappedBufferRange) PFNGLFLUSHMAPPEDBUFFERRANGEPROC; +#endif + PFNGLFLUSHMAPPEDBUFFERRANGEPROC _FlushMappedBufferRange = (PFNGLFLUSHMAPPEDBUFFERRANGEPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::FlushMappedBufferRange); + return _FlushMappedBufferRange(target, offset, length); +} +inline void DYNAMICGLES_FUNCTION(BindVertexArray)(GLuint array) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glBindVertexArray) PFNGLBINDVERTEXARRAYPROC; +#endif + PFNGLBINDVERTEXARRAYPROC _BindVertexArray = (PFNGLBINDVERTEXARRAYPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::BindVertexArray); + return _BindVertexArray(array); +} +inline void DYNAMICGLES_FUNCTION(DeleteVertexArrays)(GLsizei n, const GLuint* arrays) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glDeleteVertexArrays) PFNGLDELETEVERTEXARRAYSPROC; +#endif + PFNGLDELETEVERTEXARRAYSPROC _DeleteVertexArrays = (PFNGLDELETEVERTEXARRAYSPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::DeleteVertexArrays); + return _DeleteVertexArrays(n, arrays); +} +inline void DYNAMICGLES_FUNCTION(GenVertexArrays)(GLsizei n, GLuint* arrays) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGenVertexArrays) PFNGLGENVERTEXARRAYSPROC; +#endif + PFNGLGENVERTEXARRAYSPROC _GenVertexArrays = (PFNGLGENVERTEXARRAYSPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::GenVertexArrays); + return _GenVertexArrays(n, arrays); +} +inline GLboolean DYNAMICGLES_FUNCTION(IsVertexArray)(GLuint array) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glIsVertexArray) PFNGLISVERTEXARRAYPROC; +#endif + PFNGLISVERTEXARRAYPROC _IsVertexArray = (PFNGLISVERTEXARRAYPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::IsVertexArray); + return _IsVertexArray(array); +} +inline void DYNAMICGLES_FUNCTION(GetIntegeri_v)(GLenum target, GLuint index, GLint* data) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGetIntegeri_v) PFNGLGETINTEGERI_VPROC; +#endif + PFNGLGETINTEGERI_VPROC _GetIntegeri_v = (PFNGLGETINTEGERI_VPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::GetIntegeri_v); + return _GetIntegeri_v(target, index, data); +} +inline void DYNAMICGLES_FUNCTION(BeginTransformFeedback)(GLenum primitiveMode) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glBeginTransformFeedback) PFNGLBEGINTRANSFORMFEEDBACKPROC; +#endif + PFNGLBEGINTRANSFORMFEEDBACKPROC _BeginTransformFeedback = (PFNGLBEGINTRANSFORMFEEDBACKPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::BeginTransformFeedback); + return _BeginTransformFeedback(primitiveMode); +} +inline void DYNAMICGLES_FUNCTION(EndTransformFeedback)(void) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glEndTransformFeedback) PFNGLENDTRANSFORMFEEDBACKPROC; +#endif + PFNGLENDTRANSFORMFEEDBACKPROC _EndTransformFeedback = (PFNGLENDTRANSFORMFEEDBACKPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::EndTransformFeedback); + return _EndTransformFeedback(); +} +inline void DYNAMICGLES_FUNCTION(BindBufferRange)(GLenum target, GLuint index, GLuint buffer, GLintptr offset, GLsizeiptr size) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glBindBufferRange) PFNGLBINDBUFFERRANGEPROC; +#endif + PFNGLBINDBUFFERRANGEPROC _BindBufferRange = (PFNGLBINDBUFFERRANGEPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::BindBufferRange); + return _BindBufferRange(target, index, buffer, 
offset, size); +} +inline void DYNAMICGLES_FUNCTION(BindBufferBase)(GLenum target, GLuint index, GLuint buffer) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glBindBufferBase) PFNGLBINDBUFFERBASEPROC; +#endif + PFNGLBINDBUFFERBASEPROC _BindBufferBase = (PFNGLBINDBUFFERBASEPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::BindBufferBase); + return _BindBufferBase(target, index, buffer); +} +inline void DYNAMICGLES_FUNCTION(TransformFeedbackVaryings)(GLuint program, GLsizei count, const GLchar* const* varyings, GLenum bufferMode) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glTransformFeedbackVaryings) PFNGLTRANSFORMFEEDBACKVARYINGSPROC; +#endif + PFNGLTRANSFORMFEEDBACKVARYINGSPROC _TransformFeedbackVaryings = (PFNGLTRANSFORMFEEDBACKVARYINGSPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::TransformFeedbackVaryings); + return _TransformFeedbackVaryings(program, count, varyings, bufferMode); + +} +inline void DYNAMICGLES_FUNCTION(GetTransformFeedbackVarying)(GLuint program, GLuint index, GLsizei bufSize, GLsizei* length, GLsizei* size, GLenum* type, GLchar* name) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGetTransformFeedbackVarying) PFNGLGETTRANSFORMFEEDBACKVARYINGPROC; +#endif + PFNGLGETTRANSFORMFEEDBACKVARYINGPROC _GetTransformFeedbackVarying = (PFNGLGETTRANSFORMFEEDBACKVARYINGPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::GetTransformFeedbackVarying); + return _GetTransformFeedbackVarying(program, index, bufSize, length, size, type, name); + +} +inline void DYNAMICGLES_FUNCTION(VertexAttribIPointer)(GLuint index, GLint size, GLenum type, GLsizei stride, const void* pointer) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glVertexAttribIPointer) PFNGLVERTEXATTRIBIPOINTERPROC; +#endif + PFNGLVERTEXATTRIBIPOINTERPROC _VertexAttribIPointer = (PFNGLVERTEXATTRIBIPOINTERPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::VertexAttribIPointer); + return _VertexAttribIPointer(index, size, type, stride, pointer); +} +inline void DYNAMICGLES_FUNCTION(GetVertexAttribIiv)(GLuint index, GLenum pname, GLint* params) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGetVertexAttribIiv) PFNGLGETVERTEXATTRIBIIVPROC; +#endif + PFNGLGETVERTEXATTRIBIIVPROC _GetVertexAttribIiv = (PFNGLGETVERTEXATTRIBIIVPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::GetVertexAttribIiv); + return _GetVertexAttribIiv(index, pname, params); +} +inline void DYNAMICGLES_FUNCTION(GetVertexAttribIuiv)(GLuint index, GLenum pname, GLuint* params) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGetVertexAttribIuiv) PFNGLGETVERTEXATTRIBIUIVPROC; +#endif + PFNGLGETVERTEXATTRIBIUIVPROC _GetVertexAttribIuiv = (PFNGLGETVERTEXATTRIBIUIVPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::GetVertexAttribIuiv); + return _GetVertexAttribIuiv(index, pname, params); +} +inline void DYNAMICGLES_FUNCTION(VertexAttribI4i)(GLuint index, GLint x, GLint y, GLint z, GLint w) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glVertexAttribI4i) PFNGLVERTEXATTRIBI4IPROC; +#endif + PFNGLVERTEXATTRIBI4IPROC _VertexAttribI4i = (PFNGLVERTEXATTRIBI4IPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::VertexAttribI4i); + return _VertexAttribI4i(index, x, y, z, w); +} +inline void DYNAMICGLES_FUNCTION(VertexAttribI4ui)(GLuint index, GLuint x, GLuint y, GLuint z, GLuint w) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glVertexAttribI4ui) PFNGLVERTEXATTRIBI4UIPROC; +#endif + PFNGLVERTEXATTRIBI4UIPROC _VertexAttribI4ui = 
(PFNGLVERTEXATTRIBI4UIPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::VertexAttribI4ui); + return _VertexAttribI4ui(index, x, y, z, w); +} +inline void DYNAMICGLES_FUNCTION(VertexAttribI4iv)(GLuint index, const GLint* v) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glVertexAttribI4iv) PFNGLVERTEXATTRIBI4IVPROC; +#endif + PFNGLVERTEXATTRIBI4IVPROC _VertexAttribI4iv = (PFNGLVERTEXATTRIBI4IVPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::VertexAttribI4iv); + return _VertexAttribI4iv(index, v); +} +inline void DYNAMICGLES_FUNCTION(VertexAttribI4uiv)(GLuint index, const GLuint* v) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glVertexAttribI4uiv) PFNGLVERTEXATTRIBI4UIVPROC; +#endif + PFNGLVERTEXATTRIBI4UIVPROC _VertexAttribI4uiv = (PFNGLVERTEXATTRIBI4UIVPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::VertexAttribI4uiv); + return _VertexAttribI4uiv(index, v); +} +inline void DYNAMICGLES_FUNCTION(GetUniformuiv)(GLuint program, GLint location, GLuint* params) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGetUniformuiv) PFNGLGETUNIFORMUIVPROC; +#endif + PFNGLGETUNIFORMUIVPROC _GetUniformuiv = (PFNGLGETUNIFORMUIVPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::GetUniformuiv); + return _GetUniformuiv(program, location, params); +} +inline GLint DYNAMICGLES_FUNCTION(GetFragDataLocation)(GLuint program, const GLchar* name) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGetFragDataLocation) PFNGLGETFRAGDATALOCATIONPROC; +#endif + PFNGLGETFRAGDATALOCATIONPROC _GetFragDataLocation = (PFNGLGETFRAGDATALOCATIONPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::GetFragDataLocation); + return _GetFragDataLocation(program, name); +} +inline void DYNAMICGLES_FUNCTION(Uniform1ui)(GLint location, GLuint v0) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glUniform1ui) PFNGLUNIFORM1UIPROC; +#endif + PFNGLUNIFORM1UIPROC _Uniform1ui = (PFNGLUNIFORM1UIPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::Uniform1ui); + return _Uniform1ui(location, v0); +} +inline void DYNAMICGLES_FUNCTION(Uniform2ui)(GLint location, GLuint v0, GLuint v1) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glUniform2ui) PFNGLUNIFORM2UIPROC; +#endif + PFNGLUNIFORM2UIPROC _Uniform2ui = (PFNGLUNIFORM2UIPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::Uniform2ui); + return _Uniform2ui(location, v0, v1); +} +inline void DYNAMICGLES_FUNCTION(Uniform3ui)(GLint location, GLuint v0, GLuint v1, GLuint v2) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glUniform3ui) PFNGLUNIFORM3UIPROC; +#endif + PFNGLUNIFORM3UIPROC _Uniform3ui = (PFNGLUNIFORM3UIPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::Uniform3ui); + return _Uniform3ui(location, v0, v1, v2); +} +inline void DYNAMICGLES_FUNCTION(Uniform4ui)(GLint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glUniform4ui) PFNGLUNIFORM4UIPROC; +#endif + PFNGLUNIFORM4UIPROC _Uniform4ui = (PFNGLUNIFORM4UIPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::Uniform4ui); + return _Uniform4ui(location, v0, v1, v2, v3); +} +inline void DYNAMICGLES_FUNCTION(Uniform1uiv)(GLint location, GLsizei count, const GLuint* value) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glUniform1uiv) PFNGLUNIFORM1UIVPROC; +#endif + PFNGLUNIFORM1UIVPROC _Uniform1uiv = (PFNGLUNIFORM1UIVPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::Uniform1uiv); + return _Uniform1uiv(location, count, value); +} +inline void DYNAMICGLES_FUNCTION(Uniform2uiv)(GLint location, GLsizei 
count, const GLuint* value) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glUniform2uiv) PFNGLUNIFORM2UIVPROC; +#endif + PFNGLUNIFORM2UIVPROC _Uniform2uiv = (PFNGLUNIFORM2UIVPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::Uniform2uiv); + return _Uniform2uiv(location, count, value); +} +inline void DYNAMICGLES_FUNCTION(Uniform3uiv)(GLint location, GLsizei count, const GLuint* value) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glUniform3uiv) PFNGLUNIFORM3UIVPROC; +#endif + PFNGLUNIFORM3UIVPROC _Uniform3uiv = (PFNGLUNIFORM3UIVPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::Uniform3uiv); + return _Uniform3uiv(location, count, value); +} +inline void DYNAMICGLES_FUNCTION(Uniform4uiv)(GLint location, GLsizei count, const GLuint* value) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glUniform4uiv) PFNGLUNIFORM4UIVPROC; +#endif + PFNGLUNIFORM4UIVPROC _Uniform4uiv = (PFNGLUNIFORM4UIVPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::Uniform4uiv); + return _Uniform4uiv(location, count, value); +} +inline void DYNAMICGLES_FUNCTION(ClearBufferiv)(GLenum buffer, GLint drawbuffer, const GLint* value) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glClearBufferiv) PFNGLCLEARBUFFERIVPROC; +#endif + PFNGLCLEARBUFFERIVPROC _ClearBufferiv = (PFNGLCLEARBUFFERIVPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::ClearBufferiv); + return _ClearBufferiv(buffer, drawbuffer, value); +} +inline void DYNAMICGLES_FUNCTION(ClearBufferuiv)(GLenum buffer, GLint drawbuffer, const GLuint* value) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glClearBufferuiv) PFNGLCLEARBUFFERUIVPROC; +#endif + PFNGLCLEARBUFFERUIVPROC _ClearBufferuiv = (PFNGLCLEARBUFFERUIVPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::ClearBufferuiv); + return _ClearBufferuiv(buffer, drawbuffer, value); +} +inline void DYNAMICGLES_FUNCTION(ClearBufferfv)(GLenum buffer, GLint drawbuffer, const GLfloat* value) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glClearBufferfv) PFNGLCLEARBUFFERFVPROC; +#endif + PFNGLCLEARBUFFERFVPROC _ClearBufferfv = (PFNGLCLEARBUFFERFVPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::ClearBufferfv); + return _ClearBufferfv(buffer, drawbuffer, value); +} +inline void DYNAMICGLES_FUNCTION(ClearBufferfi)(GLenum buffer, GLint drawbuffer, GLfloat depth, GLint stencil) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glClearBufferfi) PFNGLCLEARBUFFERFIPROC; +#endif + PFNGLCLEARBUFFERFIPROC _ClearBufferfi = (PFNGLCLEARBUFFERFIPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::ClearBufferfi); + return _ClearBufferfi(buffer, drawbuffer, depth, stencil); +} +inline const GLubyte* DYNAMICGLES_FUNCTION(GetStringi)(GLenum name, GLuint index) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGetStringi) PFNGLGETSTRINGIPROC; +#endif + PFNGLGETSTRINGIPROC _GetStringi = (PFNGLGETSTRINGIPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::GetStringi); + return _GetStringi(name, index); +} +inline void DYNAMICGLES_FUNCTION(CopyBufferSubData)(GLenum readTarget, GLenum writeTarget, GLintptr readOffset, GLintptr writeOffset, GLsizeiptr size) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glCopyBufferSubData) PFNGLCOPYBUFFERSUBDATAPROC; +#endif + PFNGLCOPYBUFFERSUBDATAPROC _CopyBufferSubData = (PFNGLCOPYBUFFERSUBDATAPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::CopyBufferSubData); + return _CopyBufferSubData(readTarget, writeTarget, readOffset, writeOffset, size); +} +inline void DYNAMICGLES_FUNCTION(GetUniformIndices)(GLuint program, GLsizei 
uniformCount, const GLchar* const* uniformNames, GLuint* uniformIndices) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGetUniformIndices) PFNGLGETUNIFORMINDICESPROC; +#endif + PFNGLGETUNIFORMINDICESPROC _GetUniformIndices = (PFNGLGETUNIFORMINDICESPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::GetUniformIndices); + return _GetUniformIndices(program, uniformCount, uniformNames, uniformIndices); + +} +inline void DYNAMICGLES_FUNCTION(GetActiveUniformsiv)(GLuint program, GLsizei uniformCount, const GLuint* uniformIndices, GLenum pname, GLint* params) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGetActiveUniformsiv) PFNGLGETACTIVEUNIFORMSIVPROC; +#endif + PFNGLGETACTIVEUNIFORMSIVPROC _GetActiveUniformsiv = (PFNGLGETACTIVEUNIFORMSIVPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::GetActiveUniformsiv); + return _GetActiveUniformsiv(program, uniformCount, uniformIndices, pname, params); +} +inline GLuint DYNAMICGLES_FUNCTION(GetUniformBlockIndex)(GLuint program, const GLchar* uniformBlockName) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGetUniformBlockIndex) PFNGLGETUNIFORMBLOCKINDEXPROC; +#endif + PFNGLGETUNIFORMBLOCKINDEXPROC _GetUniformBlockIndex = (PFNGLGETUNIFORMBLOCKINDEXPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::GetUniformBlockIndex); + return _GetUniformBlockIndex(program, uniformBlockName); +} +inline void DYNAMICGLES_FUNCTION(GetActiveUniformBlockiv)(GLuint program, GLuint uniformBlockIndex, GLenum pname, GLint* params) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGetActiveUniformBlockiv) PFNGLGETACTIVEUNIFORMBLOCKIVPROC; +#endif + PFNGLGETACTIVEUNIFORMBLOCKIVPROC _GetActiveUniformBlockiv = (PFNGLGETACTIVEUNIFORMBLOCKIVPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::GetActiveUniformBlockiv); + return _GetActiveUniformBlockiv(program, uniformBlockIndex, pname, params); +} +inline void DYNAMICGLES_FUNCTION(GetActiveUniformBlockName)(GLuint program, GLuint uniformBlockIndex, GLsizei bufSize, GLsizei* length, GLchar* uniformBlockName) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGetActiveUniformBlockName) PFNGLGETACTIVEUNIFORMBLOCKNAMEPROC; +#endif + PFNGLGETACTIVEUNIFORMBLOCKNAMEPROC _GetActiveUniformBlockName = (PFNGLGETACTIVEUNIFORMBLOCKNAMEPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::GetActiveUniformBlockName); + return _GetActiveUniformBlockName(program, uniformBlockIndex, bufSize, length, uniformBlockName); + +} +inline void DYNAMICGLES_FUNCTION(UniformBlockBinding)(GLuint program, GLuint uniformBlockIndex, GLuint uniformBlockBinding) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glUniformBlockBinding) PFNGLUNIFORMBLOCKBINDINGPROC; +#endif + PFNGLUNIFORMBLOCKBINDINGPROC _UniformBlockBinding = (PFNGLUNIFORMBLOCKBINDINGPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::UniformBlockBinding); + return _UniformBlockBinding(program, uniformBlockIndex, uniformBlockBinding); +} +inline void DYNAMICGLES_FUNCTION(DrawArraysInstanced)(GLenum mode, GLint first, GLsizei count, GLsizei instancecount) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glDrawArraysInstanced) PFNGLDRAWARRAYSINSTANCEDPROC; +#endif + PFNGLDRAWARRAYSINSTANCEDPROC _DrawArraysInstanced = (PFNGLDRAWARRAYSINSTANCEDPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::DrawArraysInstanced); + return _DrawArraysInstanced(mode, first, count, instancecount); +} +inline void DYNAMICGLES_FUNCTION(DrawElementsInstanced)(GLenum mode, GLsizei count, GLenum type, const void* indices, GLsizei instancecount) +{ +#if TARGET_OS_IPHONE 
+ typedef decltype(&glDrawElementsInstanced) PFNGLDRAWELEMENTSINSTANCEDPROC; +#endif + PFNGLDRAWELEMENTSINSTANCEDPROC _DrawElementsInstanced = (PFNGLDRAWELEMENTSINSTANCEDPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::DrawElementsInstanced); + return _DrawElementsInstanced(mode, count, type, indices, instancecount); +} +inline GLsync DYNAMICGLES_FUNCTION(FenceSync)(GLenum condition, GLbitfield flags) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glFenceSync) PFNGLFENCESYNCPROC; +#endif + PFNGLFENCESYNCPROC _FenceSync = (PFNGLFENCESYNCPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::FenceSync); + return _FenceSync(condition, flags); +} +inline GLboolean DYNAMICGLES_FUNCTION(IsSync)(GLsync sync) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glIsSync) PFNGLISSYNCPROC; +#endif + PFNGLISSYNCPROC _IsSync = (PFNGLISSYNCPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::IsSync); + return _IsSync(sync); +} +inline void DYNAMICGLES_FUNCTION(DeleteSync)(GLsync sync) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glDeleteSync) PFNGLDELETESYNCPROC; +#endif + PFNGLDELETESYNCPROC _DeleteSync = (PFNGLDELETESYNCPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::DeleteSync); + return _DeleteSync(sync); +} +inline GLenum DYNAMICGLES_FUNCTION(ClientWaitSync)(GLsync sync, GLbitfield flags, GLuint64 timeout) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glClientWaitSync) PFNGLCLIENTWAITSYNCPROC; +#endif + PFNGLCLIENTWAITSYNCPROC _ClientWaitSync = (PFNGLCLIENTWAITSYNCPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::ClientWaitSync); + return _ClientWaitSync(sync, flags, timeout); +} +inline void DYNAMICGLES_FUNCTION(WaitSync)(GLsync sync, GLbitfield flags, GLuint64 timeout) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glWaitSync) PFNGLWAITSYNCPROC; +#endif + PFNGLWAITSYNCPROC _WaitSync = (PFNGLWAITSYNCPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::WaitSync); + return _WaitSync(sync, flags, timeout); +} +inline void DYNAMICGLES_FUNCTION(GetInteger64v)(GLenum pname, GLint64* data) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGetInteger64v) PFNGLGETINTEGER64VPROC; +#endif + PFNGLGETINTEGER64VPROC _GetInteger64v = (PFNGLGETINTEGER64VPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::GetInteger64v); + return _GetInteger64v(pname, data); +} +inline void DYNAMICGLES_FUNCTION(GetSynciv)(GLsync sync, GLenum pname, GLsizei bufSize, GLsizei* length, GLint* values) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGetSynciv) PFNGLGETSYNCIVPROC; +#endif + PFNGLGETSYNCIVPROC _GetSynciv = (PFNGLGETSYNCIVPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::GetSynciv); + return _GetSynciv(sync, pname, bufSize, length, values); +} +inline void DYNAMICGLES_FUNCTION(GetInteger64i_v)(GLenum target, GLuint index, GLint64* data) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGetInteger64i_v) PFNGLGETINTEGER64I_VPROC; +#endif + PFNGLGETINTEGER64I_VPROC _GetInteger64i_v = (PFNGLGETINTEGER64I_VPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::GetInteger64i_v); + return _GetInteger64i_v(target, index, data); +} +inline void DYNAMICGLES_FUNCTION(GetBufferParameteri64v)(GLenum target, GLenum pname, GLint64* params) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGetBufferParameteri64v) PFNGLGETBUFFERPARAMETERI64VPROC; +#endif + PFNGLGETBUFFERPARAMETERI64VPROC _GetBufferParameteri64v = (PFNGLGETBUFFERPARAMETERI64VPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::GetBufferParameteri64v); + return 
_GetBufferParameteri64v(target, pname, params); +} +inline void DYNAMICGLES_FUNCTION(GenSamplers)(GLsizei count, GLuint* samplers) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGenSamplers) PFNGLGENSAMPLERSPROC; +#endif + PFNGLGENSAMPLERSPROC _GenSamplers = (PFNGLGENSAMPLERSPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::GenSamplers); + return _GenSamplers(count, samplers); +} +inline void DYNAMICGLES_FUNCTION(DeleteSamplers)(GLsizei count, const GLuint* samplers) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glDeleteSamplers) PFNGLDELETESAMPLERSPROC; +#endif + PFNGLDELETESAMPLERSPROC _DeleteSamplers = (PFNGLDELETESAMPLERSPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::DeleteSamplers); + return _DeleteSamplers(count, samplers); +} +inline GLboolean DYNAMICGLES_FUNCTION(IsSampler)(GLuint sampler) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glIsSampler) PFNGLISSAMPLERPROC; +#endif + PFNGLISSAMPLERPROC _IsSampler = (PFNGLISSAMPLERPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::IsSampler); + return _IsSampler(sampler); +} +inline void DYNAMICGLES_FUNCTION(BindSampler)(GLuint unit, GLuint sampler) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glBindSampler) PFNGLBINDSAMPLERPROC; +#endif + PFNGLBINDSAMPLERPROC _BindSampler = (PFNGLBINDSAMPLERPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::BindSampler); + return _BindSampler(unit, sampler); +} +inline void DYNAMICGLES_FUNCTION(SamplerParameteri)(GLuint sampler, GLenum pname, GLint param) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glSamplerParameteri) PFNGLSAMPLERPARAMETERIPROC; +#endif + PFNGLSAMPLERPARAMETERIPROC _SamplerParameteri = (PFNGLSAMPLERPARAMETERIPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::SamplerParameteri); + return _SamplerParameteri(sampler, pname, param); +} +inline void DYNAMICGLES_FUNCTION(SamplerParameteriv)(GLuint sampler, GLenum pname, const GLint* param) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glSamplerParameteriv) PFNGLSAMPLERPARAMETERIVPROC; +#endif + PFNGLSAMPLERPARAMETERIVPROC _SamplerParameteriv = (PFNGLSAMPLERPARAMETERIVPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::SamplerParameteriv); + return _SamplerParameteriv(sampler, pname, param); +} +inline void DYNAMICGLES_FUNCTION(SamplerParameterf)(GLuint sampler, GLenum pname, GLfloat param) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glSamplerParameterf) PFNGLSAMPLERPARAMETERFPROC; +#endif + PFNGLSAMPLERPARAMETERFPROC _SamplerParameterf = (PFNGLSAMPLERPARAMETERFPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::SamplerParameterf); + return _SamplerParameterf(sampler, pname, param); +} +inline void DYNAMICGLES_FUNCTION(SamplerParameterfv)(GLuint sampler, GLenum pname, const GLfloat* param) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glSamplerParameterfv) PFNGLSAMPLERPARAMETERFVPROC; +#endif + PFNGLSAMPLERPARAMETERFVPROC _SamplerParameterfv = (PFNGLSAMPLERPARAMETERFVPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::SamplerParameterfv); + return _SamplerParameterfv(sampler, pname, param); +} +inline void DYNAMICGLES_FUNCTION(GetSamplerParameteriv)(GLuint sampler, GLenum pname, GLint* params) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGetSamplerParameteriv) PFNGLGETSAMPLERPARAMETERIVPROC; +#endif + PFNGLGETSAMPLERPARAMETERIVPROC _GetSamplerParameteriv = (PFNGLGETSAMPLERPARAMETERIVPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::GetSamplerParameteriv); + return _GetSamplerParameteriv(sampler, pname, params); +} +inline void 
DYNAMICGLES_FUNCTION(GetSamplerParameterfv)(GLuint sampler, GLenum pname, GLfloat* params) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGetSamplerParameterfv) PFNGLGETSAMPLERPARAMETERFVPROC; +#endif + PFNGLGETSAMPLERPARAMETERFVPROC _GetSamplerParameterfv = (PFNGLGETSAMPLERPARAMETERFVPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::GetSamplerParameterfv); + return _GetSamplerParameterfv(sampler, pname, params); +} +inline void DYNAMICGLES_FUNCTION(VertexAttribDivisor)(GLuint index, GLuint divisor) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glVertexAttribDivisor) PFNGLVERTEXATTRIBDIVISORPROC; +#endif + PFNGLVERTEXATTRIBDIVISORPROC _VertexAttribDivisor = (PFNGLVERTEXATTRIBDIVISORPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::VertexAttribDivisor); + return _VertexAttribDivisor(index, divisor); +} +inline void DYNAMICGLES_FUNCTION(BindTransformFeedback)(GLenum target, GLuint id) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glBindTransformFeedback) PFNGLBINDTRANSFORMFEEDBACKPROC; +#endif + PFNGLBINDTRANSFORMFEEDBACKPROC _BindTransformFeedback = (PFNGLBINDTRANSFORMFEEDBACKPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::BindTransformFeedback); + return _BindTransformFeedback(target, id); +} +inline void DYNAMICGLES_FUNCTION(DeleteTransformFeedbacks)(GLsizei n, const GLuint* ids) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glDeleteTransformFeedbacks) PFNGLDELETETRANSFORMFEEDBACKSPROC; +#endif + PFNGLDELETETRANSFORMFEEDBACKSPROC _DeleteTransformFeedbacks = (PFNGLDELETETRANSFORMFEEDBACKSPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::DeleteTransformFeedbacks); + return _DeleteTransformFeedbacks(n, ids); +} +inline void DYNAMICGLES_FUNCTION(GenTransformFeedbacks)(GLsizei n, GLuint* ids) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGenTransformFeedbacks) PFNGLGENTRANSFORMFEEDBACKSPROC; +#endif + PFNGLGENTRANSFORMFEEDBACKSPROC _GenTransformFeedbacks = (PFNGLGENTRANSFORMFEEDBACKSPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::GenTransformFeedbacks); + return _GenTransformFeedbacks(n, ids); +} +inline GLboolean DYNAMICGLES_FUNCTION(IsTransformFeedback)(GLuint id) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glIsTransformFeedback) PFNGLISTRANSFORMFEEDBACKPROC; +#endif + PFNGLISTRANSFORMFEEDBACKPROC _IsTransformFeedback = (PFNGLISTRANSFORMFEEDBACKPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::IsTransformFeedback); + return _IsTransformFeedback(id); +} +inline void DYNAMICGLES_FUNCTION(PauseTransformFeedback)(void) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glPauseTransformFeedback) PFNGLPAUSETRANSFORMFEEDBACKPROC; +#endif + PFNGLPAUSETRANSFORMFEEDBACKPROC _PauseTransformFeedback = (PFNGLPAUSETRANSFORMFEEDBACKPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::PauseTransformFeedback); + return _PauseTransformFeedback(); +} +inline void DYNAMICGLES_FUNCTION(ResumeTransformFeedback)(void) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glResumeTransformFeedback) PFNGLRESUMETRANSFORMFEEDBACKPROC; +#endif + PFNGLRESUMETRANSFORMFEEDBACKPROC _ResumeTransformFeedback = (PFNGLRESUMETRANSFORMFEEDBACKPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::ResumeTransformFeedback); + return _ResumeTransformFeedback(); +} +inline void DYNAMICGLES_FUNCTION(GetProgramBinary)(GLuint program, GLsizei bufSize, GLsizei* length, GLenum* binaryFormat, void* binary) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGetProgramBinary) PFNGLGETPROGRAMBINARYPROC; +#endif + PFNGLGETPROGRAMBINARYPROC _GetProgramBinary = 
(PFNGLGETPROGRAMBINARYPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::GetProgramBinary); + return _GetProgramBinary(program, bufSize, length, binaryFormat, binary); +} +inline void DYNAMICGLES_FUNCTION(ProgramBinary)(GLuint program, GLenum binaryFormat, const void* binary, GLsizei length) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glProgramBinary) PFNGLPROGRAMBINARYPROC; +#endif + PFNGLPROGRAMBINARYPROC _ProgramBinary = (PFNGLPROGRAMBINARYPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::ProgramBinary); + return _ProgramBinary(program, binaryFormat, binary, length); +} +inline void DYNAMICGLES_FUNCTION(ProgramParameteri)(GLuint program, GLenum pname, GLint value) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glProgramParameteri) PFNGLPROGRAMPARAMETERIPROC; +#endif + PFNGLPROGRAMPARAMETERIPROC _ProgramParameteri = (PFNGLPROGRAMPARAMETERIPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::ProgramParameteri); + return _ProgramParameteri(program, pname, value); +} +inline void DYNAMICGLES_FUNCTION(InvalidateFramebuffer)(GLenum target, GLsizei numAttachments, const GLenum* attachments) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glInvalidateFramebuffer) PFNGLINVALIDATEFRAMEBUFFERPROC; +#endif + PFNGLINVALIDATEFRAMEBUFFERPROC _InvalidateFramebuffer = (PFNGLINVALIDATEFRAMEBUFFERPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::InvalidateFramebuffer); + return _InvalidateFramebuffer(target, numAttachments, attachments); +} +inline void DYNAMICGLES_FUNCTION(InvalidateSubFramebuffer)(GLenum target, GLsizei numAttachments, const GLenum* attachments, GLint x, GLint y, GLsizei width, GLsizei height) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glInvalidateSubFramebuffer) PFNGLINVALIDATESUBFRAMEBUFFERPROC; +#endif + PFNGLINVALIDATESUBFRAMEBUFFERPROC _InvalidateSubFramebuffer = (PFNGLINVALIDATESUBFRAMEBUFFERPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::InvalidateSubFramebuffer); + return _InvalidateSubFramebuffer(target, numAttachments, attachments, x, y, width, height); +} +inline void DYNAMICGLES_FUNCTION(TexStorage2D)(GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glTexStorage2D) PFNGLTEXSTORAGE2DPROC; +#endif + PFNGLTEXSTORAGE2DPROC _TexStorage2D = (PFNGLTEXSTORAGE2DPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::TexStorage2D); + return _TexStorage2D(target, levels, internalformat, width, height); +} +inline void DYNAMICGLES_FUNCTION(TexStorage3D)(GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glTexStorage3D) PFNGLTEXSTORAGE3DPROC; +#endif + PFNGLTEXSTORAGE3DPROC _TexStorage3D = (PFNGLTEXSTORAGE3DPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::TexStorage3D); + return _TexStorage3D(target, levels, internalformat, width, height, depth); +} +inline void DYNAMICGLES_FUNCTION(GetInternalformativ)(GLenum target, GLenum internalformat, GLenum pname, GLsizei bufSize, GLint* params) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGetInternalformativ) PFNGLGETINTERNALFORMATIVPROC; +#endif + PFNGLGETINTERNALFORMATIVPROC _GetInternalformativ = (PFNGLGETINTERNALFORMATIVPROC)gl::internals::getEs3Function(gl::internals::Gl3FuncName::GetInternalformativ); + return _GetInternalformativ(target, internalformat, pname, bufSize, params); +} +#ifndef DYNAMICGLES_NO_NAMESPACE +} +#elif TARGET_OS_IPHONE +} +} +#endif + +namespace gl { +namespace 
internals { +namespace Gl2FuncName { +enum OpenGLES2FunctionName +{ + ActiveTexture, AttachShader, BindAttribLocation, BindBuffer, BindFramebuffer, BindRenderbuffer, BindTexture, BlendColor, BlendEquation, BlendEquationSeparate, + BlendFunc, BlendFuncSeparate, BufferData, BufferSubData, CheckFramebufferStatus, Clear, ClearColor, ClearDepthf, ClearStencil, ColorMask, CompileShader, + CompressedTexImage2D, CompressedTexSubImage2D, CopyTexImage2D, CopyTexSubImage2D, CreateProgram, CreateShader, CullFace, DeleteBuffers, DeleteFramebuffers, + DeleteProgram, DeleteRenderbuffers, DeleteShader, DeleteTextures, DepthFunc, DepthMask, DepthRangef, DetachShader, Disable, DisableVertexAttribArray, + DrawArrays, DrawElements, Enable, EnableVertexAttribArray, Finish, Flush, FramebufferRenderbuffer, FramebufferTexture2D, FrontFace, GenBuffers, GenerateMipmap, + GenFramebuffers, GenRenderbuffers, GenTextures, GetActiveAttrib, GetActiveUniform, GetAttachedShaders, GetAttribLocation, GetBooleanv, GetBufferParameteriv, + GetError, GetFloatv, GetFramebufferAttachmentParameteriv, GetIntegerv, GetProgramiv, GetProgramInfoLog, GetRenderbufferParameteriv, GetShaderiv, GetShaderInfoLog, + GetShaderPrecisionFormat, GetShaderSource, GetString, GetTexParameterfv, GetTexParameteriv, GetUniformfv, GetUniformiv, GetUniformLocation, GetVertexAttribfv, + GetVertexAttribiv, GetVertexAttribPointerv, Hint, IsBuffer, IsEnabled, IsFramebuffer, IsProgram, IsRenderbuffer, IsShader, IsTexture, LineWidth, LinkProgram, + PixelStorei, PolygonOffset, ReadPixels, ReleaseShaderCompiler, RenderbufferStorage, SampleCoverage, Scissor, ShaderBinary, ShaderSource, StencilFunc, + StencilFuncSeparate, StencilMask, StencilMaskSeparate, StencilOp, StencilOpSeparate, TexImage2D, TexParameterf, TexParameterfv, TexParameteri, TexParameteriv, + TexSubImage2D, Uniform1f, Uniform1fv, Uniform1i, Uniform1iv, Uniform2f, Uniform2fv, Uniform2i, Uniform2iv, Uniform3f, Uniform3fv, Uniform3i, Uniform3iv, Uniform4f, + Uniform4fv, Uniform4i, Uniform4iv, UniformMatrix2fv, UniformMatrix3fv, UniformMatrix4fv, UseProgram, ValidateProgram, VertexAttrib1f, VertexAttrib1fv, + VertexAttrib2f, VertexAttrib2fv, VertexAttrib3f, VertexAttrib3fv, VertexAttrib4f, VertexAttrib4fv, VertexAttribPointer, Viewport, + NUMBER_OF_OPENGLES2_FUNCTIONS +}; +} + +inline void* getEs2Function(gl::internals::Gl2FuncName::OpenGLES2FunctionName funcname) +{ + static void* FunctionTable[Gl2FuncName::NUMBER_OF_OPENGLES2_FUNCTIONS]; + + // GET FUNCTION POINTERS --- ONCE!!!! 
/// + if (!FunctionTable[0]) + { +#if !TARGET_OS_IPHONE + pvr::lib::LIBTYPE lib = pvr::lib::openlib(gl::internals::libName); + if (!lib) + { + Log_Error("OpenGL ES Bindings: Failed to open library %s\n", internals::libName); + } + else + { + Log_Info("OpenGL ES Bindings: Successfully loaded library %s for OpenGL ES 2.0\n", libName); + } + + FunctionTable[Gl2FuncName::ActiveTexture] = pvr::lib::getLibFunctionChecked(lib, "glActiveTexture"); + FunctionTable[Gl2FuncName::AttachShader] = pvr::lib::getLibFunctionChecked(lib, "glAttachShader"); + FunctionTable[Gl2FuncName::BindAttribLocation] = pvr::lib::getLibFunctionChecked(lib, "glBindAttribLocation"); + FunctionTable[Gl2FuncName::BindBuffer] = pvr::lib::getLibFunctionChecked(lib, "glBindBuffer"); + FunctionTable[Gl2FuncName::BindFramebuffer] = pvr::lib::getLibFunctionChecked(lib, "glBindFramebuffer"); + FunctionTable[Gl2FuncName::BindRenderbuffer] = pvr::lib::getLibFunctionChecked(lib, "glBindRenderbuffer"); + FunctionTable[Gl2FuncName::BindTexture] = pvr::lib::getLibFunctionChecked(lib, "glBindTexture"); + FunctionTable[Gl2FuncName::BlendColor] = pvr::lib::getLibFunctionChecked(lib, "glBlendColor"); + FunctionTable[Gl2FuncName::BlendEquation] = pvr::lib::getLibFunctionChecked(lib, "glBlendEquation"); + FunctionTable[Gl2FuncName::BlendEquationSeparate] = pvr::lib::getLibFunctionChecked(lib, "glBlendEquationSeparate"); + FunctionTable[Gl2FuncName::BlendFunc] = pvr::lib::getLibFunctionChecked(lib, "glBlendFunc"); + FunctionTable[Gl2FuncName::BlendFuncSeparate] = pvr::lib::getLibFunctionChecked(lib, "glBlendFuncSeparate"); + FunctionTable[Gl2FuncName::BufferData] = pvr::lib::getLibFunctionChecked(lib, "glBufferData"); + FunctionTable[Gl2FuncName::BufferSubData] = pvr::lib::getLibFunctionChecked(lib, "glBufferSubData"); + FunctionTable[Gl2FuncName::CheckFramebufferStatus] = pvr::lib::getLibFunctionChecked(lib, "glCheckFramebufferStatus"); + FunctionTable[Gl2FuncName::Clear] = pvr::lib::getLibFunctionChecked(lib, "glClear"); + FunctionTable[Gl2FuncName::ClearColor] = pvr::lib::getLibFunctionChecked(lib, "glClearColor"); + FunctionTable[Gl2FuncName::ClearDepthf] = pvr::lib::getLibFunctionChecked(lib, "glClearDepthf"); + FunctionTable[Gl2FuncName::ClearStencil] = pvr::lib::getLibFunctionChecked(lib, "glClearStencil"); + FunctionTable[Gl2FuncName::ColorMask] = pvr::lib::getLibFunctionChecked(lib, "glColorMask"); + FunctionTable[Gl2FuncName::CompileShader] = pvr::lib::getLibFunctionChecked(lib, "glCompileShader"); + FunctionTable[Gl2FuncName::CompressedTexImage2D] = pvr::lib::getLibFunctionChecked(lib, "glCompressedTexImage2D"); + FunctionTable[Gl2FuncName::CompressedTexSubImage2D] = pvr::lib::getLibFunctionChecked(lib, "glCompressedTexSubImage2D"); + FunctionTable[Gl2FuncName::CopyTexImage2D] = pvr::lib::getLibFunctionChecked(lib, "glCopyTexImage2D"); + FunctionTable[Gl2FuncName::CopyTexSubImage2D] = pvr::lib::getLibFunctionChecked(lib, "glCopyTexSubImage2D"); + FunctionTable[Gl2FuncName::CreateProgram] = pvr::lib::getLibFunctionChecked(lib, "glCreateProgram"); + FunctionTable[Gl2FuncName::CreateShader] = pvr::lib::getLibFunctionChecked(lib, "glCreateShader"); + FunctionTable[Gl2FuncName::CullFace] = pvr::lib::getLibFunctionChecked(lib, "glCullFace"); + FunctionTable[Gl2FuncName::DeleteBuffers] = pvr::lib::getLibFunctionChecked(lib, "glDeleteBuffers"); + FunctionTable[Gl2FuncName::DeleteFramebuffers] = pvr::lib::getLibFunctionChecked(lib, "glDeleteFramebuffers"); + FunctionTable[Gl2FuncName::DeleteProgram] = pvr::lib::getLibFunctionChecked(lib, 
"glDeleteProgram"); + FunctionTable[Gl2FuncName::DeleteRenderbuffers] = pvr::lib::getLibFunctionChecked(lib, "glDeleteRenderbuffers"); + FunctionTable[Gl2FuncName::DeleteShader] = pvr::lib::getLibFunctionChecked(lib, "glDeleteShader"); + FunctionTable[Gl2FuncName::DeleteTextures] = pvr::lib::getLibFunctionChecked(lib, "glDeleteTextures"); + FunctionTable[Gl2FuncName::DepthFunc] = pvr::lib::getLibFunctionChecked(lib, "glDepthFunc"); + FunctionTable[Gl2FuncName::DepthMask] = pvr::lib::getLibFunctionChecked(lib, "glDepthMask"); + FunctionTable[Gl2FuncName::DepthRangef] = pvr::lib::getLibFunctionChecked(lib, "glDepthRangef"); + FunctionTable[Gl2FuncName::DetachShader] = pvr::lib::getLibFunctionChecked(lib, "glDetachShader"); + FunctionTable[Gl2FuncName::Disable] = pvr::lib::getLibFunctionChecked(lib, "glDisable"); + FunctionTable[Gl2FuncName::DisableVertexAttribArray] = pvr::lib::getLibFunctionChecked(lib, "glDisableVertexAttribArray"); + FunctionTable[Gl2FuncName::DrawArrays] = pvr::lib::getLibFunctionChecked(lib, "glDrawArrays"); + FunctionTable[Gl2FuncName::DrawElements] = pvr::lib::getLibFunctionChecked(lib, "glDrawElements"); + FunctionTable[Gl2FuncName::Enable] = pvr::lib::getLibFunctionChecked(lib, "glEnable"); + FunctionTable[Gl2FuncName::EnableVertexAttribArray] = pvr::lib::getLibFunctionChecked(lib, "glEnableVertexAttribArray"); + FunctionTable[Gl2FuncName::Finish] = pvr::lib::getLibFunctionChecked(lib, "glFinish"); + FunctionTable[Gl2FuncName::Flush] = pvr::lib::getLibFunctionChecked(lib, "glFlush"); + FunctionTable[Gl2FuncName::FramebufferRenderbuffer] = pvr::lib::getLibFunctionChecked(lib, "glFramebufferRenderbuffer"); + FunctionTable[Gl2FuncName::FramebufferTexture2D] = pvr::lib::getLibFunctionChecked(lib, "glFramebufferTexture2D"); + FunctionTable[Gl2FuncName::FrontFace] = pvr::lib::getLibFunctionChecked(lib, "glFrontFace"); + FunctionTable[Gl2FuncName::GenBuffers] = pvr::lib::getLibFunctionChecked(lib, "glGenBuffers"); + FunctionTable[Gl2FuncName::GenerateMipmap] = pvr::lib::getLibFunctionChecked(lib, "glGenerateMipmap"); + FunctionTable[Gl2FuncName::GenFramebuffers] = pvr::lib::getLibFunctionChecked(lib, "glGenFramebuffers"); + FunctionTable[Gl2FuncName::GenRenderbuffers] = pvr::lib::getLibFunctionChecked(lib, "glGenRenderbuffers"); + FunctionTable[Gl2FuncName::GenTextures] = pvr::lib::getLibFunctionChecked(lib, "glGenTextures"); + FunctionTable[Gl2FuncName::GetActiveAttrib] = pvr::lib::getLibFunctionChecked(lib, "glGetActiveAttrib"); + FunctionTable[Gl2FuncName::GetActiveUniform] = pvr::lib::getLibFunctionChecked(lib, "glGetActiveUniform"); + FunctionTable[Gl2FuncName::GetAttachedShaders] = pvr::lib::getLibFunctionChecked(lib, "glGetAttachedShaders"); + FunctionTable[Gl2FuncName::GetAttribLocation] = pvr::lib::getLibFunctionChecked(lib, "glGetAttribLocation"); + FunctionTable[Gl2FuncName::GetBooleanv] = pvr::lib::getLibFunctionChecked(lib, "glGetBooleanv"); + FunctionTable[Gl2FuncName::GetBufferParameteriv] = pvr::lib::getLibFunctionChecked(lib, "glGetBufferParameteriv"); + FunctionTable[Gl2FuncName::GetError] = pvr::lib::getLibFunctionChecked(lib, "glGetError"); + FunctionTable[Gl2FuncName::GetFloatv] = pvr::lib::getLibFunctionChecked(lib, "glGetFloatv"); + FunctionTable[Gl2FuncName::GetFramebufferAttachmentParameteriv] = pvr::lib::getLibFunctionChecked(lib, "glGetFramebufferAttachmentParameteriv"); + FunctionTable[Gl2FuncName::GetIntegerv] = pvr::lib::getLibFunctionChecked(lib, "glGetIntegerv"); + FunctionTable[Gl2FuncName::GetProgramiv] = 
pvr::lib::getLibFunctionChecked(lib, "glGetProgramiv"); + FunctionTable[Gl2FuncName::GetProgramInfoLog] = pvr::lib::getLibFunctionChecked(lib, "glGetProgramInfoLog"); + FunctionTable[Gl2FuncName::GetRenderbufferParameteriv] = pvr::lib::getLibFunctionChecked(lib, "glGetRenderbufferParameteriv"); + FunctionTable[Gl2FuncName::GetShaderiv] = pvr::lib::getLibFunctionChecked(lib, "glGetShaderiv"); + FunctionTable[Gl2FuncName::GetShaderInfoLog] = pvr::lib::getLibFunctionChecked(lib, "glGetShaderInfoLog"); + FunctionTable[Gl2FuncName::GetShaderPrecisionFormat] = pvr::lib::getLibFunctionChecked(lib, "glGetShaderPrecisionFormat"); + FunctionTable[Gl2FuncName::GetShaderSource] = pvr::lib::getLibFunctionChecked(lib, "glGetShaderSource"); + FunctionTable[Gl2FuncName::GetString] = pvr::lib::getLibFunctionChecked(lib, "glGetString"); + FunctionTable[Gl2FuncName::GetTexParameterfv] = pvr::lib::getLibFunctionChecked(lib, "glGetTexParameterfv"); + FunctionTable[Gl2FuncName::GetTexParameteriv] = pvr::lib::getLibFunctionChecked(lib, "glGetTexParameteriv"); + FunctionTable[Gl2FuncName::GetUniformfv] = pvr::lib::getLibFunctionChecked(lib, "glGetUniformfv"); + FunctionTable[Gl2FuncName::GetUniformiv] = pvr::lib::getLibFunctionChecked(lib, "glGetUniformiv"); + FunctionTable[Gl2FuncName::GetUniformLocation] = pvr::lib::getLibFunctionChecked(lib, "glGetUniformLocation"); + FunctionTable[Gl2FuncName::GetVertexAttribfv] = pvr::lib::getLibFunctionChecked(lib, "glGetVertexAttribfv"); + FunctionTable[Gl2FuncName::GetVertexAttribiv] = pvr::lib::getLibFunctionChecked(lib, "glGetVertexAttribiv"); + FunctionTable[Gl2FuncName::GetVertexAttribPointerv] = pvr::lib::getLibFunctionChecked(lib, "glGetVertexAttribPointerv"); + FunctionTable[Gl2FuncName::Hint] = pvr::lib::getLibFunctionChecked(lib, "glHint"); + FunctionTable[Gl2FuncName::IsBuffer] = pvr::lib::getLibFunctionChecked(lib, "glIsBuffer"); + FunctionTable[Gl2FuncName::IsEnabled] = pvr::lib::getLibFunctionChecked(lib, "glIsEnabled"); + FunctionTable[Gl2FuncName::IsFramebuffer] = pvr::lib::getLibFunctionChecked(lib, "glIsFramebuffer"); + FunctionTable[Gl2FuncName::IsProgram] = pvr::lib::getLibFunctionChecked(lib, "glIsProgram"); + FunctionTable[Gl2FuncName::IsRenderbuffer] = pvr::lib::getLibFunctionChecked(lib, "glIsRenderbuffer"); + FunctionTable[Gl2FuncName::IsShader] = pvr::lib::getLibFunctionChecked(lib, "glIsShader"); + FunctionTable[Gl2FuncName::IsTexture] = pvr::lib::getLibFunctionChecked(lib, "glIsTexture"); + FunctionTable[Gl2FuncName::LineWidth] = pvr::lib::getLibFunctionChecked(lib, "glLineWidth"); + FunctionTable[Gl2FuncName::LinkProgram] = pvr::lib::getLibFunctionChecked(lib, "glLinkProgram"); + FunctionTable[Gl2FuncName::PixelStorei] = pvr::lib::getLibFunctionChecked(lib, "glPixelStorei"); + FunctionTable[Gl2FuncName::PolygonOffset] = pvr::lib::getLibFunctionChecked(lib, "glPolygonOffset"); + FunctionTable[Gl2FuncName::ReadPixels] = pvr::lib::getLibFunctionChecked(lib, "glReadPixels"); + FunctionTable[Gl2FuncName::ReleaseShaderCompiler] = pvr::lib::getLibFunctionChecked(lib, "glReleaseShaderCompiler"); + FunctionTable[Gl2FuncName::RenderbufferStorage] = pvr::lib::getLibFunctionChecked(lib, "glRenderbufferStorage"); + FunctionTable[Gl2FuncName::SampleCoverage] = pvr::lib::getLibFunctionChecked(lib, "glSampleCoverage"); + FunctionTable[Gl2FuncName::Scissor] = pvr::lib::getLibFunctionChecked(lib, "glScissor"); + FunctionTable[Gl2FuncName::ShaderBinary] = pvr::lib::getLibFunctionChecked(lib, "glShaderBinary"); + FunctionTable[Gl2FuncName::ShaderSource] = 
pvr::lib::getLibFunctionChecked(lib, "glShaderSource"); + FunctionTable[Gl2FuncName::StencilFunc] = pvr::lib::getLibFunctionChecked(lib, "glStencilFunc"); + FunctionTable[Gl2FuncName::StencilFuncSeparate] = pvr::lib::getLibFunctionChecked(lib, "glStencilFuncSeparate"); + FunctionTable[Gl2FuncName::StencilMask] = pvr::lib::getLibFunctionChecked(lib, "glStencilMask"); + FunctionTable[Gl2FuncName::StencilMaskSeparate] = pvr::lib::getLibFunctionChecked(lib, "glStencilMaskSeparate"); + FunctionTable[Gl2FuncName::StencilOp] = pvr::lib::getLibFunctionChecked(lib, "glStencilOp"); + FunctionTable[Gl2FuncName::StencilOpSeparate] = pvr::lib::getLibFunctionChecked(lib, "glStencilOpSeparate"); + FunctionTable[Gl2FuncName::TexImage2D] = pvr::lib::getLibFunctionChecked(lib, "glTexImage2D"); + FunctionTable[Gl2FuncName::TexParameterf] = pvr::lib::getLibFunctionChecked(lib, "glTexParameterf"); + FunctionTable[Gl2FuncName::TexParameterfv] = pvr::lib::getLibFunctionChecked(lib, "glTexParameterfv"); + FunctionTable[Gl2FuncName::TexParameteri] = pvr::lib::getLibFunctionChecked(lib, "glTexParameteri"); + FunctionTable[Gl2FuncName::TexParameteriv] = pvr::lib::getLibFunctionChecked(lib, "glTexParameteriv"); + FunctionTable[Gl2FuncName::TexSubImage2D] = pvr::lib::getLibFunctionChecked(lib, "glTexSubImage2D"); + FunctionTable[Gl2FuncName::Uniform1f] = pvr::lib::getLibFunctionChecked(lib, "glUniform1f"); + FunctionTable[Gl2FuncName::Uniform1fv] = pvr::lib::getLibFunctionChecked(lib, "glUniform1fv"); + FunctionTable[Gl2FuncName::Uniform1i] = pvr::lib::getLibFunctionChecked(lib, "glUniform1i"); + FunctionTable[Gl2FuncName::Uniform1iv] = pvr::lib::getLibFunctionChecked(lib, "glUniform1iv"); + FunctionTable[Gl2FuncName::Uniform2f] = pvr::lib::getLibFunctionChecked(lib, "glUniform2f"); + FunctionTable[Gl2FuncName::Uniform2fv] = pvr::lib::getLibFunctionChecked(lib, "glUniform2fv"); + FunctionTable[Gl2FuncName::Uniform2i] = pvr::lib::getLibFunctionChecked(lib, "glUniform2i"); + FunctionTable[Gl2FuncName::Uniform2iv] = pvr::lib::getLibFunctionChecked(lib, "glUniform2iv"); + FunctionTable[Gl2FuncName::Uniform3f] = pvr::lib::getLibFunctionChecked(lib, "glUniform3f"); + FunctionTable[Gl2FuncName::Uniform3fv] = pvr::lib::getLibFunctionChecked(lib, "glUniform3fv"); + FunctionTable[Gl2FuncName::Uniform3i] = pvr::lib::getLibFunctionChecked(lib, "glUniform3i"); + FunctionTable[Gl2FuncName::Uniform3iv] = pvr::lib::getLibFunctionChecked(lib, "glUniform3iv"); + FunctionTable[Gl2FuncName::Uniform4f] = pvr::lib::getLibFunctionChecked(lib, "glUniform4f"); + FunctionTable[Gl2FuncName::Uniform4fv] = pvr::lib::getLibFunctionChecked(lib, "glUniform4fv"); + FunctionTable[Gl2FuncName::Uniform4i] = pvr::lib::getLibFunctionChecked(lib, "glUniform4i"); + FunctionTable[Gl2FuncName::Uniform4iv] = pvr::lib::getLibFunctionChecked(lib, "glUniform4iv"); + FunctionTable[Gl2FuncName::UniformMatrix2fv] = pvr::lib::getLibFunctionChecked(lib, "glUniformMatrix2fv"); + FunctionTable[Gl2FuncName::UniformMatrix3fv] = pvr::lib::getLibFunctionChecked(lib, "glUniformMatrix3fv"); + FunctionTable[Gl2FuncName::UniformMatrix4fv] = pvr::lib::getLibFunctionChecked(lib, "glUniformMatrix4fv"); + FunctionTable[Gl2FuncName::UseProgram] = pvr::lib::getLibFunctionChecked(lib, "glUseProgram"); + FunctionTable[Gl2FuncName::ValidateProgram] = pvr::lib::getLibFunctionChecked(lib, "glValidateProgram"); + FunctionTable[Gl2FuncName::VertexAttrib1f] = pvr::lib::getLibFunctionChecked(lib, "glVertexAttrib1f"); + FunctionTable[Gl2FuncName::VertexAttrib1fv] = 
pvr::lib::getLibFunctionChecked(lib, "glVertexAttrib1fv"); + FunctionTable[Gl2FuncName::VertexAttrib2f] = pvr::lib::getLibFunctionChecked(lib, "glVertexAttrib2f"); + FunctionTable[Gl2FuncName::VertexAttrib2fv] = pvr::lib::getLibFunctionChecked(lib, "glVertexAttrib2fv"); + FunctionTable[Gl2FuncName::VertexAttrib3f] = pvr::lib::getLibFunctionChecked(lib, "glVertexAttrib3f"); + FunctionTable[Gl2FuncName::VertexAttrib3fv] = pvr::lib::getLibFunctionChecked(lib, "glVertexAttrib3fv"); + FunctionTable[Gl2FuncName::VertexAttrib4f] = pvr::lib::getLibFunctionChecked(lib, "glVertexAttrib4f"); + FunctionTable[Gl2FuncName::VertexAttrib4fv] = pvr::lib::getLibFunctionChecked(lib, "glVertexAttrib4fv"); + FunctionTable[Gl2FuncName::VertexAttribPointer] = pvr::lib::getLibFunctionChecked(lib, "glVertexAttribPointer"); + FunctionTable[Gl2FuncName::Viewport] = pvr::lib::getLibFunctionChecked(lib, "glViewport"); +#else + FunctionTable[Gl2FuncName::ActiveTexture] = (void*)&glActiveTexture; + FunctionTable[Gl2FuncName::AttachShader] = (void*)&glAttachShader; + FunctionTable[Gl2FuncName::BindAttribLocation] = (void*)&glBindAttribLocation; + FunctionTable[Gl2FuncName::BindBuffer] = (void*)&glBindBuffer; + FunctionTable[Gl2FuncName::BindFramebuffer] = (void*)&glBindFramebuffer; + FunctionTable[Gl2FuncName::BindRenderbuffer] = (void*)&glBindRenderbuffer; + FunctionTable[Gl2FuncName::BindTexture] = (void*)&glBindTexture; + FunctionTable[Gl2FuncName::BlendColor] = (void*)&glBlendColor; + FunctionTable[Gl2FuncName::BlendEquation] = (void*)&glBlendEquation; + FunctionTable[Gl2FuncName::BlendEquationSeparate] = (void*)&glBlendEquationSeparate; + FunctionTable[Gl2FuncName::BlendFunc] = (void*)&glBlendFunc; + FunctionTable[Gl2FuncName::BlendFuncSeparate] = (void*)&glBlendFuncSeparate; + FunctionTable[Gl2FuncName::BufferData] = (void*)&glBufferData; + FunctionTable[Gl2FuncName::BufferSubData] = (void*)&glBufferSubData; + FunctionTable[Gl2FuncName::CheckFramebufferStatus] = (void*)&glCheckFramebufferStatus; + FunctionTable[Gl2FuncName::Clear] = (void*)&glClear; + FunctionTable[Gl2FuncName::ClearColor] = (void*)&glClearColor; + FunctionTable[Gl2FuncName::ClearDepthf] = (void*)&glClearDepthf; + FunctionTable[Gl2FuncName::ClearStencil] = (void*)&glClearStencil; + FunctionTable[Gl2FuncName::ColorMask] = (void*)&glColorMask; + FunctionTable[Gl2FuncName::CompileShader] = (void*)&glCompileShader; + FunctionTable[Gl2FuncName::CompressedTexImage2D] = (void*)&glCompressedTexImage2D; + FunctionTable[Gl2FuncName::CompressedTexSubImage2D] = (void*)&glCompressedTexSubImage2D; + FunctionTable[Gl2FuncName::CopyTexImage2D] = (void*)&glCopyTexImage2D; + FunctionTable[Gl2FuncName::CopyTexSubImage2D] = (void*)&glCopyTexSubImage2D; + FunctionTable[Gl2FuncName::CreateProgram] = (void*)&glCreateProgram; + FunctionTable[Gl2FuncName::CreateShader] = (void*)&glCreateShader; + FunctionTable[Gl2FuncName::CullFace] = (void*)&glCullFace; + FunctionTable[Gl2FuncName::DeleteBuffers] = (void*)&glDeleteBuffers; + FunctionTable[Gl2FuncName::DeleteFramebuffers] = (void*)&glDeleteFramebuffers; + FunctionTable[Gl2FuncName::DeleteProgram] = (void*)&glDeleteProgram; + FunctionTable[Gl2FuncName::DeleteRenderbuffers] = (void*)&glDeleteRenderbuffers; + FunctionTable[Gl2FuncName::DeleteShader] = (void*)&glDeleteShader; + FunctionTable[Gl2FuncName::DeleteTextures] = (void*)&glDeleteTextures; + FunctionTable[Gl2FuncName::DepthFunc] = (void*)&glDepthFunc; + FunctionTable[Gl2FuncName::DepthMask] = (void*)&glDepthMask; + FunctionTable[Gl2FuncName::DepthRangef] = 
(void*)&glDepthRangef; + FunctionTable[Gl2FuncName::DetachShader] = (void*)&glDetachShader; + FunctionTable[Gl2FuncName::Disable] = (void*)&glDisable; + FunctionTable[Gl2FuncName::DisableVertexAttribArray] = (void*)&glDisableVertexAttribArray; + FunctionTable[Gl2FuncName::DrawArrays] = (void*)&glDrawArrays; + FunctionTable[Gl2FuncName::DrawElements] = (void*)&glDrawElements; + FunctionTable[Gl2FuncName::Enable] = (void*)&glEnable; + FunctionTable[Gl2FuncName::EnableVertexAttribArray] = (void*)&glEnableVertexAttribArray; + FunctionTable[Gl2FuncName::Finish] = (void*)&glFinish; + FunctionTable[Gl2FuncName::Flush] = (void*)&glFlush; + FunctionTable[Gl2FuncName::FramebufferRenderbuffer] = (void*)&glFramebufferRenderbuffer; + FunctionTable[Gl2FuncName::FramebufferTexture2D] = (void*)&glFramebufferTexture2D; + FunctionTable[Gl2FuncName::FrontFace] = (void*)&glFrontFace; + FunctionTable[Gl2FuncName::GenBuffers] = (void*)&glGenBuffers; + FunctionTable[Gl2FuncName::GenerateMipmap] = (void*)&glGenerateMipmap; + FunctionTable[Gl2FuncName::GenFramebuffers] = (void*)&glGenFramebuffers; + FunctionTable[Gl2FuncName::GenRenderbuffers] = (void*)&glGenRenderbuffers; + FunctionTable[Gl2FuncName::GenTextures] = (void*)&glGenTextures; + FunctionTable[Gl2FuncName::GetActiveAttrib] = (void*)&glGetActiveAttrib; + FunctionTable[Gl2FuncName::GetActiveUniform] = (void*)&glGetActiveUniform; + FunctionTable[Gl2FuncName::GetAttachedShaders] = (void*)&glGetAttachedShaders; + FunctionTable[Gl2FuncName::GetAttribLocation] = (void*)&glGetAttribLocation; + FunctionTable[Gl2FuncName::GetBooleanv] = (void*)&glGetBooleanv; + FunctionTable[Gl2FuncName::GetBufferParameteriv] = (void*)&glGetBufferParameteriv; + FunctionTable[Gl2FuncName::GetError] = (void*)&glGetError; + FunctionTable[Gl2FuncName::GetFloatv] = (void*)&glGetFloatv; + FunctionTable[Gl2FuncName::GetFramebufferAttachmentParameteriv] = (void*)&glGetFramebufferAttachmentParameteriv; + FunctionTable[Gl2FuncName::GetIntegerv] = (void*)&glGetIntegerv; + FunctionTable[Gl2FuncName::GetProgramiv] = (void*)&glGetProgramiv; + FunctionTable[Gl2FuncName::GetProgramInfoLog] = (void*)&glGetProgramInfoLog; + FunctionTable[Gl2FuncName::GetRenderbufferParameteriv] = (void*)&glGetRenderbufferParameteriv; + FunctionTable[Gl2FuncName::GetShaderiv] = (void*)&glGetShaderiv; + FunctionTable[Gl2FuncName::GetShaderInfoLog] = (void*)&glGetShaderInfoLog; + FunctionTable[Gl2FuncName::GetShaderPrecisionFormat] = (void*)&glGetShaderPrecisionFormat; + FunctionTable[Gl2FuncName::GetShaderSource] = (void*)&glGetShaderSource; + FunctionTable[Gl2FuncName::GetString] = (void*)&glGetString; + FunctionTable[Gl2FuncName::GetTexParameterfv] = (void*)&glGetTexParameterfv; + FunctionTable[Gl2FuncName::GetTexParameteriv] = (void*)&glGetTexParameteriv; + FunctionTable[Gl2FuncName::GetUniformfv] = (void*)&glGetUniformfv; + FunctionTable[Gl2FuncName::GetUniformiv] = (void*)&glGetUniformiv; + FunctionTable[Gl2FuncName::GetUniformLocation] = (void*)&glGetUniformLocation; + FunctionTable[Gl2FuncName::GetVertexAttribfv] = (void*)&glGetVertexAttribfv; + FunctionTable[Gl2FuncName::GetVertexAttribiv] = (void*)&glGetVertexAttribiv; + FunctionTable[Gl2FuncName::GetVertexAttribPointerv] = (void*)&glGetVertexAttribPointerv; + FunctionTable[Gl2FuncName::Hint] = (void*)&glHint; + FunctionTable[Gl2FuncName::IsBuffer] = (void*)&glIsBuffer; + FunctionTable[Gl2FuncName::IsEnabled] = (void*)&glIsEnabled; + FunctionTable[Gl2FuncName::IsFramebuffer] = (void*)&glIsFramebuffer; + FunctionTable[Gl2FuncName::IsProgram] = 
(void*)&glIsProgram; + FunctionTable[Gl2FuncName::IsRenderbuffer] = (void*)&glIsRenderbuffer; + FunctionTable[Gl2FuncName::IsShader] = (void*)&glIsShader; + FunctionTable[Gl2FuncName::IsTexture] = (void*)&glIsTexture; + FunctionTable[Gl2FuncName::LineWidth] = (void*)&glLineWidth; + FunctionTable[Gl2FuncName::LinkProgram] = (void*)&glLinkProgram; + FunctionTable[Gl2FuncName::PixelStorei] = (void*)&glPixelStorei; + FunctionTable[Gl2FuncName::PolygonOffset] = (void*)&glPolygonOffset; + FunctionTable[Gl2FuncName::ReadPixels] = (void*)&glReadPixels; + FunctionTable[Gl2FuncName::ReleaseShaderCompiler] = (void*)&glReleaseShaderCompiler; + FunctionTable[Gl2FuncName::RenderbufferStorage] = (void*)&glRenderbufferStorage; + FunctionTable[Gl2FuncName::SampleCoverage] = (void*)&glSampleCoverage; + FunctionTable[Gl2FuncName::Scissor] = (void*)&glScissor; + FunctionTable[Gl2FuncName::ShaderBinary] = (void*)&glShaderBinary; + FunctionTable[Gl2FuncName::ShaderSource] = (void*)&glShaderSource; + FunctionTable[Gl2FuncName::StencilFunc] = (void*)&glStencilFunc; + FunctionTable[Gl2FuncName::StencilFuncSeparate] = (void*)&glStencilFuncSeparate; + FunctionTable[Gl2FuncName::StencilMask] = (void*)&glStencilMask; + FunctionTable[Gl2FuncName::StencilMaskSeparate] = (void*)&glStencilMaskSeparate; + FunctionTable[Gl2FuncName::StencilOp] = (void*)&glStencilOp; + FunctionTable[Gl2FuncName::StencilOpSeparate] = (void*)&glStencilOpSeparate; + FunctionTable[Gl2FuncName::TexImage2D] = (void*)&glTexImage2D; + FunctionTable[Gl2FuncName::TexParameterf] = (void*)&glTexParameterf; + FunctionTable[Gl2FuncName::TexParameterfv] = (void*)&glTexParameterfv; + FunctionTable[Gl2FuncName::TexParameteri] = (void*)&glTexParameteri; + FunctionTable[Gl2FuncName::TexParameteriv] = (void*)&glTexParameteriv; + FunctionTable[Gl2FuncName::TexSubImage2D] = (void*)&glTexSubImage2D; + FunctionTable[Gl2FuncName::Uniform1f] = (void*)&glUniform1f; + FunctionTable[Gl2FuncName::Uniform1fv] = (void*)&glUniform1fv; + FunctionTable[Gl2FuncName::Uniform1i] = (void*)&glUniform1i; + FunctionTable[Gl2FuncName::Uniform1iv] = (void*)&glUniform1iv; + FunctionTable[Gl2FuncName::Uniform2f] = (void*)&glUniform2f; + FunctionTable[Gl2FuncName::Uniform2fv] = (void*)&glUniform2fv; + FunctionTable[Gl2FuncName::Uniform2i] = (void*)&glUniform2i; + FunctionTable[Gl2FuncName::Uniform2iv] = (void*)&glUniform2iv; + FunctionTable[Gl2FuncName::Uniform3f] = (void*)&glUniform3f; + FunctionTable[Gl2FuncName::Uniform3fv] = (void*)&glUniform3fv; + FunctionTable[Gl2FuncName::Uniform3i] = (void*)&glUniform3i; + FunctionTable[Gl2FuncName::Uniform3iv] = (void*)&glUniform3iv; + FunctionTable[Gl2FuncName::Uniform4f] = (void*)&glUniform4f; + FunctionTable[Gl2FuncName::Uniform4fv] = (void*)&glUniform4fv; + FunctionTable[Gl2FuncName::Uniform4i] = (void*)&glUniform4i; + FunctionTable[Gl2FuncName::Uniform4iv] = (void*)&glUniform4iv; + FunctionTable[Gl2FuncName::UniformMatrix2fv] = (void*)&glUniformMatrix2fv; + FunctionTable[Gl2FuncName::UniformMatrix3fv] = (void*)&glUniformMatrix3fv; + FunctionTable[Gl2FuncName::UniformMatrix4fv] = (void*)&glUniformMatrix4fv; + FunctionTable[Gl2FuncName::UseProgram] = (void*)&glUseProgram; + FunctionTable[Gl2FuncName::ValidateProgram] = (void*)&glValidateProgram; + FunctionTable[Gl2FuncName::VertexAttrib1f] = (void*)&glVertexAttrib1f; + FunctionTable[Gl2FuncName::VertexAttrib1fv] = (void*)&glVertexAttrib1fv; + FunctionTable[Gl2FuncName::VertexAttrib2f] = (void*)&glVertexAttrib2f; + FunctionTable[Gl2FuncName::VertexAttrib2fv] = (void*)&glVertexAttrib2fv; + 
FunctionTable[Gl2FuncName::VertexAttrib3f] = (void*)&glVertexAttrib3f; + FunctionTable[Gl2FuncName::VertexAttrib3fv] = (void*)&glVertexAttrib3fv; + FunctionTable[Gl2FuncName::VertexAttrib4f] = (void*)&glVertexAttrib4f; + FunctionTable[Gl2FuncName::VertexAttrib4fv] = (void*)&glVertexAttrib4fv; + FunctionTable[Gl2FuncName::VertexAttribPointer] = (void*)&glVertexAttribPointer; + FunctionTable[Gl2FuncName::Viewport] = (void*)&glViewport; + +#endif + } + return FunctionTable[funcname]; +} +} +} + + +#ifndef DYNAMICGLES_NO_NAMESPACE +namespace gl { +#elif TARGET_OS_IPHONE +namespace gl { +namespace internals { +#endif + +inline void DYNAMICGLES_FUNCTION(ActiveTexture)(GLenum texture) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glActiveTexture) PFNGLACTIVETEXTUREPROC; +#endif + PFNGLACTIVETEXTUREPROC _ActiveTexture = (PFNGLACTIVETEXTUREPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::ActiveTexture); + return _ActiveTexture(texture); +} + +inline void DYNAMICGLES_FUNCTION(AttachShader)(GLuint program, GLuint shader) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glAttachShader) PFNGLATTACHSHADERPROC; +#endif + PFNGLATTACHSHADERPROC _AttachShader = (PFNGLATTACHSHADERPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::AttachShader); + return _AttachShader(program, shader); +} +inline void DYNAMICGLES_FUNCTION(BindAttribLocation)(GLuint program, GLuint index, const GLchar* name) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glBindAttribLocation) PFNGLBINDATTRIBLOCATIONPROC; +#endif + PFNGLBINDATTRIBLOCATIONPROC _BindAttribLocation = (PFNGLBINDATTRIBLOCATIONPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::BindAttribLocation); + return _BindAttribLocation(program, index, name); +} +inline void DYNAMICGLES_FUNCTION(BindBuffer)(GLenum target, GLuint buffer) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glBindBuffer) PFNGLBINDBUFFERPROC; +#endif + PFNGLBINDBUFFERPROC _BindBuffer = (PFNGLBINDBUFFERPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::BindBuffer); + return _BindBuffer(target, buffer); +} +inline void DYNAMICGLES_FUNCTION(BindFramebuffer)(GLenum target, GLuint framebuffer) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glBindFramebuffer) PFNGLBINDFRAMEBUFFERPROC; +#endif + PFNGLBINDFRAMEBUFFERPROC _BindFramebuffer = (PFNGLBINDFRAMEBUFFERPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::BindFramebuffer); + return _BindFramebuffer(target, framebuffer); +} +inline void DYNAMICGLES_FUNCTION(BindRenderbuffer)(GLenum target, GLuint renderbuffer) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glBindRenderbuffer) PFNGLBINDRENDERBUFFERPROC; +#endif + PFNGLBINDRENDERBUFFERPROC _BindRenderbuffer = (PFNGLBINDRENDERBUFFERPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::BindRenderbuffer); + return _BindRenderbuffer(target, renderbuffer); +} +inline void DYNAMICGLES_FUNCTION(BindTexture)(GLenum target, GLuint texture) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glBindTexture) PFNGLBINDTEXTUREPROC; +#endif + PFNGLBINDTEXTUREPROC _BindTexture = (PFNGLBINDTEXTUREPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::BindTexture); + return _BindTexture(target, texture); +} +inline void DYNAMICGLES_FUNCTION(BlendColor)(GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glBlendColor) PFNGLBLENDCOLORPROC; +#endif + PFNGLBLENDCOLORPROC _BlendColor = (PFNGLBLENDCOLORPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::BlendColor); + return _BlendColor(red, green, blue, alpha); +} 
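+// Usage sketch (illustrative only, compiled out with #if 0): once the GLES library
+// has been opened on first use, the wrappers above behave exactly like the core
+// entry points. The call below assumes the default namespace configuration
+// (DYNAMICGLES_NO_NAMESPACE undefined), a current GL context, and a hypothetical
+// texture object `tex` created elsewhere.
+#if 0
+inline void bindExampleTexture(GLuint tex)
+{
+	gl::ActiveTexture(GL_TEXTURE0); // the first call populates the ES2 function table
+	gl::BindTexture(GL_TEXTURE_2D, tex);
+}
+#endif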
+inline void DYNAMICGLES_FUNCTION(BlendEquation)(GLenum mode) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glBlendEquation) PFNGLBLENDEQUATIONPROC; +#endif + PFNGLBLENDEQUATIONPROC _BlendEquation = (PFNGLBLENDEQUATIONPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::BlendEquation); + return _BlendEquation(mode); +} +inline void DYNAMICGLES_FUNCTION(BlendEquationSeparate)(GLenum modeRGB, GLenum modeAlpha) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glBlendEquationSeparate) PFNGLBLENDEQUATIONSEPARATEPROC; +#endif + PFNGLBLENDEQUATIONSEPARATEPROC _BlendEquationSeparate = (PFNGLBLENDEQUATIONSEPARATEPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::BlendEquationSeparate); + return _BlendEquationSeparate(modeRGB, modeAlpha); +} +inline void DYNAMICGLES_FUNCTION(BlendFunc)(GLenum sfactor, GLenum dfactor) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glBlendFunc) PFNGLBLENDFUNCPROC; +#endif + PFNGLBLENDFUNCPROC _BlendFunc = (PFNGLBLENDFUNCPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::BlendFunc); + return _BlendFunc(sfactor, dfactor); +} +inline void DYNAMICGLES_FUNCTION(BlendFuncSeparate)(GLenum sfactorRGB, GLenum dfactorRGB, GLenum sfactorAlpha, GLenum dfactorAlpha) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glBlendFuncSeparate) PFNGLBLENDFUNCSEPARATEPROC; +#endif + PFNGLBLENDFUNCSEPARATEPROC _BlendFuncSeparate = (PFNGLBLENDFUNCSEPARATEPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::BlendFuncSeparate); + return _BlendFuncSeparate(sfactorRGB, dfactorRGB, sfactorAlpha, dfactorAlpha); +} +inline void DYNAMICGLES_FUNCTION(BufferData)(GLenum target, GLsizeiptr size, const void* data, GLenum usage) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glBufferData) PFNGLBUFFERDATAPROC; +#endif + PFNGLBUFFERDATAPROC _BufferData = (PFNGLBUFFERDATAPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::BufferData); + return _BufferData(target, size, data, usage); +} +inline void DYNAMICGLES_FUNCTION(BufferSubData)(GLenum target, GLintptr offset, GLsizeiptr size, const void* data) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glBufferSubData) PFNGLBUFFERSUBDATAPROC; +#endif + PFNGLBUFFERSUBDATAPROC _BufferSubData = (PFNGLBUFFERSUBDATAPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::BufferSubData); + return _BufferSubData(target, offset, size, data); +} +inline GLenum DYNAMICGLES_FUNCTION(CheckFramebufferStatus)(GLenum target) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glCheckFramebufferStatus) PFNGLCHECKFRAMEBUFFERSTATUSPROC; +#endif + PFNGLCHECKFRAMEBUFFERSTATUSPROC _CheckFramebufferStatus = (PFNGLCHECKFRAMEBUFFERSTATUSPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::CheckFramebufferStatus); + return _CheckFramebufferStatus(target); +} +inline void DYNAMICGLES_FUNCTION(Clear)(GLbitfield mask) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glClear) PFNGLCLEARPROC; +#endif + PFNGLCLEARPROC _Clear = (PFNGLCLEARPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::Clear); + return _Clear(mask); +} +inline void DYNAMICGLES_FUNCTION(ClearColor)(GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glClearColor) PFNGLCLEARCOLORPROC; +#endif + PFNGLCLEARCOLORPROC _ClearColor = (PFNGLCLEARCOLORPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::ClearColor); + return _ClearColor(red, green, blue, alpha); +} +inline void DYNAMICGLES_FUNCTION(ClearDepthf)(GLfloat d) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glClearDepthf) PFNGLCLEARDEPTHFPROC; +#endif + 
PFNGLCLEARDEPTHFPROC _ClearDepthf = (PFNGLCLEARDEPTHFPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::ClearDepthf); + return _ClearDepthf(d); +} +inline void DYNAMICGLES_FUNCTION(ClearStencil)(GLint s) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glClearStencil) PFNGLCLEARSTENCILPROC; +#endif + PFNGLCLEARSTENCILPROC _ClearStencil = (PFNGLCLEARSTENCILPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::ClearStencil); + return _ClearStencil(s); +} +inline void DYNAMICGLES_FUNCTION(ColorMask)(GLboolean red, GLboolean green, GLboolean blue, GLboolean alpha) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glColorMask) PFNGLCOLORMASKPROC; +#endif + PFNGLCOLORMASKPROC _ColorMask = (PFNGLCOLORMASKPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::ColorMask); + return _ColorMask(red, green, blue, alpha); +} +inline void DYNAMICGLES_FUNCTION(CompileShader)(GLuint shader) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glCompileShader) PFNGLCOMPILESHADERPROC; +#endif + PFNGLCOMPILESHADERPROC _CompileShader = (PFNGLCOMPILESHADERPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::CompileShader); + return _CompileShader(shader); +} +inline void DYNAMICGLES_FUNCTION(CompressedTexImage2D)(GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLsizei imageSize, const void* data) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glCompressedTexImage2D) PFNGLCOMPRESSEDTEXIMAGE2DPROC; +#endif + PFNGLCOMPRESSEDTEXIMAGE2DPROC _CompressedTexImage2D = (PFNGLCOMPRESSEDTEXIMAGE2DPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::CompressedTexImage2D); + return _CompressedTexImage2D(target, level, internalformat, width, height, border, imageSize, data); +} +inline void DYNAMICGLES_FUNCTION(CompressedTexSubImage2D)(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, const void* data) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glCompressedTexSubImage2D) PFNGLCOMPRESSEDTEXSUBIMAGE2DPROC; +#endif + PFNGLCOMPRESSEDTEXSUBIMAGE2DPROC _CompressedTexSubImage2D = (PFNGLCOMPRESSEDTEXSUBIMAGE2DPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::CompressedTexSubImage2D); + return _CompressedTexSubImage2D(target, level, xoffset, yoffset, width, height, format, imageSize, data); +} +inline void DYNAMICGLES_FUNCTION(CopyTexImage2D)(GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLsizei height, GLint border) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glCopyTexImage2D) PFNGLCOPYTEXIMAGE2DPROC; +#endif + PFNGLCOPYTEXIMAGE2DPROC _CopyTexImage2D = (PFNGLCOPYTEXIMAGE2DPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::CopyTexImage2D); + return _CopyTexImage2D(target, level, internalformat, x, y, width, height, border); +} +inline void DYNAMICGLES_FUNCTION(CopyTexSubImage2D)(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glCopyTexSubImage2D) PFNGLCOPYTEXSUBIMAGE2DPROC; +#endif + PFNGLCOPYTEXSUBIMAGE2DPROC _CopyTexSubImage2D = (PFNGLCOPYTEXSUBIMAGE2DPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::CopyTexSubImage2D); + return _CopyTexSubImage2D(target, level, xoffset, yoffset, x, y, width, height); +} +inline GLuint DYNAMICGLES_FUNCTION(CreateProgram)() +{ +#if TARGET_OS_IPHONE + typedef decltype(&glCreateProgram) PFNGLCREATEPROGRAMPROC; +#endif + PFNGLCREATEPROGRAMPROC _CreateProgram =
(PFNGLCREATEPROGRAMPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::CreateProgram); + return _CreateProgram(); +} +inline GLuint DYNAMICGLES_FUNCTION(CreateShader)(GLenum target) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glCreateShader) PFNGLCREATESHADERPROC; +#endif + PFNGLCREATESHADERPROC _CreateShader = (PFNGLCREATESHADERPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::CreateShader); + return _CreateShader(target); +} +inline void DYNAMICGLES_FUNCTION(CullFace)(GLenum mode) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glCullFace) PFNGLCULLFACEPROC; +#endif + PFNGLCULLFACEPROC _CullFace = (PFNGLCULLFACEPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::CullFace); + return _CullFace(mode); +} +inline void DYNAMICGLES_FUNCTION(DeleteBuffers)(GLsizei n, const GLuint* buffers) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glDeleteBuffers) PFNGLDELETEBUFFERSPROC; +#endif + PFNGLDELETEBUFFERSPROC _DeleteBuffers = (PFNGLDELETEBUFFERSPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::DeleteBuffers); + return _DeleteBuffers(n, buffers); +} +inline void DYNAMICGLES_FUNCTION(DeleteFramebuffers)(GLsizei n, const GLuint* framebuffers) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glDeleteFramebuffers) PFNGLDELETEFRAMEBUFFERSPROC; +#endif + PFNGLDELETEFRAMEBUFFERSPROC _DeleteFramebuffers = (PFNGLDELETEFRAMEBUFFERSPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::DeleteFramebuffers); + return _DeleteFramebuffers(n, framebuffers); +} +inline void DYNAMICGLES_FUNCTION(DeleteProgram)(GLuint program) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glDeleteProgram) PFNGLDELETEPROGRAMPROC; +#endif + PFNGLDELETEPROGRAMPROC _DeleteProgram = (PFNGLDELETEPROGRAMPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::DeleteProgram); + return _DeleteProgram(program); +} +inline void DYNAMICGLES_FUNCTION(DeleteRenderbuffers)(GLsizei n, const GLuint* renderbuffers) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glDeleteRenderbuffers) PFNGLDELETERENDERBUFFERSPROC; +#endif + PFNGLDELETERENDERBUFFERSPROC _DeleteRenderbuffers = (PFNGLDELETERENDERBUFFERSPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::DeleteRenderbuffers); + return _DeleteRenderbuffers(n, renderbuffers); +} +inline void DYNAMICGLES_FUNCTION(DeleteShader)(GLuint shader) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glDeleteShader) PFNGLDELETESHADERPROC; +#endif + PFNGLDELETESHADERPROC _DeleteShader = (PFNGLDELETESHADERPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::DeleteShader); + return _DeleteShader(shader); +} +inline void DYNAMICGLES_FUNCTION(DeleteTextures)(GLsizei n, const GLuint* textures) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glDeleteTextures) PFNGLDELETETEXTURESPROC; +#endif + PFNGLDELETETEXTURESPROC _DeleteTextures = (PFNGLDELETETEXTURESPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::DeleteTextures); + return _DeleteTextures(n, textures); +} +inline void DYNAMICGLES_FUNCTION(DepthFunc)(GLenum func) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glDepthFunc) PFNGLDEPTHFUNCPROC; +#endif + PFNGLDEPTHFUNCPROC _DepthFunc = (PFNGLDEPTHFUNCPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::DepthFunc); + return _DepthFunc(func); +} +inline void DYNAMICGLES_FUNCTION(DepthMask)(GLboolean flag) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glDepthMask) PFNGLDEPTHMASKPROC; +#endif + PFNGLDEPTHMASKPROC _DepthMask = (PFNGLDEPTHMASKPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::DepthMask); + return _DepthMask(flag); +}
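+// Pattern note (illustrative only, compiled out with #if 0): every wrapper in this
+// header follows the same shape. On iOS the PFNGL*PROC pointer typedefs are not
+// shipped in the system GL headers, so each wrapper derives one locally from the
+// statically linked prototype via decltype; on other platforms the Khronos typedefs
+// are used and the pointer is fetched from the dynamically opened library through
+// the lazily built function table. `ExampleWrapper` is a hypothetical name showing
+// that shape with glDepthMask as the target.
+#if 0
+inline void ExampleWrapper(GLboolean flag)
+{
+#if TARGET_OS_IPHONE
+	typedef decltype(&glDepthMask) PFNGLDEPTHMASKPROC; // derive the pointer type in place
+#endif
+	PFNGLDEPTHMASKPROC fn = (PFNGLDEPTHMASKPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::DepthMask);
+	fn(flag);
+}
+#endif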
+inline void DYNAMICGLES_FUNCTION(DepthRangef)(GLfloat n, GLfloat f) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glDepthRangef) PFNGLDEPTHRANGEFPROC; +#endif + PFNGLDEPTHRANGEFPROC _DepthRangef = (PFNGLDEPTHRANGEFPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::DepthRangef); + return _DepthRangef(n, f); +} +inline void DYNAMICGLES_FUNCTION(DetachShader)(GLuint program, GLuint shader) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glDetachShader) PFNGLDETACHSHADERPROC; +#endif + PFNGLDETACHSHADERPROC _DetachShader = (PFNGLDETACHSHADERPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::DetachShader); + return _DetachShader(program, shader); +} +inline void DYNAMICGLES_FUNCTION(Disable)(GLenum cap) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glDisable) PFNGLDISABLEPROC; +#endif + PFNGLDISABLEPROC _Disable = (PFNGLDISABLEPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::Disable); + return _Disable(cap); +} +inline void DYNAMICGLES_FUNCTION(DisableVertexAttribArray)(GLuint index) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glDisableVertexAttribArray) PFNGLDISABLEVERTEXATTRIBARRAYPROC; +#endif + PFNGLDISABLEVERTEXATTRIBARRAYPROC _DisableVertexAttribArray = (PFNGLDISABLEVERTEXATTRIBARRAYPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::DisableVertexAttribArray); + return _DisableVertexAttribArray(index); +} +inline void DYNAMICGLES_FUNCTION(DrawArrays)(GLenum mode, GLint first, GLsizei count) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glDrawArrays) PFNGLDRAWARRAYSPROC; +#endif + PFNGLDRAWARRAYSPROC _DrawArrays = (PFNGLDRAWARRAYSPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::DrawArrays); + return _DrawArrays(mode, first, count); +} +inline void DYNAMICGLES_FUNCTION(DrawElements)(GLenum mode, GLsizei count, GLenum type, const void* indices) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glDrawElements) PFNGLDRAWELEMENTSPROC; +#endif + PFNGLDRAWELEMENTSPROC _DrawElements = (PFNGLDRAWELEMENTSPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::DrawElements); + return _DrawElements(mode, count, type, indices); +} +inline void DYNAMICGLES_FUNCTION(Enable)(GLenum cap) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glEnable) PFNGLENABLEPROC; +#endif + PFNGLENABLEPROC _Enable = (PFNGLENABLEPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::Enable); + return _Enable(cap); +} +inline void DYNAMICGLES_FUNCTION(EnableVertexAttribArray)(GLuint index) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glEnableVertexAttribArray) PFNGLENABLEVERTEXATTRIBARRAYPROC; +#endif + PFNGLENABLEVERTEXATTRIBARRAYPROC _EnableVertexAttribArray = (PFNGLENABLEVERTEXATTRIBARRAYPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::EnableVertexAttribArray); + return _EnableVertexAttribArray(index); +} +inline void DYNAMICGLES_FUNCTION(Finish)(void) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glFinish) PFNGLFINISHPROC; +#endif + PFNGLFINISHPROC _Finish = (PFNGLFINISHPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::Finish); + return _Finish(); +} +inline void DYNAMICGLES_FUNCTION(Flush)(void) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glFlush) PFNGLFLUSHPROC; +#endif + PFNGLFLUSHPROC _Flush = (PFNGLFLUSHPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::Flush); + return _Flush(); +} +inline void DYNAMICGLES_FUNCTION(FramebufferRenderbuffer)(GLenum target, GLenum attachment, GLenum renderbuffertarget, GLuint renderbuffer) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glFramebufferRenderbuffer) 
PFNGLFRAMEBUFFERRENDERBUFFERPROC; +#endif + PFNGLFRAMEBUFFERRENDERBUFFERPROC _FramebufferRenderbuffer = (PFNGLFRAMEBUFFERRENDERBUFFERPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::FramebufferRenderbuffer); + return _FramebufferRenderbuffer(target, attachment, renderbuffertarget, renderbuffer); +} +inline void DYNAMICGLES_FUNCTION(FramebufferTexture2D)(GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glFramebufferTexture2D) PFNGLFRAMEBUFFERTEXTURE2DPROC; +#endif + PFNGLFRAMEBUFFERTEXTURE2DPROC _FramebufferTexture2D = (PFNGLFRAMEBUFFERTEXTURE2DPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::FramebufferTexture2D); + return _FramebufferTexture2D(target, attachment, textarget, texture, level); +} +inline void DYNAMICGLES_FUNCTION(FrontFace)(GLenum mode) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glFrontFace) PFNGLFRONTFACEPROC; +#endif + PFNGLFRONTFACEPROC _FrontFace = (PFNGLFRONTFACEPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::FrontFace); + return _FrontFace(mode); +} +inline void DYNAMICGLES_FUNCTION(GenBuffers)(GLsizei n, GLuint* buffers) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGenBuffers) PFNGLGENBUFFERSPROC; +#endif + PFNGLGENBUFFERSPROC _GenBuffers = (PFNGLGENBUFFERSPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::GenBuffers); + return _GenBuffers(n, buffers); +} +inline void DYNAMICGLES_FUNCTION(GenerateMipmap)(GLenum target) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGenerateMipmap) PFNGLGENERATEMIPMAPPROC; +#endif + PFNGLGENERATEMIPMAPPROC _GenerateMipmap = (PFNGLGENERATEMIPMAPPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::GenerateMipmap); + return _GenerateMipmap(target); +} +inline void DYNAMICGLES_FUNCTION(GenFramebuffers)(GLsizei n, GLuint* framebuffers) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGenFramebuffers) PFNGLGENFRAMEBUFFERSPROC; +#endif + PFNGLGENFRAMEBUFFERSPROC _GenFramebuffers = (PFNGLGENFRAMEBUFFERSPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::GenFramebuffers); + return _GenFramebuffers(n, framebuffers); +} +inline void DYNAMICGLES_FUNCTION(GenRenderbuffers)(GLsizei n, GLuint* renderbuffers) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGenRenderbuffers) PFNGLGENRENDERBUFFERSPROC; +#endif + PFNGLGENRENDERBUFFERSPROC _GenRenderbuffers = (PFNGLGENRENDERBUFFERSPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::GenRenderbuffers); + return _GenRenderbuffers(n, renderbuffers); +} +inline void DYNAMICGLES_FUNCTION(GenTextures)(GLsizei n, GLuint* textures) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGenTextures) PFNGLGENTEXTURESPROC; +#endif + PFNGLGENTEXTURESPROC _GenTextures = (PFNGLGENTEXTURESPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::GenTextures); + return _GenTextures(n, textures); +} +inline void DYNAMICGLES_FUNCTION(GetActiveAttrib)(GLuint program, GLuint index, GLsizei bufSize, GLsizei* length, GLint* size, GLenum* type, GLchar* name) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGetActiveAttrib) PFNGLGETACTIVEATTRIBPROC; +#endif + PFNGLGETACTIVEATTRIBPROC _GetActiveAttrib = (PFNGLGETACTIVEATTRIBPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::GetActiveAttrib); + return _GetActiveAttrib(program, index, bufSize, length, size, type, name); + +} +inline void DYNAMICGLES_FUNCTION(GetActiveUniform)(GLuint program, GLuint index, GLsizei bufSize, GLsizei* length, GLint* size, GLenum* type, GLchar* name) +{ +#if TARGET_OS_IPHONE + typedef 
decltype(&glGetActiveUniform) PFNGLGETACTIVEUNIFORMPROC; +#endif + PFNGLGETACTIVEUNIFORMPROC _GetActiveUniform = (PFNGLGETACTIVEUNIFORMPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::GetActiveUniform); + return _GetActiveUniform(program, index, bufSize, length, size, type, name); + +} +inline void DYNAMICGLES_FUNCTION(GetAttachedShaders)(GLuint program, GLsizei maxCount, GLsizei* count, GLuint* shaders) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGetAttachedShaders) PFNGLGETATTACHEDSHADERSPROC; +#endif + PFNGLGETATTACHEDSHADERSPROC _GetAttachedShaders = (PFNGLGETATTACHEDSHADERSPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::GetAttachedShaders); + return _GetAttachedShaders(program, maxCount, count, shaders); +} +inline GLint DYNAMICGLES_FUNCTION(GetAttribLocation)(GLuint program, const GLchar* name) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGetAttribLocation) PFNGLGETATTRIBLOCATIONPROC; +#endif + PFNGLGETATTRIBLOCATIONPROC _GetAttribLocation = (PFNGLGETATTRIBLOCATIONPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::GetAttribLocation); + return _GetAttribLocation(program, name); +} +inline void DYNAMICGLES_FUNCTION(GetBooleanv)(GLenum pname, GLboolean* data) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGetBooleanv) PFNGLGETBOOLEANVPROC; +#endif + PFNGLGETBOOLEANVPROC _GetBooleanv = (PFNGLGETBOOLEANVPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::GetBooleanv); + return _GetBooleanv(pname, data); +} +inline void DYNAMICGLES_FUNCTION(GetBufferParameteriv)(GLenum target, GLenum pname, GLint* params) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGetBufferParameteriv) PFNGLGETBUFFERPARAMETERIVPROC; +#endif + PFNGLGETBUFFERPARAMETERIVPROC _GetBufferParameteriv = (PFNGLGETBUFFERPARAMETERIVPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::GetBufferParameteriv); + return _GetBufferParameteriv(target, pname, params); +} +inline GLenum DYNAMICGLES_FUNCTION(GetError)(void) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGetError) PFNGLGETERRORPROC; +#endif + PFNGLGETERRORPROC _GetError = (PFNGLGETERRORPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::GetError); + return _GetError(); +} +inline void DYNAMICGLES_FUNCTION(GetFloatv)(GLenum pname, GLfloat* data) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGetFloatv) PFNGLGETFLOATVPROC; +#endif + PFNGLGETFLOATVPROC _GetFloatv = (PFNGLGETFLOATVPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::GetFloatv); + return _GetFloatv(pname, data); +} +inline void DYNAMICGLES_FUNCTION(GetFramebufferAttachmentParameteriv)(GLenum target, GLenum attachment, GLenum pname, GLint* params) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGetFramebufferAttachmentParameteriv) PFNGLGETFRAMEBUFFERATTACHMENTPARAMETERIVPROC; +#endif + PFNGLGETFRAMEBUFFERATTACHMENTPARAMETERIVPROC _GetFramebufferAttachmentParameteriv = (PFNGLGETFRAMEBUFFERATTACHMENTPARAMETERIVPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::GetFramebufferAttachmentParameteriv); + return _GetFramebufferAttachmentParameteriv(target, attachment, pname, params); +} +inline void DYNAMICGLES_FUNCTION(GetIntegerv)(GLenum pname, GLint* data) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGetIntegerv) PFNGLGETINTEGERVPROC; +#endif + PFNGLGETINTEGERVPROC _GetIntegerv = (PFNGLGETINTEGERVPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::GetIntegerv); + return _GetIntegerv(pname, data); +} +inline void DYNAMICGLES_FUNCTION(GetProgramiv)(GLuint program, GLenum pname, GLint* params) +{ +#if 
TARGET_OS_IPHONE + typedef decltype(&glGetProgramiv) PFNGLGETPROGRAMIVPROC; +#endif + PFNGLGETPROGRAMIVPROC _GetProgramiv = (PFNGLGETPROGRAMIVPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::GetProgramiv); + return _GetProgramiv(program, pname, params); +} +inline void DYNAMICGLES_FUNCTION(GetProgramInfoLog)(GLuint program, GLsizei bufSize, GLsizei* length, GLchar* infoLog) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGetProgramInfoLog) PFNGLGETPROGRAMINFOLOGPROC; +#endif + PFNGLGETPROGRAMINFOLOGPROC _GetProgramInfoLog = (PFNGLGETPROGRAMINFOLOGPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::GetProgramInfoLog); + return _GetProgramInfoLog(program, bufSize, length, infoLog); + +} +inline void DYNAMICGLES_FUNCTION(GetRenderbufferParameteriv)(GLenum target, GLenum pname, GLint* params) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGetRenderbufferParameteriv) PFNGLGETRENDERBUFFERPARAMETERIVPROC; +#endif + PFNGLGETRENDERBUFFERPARAMETERIVPROC _GetRenderbufferParameteriv = (PFNGLGETRENDERBUFFERPARAMETERIVPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::GetRenderbufferParameteriv); + return _GetRenderbufferParameteriv(target, pname, params); +} +inline void DYNAMICGLES_FUNCTION(GetShaderiv)(GLuint shader, GLenum pname, GLint* params) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGetShaderiv) PFNGLGETSHADERIVPROC; +#endif + PFNGLGETSHADERIVPROC _GetShaderiv = (PFNGLGETSHADERIVPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::GetShaderiv); + return _GetShaderiv(shader, pname, params); +} +inline void DYNAMICGLES_FUNCTION(GetShaderInfoLog)(GLuint shader, GLsizei bufSize, GLsizei* length, GLchar* infoLog) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGetShaderInfoLog) PFNGLGETSHADERINFOLOGPROC; +#endif + PFNGLGETSHADERINFOLOGPROC _GetShaderInfoLog = (PFNGLGETSHADERINFOLOGPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::GetShaderInfoLog); + return _GetShaderInfoLog(shader, bufSize, length, infoLog); + +} +inline void DYNAMICGLES_FUNCTION(GetShaderPrecisionFormat)(GLenum shadertype, GLenum precisiontype, GLint* range, GLint* precision) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGetShaderPrecisionFormat) PFNGLGETSHADERPRECISIONFORMATPROC; +#endif + PFNGLGETSHADERPRECISIONFORMATPROC _GetShaderPrecisionFormat = (PFNGLGETSHADERPRECISIONFORMATPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::GetShaderPrecisionFormat); + return _GetShaderPrecisionFormat(shadertype, precisiontype, range, precision); +} +inline void DYNAMICGLES_FUNCTION(GetShaderSource)(GLuint shader, GLsizei bufSize, GLsizei* length, GLchar* source) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGetShaderSource) PFNGLGETSHADERSOURCEPROC; +#endif + PFNGLGETSHADERSOURCEPROC _GetShaderSource = (PFNGLGETSHADERSOURCEPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::GetShaderSource); + return _GetShaderSource(shader, bufSize, length, source); + +} +inline const GLubyte* DYNAMICGLES_FUNCTION(GetString)(GLenum name) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGetString) PFNGLGETSTRINGPROC; +#endif + PFNGLGETSTRINGPROC _GetString = (PFNGLGETSTRINGPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::GetString); + return _GetString(name); +} +inline void DYNAMICGLES_FUNCTION(GetTexParameterfv)(GLenum target, GLenum pname, GLfloat* params) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGetTexParameterfv) PFNGLGETTEXPARAMETERFVPROC; +#endif + PFNGLGETTEXPARAMETERFVPROC _GetTexParameterfv = 
(PFNGLGETTEXPARAMETERFVPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::GetTexParameterfv); + return _GetTexParameterfv(target, pname, params); +} +inline void DYNAMICGLES_FUNCTION(GetTexParameteriv)(GLenum target, GLenum pname, GLint* params) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGetTexParameteriv) PFNGLGETTEXPARAMETERIVPROC; +#endif + PFNGLGETTEXPARAMETERIVPROC _GetTexParameteriv = (PFNGLGETTEXPARAMETERIVPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::GetTexParameteriv); + return _GetTexParameteriv(target, pname, params); +} +inline void DYNAMICGLES_FUNCTION(GetUniformfv)(GLuint program, GLint location, GLfloat* params) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGetUniformfv) PFNGLGETUNIFORMFVPROC; +#endif + PFNGLGETUNIFORMFVPROC _GetUniformfv = (PFNGLGETUNIFORMFVPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::GetUniformfv); + return _GetUniformfv(program, location, params); +} +inline void DYNAMICGLES_FUNCTION(GetUniformiv)(GLuint program, GLint location, GLint* params) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGetUniformiv) PFNGLGETUNIFORMIVPROC; +#endif + PFNGLGETUNIFORMIVPROC _GetUniformiv = (PFNGLGETUNIFORMIVPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::GetUniformiv); + return _GetUniformiv(program, location, params); +} +inline GLint DYNAMICGLES_FUNCTION(GetUniformLocation)(GLuint program, const GLchar* name) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGetUniformLocation) PFNGLGETUNIFORMLOCATIONPROC; +#endif + PFNGLGETUNIFORMLOCATIONPROC _GetUniformLocation = (PFNGLGETUNIFORMLOCATIONPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::GetUniformLocation); + return _GetUniformLocation(program, name); +} +inline void DYNAMICGLES_FUNCTION(GetVertexAttribfv)(GLuint index, GLenum pname, GLfloat* params) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGetVertexAttribfv) PFNGLGETVERTEXATTRIBFVPROC; +#endif + PFNGLGETVERTEXATTRIBFVPROC _GetVertexAttribfv = (PFNGLGETVERTEXATTRIBFVPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::GetVertexAttribfv); + return _GetVertexAttribfv(index, pname, params); +} +inline void DYNAMICGLES_FUNCTION(GetVertexAttribiv)(GLuint index, GLenum pname, GLint* params) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGetVertexAttribiv) PFNGLGETVERTEXATTRIBIVPROC; +#endif + PFNGLGETVERTEXATTRIBIVPROC _GetVertexAttribiv = (PFNGLGETVERTEXATTRIBIVPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::GetVertexAttribiv); + return _GetVertexAttribiv(index, pname, params); +} +inline void DYNAMICGLES_FUNCTION(GetVertexAttribPointerv)(GLuint index, GLenum pname, void** pointer) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGetVertexAttribPointerv) PFNGLGETVERTEXATTRIBPOINTERVPROC; +#endif + PFNGLGETVERTEXATTRIBPOINTERVPROC _GetVertexAttribPointerv = (PFNGLGETVERTEXATTRIBPOINTERVPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::GetVertexAttribPointerv); + return _GetVertexAttribPointerv(index, pname, pointer); +} +inline void DYNAMICGLES_FUNCTION(Hint)(GLenum target, GLenum mode) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glHint) PFNGLHINTPROC; +#endif + PFNGLHINTPROC _Hint = (PFNGLHINTPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::Hint); + return _Hint(target, mode); +} +inline GLboolean DYNAMICGLES_FUNCTION(IsBuffer)(GLuint buffer) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glIsBuffer) PFNGLISBUFFERPROC; +#endif + PFNGLISBUFFERPROC _IsBuffer = (PFNGLISBUFFERPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::IsBuffer); 
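+ // As with every ES2 wrapper in this header, the entry point is re-fetched through gl::internals::getEs2Function on each call; after the first resolution this is presumably just a cached table read, so the per-call cost is a pointer load plus an indirect call.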
+ return _IsBuffer(buffer); +} +inline GLboolean DYNAMICGLES_FUNCTION(IsEnabled)(GLenum cap) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glIsEnabled) PFNGLISENABLEDPROC; +#endif + PFNGLISENABLEDPROC _IsEnabled = (PFNGLISENABLEDPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::IsEnabled); + return _IsEnabled(cap); +} +inline GLboolean DYNAMICGLES_FUNCTION(IsFramebuffer)(GLuint framebuffer) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glIsFramebuffer) PFNGLISFRAMEBUFFERPROC; +#endif + PFNGLISFRAMEBUFFERPROC _IsFramebuffer = (PFNGLISFRAMEBUFFERPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::IsFramebuffer); + return _IsFramebuffer(framebuffer); +} +inline GLboolean DYNAMICGLES_FUNCTION(IsProgram)(GLuint program) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glIsProgram) PFNGLISPROGRAMPROC; +#endif + PFNGLISPROGRAMPROC _IsProgram = (PFNGLISPROGRAMPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::IsProgram); + return _IsProgram(program); +} +inline GLboolean DYNAMICGLES_FUNCTION(IsRenderbuffer)(GLuint renderbuffer) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glIsRenderbuffer) PFNGLISRENDERBUFFERPROC; +#endif + PFNGLISRENDERBUFFERPROC _IsRenderbuffer = (PFNGLISRENDERBUFFERPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::IsRenderbuffer); + return _IsRenderbuffer(renderbuffer); +} +inline GLboolean DYNAMICGLES_FUNCTION(IsShader)(GLuint shader) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glIsShader) PFNGLISSHADERPROC; +#endif + PFNGLISSHADERPROC _IsShader = (PFNGLISSHADERPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::IsShader); + return _IsShader(shader); + +} +inline GLboolean DYNAMICGLES_FUNCTION(IsTexture)(GLuint texture) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glIsTexture) PFNGLISTEXTUREPROC; +#endif + PFNGLISTEXTUREPROC _IsTexture = (PFNGLISTEXTUREPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::IsTexture); + return _IsTexture(texture); +} +inline void DYNAMICGLES_FUNCTION(LineWidth)(GLfloat width) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glLineWidth) PFNGLLINEWIDTHPROC; +#endif + PFNGLLINEWIDTHPROC _LineWidth = (PFNGLLINEWIDTHPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::LineWidth); + return _LineWidth(width); +} +inline void DYNAMICGLES_FUNCTION(LinkProgram)(GLuint program) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glLinkProgram) PFNGLLINKPROGRAMPROC; +#endif + PFNGLLINKPROGRAMPROC _LinkProgram = (PFNGLLINKPROGRAMPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::LinkProgram); + return _LinkProgram(program); +} +inline void DYNAMICGLES_FUNCTION(PixelStorei)(GLenum pname, GLint param) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glPixelStorei) PFNGLPIXELSTOREIPROC; +#endif + PFNGLPIXELSTOREIPROC _PixelStorei = (PFNGLPIXELSTOREIPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::PixelStorei); + return _PixelStorei(pname, param); +} +inline void DYNAMICGLES_FUNCTION(PolygonOffset)(GLfloat factor, GLfloat units) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glPolygonOffset) PFNGLPOLYGONOFFSETPROC; +#endif + PFNGLPOLYGONOFFSETPROC _PolygonOffset = (PFNGLPOLYGONOFFSETPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::PolygonOffset); + return _PolygonOffset(factor, units); +} +inline void DYNAMICGLES_FUNCTION(ReadPixels)(GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, void* pixels) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glReadPixels) PFNGLREADPIXELSPROC; +#endif + PFNGLREADPIXELSPROC _ReadPixels = 
(PFNGLREADPIXELSPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::ReadPixels); + return _ReadPixels(x, y, width, height, format, type, pixels); +} +inline void DYNAMICGLES_FUNCTION(ReleaseShaderCompiler)(void) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glReleaseShaderCompiler) PFNGLRELEASESHADERCOMPILERPROC; +#endif + PFNGLRELEASESHADERCOMPILERPROC _ReleaseShaderCompiler = (PFNGLRELEASESHADERCOMPILERPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::ReleaseShaderCompiler); + return _ReleaseShaderCompiler(); +} +inline void DYNAMICGLES_FUNCTION(RenderbufferStorage)(GLenum target, GLenum internalformat, GLsizei width, GLsizei height) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glRenderbufferStorage) PFNGLRENDERBUFFERSTORAGEPROC; +#endif + PFNGLRENDERBUFFERSTORAGEPROC _RenderbufferStorage = (PFNGLRENDERBUFFERSTORAGEPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::RenderbufferStorage); + return _RenderbufferStorage(target, internalformat, width, height); +} +inline void DYNAMICGLES_FUNCTION(SampleCoverage)(GLfloat value, GLboolean invert) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glSampleCoverage) PFNGLSAMPLECOVERAGEPROC; +#endif + PFNGLSAMPLECOVERAGEPROC _SampleCoverage = (PFNGLSAMPLECOVERAGEPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::SampleCoverage); + return _SampleCoverage(value, invert); +} +inline void DYNAMICGLES_FUNCTION(Scissor)(GLint x, GLint y, GLsizei width, GLsizei height) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glScissor) PFNGLSCISSORPROC; +#endif + PFNGLSCISSORPROC _Scissor = (PFNGLSCISSORPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::Scissor); + return _Scissor(x, y, width, height); +} +inline void DYNAMICGLES_FUNCTION(ShaderBinary)(GLsizei count, const GLuint* shaders, GLenum binaryformat, const void* binary, GLsizei length) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glShaderBinary) PFNGLSHADERBINARYPROC; +#endif + PFNGLSHADERBINARYPROC _ShaderBinary = (PFNGLSHADERBINARYPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::ShaderBinary); + return _ShaderBinary(count, shaders, binaryformat, binary, length); +} +inline void DYNAMICGLES_FUNCTION(ShaderSource)(GLuint shader, GLsizei count, const GLchar* const* string, const GLint* length) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glShaderSource) PFNGLSHADERSOURCEPROC; +#endif + PFNGLSHADERSOURCEPROC _ShaderSource = (PFNGLSHADERSOURCEPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::ShaderSource); + return _ShaderSource(shader, count, string, length); + +} +inline void DYNAMICGLES_FUNCTION(StencilFunc)(GLenum func, GLint ref, GLuint mask) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glStencilFunc) PFNGLSTENCILFUNCPROC; +#endif + PFNGLSTENCILFUNCPROC _StencilFunc = (PFNGLSTENCILFUNCPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::StencilFunc); + return _StencilFunc(func, ref, mask); +} +inline void DYNAMICGLES_FUNCTION(StencilFuncSeparate)(GLenum face, GLenum func, GLint ref, GLuint mask) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glStencilFuncSeparate) PFNGLSTENCILFUNCSEPARATEPROC; +#endif + PFNGLSTENCILFUNCSEPARATEPROC _StencilFuncSeparate = (PFNGLSTENCILFUNCSEPARATEPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::StencilFuncSeparate); + return _StencilFuncSeparate(face, func, ref, mask); +} +inline void DYNAMICGLES_FUNCTION(StencilMask)(GLuint mask) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glStencilMask) PFNGLSTENCILMASKPROC; +#endif + PFNGLSTENCILMASKPROC _StencilMask = 
(PFNGLSTENCILMASKPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::StencilMask); + return _StencilMask(mask); +} +inline void DYNAMICGLES_FUNCTION(StencilMaskSeparate)(GLenum face, GLuint mask) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glStencilMaskSeparate) PFNGLSTENCILMASKSEPARATEPROC; +#endif + PFNGLSTENCILMASKSEPARATEPROC _StencilMaskSeparate = (PFNGLSTENCILMASKSEPARATEPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::StencilMaskSeparate); + return _StencilMaskSeparate(face, mask); +} +inline void DYNAMICGLES_FUNCTION(StencilOp)(GLenum fail, GLenum zfail, GLenum zpass) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glStencilOp) PFNGLSTENCILOPPROC; +#endif + PFNGLSTENCILOPPROC _StencilOp = (PFNGLSTENCILOPPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::StencilOp); + return _StencilOp(fail, zfail, zpass); +} +inline void DYNAMICGLES_FUNCTION(StencilOpSeparate)(GLenum face, GLenum sfail, GLenum dpfail, GLenum dppass) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glStencilOpSeparate) PFNGLSTENCILOPSEPARATEPROC; +#endif + PFNGLSTENCILOPSEPARATEPROC _StencilOpSeparate = (PFNGLSTENCILOPSEPARATEPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::StencilOpSeparate); + return _StencilOpSeparate(face, sfail, dpfail, dppass); +} +inline void DYNAMICGLES_FUNCTION(TexImage2D)(GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, const void* pixels) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glTexImage2D) PFNGLTEXIMAGE2DPROC; +#endif + PFNGLTEXIMAGE2DPROC _TexImage2D = (PFNGLTEXIMAGE2DPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::TexImage2D); + return _TexImage2D(target, level, internalformat, width, height, border, format, type, pixels); +} +inline void DYNAMICGLES_FUNCTION(TexParameterf)(GLenum target, GLenum pname, GLfloat param) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glTexParameterf) PFNGLTEXPARAMETERFPROC; +#endif + PFNGLTEXPARAMETERFPROC _TexParameterf = (PFNGLTEXPARAMETERFPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::TexParameterf); + return _TexParameterf(target, pname, param); +} +inline void DYNAMICGLES_FUNCTION(TexParameterfv)(GLenum target, GLenum pname, const GLfloat* params) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glTexParameterfv) PFNGLTEXPARAMETERFVPROC; +#endif + PFNGLTEXPARAMETERFVPROC _TexParameterfv = (PFNGLTEXPARAMETERFVPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::TexParameterfv); + return _TexParameterfv(target, pname, params); +} +inline void DYNAMICGLES_FUNCTION(TexParameteri)(GLenum target, GLenum pname, GLint param) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glTexParameteri) PFNGLTEXPARAMETERIPROC; +#endif + PFNGLTEXPARAMETERIPROC _TexParameteri = (PFNGLTEXPARAMETERIPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::TexParameteri); + return _TexParameteri(target, pname, param); +} +inline void DYNAMICGLES_FUNCTION(TexParameteriv)(GLenum target, GLenum pname, const GLint* params) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glTexParameteriv) PFNGLTEXPARAMETERIVPROC; +#endif + PFNGLTEXPARAMETERIVPROC _TexParameteriv = (PFNGLTEXPARAMETERIVPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::TexParameteriv); + return _TexParameteriv(target, pname, params); +} +inline void DYNAMICGLES_FUNCTION(TexSubImage2D)(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, const void* pixels) +{ +#if TARGET_OS_IPHONE + 
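// iOS note: the GLES headers there do not appear to provide the PFNGL*PROC pointer typedefs, so each wrapper (this one included) derives an equivalent type from the function declaration itself with decltype. +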
typedef decltype(&glTexSubImage2D) PFNGLTEXSUBIMAGE2DPROC; +#endif + PFNGLTEXSUBIMAGE2DPROC _TexSubImage2D = (PFNGLTEXSUBIMAGE2DPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::TexSubImage2D); + return _TexSubImage2D(target, level, xoffset, yoffset, width, height, format, type, pixels); +} +inline void DYNAMICGLES_FUNCTION(Uniform1f)(GLint location, GLfloat v0) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glUniform1f) PFNGLUNIFORM1FPROC; +#endif + PFNGLUNIFORM1FPROC _Uniform1f = (PFNGLUNIFORM1FPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::Uniform1f); + return _Uniform1f(location, v0); +} +inline void DYNAMICGLES_FUNCTION(Uniform1fv)(GLint location, GLsizei count, const GLfloat* value) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glUniform1fv) PFNGLUNIFORM1FVPROC; +#endif + PFNGLUNIFORM1FVPROC _Uniform1fv = (PFNGLUNIFORM1FVPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::Uniform1fv); + return _Uniform1fv(location, count, value); +} +inline void DYNAMICGLES_FUNCTION(Uniform1i)(GLint location, GLint v0) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glUniform1i) PFNGLUNIFORM1IPROC; +#endif + PFNGLUNIFORM1IPROC _Uniform1i = (PFNGLUNIFORM1IPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::Uniform1i); + return _Uniform1i(location, v0); +} +inline void DYNAMICGLES_FUNCTION(Uniform1iv)(GLint location, GLsizei count, const GLint* value) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glUniform1iv) PFNGLUNIFORM1IVPROC; +#endif + PFNGLUNIFORM1IVPROC _Uniform1iv = (PFNGLUNIFORM1IVPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::Uniform1iv); + return _Uniform1iv(location, count, value); +} +inline void DYNAMICGLES_FUNCTION(Uniform2f)(GLint location, GLfloat v0, GLfloat v1) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glUniform2f) PFNGLUNIFORM2FPROC; +#endif + PFNGLUNIFORM2FPROC _Uniform2f = (PFNGLUNIFORM2FPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::Uniform2f); + return _Uniform2f(location, v0, v1); +} +inline void DYNAMICGLES_FUNCTION(Uniform2fv)(GLint location, GLsizei count, const GLfloat* value) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glUniform2fv) PFNGLUNIFORM2FVPROC; +#endif + PFNGLUNIFORM2FVPROC _Uniform2fv = (PFNGLUNIFORM2FVPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::Uniform2fv); + return _Uniform2fv(location, count, value); +} +inline void DYNAMICGLES_FUNCTION(Uniform2i)(GLint location, GLint v0, GLint v1) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glUniform2i) PFNGLUNIFORM2IPROC; +#endif + PFNGLUNIFORM2IPROC _Uniform2i = (PFNGLUNIFORM2IPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::Uniform2i); + return _Uniform2i(location, v0, v1); +} +inline void DYNAMICGLES_FUNCTION(Uniform2iv)(GLint location, GLsizei count, const GLint* value) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glUniform2iv) PFNGLUNIFORM2IVPROC; +#endif + PFNGLUNIFORM2IVPROC _Uniform2iv = (PFNGLUNIFORM2IVPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::Uniform2iv); + return _Uniform2iv(location, count, value); +} +inline void DYNAMICGLES_FUNCTION(Uniform3f)(GLint location, GLfloat v0, GLfloat v1, GLfloat v2) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glUniform3f) PFNGLUNIFORM3FPROC; +#endif + PFNGLUNIFORM3FPROC _Uniform3f = (PFNGLUNIFORM3FPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::Uniform3f); + return _Uniform3f(location, v0, v1, v2); +} +inline void DYNAMICGLES_FUNCTION(Uniform3fv)(GLint location, GLsizei count, const GLfloat* value) +{ +#if TARGET_OS_IPHONE + typedef 
decltype(&glUniform3fv) PFNGLUNIFORM3FVPROC; +#endif + PFNGLUNIFORM3FVPROC _Uniform3fv = (PFNGLUNIFORM3FVPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::Uniform3fv); + return _Uniform3fv(location, count, value); +} +inline void DYNAMICGLES_FUNCTION(Uniform3i)(GLint location, GLint v0, GLint v1, GLint v2) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glUniform3i) PFNGLUNIFORM3IPROC; +#endif + PFNGLUNIFORM3IPROC _Uniform3i = (PFNGLUNIFORM3IPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::Uniform3i); + return _Uniform3i(location, v0, v1, v2); +} +inline void DYNAMICGLES_FUNCTION(Uniform3iv)(GLint location, GLsizei count, const GLint* value) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glUniform3iv) PFNGLUNIFORM3IVPROC; +#endif + PFNGLUNIFORM3IVPROC _Uniform3iv = (PFNGLUNIFORM3IVPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::Uniform3iv); + return _Uniform3iv(location, count, value); +} +inline void DYNAMICGLES_FUNCTION(Uniform4f)(GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glUniform4f) PFNGLUNIFORM4FPROC; +#endif + PFNGLUNIFORM4FPROC _Uniform4f = (PFNGLUNIFORM4FPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::Uniform4f); + return _Uniform4f(location, v0, v1, v2, v3); +} +inline void DYNAMICGLES_FUNCTION(Uniform4fv)(GLint location, GLsizei count, const GLfloat* value) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glUniform4fv) PFNGLUNIFORM4FVPROC; +#endif + PFNGLUNIFORM4FVPROC _Uniform4fv = (PFNGLUNIFORM4FVPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::Uniform4fv); + return _Uniform4fv(location, count, value); +} +inline void DYNAMICGLES_FUNCTION(Uniform4i)(GLint location, GLint v0, GLint v1, GLint v2, GLint v3) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glUniform4i) PFNGLUNIFORM4IPROC; +#endif + PFNGLUNIFORM4IPROC _Uniform4i = (PFNGLUNIFORM4IPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::Uniform4i); + return _Uniform4i(location, v0, v1, v2, v3); +} +inline void DYNAMICGLES_FUNCTION(Uniform4iv)(GLint location, GLsizei count, const GLint* value) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glUniform4iv) PFNGLUNIFORM4IVPROC; +#endif + PFNGLUNIFORM4IVPROC _Uniform4iv = (PFNGLUNIFORM4IVPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::Uniform4iv); + return _Uniform4iv(location, count, value); +} +inline void DYNAMICGLES_FUNCTION(UniformMatrix2fv)(GLint location, GLsizei count, GLboolean transpose, const GLfloat* value) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glUniformMatrix2fv) PFNGLUNIFORMMATRIX2FVPROC; +#endif + PFNGLUNIFORMMATRIX2FVPROC _UniformMatrix2fv = (PFNGLUNIFORMMATRIX2FVPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::UniformMatrix2fv); + return _UniformMatrix2fv(location, count, transpose, value); +} +inline void DYNAMICGLES_FUNCTION(UniformMatrix3fv)(GLint location, GLsizei count, GLboolean transpose, const GLfloat* value) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glUniformMatrix3fv) PFNGLUNIFORMMATRIX3FVPROC; +#endif + PFNGLUNIFORMMATRIX3FVPROC _UniformMatrix3fv = (PFNGLUNIFORMMATRIX3FVPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::UniformMatrix3fv); + return _UniformMatrix3fv(location, count, transpose, value); +} +inline void DYNAMICGLES_FUNCTION(UniformMatrix4fv)(GLint location, GLsizei count, GLboolean transpose, const GLfloat* value) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glUniformMatrix4fv) PFNGLUNIFORMMATRIX4FVPROC; +#endif + PFNGLUNIFORMMATRIX4FVPROC _UniformMatrix4fv 
= (PFNGLUNIFORMMATRIX4FVPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::UniformMatrix4fv); + return _UniformMatrix4fv(location, count, transpose, value); +} +inline void DYNAMICGLES_FUNCTION(UseProgram)(GLuint program) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glUseProgram) PFNGLUSEPROGRAMPROC; +#endif + PFNGLUSEPROGRAMPROC _UseProgram = (PFNGLUSEPROGRAMPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::UseProgram); + return _UseProgram(program); +} +inline void DYNAMICGLES_FUNCTION(ValidateProgram)(GLuint program) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glValidateProgram) PFNGLVALIDATEPROGRAMPROC; +#endif + PFNGLVALIDATEPROGRAMPROC _ValidateProgram = (PFNGLVALIDATEPROGRAMPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::ValidateProgram); + return _ValidateProgram(program); +} +inline void DYNAMICGLES_FUNCTION(VertexAttrib1f)(GLuint index, GLfloat x) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glVertexAttrib1f) PFNGLVERTEXATTRIB1FPROC; +#endif + PFNGLVERTEXATTRIB1FPROC _VertexAttrib1f = (PFNGLVERTEXATTRIB1FPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::VertexAttrib1f); + return _VertexAttrib1f(index, x); +} +inline void DYNAMICGLES_FUNCTION(VertexAttrib1fv)(GLuint index, const GLfloat* v) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glVertexAttrib1fv) PFNGLVERTEXATTRIB1FVPROC; +#endif + PFNGLVERTEXATTRIB1FVPROC _VertexAttrib1fv = (PFNGLVERTEXATTRIB1FVPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::VertexAttrib1fv); + return _VertexAttrib1fv(index, v); +} +inline void DYNAMICGLES_FUNCTION(VertexAttrib2f)(GLuint index, GLfloat x, GLfloat y) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glVertexAttrib2f) PFNGLVERTEXATTRIB2FPROC; +#endif + PFNGLVERTEXATTRIB2FPROC _VertexAttrib2f = (PFNGLVERTEXATTRIB2FPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::VertexAttrib2f); + return _VertexAttrib2f(index, x, y); +} +inline void DYNAMICGLES_FUNCTION(VertexAttrib2fv)(GLuint index, const GLfloat* v) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glVertexAttrib2fv) PFNGLVERTEXATTRIB2FVPROC; +#endif + PFNGLVERTEXATTRIB2FVPROC _VertexAttrib2fv = (PFNGLVERTEXATTRIB2FVPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::VertexAttrib2fv); + return _VertexAttrib2fv(index, v); +} +inline void DYNAMICGLES_FUNCTION(VertexAttrib3f)(GLuint index, GLfloat x, GLfloat y, GLfloat z) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glVertexAttrib3f) PFNGLVERTEXATTRIB3FPROC; +#endif + PFNGLVERTEXATTRIB3FPROC _VertexAttrib3f = (PFNGLVERTEXATTRIB3FPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::VertexAttrib3f); + return _VertexAttrib3f(index, x, y, z); +} +inline void DYNAMICGLES_FUNCTION(VertexAttrib3fv)(GLuint index, const GLfloat* v) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glVertexAttrib3fv) PFNGLVERTEXATTRIB3FVPROC; +#endif + PFNGLVERTEXATTRIB3FVPROC _VertexAttrib3fv = (PFNGLVERTEXATTRIB3FVPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::VertexAttrib3fv); + return _VertexAttrib3fv(index, v); +} +inline void DYNAMICGLES_FUNCTION(VertexAttrib4f)(GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glVertexAttrib4f) PFNGLVERTEXATTRIB4FPROC; +#endif + PFNGLVERTEXATTRIB4FPROC _VertexAttrib4f = (PFNGLVERTEXATTRIB4FPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::VertexAttrib4f); + return _VertexAttrib4f(index, x, y, z, w); +} +inline void DYNAMICGLES_FUNCTION(VertexAttrib4fv)(GLuint index, const GLfloat* v) +{ +#if TARGET_OS_IPHONE + 
typedef decltype(&glVertexAttrib4fv) PFNGLVERTEXATTRIB4FVPROC; +#endif + PFNGLVERTEXATTRIB4FVPROC _VertexAttrib4fv = (PFNGLVERTEXATTRIB4FVPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::VertexAttrib4fv); + return _VertexAttrib4fv(index, v); +} +inline void DYNAMICGLES_FUNCTION(VertexAttribPointer)(GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, const void* pointer) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glVertexAttribPointer) PFNGLVERTEXATTRIBPOINTERPROC; +#endif + PFNGLVERTEXATTRIBPOINTERPROC _VertexAttribPointer = (PFNGLVERTEXATTRIBPOINTERPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::VertexAttribPointer); + return _VertexAttribPointer(index, size, type, normalized, stride, pointer); +} +inline void DYNAMICGLES_FUNCTION(Viewport)(GLint x, GLint y, GLsizei width, GLsizei height) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glViewport) PFNGLVIEWPORTPROC; +#endif + PFNGLVIEWPORTPROC _Viewport = (PFNGLVIEWPORTPROC)gl::internals::getEs2Function(gl::internals::Gl2FuncName::Viewport); + return _Viewport(x, y, width, height); +} +#ifndef DYNAMICGLES_NO_NAMESPACE +} +#elif TARGET_OS_IPHONE +} +} +#endif + +namespace gl { +namespace internals { +namespace GlExtFuncName { +enum OpenGLESExtFunctionName +{ + //EXTENSIONS // + MultiDrawArraysEXT, MultiDrawElementsEXT, DiscardFramebufferEXT, MapBufferOES, UnmapBufferOES, GetBufferPointervOES, BindVertexArrayOES, DeleteVertexArraysOES, + GenVertexArraysOES, IsVertexArrayOES, DeleteFencesNV, GenFencesNV, IsFenceNV, TestFenceNV, GetFenceivNV, FinishFenceNV, SetFenceNV, +#if !TARGET_OS_IPHONE + EGLImageTargetTexture2DOES, EGLImageTargetRenderbufferStorageOES, +#endif + RenderbufferStorageMultisampleIMG, FramebufferTexture2DMultisampleIMG, GetPerfMonitorGroupsAMD, GetPerfMonitorCountersAMD, GetPerfMonitorGroupStringAMD, + GetPerfMonitorCounterStringAMD, GetPerfMonitorCounterInfoAMD, GenPerfMonitorsAMD, DeletePerfMonitorsAMD, SelectPerfMonitorCountersAMD, BeginPerfMonitorAMD, + EndPerfMonitorAMD, GetPerfMonitorCounterDataAMD, BlitFramebufferANGLE, RenderbufferStorageMultisampleANGLE, + CoverageMaskNV, CoverageOperationNV, GetDriverControlsQCOM, GetDriverControlStringQCOM, EnableDriverControlQCOM, + DisableDriverControlQCOM, ExtGetTexturesQCOM, ExtGetBuffersQCOM, ExtGetRenderbuffersQCOM, ExtGetFramebuffersQCOM, ExtGetTexLevelParameterivQCOM, + ExtTexObjectStateOverrideiQCOM, ExtGetTexSubImageQCOM, ExtGetBufferPointervQCOM, ExtGetShadersQCOM, ExtGetProgramsQCOM, ExtIsProgramBinaryQCOM, + ExtGetProgramBinarySourceQCOM, StartTilingQCOM, EndTilingQCOM, GetProgramBinaryOES, ProgramBinaryOES, TexImage3DOES, TexSubImage3DOES, CopyTexSubImage3DOES, + CompressedTexImage3DOES, CompressedTexSubImage3DOES, FramebufferTexture3DOES, BlendEquationSeparateOES, BlendFuncSeparateOES, BlendEquationOES, QueryMatrixxOES, + CopyTextureLevelsAPPLE, RenderbufferStorageMultisampleAPPLE, ResolveMultisampleFramebufferAPPLE, FenceSyncAPPLE, IsSyncAPPLE, DeleteSyncAPPLE, + ClientWaitSyncAPPLE, WaitSyncAPPLE, GetInteger64vAPPLE, GetSyncivAPPLE, MapBufferRangeEXT, FlushMappedBufferRangeEXT, RenderbufferStorageMultisampleEXT, + FramebufferTexture2DMultisampleEXT, GetGraphicsResetStatusEXT, ReadnPixelsEXT, GetnUniformfvEXT, GetnUniformivEXT, TexStorage1DEXT, TexStorage2DEXT, + TexStorage3DEXT, TextureStorage1DEXT, TextureStorage2DEXT, TextureStorage3DEXT, +#if !defined(GL_KHR_debug) + GLDEBUGPROCKHR, +#endif + DebugMessageControlKHR, DebugMessageInsertKHR, DebugMessageCallbackKHR, GetDebugMessageLogKHR, PushDebugGroupKHR, 
PopDebugGroupKHR, ObjectLabelKHR, + GetObjectLabelKHR, ObjectPtrLabelKHR, GetObjectPtrLabelKHR, GetPointervKHR, DrawArraysInstancedANGLE, DrawElementsInstancedANGLE, VertexAttribDivisorANGLE, + GetTranslatedShaderSourceANGLE, LabelObjectEXT, GetObjectLabelEXT, InsertEventMarkerEXT, PushGroupMarkerEXT, PopGroupMarkerEXT, + GenQueriesEXT, DeleteQueriesEXT, IsQueryEXT, BeginQueryEXT, EndQueryEXT, GetQueryivEXT, GetQueryObjectuivEXT, UseProgramStagesEXT, ActiveShaderProgramEXT, + CreateShaderProgramvEXT, BindProgramPipelineEXT, DeleteProgramPipelinesEXT, GenProgramPipelinesEXT, IsProgramPipelineEXT, ProgramParameteriEXT, + GetProgramPipelineivEXT, ProgramUniform1iEXT, ProgramUniform2iEXT, ProgramUniform3iEXT, ProgramUniform4iEXT, ProgramUniform1fEXT, ProgramUniform2fEXT, + ProgramUniform3fEXT, ProgramUniform4fEXT, ProgramUniform1ivEXT, ProgramUniform2ivEXT, ProgramUniform3ivEXT, ProgramUniform4ivEXT, ProgramUniform1fvEXT, + ProgramUniform2fvEXT, ProgramUniform3fvEXT, ProgramUniform4fvEXT, ProgramUniformMatrix2fvEXT, ProgramUniformMatrix3fvEXT, ProgramUniformMatrix4fvEXT, + ValidateProgramPipelineEXT, GetProgramPipelineInfoLogEXT, ProgramUniform1uiEXT, ProgramUniform2uiEXT, ProgramUniform3uiEXT, ProgramUniform4uiEXT, + ProgramUniform1uivEXT, ProgramUniform2uivEXT, ProgramUniform3uivEXT, ProgramUniform4uivEXT, ProgramUniformMatrix2x3fvEXT, ProgramUniformMatrix3x2fvEXT, + ProgramUniformMatrix2x4fvEXT, ProgramUniformMatrix4x2fvEXT, ProgramUniformMatrix3x4fvEXT, ProgramUniformMatrix4x3fvEXT, AlphaFuncQCOM, ReadBufferNV, + DrawBuffersNV, ReadBufferIndexedEXT, DrawBuffersIndexedEXT, GetIntegeri_vEXT, DrawBuffersEXT, BlendEquationEXT, BlendBarrierKHR, TexStorage3DMultisampleOES, + FramebufferTextureMultiviewOVR, FramebufferPixelLocalStorageSizeEXT, ClearPixelLocalStorageuiEXT, GetFramebufferPixelLocalStorageSizeEXT, BufferStorageEXT, + ClearTexImageIMG, ClearTexSubImageIMG, ClearTexImageEXT, ClearTexSubImageEXT, FramebufferTexture2DDownsampleIMG, FramebufferTextureLayerDownsampleIMG, + PatchParameteriEXT, + +#ifdef GL_IMG_bindless_texture + GetTextureHandleIMG, GetTextureSamplerHandleIMG, UniformHandleui64IMG, UniformHandleui64vIMG, ProgramUniformHandleui64IMG, ProgramUniformHandleui64vIMG, +#endif + + NUMBER_OF_OPENGLEXT_FUNCTIONS +}; +} + +namespace GlExtFuncName { + +static const std::pair<OpenGLESExtFunctionName, const char*> OpenGLESExtFunctionNamePairs[] = +{ + { MultiDrawArraysEXT, "glMultiDrawArraysEXT"}, + { MultiDrawElementsEXT, "glMultiDrawElementsEXT" }, + { DiscardFramebufferEXT, "glDiscardFramebufferEXT" }, + { MapBufferOES, "glMapBufferOES" }, + { UnmapBufferOES, "glUnmapBufferOES" }, + { GetBufferPointervOES, "glGetBufferPointervOES" }, + { BindVertexArrayOES, "glBindVertexArrayOES" }, + { DeleteVertexArraysOES, "glDeleteVertexArraysOES" }, + { GenVertexArraysOES, "glGenVertexArraysOES" }, + { IsVertexArrayOES, "glIsVertexArrayOES" }, + { DeleteFencesNV, "glDeleteFencesNV" }, + { GenFencesNV, "glGenFencesNV" }, + { IsFenceNV, "glIsFenceNV" }, + { TestFenceNV, "glTestFenceNV" }, + { GetFenceivNV, "glGetFenceivNV" }, + { FinishFenceNV, "glFinishFenceNV" }, + { SetFenceNV, "glSetFenceNV" }, +#if !TARGET_OS_IPHONE + { EGLImageTargetTexture2DOES, "glEGLImageTargetTexture2DOES"}, + { EGLImageTargetRenderbufferStorageOES, "glEGLImageTargetRenderbufferStorageOES"}, +#endif + { RenderbufferStorageMultisampleIMG, "glRenderbufferStorageMultisampleIMG" }, + { FramebufferTexture2DMultisampleIMG, "glFramebufferTexture2DMultisampleIMG" }, + { GetPerfMonitorGroupsAMD, "glGetPerfMonitorGroupsAMD" }, + { GetPerfMonitorCountersAMD, 
"glGetPerfMonitorCountersAMD" }, + { GetPerfMonitorGroupStringAMD, "glGetPerfMonitorGroupStringAMD"}, + { GetPerfMonitorCounterStringAMD, "glGetPerfMonitorCounterStringAMD" }, + { GetPerfMonitorCounterInfoAMD, "glGetPerfMonitorCounterInfoAMD" }, + { GenPerfMonitorsAMD, "glGenPerfMonitorsAMD" }, + { DeletePerfMonitorsAMD, "glDeletePerfMonitorsAMD" }, + { SelectPerfMonitorCountersAMD, "glSelectPerfMonitorCountersAMD" }, + { BeginPerfMonitorAMD, "glBeginPerfMonitorAMD" }, + { EndPerfMonitorAMD, "glEndPerfMonitorAMD" }, + { GetPerfMonitorCounterDataAMD, "glGetPerfMonitorCounterDataAMD" }, + { BlitFramebufferANGLE, "glBlitFramebufferANGLE" }, + { RenderbufferStorageMultisampleANGLE, "glRenderbufferStorageMultisampleANGLE" }, + { CoverageMaskNV, "glCoverageMaskNV" }, + { CoverageOperationNV, "glCoverageOperationNV" }, + { GetDriverControlsQCOM, "glGetDriverControlsQCOM" }, + { GetDriverControlStringQCOM, "glGetDriverControlStringQCOM" }, + { EnableDriverControlQCOM, "glEnableDriverControlQCOM" }, + { DisableDriverControlQCOM, "glDisableDriverControlQCOM" }, + { ExtGetTexturesQCOM, "glExtGetTexturesQCOM" }, + { ExtGetBuffersQCOM, "glExtGetBuffersQCOM" }, + { ExtGetRenderbuffersQCOM, "glExtGetRenderbuffersQCOM" }, + { ExtGetFramebuffersQCOM, "glExtGetFramebuffersQCOM" }, + { ExtGetTexLevelParameterivQCOM, "glExtGetTexLevelParameterivQCOM" }, + { ExtTexObjectStateOverrideiQCOM, "glExtTexObjectStateOverrideiQCOM" }, + { ExtGetTexSubImageQCOM, "glExtGetTexSubImageQCOM" }, + { ExtGetBufferPointervQCOM, "glExtGetBufferPointervQCOM" }, + { ExtGetShadersQCOM, "glExtGetShadersQCOM" }, + { ExtGetProgramsQCOM, "glExtGetProgramsQCOM" }, + { ExtIsProgramBinaryQCOM, "glExtIsProgramBinaryQCOM" }, + { ExtGetProgramBinarySourceQCOM, "glExtGetProgramBinarySourceQCOM" }, + { StartTilingQCOM, "glStartTilingQCOM" }, + { EndTilingQCOM, "glEndTilingQCOM" }, + { GetProgramBinaryOES, "glGetProgramBinaryOES" }, + { ProgramBinaryOES, "glProgramBinaryOES" }, + { TexImage3DOES, "glTexImage3DOES" }, + { TexSubImage3DOES, "glTexSubImage3DOES" }, + { CopyTexSubImage3DOES, "glCopyTexSubImage3DOES" }, + { CompressedTexImage3DOES, "glCompressedTexImage3DOES" }, + { CompressedTexSubImage3DOES, "glCompressedTexSubImage3DOES" }, + { FramebufferTexture3DOES, "glFramebufferTexture3DOES" }, + { BlendEquationSeparateOES, "glBlendEquationSeparateOES" }, + { BlendFuncSeparateOES, "glBlendFuncSeparateOES" }, + { BlendEquationOES, "glBlendEquationOES" }, + { QueryMatrixxOES, "glQueryMatrixxOES" }, + { CopyTextureLevelsAPPLE, "glCopyTextureLevelsAPPLE" }, + { RenderbufferStorageMultisampleAPPLE, "glRenderbufferStorageMultisampleAPPLE" }, + { ResolveMultisampleFramebufferAPPLE, "glResolveMultisampleFramebufferAPPLE" }, + { FenceSyncAPPLE, "glFenceSyncAPPLE" }, + { IsSyncAPPLE, "glIsSyncAPPLE" }, + { DeleteSyncAPPLE, "glDeleteSyncAPPLE" }, + { ClientWaitSyncAPPLE, "glClientWaitSyncAPPLE" }, + { WaitSyncAPPLE, "glWaitSyncAPPLE" }, + { GetInteger64vAPPLE, "glGetInteger64vAPPLE" }, + { GetSyncivAPPLE, "glGetSyncivAPPLE" }, + { MapBufferRangeEXT, "glMapBufferRangeEXT" }, + { FlushMappedBufferRangeEXT, "glFlushMappedBufferRangeEXT" }, + { RenderbufferStorageMultisampleEXT, "glRenderbufferStorageMultisampleEXT" }, + { FramebufferTexture2DMultisampleEXT, "glFramebufferTexture2DMultisampleEXT" }, + { GetGraphicsResetStatusEXT, "glGetGraphicsResetStatusEXT" }, + { ReadnPixelsEXT, "glReadnPixelsEXT" }, + { GetnUniformfvEXT, "glGetnUniformfvEXT" }, + { GetnUniformivEXT, "glGetnUniformivEXT" }, + { TexStorage1DEXT, "glTexStorage1DEXT" }, + { 
TexStorage2DEXT, "glTexStorage2DEXT" }, + { TexStorage3DEXT, "glTexStorage3DEXT" }, + { TextureStorage1DEXT, "glTextureStorage1DEXT" }, + { TextureStorage2DEXT, "glTextureStorage2DEXT" }, + { TextureStorage3DEXT, "glTextureStorage3DEXT" }, +#if !defined(GL_KHR_debug) + { GLDEBUGPROCKHR, "glGLDEBUGPROCKHR" }, +#endif + { DebugMessageControlKHR, "glDebugMessageControlKHR" }, + { DebugMessageInsertKHR, "glDebugMessageInsertKHR" }, + { DebugMessageCallbackKHR, "glDebugMessageCallbackKHR" }, + { GetDebugMessageLogKHR, "glGetDebugMessageLogKHR" }, + { PushDebugGroupKHR, "glPushDebugGroupKHR" }, + { PopDebugGroupKHR, "glPopDebugGroupKHR" }, + { ObjectLabelKHR, "glObjectLabelKHR" }, + { GetObjectLabelKHR, "glGetObjectLabelKHR" }, + { ObjectPtrLabelKHR, "glObjectPtrLabelKHR" }, + { GetObjectPtrLabelKHR, "glGetObjectPtrLabelKHR" }, + { GetPointervKHR, "glGetPointervKHR" }, + { DrawArraysInstancedANGLE, "glDrawArraysInstancedANGLE" }, + { DrawElementsInstancedANGLE, "glDrawElementsInstancedANGLE" }, + { VertexAttribDivisorANGLE, "glVertexAttribDivisorANGLE" }, + { GetTranslatedShaderSourceANGLE, "glGetTranslatedShaderSourceANGLE" }, + { LabelObjectEXT, "glLabelObjectEXT" }, + { GetObjectLabelEXT, "glGetObjectLabelEXT" }, + { InsertEventMarkerEXT, "glInsertEventMarkerEXT" }, + { PushGroupMarkerEXT, "glPushGroupMarkerEXT" }, + { PopGroupMarkerEXT, "glPopGroupMarkerEXT" }, + { GenQueriesEXT, "glGenQueriesEXT" }, + { DeleteQueriesEXT, "glDeleteQueriesEXT" }, + { IsQueryEXT, "glIsQueryEXT" }, + { BeginQueryEXT, "glBeginQueryEXT" }, + { EndQueryEXT, "glEndQueryEXT" }, + { GetQueryivEXT, "glGetQueryivEXT" }, + { GetQueryObjectuivEXT, "glGetQueryObjectuivEXT" }, + { UseProgramStagesEXT, "glUseProgramStagesEXT" }, + { ActiveShaderProgramEXT, "glActiveShaderProgramEXT" }, + { CreateShaderProgramvEXT, "glCreateShaderProgramvEXT" }, + { BindProgramPipelineEXT, "glBindProgramPipelineEXT" }, + { DeleteProgramPipelinesEXT, "glDeleteProgramPipelinesEXT" }, + { GenProgramPipelinesEXT, "glGenProgramPipelinesEXT" }, + { IsProgramPipelineEXT, "glIsProgramPipelineEXT" }, + { ProgramParameteriEXT, "glProgramParameteriEXT" }, + { GetProgramPipelineivEXT, "glGetProgramPipelineivEXT" }, + { ProgramUniform1iEXT, "glProgramUniform1iEXT" }, + { ProgramUniform2iEXT, "glProgramUniform2iEXT" }, + { ProgramUniform3iEXT, "glProgramUniform3iEXT" }, + { ProgramUniform4iEXT, "glProgramUniform4iEXT" }, + { ProgramUniform1fEXT, "glProgramUniform1fEXT" }, + { ProgramUniform2fEXT, "glProgramUniform2fEXT" }, + { ProgramUniform3fEXT, "glProgramUniform3fEXT" }, + { ProgramUniform4fEXT, "glProgramUniform4fEXT" }, + { ProgramUniform1ivEXT, "glProgramUniform1ivEXT" }, + { ProgramUniform2ivEXT, "glProgramUniform2ivEXT" }, + { ProgramUniform3ivEXT, "glProgramUniform3ivEXT" }, + { ProgramUniform4ivEXT, "glProgramUniform4ivEXT" }, + { ProgramUniform1fvEXT, "glProgramUniform1fvEXT" }, + { ProgramUniform2fvEXT, "glProgramUniform2fvEXT" }, + { ProgramUniform3fvEXT, "glProgramUniform3fvEXT" }, + { ProgramUniform4fvEXT, "glProgramUniform4fvEXT" }, + { ProgramUniformMatrix2fvEXT, "glProgramUniformMatrix2fvEXT" }, + { ProgramUniformMatrix3fvEXT, "glProgramUniformMatrix3fvEXT" }, + { ProgramUniformMatrix4fvEXT, "glProgramUniformMatrix4fvEXT" }, + { ValidateProgramPipelineEXT, "glValidateProgramPipelineEXT" }, + { GetProgramPipelineInfoLogEXT, "glGetProgramPipelineInfoLogEXT" }, + { ProgramUniform1uiEXT, "glProgramUniform1uiEXT" }, + { ProgramUniform2uiEXT, "glProgramUniform2uiEXT" }, + { ProgramUniform3uiEXT, "glProgramUniform3uiEXT" }, + { 
ProgramUniform4uiEXT, "glProgramUniform4uiEXT" }, + { ProgramUniform1uivEXT, "glProgramUniform1uivEXT" }, + { ProgramUniform2uivEXT, "glProgramUniform2uivEXT" }, + { ProgramUniform3uivEXT, "glProgramUniform3uivEXT" }, + { ProgramUniform4uivEXT, "glProgramUniform4uivEXT" }, + { ProgramUniformMatrix2x3fvEXT, "glProgramUniformMatrix2x3fvEXT" }, + { ProgramUniformMatrix3x2fvEXT, "glProgramUniformMatrix3x2fvEXT" }, + { ProgramUniformMatrix2x4fvEXT, "glProgramUniformMatrix2x4fvEXT" }, + { ProgramUniformMatrix4x2fvEXT, "glProgramUniformMatrix4x2fvEXT" }, + { ProgramUniformMatrix3x4fvEXT, "glProgramUniformMatrix3x4fvEXT" }, + { ProgramUniformMatrix4x3fvEXT, "glProgramUniformMatrix4x3fvEXT" }, + { AlphaFuncQCOM, "glAlphaFuncQCOM" }, + { ReadBufferNV, "glReadBufferNV" }, + { DrawBuffersNV, "glDrawBuffersNV" }, + { ReadBufferIndexedEXT, "glReadBufferIndexedEXT" }, + { DrawBuffersIndexedEXT, "glDrawBuffersIndexedEXT" }, + { GetIntegeri_vEXT, "glGetIntegeri_vEXT" }, + { DrawBuffersEXT, "glDrawBuffersEXT" }, + { BlendEquationEXT, "glBlendEquationEXT" }, + { BlendBarrierKHR, "glBlendBarrierKHR" }, + { TexStorage3DMultisampleOES, "glTexStorage3DMultisampleOES" }, + { FramebufferTextureMultiviewOVR, "glFramebufferTextureMultiviewOVR" }, + { FramebufferPixelLocalStorageSizeEXT, "glFramebufferPixelLocalStorageSizeEXT" }, + { ClearPixelLocalStorageuiEXT, "glClearPixelLocalStorageuiEXT" }, + { GetFramebufferPixelLocalStorageSizeEXT, "glGetFramebufferPixelLocalStorageSizeEXT" }, + { BufferStorageEXT, "glBufferStorageEXT" }, + { ClearTexImageIMG, "glClearTexImageIMG" }, + { ClearTexSubImageIMG, "glClearTexSubImageIMG" }, + { ClearTexImageEXT, "glClearTexImageEXT" }, + { ClearTexSubImageEXT, "glClearTexSubImageEXT" }, + { FramebufferTexture2DDownsampleIMG, "glFramebufferTexture2DDownsampleIMG" }, + { FramebufferTextureLayerDownsampleIMG, "glFramebufferTextureLayerDownsampleIMG" }, + { PatchParameteriEXT, "glPatchParameteriEXT" }, + +#ifdef GL_IMG_bindless_texture + { GetTextureHandleIMG, "glGetTextureHandleIMG" }, + { GetTextureSamplerHandleIMG, "glGetTextureSamplerHandleIMG" }, + { UniformHandleui64IMG, "glUniformHandleui64IMG" }, + { UniformHandleui64vIMG, "glUniformHandleui64vIMG" }, + { ProgramUniformHandleui64IMG, "glProgramUniformHandleui64IMG" }, + { ProgramUniformHandleui64vIMG, "glProgramUniformHandleui64vIMG" } +#endif + +}; +} + +#if !TARGET_OS_IPHONE +static inline void* GetGlesExtensionFunction(const char* funcName) +{ + return (void*)DYNAMICEGL_CALL_FUNCTION(GetProcAddress)(funcName); +} +#endif + +static inline bool isExtensionSupported(const unsigned char* const extensionString, const char* const extension) +{ + if (!extensionString) + { + return false; + } + + // The recommended technique for querying OpenGL extensions, + // from http://opengl.org/resources/features/OGLextensions/ + const char* start = (const char*)extensionString; + char* position, *terminator; + + // Extension names should not have spaces. + position = (char*)strchr(extension, ' '); + + if (position || *extension == '\0') + { + return false; + } + + /* It takes a bit of care to be fool-proof about parsing the + OpenGL extensions string. Don't be fooled by sub-strings, etc. 
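+ A bare strstr() is not enough: "GL_EXT_texture", for example, also occurs inside "GL_EXT_texture3D". A match therefore only counts when it is bounded by spaces or by the ends of the extension string, which is what the position/terminator checks below enforce.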
*/ + for (;;) + { + position = (char*)strstr((char*)start, extension); + + if (!position) + { + break; + } + + terminator = position + strlen(extension); + + if (position == start || *(position - 1) == ' ') + { + if (*terminator == ' ' || *terminator == '\0') + { + return true; + } + } + + start = terminator; + } + + return false; +} + +inline void* getGlesExtFunction(gl::internals::GlExtFuncName::OpenGLESExtFunctionName funcname, bool reset = false) +{ + static void* FunctionTable[GlExtFuncName::NUMBER_OF_OPENGLEXT_FUNCTIONS + 1] = {0}; + +#if TARGET_OS_IPHONE + FunctionTable[GlExtFuncName::DiscardFramebufferEXT] = (void*)&glDiscardFramebufferEXT; + FunctionTable[GlExtFuncName::MapBufferOES] = (void*)&glMapBufferOES; + FunctionTable[GlExtFuncName::UnmapBufferOES] = (void*)&glUnmapBufferOES; + FunctionTable[GlExtFuncName::GetBufferPointervOES] = (void*)&glGetBufferPointervOES; + FunctionTable[GlExtFuncName::BindVertexArrayOES] = (void*)&glBindVertexArrayOES; + FunctionTable[GlExtFuncName::DeleteVertexArraysOES] = (void*)&glDeleteVertexArraysOES; + FunctionTable[GlExtFuncName::GenVertexArraysOES] = (void*)&glGenVertexArraysOES; + FunctionTable[GlExtFuncName::IsVertexArrayOES] = (void*)&glIsVertexArrayOES; + + FunctionTable[GlExtFuncName::GetProgramBinaryOES] = (void*)&glGetProgramBinary; + FunctionTable[GlExtFuncName::ProgramBinaryOES] = (void*)&glProgramBinary; + FunctionTable[GlExtFuncName::TexImage3DOES] = (void*)&glTexImage3D; + FunctionTable[GlExtFuncName::TexSubImage3DOES] = (void*)&glTexSubImage3D; + FunctionTable[GlExtFuncName::CopyTexSubImage3DOES] = (void*)&glCopyTexSubImage3D; + FunctionTable[GlExtFuncName::CompressedTexImage3DOES] = (void*)&glCompressedTexImage3D; + FunctionTable[GlExtFuncName::CompressedTexSubImage3DOES] = (void*)&glCompressedTexSubImage3D; + FunctionTable[GlExtFuncName::BlendEquationSeparateOES] = (void*)&glBlendEquationSeparate; + FunctionTable[GlExtFuncName::BlendFuncSeparateOES] = (void*)&glBlendFuncSeparate; + FunctionTable[GlExtFuncName::BlendEquationOES] = (void*)&glBlendEquation; + + FunctionTable[GlExtFuncName::CopyTextureLevelsAPPLE] = (void*)&glCopyTextureLevelsAPPLE; + FunctionTable[GlExtFuncName::RenderbufferStorageMultisampleAPPLE] = (void*)&glRenderbufferStorageMultisampleAPPLE; + FunctionTable[GlExtFuncName::ResolveMultisampleFramebufferAPPLE] = (void*)&glResolveMultisampleFramebufferAPPLE; + FunctionTable[GlExtFuncName::FenceSyncAPPLE] = (void*)&glFenceSyncAPPLE; + FunctionTable[GlExtFuncName::IsSyncAPPLE] = (void*)&glIsSyncAPPLE; + FunctionTable[GlExtFuncName::DeleteSyncAPPLE] = (void*)&glDeleteSyncAPPLE; + FunctionTable[GlExtFuncName::ClientWaitSyncAPPLE] = (void*)&glClientWaitSyncAPPLE; + FunctionTable[GlExtFuncName::WaitSyncAPPLE] = (void*)&glWaitSyncAPPLE; + FunctionTable[GlExtFuncName::GetInteger64vAPPLE] = (void*)&glGetInteger64vAPPLE; + FunctionTable[GlExtFuncName::GetSyncivAPPLE] = (void*)&glGetSyncivAPPLE; + FunctionTable[GlExtFuncName::MapBufferRangeEXT] = (void*)&glMapBufferRangeEXT; + FunctionTable[GlExtFuncName::FlushMappedBufferRangeEXT] = (void*)&glFlushMappedBufferRangeEXT; + FunctionTable[GlExtFuncName::RenderbufferStorageMultisampleEXT] = (void*)&glRenderbufferStorageMultisample; + FunctionTable[GlExtFuncName::TexStorage2DEXT] = (void*)&glTexStorage2DEXT; + FunctionTable[GlExtFuncName::TextureStorage2DEXT] = (void*)&glTexStorage2DEXT; + FunctionTable[GlExtFuncName::GetObjectLabelKHR] = (void*)&glGetObjectLabelEXT; + FunctionTable[GlExtFuncName::LabelObjectEXT] = (void*)&glLabelObjectEXT; + 
FunctionTable[GlExtFuncName::GetObjectLabelEXT] = (void*)&glGetObjectLabelEXT; + FunctionTable[GlExtFuncName::InsertEventMarkerEXT] = (void*)&glInsertEventMarkerEXT; + FunctionTable[GlExtFuncName::PushGroupMarkerEXT] = (void*)&glPushGroupMarkerEXT; + FunctionTable[GlExtFuncName::PopGroupMarkerEXT] = (void*)&glPopGroupMarkerEXT; + FunctionTable[GlExtFuncName::GenQueriesEXT] = (void*)&glGenQueriesEXT; + FunctionTable[GlExtFuncName::DeleteQueriesEXT] = (void*)&glDeleteQueriesEXT; + FunctionTable[GlExtFuncName::IsQueryEXT] = (void*)&glIsQueryEXT; + FunctionTable[GlExtFuncName::BeginQueryEXT] = (void*)&glBeginQueryEXT; + FunctionTable[GlExtFuncName::EndQueryEXT] = (void*)&glEndQueryEXT; + FunctionTable[GlExtFuncName::GetQueryivEXT] = (void*)&glGetQueryivEXT; + FunctionTable[GlExtFuncName::GetQueryObjectuivEXT] = (void*)&glGetQueryObjectuivEXT; + FunctionTable[GlExtFuncName::UseProgramStagesEXT] = (void*)&glUseProgramStagesEXT; + FunctionTable[GlExtFuncName::ActiveShaderProgramEXT] = (void*)&glActiveShaderProgramEXT; + FunctionTable[GlExtFuncName::CreateShaderProgramvEXT] = (void*)&glCreateShaderProgramvEXT; + FunctionTable[GlExtFuncName::BindProgramPipelineEXT] = (void*)&glBindProgramPipelineEXT; + FunctionTable[GlExtFuncName::DeleteProgramPipelinesEXT] = (void*)&glDeleteProgramPipelinesEXT; + FunctionTable[GlExtFuncName::GenProgramPipelinesEXT] = (void*)&glGenProgramPipelinesEXT; + FunctionTable[GlExtFuncName::IsProgramPipelineEXT] = (void*)&glIsProgramPipelineEXT; + FunctionTable[GlExtFuncName::ProgramParameteriEXT] = (void*)&glProgramParameteriEXT; + FunctionTable[GlExtFuncName::GetProgramPipelineivEXT] = (void*)&glGetProgramPipelineivEXT; + FunctionTable[GlExtFuncName::ProgramUniform1iEXT] = (void*)&glProgramUniform1iEXT; + FunctionTable[GlExtFuncName::ProgramUniform2iEXT] = (void*)&glProgramUniform2iEXT; + FunctionTable[GlExtFuncName::ProgramUniform3iEXT] = (void*)&glProgramUniform3iEXT; + FunctionTable[GlExtFuncName::ProgramUniform4iEXT] = (void*)&glProgramUniform4iEXT; + FunctionTable[GlExtFuncName::ProgramUniform1fEXT] = (void*)&glProgramUniform1fEXT; + FunctionTable[GlExtFuncName::ProgramUniform2fEXT] = (void*)&glProgramUniform2fEXT; + FunctionTable[GlExtFuncName::ProgramUniform3fEXT] = (void*)&glProgramUniform3fEXT; + FunctionTable[GlExtFuncName::ProgramUniform4fEXT] = (void*)&glProgramUniform4fEXT; + FunctionTable[GlExtFuncName::ProgramUniform1ivEXT] = (void*)&glProgramUniform1ivEXT; + FunctionTable[GlExtFuncName::ProgramUniform2ivEXT] = (void*)&glProgramUniform2ivEXT; + FunctionTable[GlExtFuncName::ProgramUniform3ivEXT] = (void*)&glProgramUniform3ivEXT; + FunctionTable[GlExtFuncName::ProgramUniform4ivEXT] = (void*)&glProgramUniform4ivEXT; + FunctionTable[GlExtFuncName::ProgramUniform1fvEXT] = (void*)&glProgramUniform1fvEXT; + FunctionTable[GlExtFuncName::ProgramUniform2fvEXT] = (void*)&glProgramUniform2fvEXT; + FunctionTable[GlExtFuncName::ProgramUniform3fvEXT] = (void*)&glProgramUniform3fvEXT; + FunctionTable[GlExtFuncName::ProgramUniform4fvEXT] = (void*)&glProgramUniform4fvEXT; + FunctionTable[GlExtFuncName::ProgramUniformMatrix2fvEXT] = (void*)&glProgramUniformMatrix2fvEXT; + FunctionTable[GlExtFuncName::ProgramUniformMatrix3fvEXT] = (void*)&glProgramUniformMatrix3fvEXT; + FunctionTable[GlExtFuncName::ProgramUniformMatrix4fvEXT] = (void*)&glProgramUniformMatrix4fvEXT; + FunctionTable[GlExtFuncName::ValidateProgramPipelineEXT] = (void*)&glValidateProgramPipelineEXT; + FunctionTable[GlExtFuncName::GetProgramPipelineInfoLogEXT] = (void*)&glGetProgramPipelineInfoLogEXT; + 
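// Note that on iOS this table is bound statically at build time rather than through eglGetProcAddress; slots whose OES/EXT entry point was promoted to core GLES (glGetProgramBinary, glTexImage3D, glBlendEquation and friends, assigned above) simply alias the core symbol. +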
FunctionTable[GlExtFuncName::ProgramUniform1uiEXT] = (void*)&glProgramUniform1uiEXT; + FunctionTable[GlExtFuncName::ProgramUniform2uiEXT] = (void*)&glProgramUniform2uiEXT; + FunctionTable[GlExtFuncName::ProgramUniform3uiEXT] = (void*)&glProgramUniform3uiEXT; + FunctionTable[GlExtFuncName::ProgramUniform4uiEXT] = (void*)&glProgramUniform4uiEXT; + FunctionTable[GlExtFuncName::ProgramUniform1uivEXT] = (void*)&glProgramUniform1uivEXT; + FunctionTable[GlExtFuncName::ProgramUniform2uivEXT] = (void*)&glProgramUniform2uivEXT; + FunctionTable[GlExtFuncName::ProgramUniform3uivEXT] = (void*)&glProgramUniform3uivEXT; + FunctionTable[GlExtFuncName::ProgramUniform4uivEXT] = (void*)&glProgramUniform4uivEXT; + FunctionTable[GlExtFuncName::ProgramUniformMatrix2x3fvEXT] = (void*)&glProgramUniformMatrix2x3fvEXT; + FunctionTable[GlExtFuncName::ProgramUniformMatrix3x2fvEXT] = (void*)&glProgramUniformMatrix3x2fvEXT; + FunctionTable[GlExtFuncName::ProgramUniformMatrix2x4fvEXT] = (void*)&glProgramUniformMatrix2x4fvEXT; + FunctionTable[GlExtFuncName::ProgramUniformMatrix4x2fvEXT] = (void*)&glProgramUniformMatrix4x2fvEXT; + FunctionTable[GlExtFuncName::ProgramUniformMatrix3x4fvEXT] = (void*)&glProgramUniformMatrix3x4fvEXT; + FunctionTable[GlExtFuncName::ProgramUniformMatrix4x3fvEXT] = (void*)&glProgramUniformMatrix4x3fvEXT; + FunctionTable[GlExtFuncName::GetIntegeri_vEXT] = (void*)&glGetIntegeri_v; + FunctionTable[GlExtFuncName::DrawBuffersEXT] = (void*)&glDrawBuffers; + FunctionTable[GlExtFuncName::BlendEquationEXT] = (void*)&glBlendEquation; +#else + // Fetch the extension function pointers once, unless an explicit reset is requested. + if (!FunctionTable[GlExtFuncName::NUMBER_OF_OPENGLEXT_FUNCTIONS] || reset) + { + const uint32_t numExtensionStrings = sizeof(GlExtFuncName::OpenGLESExtFunctionNamePairs) / sizeof(GlExtFuncName::OpenGLESExtFunctionNamePairs[0]); + + // Mark the sentinel slot past the last function so the table is only populated once. + FunctionTable[GlExtFuncName::NUMBER_OF_OPENGLEXT_FUNCTIONS] = (void*)1; + + for (uint32_t i = 0; i < numExtensionStrings; i++) + { + FunctionTable[i] = GetGlesExtensionFunction(GlExtFuncName::OpenGLESExtFunctionNamePairs[i].second); + } + } +#endif + return FunctionTable[funcname]; +} +} +} + +#ifndef DYNAMICGLES_NO_NAMESPACE +namespace gl { +namespace ext { +#elif TARGET_OS_IPHONE +namespace gl { +namespace internals { +#endif +inline void resetExtensionFunctionPointers() +{ + gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::OpenGLESExtFunctionName(0), true); +} + +inline void DYNAMICGLES_FUNCTION(MultiDrawElementsEXT)(GLenum mode, const GLsizei* count, GLenum type, const GLvoid** indices, GLsizei primcount) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLMULTIDRAWELEMENTSEXTPROC _MultiDrawElementsEXT = (PFNGLMULTIDRAWELEMENTSEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::MultiDrawElementsEXT); + return _MultiDrawElementsEXT(mode, count, type, indices, primcount); +#endif +} +inline void DYNAMICGLES_FUNCTION(MultiDrawArraysEXT)(GLenum mode, const GLint* first, const GLsizei* count, GLsizei primcount) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLMULTIDRAWARRAYSEXTPROC _MultiDrawArraysEXT = (PFNGLMULTIDRAWARRAYSEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::MultiDrawArraysEXT); + return _MultiDrawArraysEXT(mode, first, count, primcount); +#endif +} +inline void DYNAMICGLES_FUNCTION(DiscardFramebufferEXT)(GLenum target, GLsizei numAttachments, const GLenum* attachments) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glDiscardFramebufferEXT) 
PFNGLDISCARDFRAMEBUFFEREXTPROC; +#endif + PFNGLDISCARDFRAMEBUFFEREXTPROC _DiscardFramebufferEXT = (PFNGLDISCARDFRAMEBUFFEREXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::DiscardFramebufferEXT); + return _DiscardFramebufferEXT(target, numAttachments, attachments); +} +inline void* DYNAMICGLES_FUNCTION(MapBufferOES)(GLenum target, GLenum access) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glMapBufferOES) PFNGLMAPBUFFEROESPROC; +#endif + PFNGLMAPBUFFEROESPROC _MapBufferOES = (PFNGLMAPBUFFEROESPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::MapBufferOES); + return _MapBufferOES(target, access); +} +inline GLboolean DYNAMICGLES_FUNCTION(UnmapBufferOES)(GLenum target) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glUnmapBufferOES) PFNGLUNMAPBUFFEROESPROC; +#endif + PFNGLUNMAPBUFFEROESPROC _UnmapBufferOES = (PFNGLUNMAPBUFFEROESPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::UnmapBufferOES); + return _UnmapBufferOES(target); +} +inline void DYNAMICGLES_FUNCTION(GetBufferPointervOES)(GLenum target, GLenum pname, void** params) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGetBufferPointervOES) PFNGLGETBUFFERPOINTERVOESPROC; +#endif + PFNGLGETBUFFERPOINTERVOESPROC _GetBufferPointervOES = (PFNGLGETBUFFERPOINTERVOESPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::GetBufferPointervOES); + return _GetBufferPointervOES(target, pname, params); +} +inline void DYNAMICGLES_FUNCTION(BindVertexArrayOES)(GLuint vertexarray) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glBindVertexArrayOES) PFNGLBINDVERTEXARRAYOESPROC; +#endif + PFNGLBINDVERTEXARRAYOESPROC _BindVertexArrayOES = (PFNGLBINDVERTEXARRAYOESPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::BindVertexArrayOES); + return _BindVertexArrayOES(vertexarray); +} +inline void DYNAMICGLES_FUNCTION(DeleteVertexArraysOES)(GLsizei n, const GLuint* vertexarrays) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glDeleteVertexArraysOES) PFNGLDELETEVERTEXARRAYSOESPROC; +#endif + PFNGLDELETEVERTEXARRAYSOESPROC _DeleteVertexArraysOES = (PFNGLDELETEVERTEXARRAYSOESPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::DeleteVertexArraysOES); + return _DeleteVertexArraysOES(n, vertexarrays); +} +inline void DYNAMICGLES_FUNCTION(GenVertexArraysOES)(GLsizei n, GLuint* vertexarrays) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGenVertexArraysOES) PFNGLGENVERTEXARRAYSOESPROC; +#endif + PFNGLGENVERTEXARRAYSOESPROC _GenVertexArraysOES = (PFNGLGENVERTEXARRAYSOESPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::GenVertexArraysOES); + return _GenVertexArraysOES(n, vertexarrays); +} +inline GLboolean DYNAMICGLES_FUNCTION(IsVertexArrayOES)(GLuint vertexarray) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glIsVertexArrayOES) PFNGLISVERTEXARRAYOESPROC; +#endif + PFNGLISVERTEXARRAYOESPROC _IsVertexArrayOES = (PFNGLISVERTEXARRAYOESPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::IsVertexArrayOES); + return _IsVertexArrayOES(vertexarray); +} +inline void DYNAMICGLES_FUNCTION(DeleteFencesNV)(GLsizei n, const GLuint* fences) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLDELETEFENCESNVPROC _DeleteFencesNV = (PFNGLDELETEFENCESNVPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::DeleteFencesNV); + return _DeleteFencesNV(n, fences); +#endif +} +inline void DYNAMICGLES_FUNCTION(GenFencesNV)(GLsizei n, GLuint* fences) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLGENFENCESNVPROC _GenFencesNV = 
(PFNGLGENFENCESNVPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::GenFencesNV); + return _GenFencesNV(n, fences); +#endif +} +inline GLboolean DYNAMICGLES_FUNCTION(IsFenceNV)(GLuint fence) +{ +#if TARGET_OS_IPHONE + assert(0); + return GL_FALSE; // keep the return path defined when NDEBUG disables the assert +#else + PFNGLISFENCENVPROC _IsFenceNV = (PFNGLISFENCENVPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::IsFenceNV); + return _IsFenceNV(fence); +#endif +} +inline GLboolean DYNAMICGLES_FUNCTION(TestFenceNV)(GLuint fence) +{ +#if TARGET_OS_IPHONE + assert(0); + return GL_FALSE; +#else + PFNGLTESTFENCENVPROC _TestFenceNV = (PFNGLTESTFENCENVPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::TestFenceNV); + return _TestFenceNV(fence); +#endif +} +inline void DYNAMICGLES_FUNCTION(GetFenceivNV)(GLuint fence, GLenum pname, GLint* params) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLGETFENCEIVNVPROC _GetFenceivNV = (PFNGLGETFENCEIVNVPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::GetFenceivNV); + return _GetFenceivNV(fence, pname, params); +#endif +} +inline void DYNAMICGLES_FUNCTION(FinishFenceNV)(GLuint fence) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLFINISHFENCENVPROC _FinishFenceNV = (PFNGLFINISHFENCENVPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::FinishFenceNV); + return _FinishFenceNV(fence); +#endif +} +inline void DYNAMICGLES_FUNCTION(SetFenceNV)(GLuint fence, GLenum condition) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLSETFENCENVPROC _SetFenceNV = (PFNGLSETFENCENVPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::SetFenceNV); + return _SetFenceNV(fence, condition); +#endif +} +#if !TARGET_OS_IPHONE +inline void DYNAMICGLES_FUNCTION(EGLImageTargetTexture2DOES)(GLenum target, GLeglImageOES image) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLEGLIMAGETARGETTEXTURE2DOESPROC _EGLImageTargetTexture2DOES = (PFNGLEGLIMAGETARGETTEXTURE2DOESPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::EGLImageTargetTexture2DOES); + return _EGLImageTargetTexture2DOES(target, image); +#endif +} +inline void DYNAMICGLES_FUNCTION(EGLImageTargetRenderbufferStorageOES)(GLenum target, GLeglImageOES image) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLEGLIMAGETARGETRENDERBUFFERSTORAGEOESPROC _EGLImageTargetRenderbufferStorageOES = (PFNGLEGLIMAGETARGETRENDERBUFFERSTORAGEOESPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::EGLImageTargetRenderbufferStorageOES); + return _EGLImageTargetRenderbufferStorageOES(target, image); +#endif +} +#endif +inline void DYNAMICGLES_FUNCTION(RenderbufferStorageMultisampleIMG)(GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLRENDERBUFFERSTORAGEMULTISAMPLEIMGPROC _RenderbufferStorageMultisampleIMG = (PFNGLRENDERBUFFERSTORAGEMULTISAMPLEIMGPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::RenderbufferStorageMultisampleIMG); + return _RenderbufferStorageMultisampleIMG(target, samples, internalformat, width, height); +#endif +} +inline void DYNAMICGLES_FUNCTION(FramebufferTexture2DMultisampleIMG)(GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLsizei samples) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLFRAMEBUFFERTEXTURE2DMULTISAMPLEIMGPROC _FramebufferTexture2DMultisampleIMG = (PFNGLFRAMEBUFFERTEXTURE2DMULTISAMPLEIMGPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::FramebufferTexture2DMultisampleIMG); + return 
_FramebufferTexture2DMultisampleIMG(target, attachment, textarget, texture, level, samples); +#endif +} +inline void DYNAMICGLES_FUNCTION(GetPerfMonitorGroupsAMD)(GLint* numGroups, GLsizei groupsSize, GLuint* groups) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLGETPERFMONITORGROUPSAMDPROC _GetPerfMonitorGroupsAMD = (PFNGLGETPERFMONITORGROUPSAMDPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::GetPerfMonitorGroupsAMD); + return _GetPerfMonitorGroupsAMD(numGroups, groupsSize, groups); +#endif +} +inline void DYNAMICGLES_FUNCTION(GetPerfMonitorCountersAMD)(GLuint group, GLint* numCounters, GLint* maxActiveCounters, GLsizei counterSize, GLuint* counters) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLGETPERFMONITORCOUNTERSAMDPROC _GetPerfMonitorCountersAMD = (PFNGLGETPERFMONITORCOUNTERSAMDPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::GetPerfMonitorCountersAMD); + return _GetPerfMonitorCountersAMD(group, numCounters, maxActiveCounters, counterSize, counters); +#endif +} +inline void DYNAMICGLES_FUNCTION(GetPerfMonitorGroupStringAMD)(GLuint group, GLsizei bufSize, GLsizei* length, char* groupString) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLGETPERFMONITORGROUPSTRINGAMDPROC _GetPerfMonitorGroupStringAMD = (PFNGLGETPERFMONITORGROUPSTRINGAMDPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::GetPerfMonitorGroupStringAMD); + return _GetPerfMonitorGroupStringAMD(group, bufSize, length, groupString); +#endif +} +inline void DYNAMICGLES_FUNCTION(GetPerfMonitorCounterStringAMD)(GLuint group, GLuint counter, GLsizei bufSize, GLsizei* length, char* counterString) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLGETPERFMONITORCOUNTERSTRINGAMDPROC _GetPerfMonitorCounterStringAMD = (PFNGLGETPERFMONITORCOUNTERSTRINGAMDPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::GetPerfMonitorCounterStringAMD); + return _GetPerfMonitorCounterStringAMD(group, counter, bufSize, length, counterString); +#endif +} +inline void DYNAMICGLES_FUNCTION(GetPerfMonitorCounterInfoAMD)(GLuint group, GLuint counter, GLenum pname, GLvoid* data) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLGETPERFMONITORCOUNTERINFOAMDPROC _GetPerfMonitorCounterInfoAMD = (PFNGLGETPERFMONITORCOUNTERINFOAMDPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::GetPerfMonitorCounterInfoAMD); + return _GetPerfMonitorCounterInfoAMD(group, counter, pname, data); +#endif +} +inline void DYNAMICGLES_FUNCTION(GenPerfMonitorsAMD)(GLsizei n, GLuint* monitors) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLGENPERFMONITORSAMDPROC _GenPerfMonitorsAMD = (PFNGLGENPERFMONITORSAMDPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::GenPerfMonitorsAMD); + return _GenPerfMonitorsAMD(n, monitors); +#endif +} +inline void DYNAMICGLES_FUNCTION(DeletePerfMonitorsAMD)(GLsizei n, GLuint* monitors) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLDELETEPERFMONITORSAMDPROC _DeletePerfMonitorsAMD = (PFNGLDELETEPERFMONITORSAMDPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::DeletePerfMonitorsAMD); + return _DeletePerfMonitorsAMD(n, monitors); +#endif +} +inline void DYNAMICGLES_FUNCTION(SelectPerfMonitorCountersAMD)(GLuint monitor, GLboolean enable, GLuint group, GLint numCounters, GLuint* countersList) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLSELECTPERFMONITORCOUNTERSAMDPROC _SelectPerfMonitorCountersAMD = 
(PFNGLSELECTPERFMONITORCOUNTERSAMDPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::SelectPerfMonitorCountersAMD); + return _SelectPerfMonitorCountersAMD(monitor, enable, group, numCounters, countersList); +#endif +} +inline void DYNAMICGLES_FUNCTION(BeginPerfMonitorAMD)(GLuint monitor) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLBEGINPERFMONITORAMDPROC _BeginPerfMonitorAMD = (PFNGLBEGINPERFMONITORAMDPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::BeginPerfMonitorAMD); + return _BeginPerfMonitorAMD(monitor); +#endif +} +inline void DYNAMICGLES_FUNCTION(EndPerfMonitorAMD)(GLuint monitor) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLENDPERFMONITORAMDPROC _EndPerfMonitorAMD = (PFNGLENDPERFMONITORAMDPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::EndPerfMonitorAMD); + return _EndPerfMonitorAMD(monitor); +#endif +} +inline void DYNAMICGLES_FUNCTION(GetPerfMonitorCounterDataAMD)(GLuint monitor, GLenum pname, GLsizei dataSize, GLuint* data, GLint* bytesWritten) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLGETPERFMONITORCOUNTERDATAAMDPROC _GetPerfMonitorCounterDataAMD = (PFNGLGETPERFMONITORCOUNTERDATAAMDPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::GetPerfMonitorCounterDataAMD); + return _GetPerfMonitorCounterDataAMD(monitor, pname, dataSize, data, bytesWritten); +#endif +} +inline void DYNAMICGLES_FUNCTION(BlitFramebufferANGLE)(GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLBLITFRAMEBUFFERANGLEPROC _BlitFramebufferANGLE = (PFNGLBLITFRAMEBUFFERANGLEPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::BlitFramebufferANGLE); + return _BlitFramebufferANGLE(srcX0, srcY0, srcX1, srcY1, dstX0, dstY0, dstX1, dstY1, mask, filter); +#endif +} +inline void DYNAMICGLES_FUNCTION(RenderbufferStorageMultisampleANGLE)(GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLRENDERBUFFERSTORAGEMULTISAMPLEANGLEPROC _RenderbufferStorageMultisampleANGLE = (PFNGLRENDERBUFFERSTORAGEMULTISAMPLEANGLEPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::RenderbufferStorageMultisampleANGLE); + return _RenderbufferStorageMultisampleANGLE(target, samples, internalformat, width, height); +#endif +} +inline void DYNAMICGLES_FUNCTION(RenderbufferStorageMultisampleAPPLE)(GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glRenderbufferStorageMultisampleAPPLE) PFNGLRENDERBUFFERSTORAGEMULTISAMPLEAPPLEPROC; +#endif + PFNGLRENDERBUFFERSTORAGEMULTISAMPLEAPPLEPROC _RenderbufferStorageMultisampleAPPLE = (PFNGLRENDERBUFFERSTORAGEMULTISAMPLEAPPLEPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::RenderbufferStorageMultisampleAPPLE); + return _RenderbufferStorageMultisampleAPPLE(target, samples, internalformat, width, height); +} +inline void DYNAMICGLES_FUNCTION(ResolveMultisampleFramebufferAPPLE)(void) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glResolveMultisampleFramebufferAPPLE) PFNGLRESOLVEMULTISAMPLEFRAMEBUFFERAPPLEPROC; +#endif + PFNGLRESOLVEMULTISAMPLEFRAMEBUFFERAPPLEPROC _ResolveMultisampleFramebufferAPPLE = (PFNGLRESOLVEMULTISAMPLEFRAMEBUFFERAPPLEPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ResolveMultisampleFramebufferAPPLE); + return 
_ResolveMultisampleFramebufferAPPLE(); +} +inline void DYNAMICGLES_FUNCTION(CoverageMaskNV)(GLboolean mask) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLCOVERAGEMASKNVPROC _CoverageMaskNV = (PFNGLCOVERAGEMASKNVPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::CoverageMaskNV); + return _CoverageMaskNV(mask); +#endif +} +inline void DYNAMICGLES_FUNCTION(CoverageOperationNV)(GLenum operation) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLCOVERAGEOPERATIONNVPROC _CoverageOperationNV = (PFNGLCOVERAGEOPERATIONNVPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::CoverageOperationNV); + return _CoverageOperationNV(operation); +#endif +} +inline void DYNAMICGLES_FUNCTION(GetDriverControlsQCOM)(GLint* num, GLsizei size, GLuint* driverControls) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLGETDRIVERCONTROLSQCOMPROC _GetDriverControlsQCOM = (PFNGLGETDRIVERCONTROLSQCOMPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::GetDriverControlsQCOM); + return _GetDriverControlsQCOM(num, size, driverControls); +#endif +} +inline void DYNAMICGLES_FUNCTION(GetDriverControlStringQCOM)(GLuint driverControl, GLsizei bufSize, GLsizei* length, char* driverControlString) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLGETDRIVERCONTROLSTRINGQCOMPROC _GetDriverControlStringQCOM = (PFNGLGETDRIVERCONTROLSTRINGQCOMPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::GetDriverControlStringQCOM); + return _GetDriverControlStringQCOM(driverControl, bufSize, length, driverControlString); +#endif +} +inline void DYNAMICGLES_FUNCTION(EnableDriverControlQCOM)(GLuint driverControl) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLENABLEDRIVERCONTROLQCOMPROC _EnableDriverControlQCOM = (PFNGLENABLEDRIVERCONTROLQCOMPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::EnableDriverControlQCOM); + return _EnableDriverControlQCOM(driverControl); +#endif +} +inline void DYNAMICGLES_FUNCTION(DisableDriverControlQCOM)(GLuint driverControl) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLDISABLEDRIVERCONTROLQCOMPROC _DisableDriverControlQCOM = (PFNGLDISABLEDRIVERCONTROLQCOMPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::DisableDriverControlQCOM); + return _DisableDriverControlQCOM(driverControl); +#endif +} +inline void DYNAMICGLES_FUNCTION(ExtGetTexturesQCOM)(GLuint* textures, GLint maxTextures, GLint* numTextures) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLEXTGETTEXTURESQCOMPROC _ExtGetTexturesQCOM = (PFNGLEXTGETTEXTURESQCOMPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ExtGetTexturesQCOM); + return _ExtGetTexturesQCOM(textures, maxTextures, numTextures); +#endif +} +inline void DYNAMICGLES_FUNCTION(ExtGetBuffersQCOM)(GLuint* buffers, GLint maxBuffers, GLint* numBuffers) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLEXTGETBUFFERSQCOMPROC _ExtGetBuffersQCOM = (PFNGLEXTGETBUFFERSQCOMPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ExtGetBuffersQCOM); + return _ExtGetBuffersQCOM(buffers, maxBuffers, numBuffers); +#endif +} +inline void DYNAMICGLES_FUNCTION(ExtGetRenderbuffersQCOM)(GLuint* renderbuffers, GLint maxRenderbuffers, GLint* numRenderbuffers) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLEXTGETRENDERBUFFERSQCOMPROC _ExtGetRenderbuffersQCOM = (PFNGLEXTGETRENDERBUFFERSQCOMPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ExtGetRenderbuffersQCOM); + return _ExtGetRenderbuffersQCOM(renderbuffers, maxRenderbuffers, 
numRenderbuffers); +#endif +} +inline void DYNAMICGLES_FUNCTION(ExtGetFramebuffersQCOM)(GLuint* framebuffers, GLint maxFramebuffers, GLint* numFramebuffers) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLEXTGETFRAMEBUFFERSQCOMPROC _ExtGetFramebuffersQCOM = (PFNGLEXTGETFRAMEBUFFERSQCOMPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ExtGetFramebuffersQCOM); + return _ExtGetFramebuffersQCOM(framebuffers, maxFramebuffers, numFramebuffers); +#endif +} +inline void DYNAMICGLES_FUNCTION(ExtGetTexLevelParameterivQCOM)(GLuint texture, GLenum face, GLint level, GLenum pname, GLint* params) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLEXTGETTEXLEVELPARAMETERIVQCOMPROC _ExtGetTexLevelParameterivQCOM = (PFNGLEXTGETTEXLEVELPARAMETERIVQCOMPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ExtGetTexLevelParameterivQCOM); + return _ExtGetTexLevelParameterivQCOM(texture, face, level, pname, params); +#endif +} +inline void DYNAMICGLES_FUNCTION(ExtTexObjectStateOverrideiQCOM)(GLenum target, GLenum pname, GLint param) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLEXTTEXOBJECTSTATEOVERRIDEIQCOMPROC _ExtTexObjectStateOverrideiQCOM = (PFNGLEXTTEXOBJECTSTATEOVERRIDEIQCOMPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ExtTexObjectStateOverrideiQCOM); + return _ExtTexObjectStateOverrideiQCOM(target, pname, param); +#endif +} +inline void DYNAMICGLES_FUNCTION(ExtGetTexSubImageQCOM)(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, GLvoid* texels) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLEXTGETTEXSUBIMAGEQCOMPROC _ExtGetTexSubImageQCOM = (PFNGLEXTGETTEXSUBIMAGEQCOMPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ExtGetTexSubImageQCOM); + return _ExtGetTexSubImageQCOM(target, level, xoffset, yoffset, zoffset, width, height, depth, format, type, texels); +#endif +} +inline void DYNAMICGLES_FUNCTION(ExtGetBufferPointervQCOM)(GLenum target, GLvoid** params) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLEXTGETBUFFERPOINTERVQCOMPROC _ExtGetBufferPointervQCOM = (PFNGLEXTGETBUFFERPOINTERVQCOMPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ExtGetBufferPointervQCOM); + return _ExtGetBufferPointervQCOM(target, params); +#endif +} +inline void DYNAMICGLES_FUNCTION(ExtGetShadersQCOM)(GLuint* shaders, GLint maxShaders, GLint* numShaders) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLEXTGETSHADERSQCOMPROC _ExtGetShadersQCOM = (PFNGLEXTGETSHADERSQCOMPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ExtGetShadersQCOM); + return _ExtGetShadersQCOM(shaders, maxShaders, numShaders); +#endif +} +inline void DYNAMICGLES_FUNCTION(ExtGetProgramsQCOM)(GLuint* programs, GLint maxPrograms, GLint* numPrograms) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLEXTGETPROGRAMSQCOMPROC _ExtGetProgramsQCOM = (PFNGLEXTGETPROGRAMSQCOMPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ExtGetProgramsQCOM); + return _ExtGetProgramsQCOM(programs, maxPrograms, numPrograms); +#endif +} +inline GLboolean DYNAMICGLES_FUNCTION(ExtIsProgramBinaryQCOM)(GLuint program) +{ +#if TARGET_OS_IPHONE + assert(0); + return GL_FALSE; // keep the return path defined when NDEBUG disables the assert +#else + PFNGLEXTISPROGRAMBINARYQCOMPROC _ExtIsProgramBinaryQCOM = (PFNGLEXTISPROGRAMBINARYQCOMPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ExtIsProgramBinaryQCOM); + return _ExtIsProgramBinaryQCOM(program); +#endif +} +inline void 
DYNAMICGLES_FUNCTION(ExtGetProgramBinarySourceQCOM)(GLuint program, GLenum shadertype, char* source, GLint* length) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLEXTGETPROGRAMBINARYSOURCEQCOMPROC _ExtGetProgramBinarySourceQCOM = (PFNGLEXTGETPROGRAMBINARYSOURCEQCOMPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ExtGetProgramBinarySourceQCOM); + return _ExtGetProgramBinarySourceQCOM(program, shadertype, source, length); +#endif +} +inline void DYNAMICGLES_FUNCTION(StartTilingQCOM)(GLuint x, GLuint y, GLuint width, GLuint height, GLbitfield preserveMask) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLSTARTTILINGQCOMPROC _StartTilingQCOM = (PFNGLSTARTTILINGQCOMPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::StartTilingQCOM); + return _StartTilingQCOM(x, y, width, height, preserveMask); +#endif +} +inline void DYNAMICGLES_FUNCTION(EndTilingQCOM)(GLbitfield preserveMask) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLENDTILINGQCOMPROC _EndTilingQCOM = (PFNGLENDTILINGQCOMPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::EndTilingQCOM); + return _EndTilingQCOM(preserveMask); +#endif +} +inline void DYNAMICGLES_FUNCTION(GetProgramBinaryOES)(GLuint program, GLsizei bufSize, GLsizei* length, GLenum* binaryFormat, GLvoid* binary) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLGETPROGRAMBINARYOESPROC _GetProgramBinaryOES = (PFNGLGETPROGRAMBINARYOESPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::GetProgramBinaryOES); + return _GetProgramBinaryOES(program, bufSize, length, binaryFormat, binary); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramBinaryOES)(GLuint program, GLenum binaryFormat, const GLvoid* binary, GLint length) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMBINARYOESPROC _ProgramBinaryOES = (PFNGLPROGRAMBINARYOESPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ProgramBinaryOES); + return _ProgramBinaryOES(program, binaryFormat, binary, length); +#endif +} +inline void DYNAMICGLES_FUNCTION(TexImage3DOES)(GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const GLvoid* pixels) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLTEXIMAGE3DOESPROC _TexImage3DOES = (PFNGLTEXIMAGE3DOESPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::TexImage3DOES); + return _TexImage3DOES(target, level, internalformat, width, height, depth, border, format, type, pixels); +#endif +} +inline void DYNAMICGLES_FUNCTION(TexSubImage3DOES)(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const GLvoid* pixels) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLTEXSUBIMAGE3DOESPROC _TexSubImage3DOES = (PFNGLTEXSUBIMAGE3DOESPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::TexSubImage3DOES); + return _TexSubImage3DOES(target, level, xoffset, yoffset, zoffset, width, height, depth, format, type, pixels); +#endif +} +inline void DYNAMICGLES_FUNCTION(CopyTexSubImage3DOES)(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLint x, GLint y, GLsizei width, GLsizei height) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLCOPYTEXSUBIMAGE3DOESPROC _CopyTexSubImage3DOES = (PFNGLCOPYTEXSUBIMAGE3DOESPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::CopyTexSubImage3DOES); + return _CopyTexSubImage3DOES(target, level, xoffset, 
yoffset, zoffset, x, y, width, height); +#endif +} +inline void DYNAMICGLES_FUNCTION(CompressedTexImage3DOES)(GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLsizei imageSize, const GLvoid* data) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLCOMPRESSEDTEXIMAGE3DOESPROC _CompressedTexImage3DOES = (PFNGLCOMPRESSEDTEXIMAGE3DOESPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::CompressedTexImage3DOES); + return _CompressedTexImage3DOES(target, level, internalformat, width, height, depth, border, imageSize, data); +#endif +} +inline void DYNAMICGLES_FUNCTION(CompressedTexSubImage3DOES)(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLsizei imageSize, const GLvoid* data) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLCOMPRESSEDTEXSUBIMAGE3DOESPROC _CompressedTexSubImage3DOES = (PFNGLCOMPRESSEDTEXSUBIMAGE3DOESPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::CompressedTexSubImage3DOES); + return _CompressedTexSubImage3DOES(target, level, xoffset, yoffset, zoffset, width, height, depth, format, imageSize, data); +#endif +} +inline void DYNAMICGLES_FUNCTION(FramebufferTexture3DOES)(GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLint zoffset) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLFRAMEBUFFERTEXTURE3DOESPROC _FramebufferTexture3DOES = (PFNGLFRAMEBUFFERTEXTURE3DOESPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::FramebufferTexture3DOES); + return _FramebufferTexture3DOES(target, attachment, textarget, texture, level, zoffset); +#endif +} +inline void DYNAMICGLES_FUNCTION(BlendEquationSeparateOES)(GLenum modeRGB, GLenum modeAlpha) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLBLENDEQUATIONSEPARATEPROC _BlendEquationSeparateOES = (PFNGLBLENDEQUATIONSEPARATEPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::BlendEquationSeparateOES); + return _BlendEquationSeparateOES(modeRGB, modeAlpha); +#endif +} + +inline void DYNAMICGLES_FUNCTION(CopyTextureLevelsAPPLE)(GLuint destinationTexture, GLuint sourceTexture, GLint sourceBaseLevel, GLsizei sourceLevelCount) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glCopyTextureLevelsAPPLE) PFNGLCOPYTEXTURELEVELSAPPLEPROC; +#endif + PFNGLCOPYTEXTURELEVELSAPPLEPROC _CopyTextureLevelsAPPLE = (PFNGLCOPYTEXTURELEVELSAPPLEPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::CopyTextureLevelsAPPLE); + return _CopyTextureLevelsAPPLE(destinationTexture, sourceTexture, sourceBaseLevel, sourceLevelCount); +} +inline GLsync DYNAMICGLES_FUNCTION(FenceSyncAPPLE)(GLenum condition, GLbitfield flags) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glFenceSyncAPPLE) PFNGLFENCESYNCAPPLEPROC; +#endif + PFNGLFENCESYNCAPPLEPROC _FenceSyncAPPLE = (PFNGLFENCESYNCAPPLEPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::FenceSyncAPPLE); + return _FenceSyncAPPLE(condition, flags); +} +inline GLboolean DYNAMICGLES_FUNCTION(IsSyncAPPLE)(GLsync sync) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glIsSyncAPPLE) PFNGLISSYNCAPPLEPROC; +#endif + PFNGLISSYNCAPPLEPROC _IsSyncAPPLE = (PFNGLISSYNCAPPLEPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::IsSyncAPPLE); + return _IsSyncAPPLE(sync); +} +inline void DYNAMICGLES_FUNCTION(DeleteSyncAPPLE)(GLsync sync) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glDeleteSyncAPPLE) PFNGLDELETESYNCAPPLEPROC; +#endif + 
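+ // As in the other APPLE/OES wrappers, the decltype typedef above derives the + // function pointer type from the function's own declaration on iOS, so this + // wrapper does not depend on the platform headers providing a PFNGL...PROC typedef.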
PFNGLDELETESYNCAPPLEPROC _DeleteSyncAPPLE = (PFNGLDELETESYNCAPPLEPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::DeleteSyncAPPLE); + return _DeleteSyncAPPLE(sync); +} +inline GLenum DYNAMICGLES_FUNCTION(ClientWaitSyncAPPLE)(GLsync sync, GLbitfield flags, GLuint64 timeout) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glClientWaitSyncAPPLE) PFNGLCLIENTWAITSYNCAPPLEPROC; +#endif + PFNGLCLIENTWAITSYNCAPPLEPROC _ClientWaitSyncAPPLE = (PFNGLCLIENTWAITSYNCAPPLEPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ClientWaitSyncAPPLE); + return _ClientWaitSyncAPPLE(sync, flags, timeout); +} +inline void DYNAMICGLES_FUNCTION(WaitSyncAPPLE)(GLsync sync, GLbitfield flags, GLuint64 timeout) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glWaitSyncAPPLE) PFNGLWAITSYNCAPPLEPROC; +#endif + PFNGLWAITSYNCAPPLEPROC _WaitSyncAPPLE = (PFNGLWAITSYNCAPPLEPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::WaitSyncAPPLE); + return _WaitSyncAPPLE(sync, flags, timeout); +} +inline void DYNAMICGLES_FUNCTION(GetInteger64vAPPLE)(GLenum pname, GLint64* params) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGetInteger64vAPPLE) PFNGLGETINTEGER64VAPPLEPROC; +#endif + PFNGLGETINTEGER64VAPPLEPROC _GetInteger64vAPPLE = (PFNGLGETINTEGER64VAPPLEPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::GetInteger64vAPPLE); + return _GetInteger64vAPPLE(pname, params); +} +inline void DYNAMICGLES_FUNCTION(GetSyncivAPPLE)(GLsync sync, GLenum pname, GLsizei bufSize, GLsizei* length, GLint* values) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGetSyncivAPPLE) PFNGLGETSYNCIVAPPLEPROC; +#endif + PFNGLGETSYNCIVAPPLEPROC _GetSyncivAPPLE = (PFNGLGETSYNCIVAPPLEPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::GetSyncivAPPLE); + return _GetSyncivAPPLE(sync, pname, bufSize, length, values); +} +inline void* DYNAMICGLES_FUNCTION(MapBufferRangeEXT)(GLenum target, GLintptr offset, GLsizeiptr length, GLbitfield access) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glMapBufferRangeEXT) PFNGLMAPBUFFERRANGEEXTPROC; +#endif + PFNGLMAPBUFFERRANGEEXTPROC _MapBufferRangeEXT = (PFNGLMAPBUFFERRANGEEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::MapBufferRangeEXT); + return _MapBufferRangeEXT(target, offset, length, access); +} +inline void DYNAMICGLES_FUNCTION(FlushMappedBufferRangeEXT)(GLenum target, GLintptr offset, GLsizeiptr length) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glFlushMappedBufferRangeEXT) PFNGLFLUSHMAPPEDBUFFERRANGEEXTPROC; +#endif + PFNGLFLUSHMAPPEDBUFFERRANGEEXTPROC _FlushMappedBufferRangeEXT = (PFNGLFLUSHMAPPEDBUFFERRANGEEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::FlushMappedBufferRangeEXT); + return _FlushMappedBufferRangeEXT(target, offset, length); +} +inline void DYNAMICGLES_FUNCTION(RenderbufferStorageMultisampleEXT)(GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLRENDERBUFFERSTORAGEMULTISAMPLEEXTPROC _RenderbufferStorageMultisampleEXT = (PFNGLRENDERBUFFERSTORAGEMULTISAMPLEEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::RenderbufferStorageMultisampleEXT); + return _RenderbufferStorageMultisampleEXT(target, samples, internalformat, width, height); +#endif +} +inline void DYNAMICGLES_FUNCTION(FramebufferTexture2DMultisampleEXT)(GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLsizei samples) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + 
PFNGLFRAMEBUFFERTEXTURE2DMULTISAMPLEEXTPROC _FramebufferTexture2DMultisampleEXT = (PFNGLFRAMEBUFFERTEXTURE2DMULTISAMPLEEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::FramebufferTexture2DMultisampleEXT); + return _FramebufferTexture2DMultisampleEXT(target, attachment, textarget, texture, level, samples); +#endif +} +inline GLenum DYNAMICGLES_FUNCTION(GetGraphicsResetStatusEXT)(void) +{ +#if TARGET_OS_IPHONE + assert(0); + return GL_NO_ERROR; // keep the return path defined when NDEBUG disables the assert +#else + PFNGLGETGRAPHICSRESETSTATUSEXTPROC _GetGraphicsResetStatusEXT = (PFNGLGETGRAPHICSRESETSTATUSEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::GetGraphicsResetStatusEXT); + return _GetGraphicsResetStatusEXT(); +#endif +} +inline void DYNAMICGLES_FUNCTION(ReadnPixelsEXT)(GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, GLsizei bufSize, void* data) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLREADNPIXELSEXTPROC _ReadnPixelsEXT = (PFNGLREADNPIXELSEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ReadnPixelsEXT); + return _ReadnPixelsEXT(x, y, width, height, format, type, bufSize, data); +#endif +} +inline void DYNAMICGLES_FUNCTION(GetnUniformfvEXT)(GLuint program, GLint location, GLsizei bufSize, float* params) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLGETNUNIFORMFVEXTPROC _GetnUniformfvEXT = (PFNGLGETNUNIFORMFVEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::GetnUniformfvEXT); + return _GetnUniformfvEXT(program, location, bufSize, params); +#endif +} +inline void DYNAMICGLES_FUNCTION(GetnUniformivEXT)(GLuint program, GLint location, GLsizei bufSize, GLint* params) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLGETNUNIFORMIVEXTPROC _GetnUniformivEXT = (PFNGLGETNUNIFORMIVEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::GetnUniformivEXT); + return _GetnUniformivEXT(program, location, bufSize, params); +#endif +} +inline void DYNAMICGLES_FUNCTION(TexStorage1DEXT)(GLenum target, GLsizei levels, GLenum internalformat, GLsizei width) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLTEXSTORAGE1DEXTPROC _TexStorage1DEXT = (PFNGLTEXSTORAGE1DEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::TexStorage1DEXT); + return _TexStorage1DEXT(target, levels, internalformat, width); +#endif +} +inline void DYNAMICGLES_FUNCTION(TexStorage2DEXT)(GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLTEXSTORAGE2DEXTPROC _TexStorage2DEXT = (PFNGLTEXSTORAGE2DEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::TexStorage2DEXT); + return _TexStorage2DEXT(target, levels, internalformat, width, height); +#endif +} +inline void DYNAMICGLES_FUNCTION(TexStorage3DEXT)(GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLTEXSTORAGE3DEXTPROC _TexStorage3DEXT = (PFNGLTEXSTORAGE3DEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::TexStorage3DEXT); + return _TexStorage3DEXT(target, levels, internalformat, width, height, depth); +#endif +} +inline void DYNAMICGLES_FUNCTION(TextureStorage1DEXT)(GLuint texture, GLenum target, GLsizei levels, GLenum internalformat, GLsizei width) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLTEXTURESTORAGE1DEXTPROC _TextureStorage1DEXT = (PFNGLTEXTURESTORAGE1DEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::TextureStorage1DEXT); + return 
_TextureStorage1DEXT(texture, target, levels, internalformat, width); +#endif +} +inline void DYNAMICGLES_FUNCTION(TextureStorage2DEXT)(GLuint texture, GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLTEXTURESTORAGE2DEXTPROC _TextureStorage2DEXT = (PFNGLTEXTURESTORAGE2DEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::TextureStorage2DEXT); + return _TextureStorage2DEXT(texture, target, levels, internalformat, width, height); +#endif +} +inline void DYNAMICGLES_FUNCTION(TextureStorage3DEXT)(GLuint texture, GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLTEXTURESTORAGE3DEXTPROC _TextureStorage3DEXT = (PFNGLTEXTURESTORAGE3DEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::TextureStorage3DEXT); + return _TextureStorage3DEXT(texture, target, levels, internalformat, width, height, depth); +#endif +} +inline void DYNAMICGLES_FUNCTION(DebugMessageControlKHR)(GLenum source, GLenum type, GLenum severity, GLsizei count, const GLuint* ids, GLboolean enabled) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLDEBUGMESSAGECONTROLKHRPROC _DebugMessageControlKHR = (PFNGLDEBUGMESSAGECONTROLKHRPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::DebugMessageControlKHR); + return _DebugMessageControlKHR(source, type, severity, count, ids, enabled); +#endif +} +inline void DYNAMICGLES_FUNCTION(DebugMessageInsertKHR)(GLenum source, GLenum type, GLuint id, GLenum severity, GLsizei length, const GLchar* buf) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLDEBUGMESSAGEINSERTKHRPROC _DebugMessageInsertKHR = (PFNGLDEBUGMESSAGEINSERTKHRPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::DebugMessageInsertKHR); + return _DebugMessageInsertKHR(source, type, id, severity, length, buf); +#endif + +} +#if !TARGET_OS_IPHONE +inline void DYNAMICGLES_FUNCTION(DebugMessageCallbackKHR)(GLDEBUGPROCKHR callback, const void* userParam) +{ + + PFNGLDEBUGMESSAGECALLBACKKHRPROC _DebugMessageCallbackKHR = (PFNGLDEBUGMESSAGECALLBACKKHRPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::DebugMessageCallbackKHR); + return _DebugMessageCallbackKHR(callback, userParam); +} +#endif +inline GLuint DYNAMICGLES_FUNCTION(GetDebugMessageLogKHR)(GLuint count, GLsizei bufsize, GLenum* sources, GLenum* types, GLuint* ids, GLenum* severities, GLsizei* lengths, GLchar* messageLog) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLGETDEBUGMESSAGELOGKHRPROC _GetDebugMessageLogKHR = (PFNGLGETDEBUGMESSAGELOGKHRPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::GetDebugMessageLogKHR); + return _GetDebugMessageLogKHR(count, bufsize, sources, types, ids, severities, lengths, messageLog); +#endif +} +inline void DYNAMICGLES_FUNCTION(PushDebugGroupKHR)(GLenum source, GLuint id, GLsizei length, const GLchar* message) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPUSHDEBUGGROUPKHRPROC _PushDebugGroupKHR = (PFNGLPUSHDEBUGGROUPKHRPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::PushDebugGroupKHR); + return _PushDebugGroupKHR(source, id, length, message); +#endif + +} +inline void DYNAMICGLES_FUNCTION(PopDebugGroupKHR)(void) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPOPDEBUGGROUPKHRPROC _PopDebugGroupKHR = (PFNGLPOPDEBUGGROUPKHRPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::PopDebugGroupKHR); + 
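+ // Note that fetching the pointer on every call stays cheap: after the first + // call, getGlesExtFunction is a single read from the cached FunctionTable.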
return _PopDebugGroupKHR(); +#endif +} +inline void DYNAMICGLES_FUNCTION(ObjectLabelKHR)(GLenum identifier, GLuint name, GLsizei length, const GLchar* label) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLOBJECTLABELKHRPROC _ObjectLabelKHR = (PFNGLOBJECTLABELKHRPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ObjectLabelKHR); + return _ObjectLabelKHR(identifier, name, length, label); +#endif +} +inline void DYNAMICGLES_FUNCTION(GetObjectLabelKHR)(GLenum identifier, GLuint name, GLsizei bufSize, GLsizei* length, GLchar* label) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLGETOBJECTLABELKHRPROC _GetObjectLabelKHR = (PFNGLGETOBJECTLABELKHRPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::GetObjectLabelKHR); + return _GetObjectLabelKHR(identifier, name, bufSize, length, label); +#endif +} +inline void DYNAMICGLES_FUNCTION(ObjectPtrLabelKHR)(const void* ptr, GLsizei length, const GLchar* label) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLOBJECTPTRLABELKHRPROC _ObjectPtrLabelKHR = (PFNGLOBJECTPTRLABELKHRPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ObjectPtrLabelKHR); + return _ObjectPtrLabelKHR(ptr, length, label); +#endif + +} +inline void DYNAMICGLES_FUNCTION(GetObjectPtrLabelKHR)(const void* ptr, GLsizei bufSize, GLsizei* length, GLchar* label) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLGETOBJECTPTRLABELKHRPROC _GetObjectPtrLabelKHR = (PFNGLGETOBJECTPTRLABELKHRPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::GetObjectPtrLabelKHR); + return _GetObjectPtrLabelKHR(ptr, bufSize, length, label); +#endif + +} +inline void DYNAMICGLES_FUNCTION(GetPointervKHR)(GLenum pname, void** params) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLGETPOINTERVKHRPROC _GetPointervKHR = (PFNGLGETPOINTERVKHRPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::GetPointervKHR); + return _GetPointervKHR(pname, params); +#endif +} +inline void DYNAMICGLES_FUNCTION(DrawArraysInstancedANGLE)(GLenum mode, GLint first, GLsizei count, GLsizei primcount) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLDRAWARRAYSINSTANCEDANGLEPROC _DrawArraysInstancedANGLE = (PFNGLDRAWARRAYSINSTANCEDANGLEPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::DrawArraysInstancedANGLE); + return _DrawArraysInstancedANGLE(mode, first, count, primcount); +#endif +} +inline void DYNAMICGLES_FUNCTION(DrawElementsInstancedANGLE)(GLenum mode, GLsizei count, GLenum type, const void* indices, GLsizei primcount) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLDRAWELEMENTSINSTANCEDANGLEPROC _DrawElementsInstancedANGLE = (PFNGLDRAWELEMENTSINSTANCEDANGLEPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::DrawElementsInstancedANGLE); + return _DrawElementsInstancedANGLE(mode, count, type, indices, primcount); +#endif +} +inline void DYNAMICGLES_FUNCTION(VertexAttribDivisorANGLE)(GLuint index, GLuint divisor) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLVERTEXATTRIBDIVISORANGLEPROC _VertexAttribDivisorANGLE = (PFNGLVERTEXATTRIBDIVISORANGLEPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::VertexAttribDivisorANGLE); + return _VertexAttribDivisorANGLE(index, divisor); +#endif +} +inline void DYNAMICGLES_FUNCTION(GetTranslatedShaderSourceANGLE)(GLuint shader, GLsizei bufsize, GLsizei* length, GLchar* source) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLGETTRANSLATEDSHADERSOURCEANGLEPROC _GetTranslatedShaderSourceANGLE = 
(PFNGLGETTRANSLATEDSHADERSOURCEANGLEPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::GetTranslatedShaderSourceANGLE); + return _GetTranslatedShaderSourceANGLE(shader, bufsize, length, source); +#endif + +} +inline void DYNAMICGLES_FUNCTION(LabelObjectEXT)(GLenum type, GLuint object, GLsizei length, const GLchar* label) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLLABELOBJECTEXTPROC _LabelObjectEXT = (PFNGLLABELOBJECTEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::LabelObjectEXT); + return _LabelObjectEXT(type, object, length, label); +#endif + +} +inline void DYNAMICGLES_FUNCTION(GetObjectLabelEXT)(GLenum type, GLuint object, GLsizei bufSize, GLsizei* length, GLchar* label) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLGETOBJECTLABELEXTPROC _GetObjectLabelEXT = (PFNGLGETOBJECTLABELEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::GetObjectLabelEXT); + return _GetObjectLabelEXT(type, object, bufSize, length, label); +#endif + +} +inline void DYNAMICGLES_FUNCTION(InsertEventMarkerEXT)(GLsizei length, const GLchar* marker) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLINSERTEVENTMARKEREXTPROC _InsertEventMarkerEXT = (PFNGLINSERTEVENTMARKEREXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::InsertEventMarkerEXT); + return _InsertEventMarkerEXT(length, marker); +#endif + +} +inline void DYNAMICGLES_FUNCTION(PushGroupMarkerEXT)(GLsizei length, const GLchar* marker) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPUSHGROUPMARKEREXTPROC _PushGroupMarkerEXT = (PFNGLPUSHGROUPMARKEREXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::PushGroupMarkerEXT); + return _PushGroupMarkerEXT(length, marker); +#endif + +} +inline void DYNAMICGLES_FUNCTION(PopGroupMarkerEXT)(void) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPOPGROUPMARKEREXTPROC _PopGroupMarkerEXT = (PFNGLPOPGROUPMARKEREXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::PopGroupMarkerEXT); + return _PopGroupMarkerEXT(); +#endif +} +inline void DYNAMICGLES_FUNCTION(GenQueriesEXT)(GLsizei n, GLuint* ids) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLGENQUERIESEXTPROC _GenQueriesEXT = (PFNGLGENQUERIESEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::GenQueriesEXT); + return _GenQueriesEXT(n, ids); +#endif +} +inline void DYNAMICGLES_FUNCTION(DeleteQueriesEXT)(GLsizei n, const GLuint* ids) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLDELETEQUERIESEXTPROC _DeleteQueriesEXT = (PFNGLDELETEQUERIESEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::DeleteQueriesEXT); + return _DeleteQueriesEXT(n, ids); +#endif +} +inline GLboolean DYNAMICGLES_FUNCTION(IsQueryEXT)(GLuint id) +{ +#if TARGET_OS_IPHONE + assert(0); + return GL_FALSE; // keep the return path defined when NDEBUG disables the assert +#else + PFNGLISQUERYEXTPROC _IsQueryEXT = (PFNGLISQUERYEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::IsQueryEXT); + return _IsQueryEXT(id); +#endif +} +inline void DYNAMICGLES_FUNCTION(BeginQueryEXT)(GLenum target, GLuint id) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLBEGINQUERYEXTPROC _BeginQueryEXT = (PFNGLBEGINQUERYEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::BeginQueryEXT); + return _BeginQueryEXT(target, id); +#endif +} +inline void DYNAMICGLES_FUNCTION(EndQueryEXT)(GLenum target) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glEndQueryEXT) PFNGLENDQUERYEXTPROC; +#endif + PFNGLENDQUERYEXTPROC _EndQueryEXT = 
(PFNGLENDQUERYEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::EndQueryEXT); + return _EndQueryEXT(target); +} +inline void DYNAMICGLES_FUNCTION(GetQueryivEXT)(GLenum target, GLenum pname, GLint* params) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGetQueryivEXT) PFNGLGETQUERYIVEXTPROC; +#endif + PFNGLGETQUERYIVEXTPROC _GetQueryivEXT = (PFNGLGETQUERYIVEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::GetQueryivEXT); + return _GetQueryivEXT(target, pname, params); +} +inline void DYNAMICGLES_FUNCTION(GetQueryObjectuivEXT)(GLuint id, GLenum pname, GLuint* params) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGetQueryObjectuivEXT) PFNGLGETQUERYOBJECTUIVEXTPROC; +#endif + PFNGLGETQUERYOBJECTUIVEXTPROC _GetQueryObjectuivEXT = (PFNGLGETQUERYOBJECTUIVEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::GetQueryObjectuivEXT); + return _GetQueryObjectuivEXT(id, pname, params); +} +inline void DYNAMICGLES_FUNCTION(UseProgramStagesEXT)(GLuint pipeline, GLbitfield stages, GLuint program) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glUseProgramStagesEXT) PFNGLUSEPROGRAMSTAGESEXTPROC; +#endif + PFNGLUSEPROGRAMSTAGESEXTPROC _UseProgramStagesEXT = (PFNGLUSEPROGRAMSTAGESEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::UseProgramStagesEXT); + return _UseProgramStagesEXT(pipeline, stages, program); +} +inline void DYNAMICGLES_FUNCTION(ActiveShaderProgramEXT)(GLuint pipeline, GLuint program) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glActiveShaderProgramEXT) PFNGLACTIVESHADERPROGRAMEXTPROC; +#endif + PFNGLACTIVESHADERPROGRAMEXTPROC _ActiveShaderProgramEXT = (PFNGLACTIVESHADERPROGRAMEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ActiveShaderProgramEXT); + return _ActiveShaderProgramEXT(pipeline, program); +} +inline GLuint DYNAMICGLES_FUNCTION(CreateShaderProgramvEXT)(GLenum type, GLsizei count, const GLchar** strings) +{ +#if TARGET_OS_IPHONE + assert(0); + return 0; +#else + PFNGLCREATESHADERPROGRAMVEXTPROC _CreateShaderProgramvEXT = (PFNGLCREATESHADERPROGRAMVEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::CreateShaderProgramvEXT); + return _CreateShaderProgramvEXT(type, count, strings); +#endif +} +inline void DYNAMICGLES_FUNCTION(BindProgramPipelineEXT)(GLuint pipeline) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glBindProgramPipelineEXT) PFNGLBINDPROGRAMPIPELINEEXTPROC; +#endif + PFNGLBINDPROGRAMPIPELINEEXTPROC _BindProgramPipelineEXT = (PFNGLBINDPROGRAMPIPELINEEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::BindProgramPipelineEXT); + return _BindProgramPipelineEXT(pipeline); +} +inline void DYNAMICGLES_FUNCTION(DeleteProgramPipelinesEXT)(GLsizei n, const GLuint* pipelines) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glDeleteProgramPipelinesEXT) PFNGLDELETEPROGRAMPIPELINESEXTPROC; +#endif + PFNGLDELETEPROGRAMPIPELINESEXTPROC _DeleteProgramPipelinesEXT = (PFNGLDELETEPROGRAMPIPELINESEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::DeleteProgramPipelinesEXT); + return _DeleteProgramPipelinesEXT(n, pipelines); +} +inline void DYNAMICGLES_FUNCTION(GenProgramPipelinesEXT)(GLsizei n, GLuint* pipelines) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGenProgramPipelinesEXT) PFNGLGENPROGRAMPIPELINESEXTPROC; +#endif + PFNGLGENPROGRAMPIPELINESEXTPROC _GenProgramPipelinesEXT = (PFNGLGENPROGRAMPIPELINESEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::GenProgramPipelinesEXT); + return 
_GenProgramPipelinesEXT(n, pipelines); +} +inline GLboolean DYNAMICGLES_FUNCTION(IsProgramPipelineEXT)(GLuint pipeline) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glIsProgramPipelineEXT) PFNGLISPROGRAMPIPELINEEXTPROC; +#endif + PFNGLISPROGRAMPIPELINEEXTPROC _IsProgramPipelineEXT = (PFNGLISPROGRAMPIPELINEEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::IsProgramPipelineEXT); + return _IsProgramPipelineEXT(pipeline); +} +inline void DYNAMICGLES_FUNCTION(ProgramParameteriEXT)(GLuint program, GLenum pname, GLint value) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glProgramParameteriEXT) PFNGLPROGRAMPARAMETERIEXTPROC; +#endif + PFNGLPROGRAMPARAMETERIEXTPROC _ProgramParameteriEXT = (PFNGLPROGRAMPARAMETERIEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ProgramParameteriEXT); + return _ProgramParameteriEXT(program, pname, value); +} +inline void DYNAMICGLES_FUNCTION(GetProgramPipelineivEXT)(GLuint pipeline, GLenum pname, GLint* params) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGetProgramPipelineivEXT) PFNGLGETPROGRAMPIPELINEIVEXTPROC; +#endif + PFNGLGETPROGRAMPIPELINEIVEXTPROC _GetProgramPipelineivEXT = (PFNGLGETPROGRAMPIPELINEIVEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::GetProgramPipelineivEXT); + return _GetProgramPipelineivEXT(pipeline, pname, params); +} +inline void DYNAMICGLES_FUNCTION(ProgramUniform1iEXT)(GLuint program, GLint location, GLint x) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORM1IEXTPROC _ProgramUniform1iEXT = (PFNGLPROGRAMUNIFORM1IEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ProgramUniform1iEXT); + return _ProgramUniform1iEXT(program, location, x); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniform2iEXT)(GLuint program, GLint location, GLint x, GLint y) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORM2IEXTPROC _ProgramUniform2iEXT = (PFNGLPROGRAMUNIFORM2IEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ProgramUniform2iEXT); + return _ProgramUniform2iEXT(program, location, x, y); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniform3iEXT)(GLuint program, GLint location, GLint x, GLint y, GLint z) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORM3IEXTPROC _ProgramUniform3iEXT = (PFNGLPROGRAMUNIFORM3IEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ProgramUniform3iEXT); + return _ProgramUniform3iEXT(program, location, x, y, z); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniform4iEXT)(GLuint program, GLint location, GLint x, GLint y, GLint z, GLint w) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORM4IEXTPROC _ProgramUniform4iEXT = (PFNGLPROGRAMUNIFORM4IEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ProgramUniform4iEXT); + return _ProgramUniform4iEXT(program, location, x, y, z, w); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniform1fEXT)(GLuint program, GLint location, GLfloat x) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORM1FEXTPROC _ProgramUniform1fEXT = (PFNGLPROGRAMUNIFORM1FEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ProgramUniform1fEXT); + return _ProgramUniform1fEXT(program, location, x); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniform2fEXT)(GLuint program, GLint location, GLfloat x, GLfloat y) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORM2FEXTPROC _ProgramUniform2fEXT = 
(PFNGLPROGRAMUNIFORM2FEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ProgramUniform2fEXT); + return _ProgramUniform2fEXT(program, location, x, y); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniform3fEXT)(GLuint program, GLint location, GLfloat x, GLfloat y, GLfloat z) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORM3FEXTPROC _ProgramUniform3fEXT = (PFNGLPROGRAMUNIFORM3FEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ProgramUniform3fEXT); + return _ProgramUniform3fEXT(program, location, x, y, z); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniform4fEXT)(GLuint program, GLint location, GLfloat x, GLfloat y, GLfloat z, GLfloat w) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORM4FEXTPROC _ProgramUniform4fEXT = (PFNGLPROGRAMUNIFORM4FEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ProgramUniform4fEXT); + return _ProgramUniform4fEXT(program, location, x, y, z, w); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniform1ivEXT)(GLuint program, GLint location, GLsizei count, const GLint* value) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORM1IVEXTPROC _ProgramUniform1ivEXT = (PFNGLPROGRAMUNIFORM1IVEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ProgramUniform1ivEXT); + return _ProgramUniform1ivEXT(program, location, count, value); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniform2ivEXT)(GLuint program, GLint location, GLsizei count, const GLint* value) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORM2IVEXTPROC _ProgramUniform2ivEXT = (PFNGLPROGRAMUNIFORM2IVEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ProgramUniform2ivEXT); + return _ProgramUniform2ivEXT(program, location, count, value); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniform3ivEXT)(GLuint program, GLint location, GLsizei count, const GLint* value) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORM3IVEXTPROC _ProgramUniform3ivEXT = (PFNGLPROGRAMUNIFORM3IVEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ProgramUniform3ivEXT); + return _ProgramUniform3ivEXT(program, location, count, value); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniform4ivEXT)(GLuint program, GLint location, GLsizei count, const GLint* value) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORM4IVEXTPROC _ProgramUniform4ivEXT = (PFNGLPROGRAMUNIFORM4IVEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ProgramUniform4ivEXT); + return _ProgramUniform4ivEXT(program, location, count, value); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniform1fvEXT)(GLuint program, GLint location, GLsizei count, const GLfloat* value) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORM1FVEXTPROC _ProgramUniform1fvEXT = (PFNGLPROGRAMUNIFORM1FVEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ProgramUniform1fvEXT); + return _ProgramUniform1fvEXT(program, location, count, value); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniform2fvEXT)(GLuint program, GLint location, GLsizei count, const GLfloat* value) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORM2FVEXTPROC _ProgramUniform2fvEXT = (PFNGLPROGRAMUNIFORM2FVEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ProgramUniform2fvEXT); + return _ProgramUniform2fvEXT(program, location, count, value); +#endif +} +inline void 
DYNAMICGLES_FUNCTION(ProgramUniform3fvEXT)(GLuint program, GLint location, GLsizei count, const GLfloat* value) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORM3FVEXTPROC _ProgramUniform3fvEXT = (PFNGLPROGRAMUNIFORM3FVEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ProgramUniform3fvEXT); + return _ProgramUniform3fvEXT(program, location, count, value); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniform4fvEXT)(GLuint program, GLint location, GLsizei count, const GLfloat* value) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORM4FVEXTPROC _ProgramUniform4fvEXT = (PFNGLPROGRAMUNIFORM4FVEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ProgramUniform4fvEXT); + return _ProgramUniform4fvEXT(program, location, count, value); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniformMatrix2fvEXT)(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat* value) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORMMATRIX2FVEXTPROC _ProgramUniformMatrix2fvEXT = (PFNGLPROGRAMUNIFORMMATRIX2FVEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ProgramUniformMatrix2fvEXT); + return _ProgramUniformMatrix2fvEXT(program, location, count, transpose, value); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniformMatrix3fvEXT)(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat* value) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORMMATRIX3FVEXTPROC _ProgramUniformMatrix3fvEXT = (PFNGLPROGRAMUNIFORMMATRIX3FVEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ProgramUniformMatrix3fvEXT); + return _ProgramUniformMatrix3fvEXT(program, location, count, transpose, value); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniformMatrix4fvEXT)(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat* value) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORMMATRIX4FVEXTPROC _ProgramUniformMatrix4fvEXT = (PFNGLPROGRAMUNIFORMMATRIX4FVEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ProgramUniformMatrix4fvEXT); + return _ProgramUniformMatrix4fvEXT(program, location, count, transpose, value); +#endif +} +inline void DYNAMICGLES_FUNCTION(ValidateProgramPipelineEXT)(GLuint pipeline) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glValidateProgramPipelineEXT) PFNGLVALIDATEPROGRAMPIPELINEEXTPROC; +#endif + PFNGLVALIDATEPROGRAMPIPELINEEXTPROC _ValidateProgramPipelineEXT = (PFNGLVALIDATEPROGRAMPIPELINEEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ValidateProgramPipelineEXT); + return _ValidateProgramPipelineEXT(pipeline); +} +inline void DYNAMICGLES_FUNCTION(GetProgramPipelineInfoLogEXT)(GLuint pipeline, GLsizei bufSize, GLsizei* length, GLchar* infoLog) +{ +#if TARGET_OS_IPHONE + typedef decltype(&glGetProgramPipelineInfoLogEXT) PFNGLGETPROGRAMPIPELINEINFOLOGEXTPROC; +#endif + PFNGLGETPROGRAMPIPELINEINFOLOGEXTPROC _GetProgramPipelineInfoLogEXT = (PFNGLGETPROGRAMPIPELINEINFOLOGEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::GetProgramPipelineInfoLogEXT); + return _GetProgramPipelineInfoLogEXT(pipeline, bufSize, length, infoLog); + +} +inline void DYNAMICGLES_FUNCTION(ProgramUniform1uiEXT)(GLuint program, GLint location, GLuint v0) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORM1UIEXTPROC _ProgramUniform1uiEXT = 
(PFNGLPROGRAMUNIFORM1UIEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ProgramUniform1uiEXT); + return _ProgramUniform1uiEXT(program, location, v0); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniform2uiEXT)(GLuint program, GLint location, GLuint v0, GLuint v1) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORM2UIEXTPROC _ProgramUniform2uiEXT = (PFNGLPROGRAMUNIFORM2UIEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ProgramUniform2uiEXT); + return _ProgramUniform2uiEXT(program, location, v0, v1); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniform3uiEXT)(GLuint program, GLint location, GLuint v0, GLuint v1, GLuint v2) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORM3UIEXTPROC _ProgramUniform3uiEXT = (PFNGLPROGRAMUNIFORM3UIEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ProgramUniform3uiEXT); + return _ProgramUniform3uiEXT(program, location, v0, v1, v2); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniform4uiEXT)(GLuint program, GLint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORM4UIEXTPROC _ProgramUniform4uiEXT = (PFNGLPROGRAMUNIFORM4UIEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ProgramUniform4uiEXT); + return _ProgramUniform4uiEXT(program, location, v0, v1, v2, v3); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniform1uivEXT)(GLuint program, GLint location, GLsizei count, const GLuint* value) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORM1UIVEXTPROC _ProgramUniform1uivEXT = (PFNGLPROGRAMUNIFORM1UIVEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ProgramUniform1uivEXT); + return _ProgramUniform1uivEXT(program, location, count, value); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniform2uivEXT)(GLuint program, GLint location, GLsizei count, const GLuint* value) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORM2UIVEXTPROC _ProgramUniform2uivEXT = (PFNGLPROGRAMUNIFORM2UIVEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ProgramUniform2uivEXT); + return _ProgramUniform2uivEXT(program, location, count, value); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniform3uivEXT)(GLuint program, GLint location, GLsizei count, const GLuint* value) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORM3UIVEXTPROC _ProgramUniform3uivEXT = (PFNGLPROGRAMUNIFORM3UIVEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ProgramUniform3uivEXT); + return _ProgramUniform3uivEXT(program, location, count, value); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniform4uivEXT)(GLuint program, GLint location, GLsizei count, const GLuint* value) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORM4UIVEXTPROC _ProgramUniform4uivEXT = (PFNGLPROGRAMUNIFORM4UIVEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ProgramUniform4uivEXT); + return _ProgramUniform4uivEXT(program, location, count, value); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniformMatrix2x3fvEXT)(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat* value) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORMMATRIX2X3FVEXTPROC _ProgramUniformMatrix2x3fvEXT = (PFNGLPROGRAMUNIFORMMATRIX2X3FVEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ProgramUniformMatrix2x3fvEXT); + return 
_ProgramUniformMatrix2x3fvEXT(program, location, count, transpose, value); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniformMatrix3x2fvEXT)(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat* value) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORMMATRIX3X2FVEXTPROC _ProgramUniformMatrix3x2fvEXT = (PFNGLPROGRAMUNIFORMMATRIX3X2FVEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ProgramUniformMatrix3x2fvEXT); + return _ProgramUniformMatrix3x2fvEXT(program, location, count, transpose, value); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniformMatrix2x4fvEXT)(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat* value) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORMMATRIX2X4FVEXTPROC _ProgramUniformMatrix2x4fvEXT = (PFNGLPROGRAMUNIFORMMATRIX2X4FVEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ProgramUniformMatrix2x4fvEXT); + return _ProgramUniformMatrix2x4fvEXT(program, location, count, transpose, value); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniformMatrix4x2fvEXT)(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat* value) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORMMATRIX4X2FVEXTPROC _ProgramUniformMatrix4x2fvEXT = (PFNGLPROGRAMUNIFORMMATRIX4X2FVEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ProgramUniformMatrix4x2fvEXT); + return _ProgramUniformMatrix4x2fvEXT(program, location, count, transpose, value); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniformMatrix3x4fvEXT)(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat* value) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORMMATRIX3X4FVEXTPROC _ProgramUniformMatrix3x4fvEXT = (PFNGLPROGRAMUNIFORMMATRIX3X4FVEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ProgramUniformMatrix3x4fvEXT); + return _ProgramUniformMatrix3x4fvEXT(program, location, count, transpose, value); +#endif +} +inline void DYNAMICGLES_FUNCTION(ProgramUniformMatrix4x3fvEXT)(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat* value) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLPROGRAMUNIFORMMATRIX4X3FVEXTPROC _ProgramUniformMatrix4x3fvEXT = (PFNGLPROGRAMUNIFORMMATRIX4X3FVEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ProgramUniformMatrix4x3fvEXT); + return _ProgramUniformMatrix4x3fvEXT(program, location, count, transpose, value); +#endif +} +inline void DYNAMICGLES_FUNCTION(AlphaFuncQCOM)(GLenum func, GLclampf ref) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLALPHAFUNCQCOMPROC _AlphaFuncQCOM = (PFNGLALPHAFUNCQCOMPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::AlphaFuncQCOM); + return _AlphaFuncQCOM(func, ref); +#endif +} +inline void DYNAMICGLES_FUNCTION(ReadBufferNV)(GLenum mode) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLREADBUFFERNVPROC _ReadBufferNV = (PFNGLREADBUFFERNVPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ReadBufferNV); + return _ReadBufferNV(mode); +#endif +} +inline void DYNAMICGLES_FUNCTION(DrawBuffersNV)(GLsizei n, const GLenum* bufs) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLDRAWBUFFERSNVPROC _DrawBuffersNV = (PFNGLDRAWBUFFERSNVPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::DrawBuffersNV); + return _DrawBuffersNV(n, bufs); +#endif +} +inline void 
DYNAMICGLES_FUNCTION(ReadBufferIndexedEXT)(GLenum src, GLint index) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLREADBUFFERINDEXEDEXTPROC _ReadBufferIndexedEXT = (PFNGLREADBUFFERINDEXEDEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ReadBufferIndexedEXT); + return _ReadBufferIndexedEXT(src, index); +#endif +} +inline void DYNAMICGLES_FUNCTION(DrawBuffersIndexedEXT)(GLint n, const GLenum* location, const GLint* indices) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLDRAWBUFFERSINDEXEDEXTPROC _DrawBuffersIndexedEXT = (PFNGLDRAWBUFFERSINDEXEDEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::DrawBuffersIndexedEXT); + return _DrawBuffersIndexedEXT(n, location, indices); +#endif +} +inline void DYNAMICGLES_FUNCTION(GetIntegeri_vEXT)(GLenum target, GLuint index, GLint* data) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLGETINTEGERI_VEXTPROC _GetIntegeri_vEXT = (PFNGLGETINTEGERI_VEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::GetIntegeri_vEXT); + return _GetIntegeri_vEXT(target, index, data); +#endif +} +inline void DYNAMICGLES_FUNCTION(DrawBuffersEXT)(GLsizei n, const GLenum* bufs) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLDRAWBUFFERSEXTPROC _DrawBuffersEXT = (PFNGLDRAWBUFFERSEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::DrawBuffersEXT); + return _DrawBuffersEXT(n, bufs); +#endif +} +inline void DYNAMICGLES_FUNCTION(BlendBarrierKHR)(void) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLBLENDBARRIERKHRPROC _BlendBarrierKHR = (PFNGLBLENDBARRIERKHRPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::BlendBarrierKHR); + return _BlendBarrierKHR(); +#endif +} +inline void DYNAMICGLES_FUNCTION(TexStorage3DMultisampleOES)(GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLboolean fixedsamplelocations) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLTEXSTORAGE3DMULTISAMPLEOESPROC _TexStorage3DMultisampleOES = (PFNGLTEXSTORAGE3DMULTISAMPLEOESPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::TexStorage3DMultisampleOES); + return _TexStorage3DMultisampleOES(target, samples, internalformat, width, height, depth, fixedsamplelocations); +#endif +} +inline void DYNAMICGLES_FUNCTION(FramebufferTextureMultiviewOVR)(GLenum target, GLenum attachment, GLuint texture, GLint level, GLint baseViewIndex, GLsizei numViews) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLFRAMEBUFFERTEXTUREMULTIVIEWOVRPROC _FramebufferTextureMultiviewOVR = (PFNGLFRAMEBUFFERTEXTUREMULTIVIEWOVRPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::FramebufferTextureMultiviewOVR); + return _FramebufferTextureMultiviewOVR(target, attachment, texture, level, baseViewIndex, numViews); +#endif +} +inline void DYNAMICGLES_FUNCTION(FramebufferPixelLocalStorageSizeEXT)(GLuint target, GLsizei storageSize) +{ + typedef void (GL_APIENTRY * PFNGLFRAMEBUFFERPIXELLOCALSTORAGESIZEPROC)(GLuint target, GLsizei storageSize); + PFNGLFRAMEBUFFERPIXELLOCALSTORAGESIZEPROC _FramebufferPixelLocalStorageSize = (PFNGLFRAMEBUFFERPIXELLOCALSTORAGESIZEPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::FramebufferPixelLocalStorageSizeEXT); + return _FramebufferPixelLocalStorageSize(target, storageSize); +} +inline void DYNAMICGLES_FUNCTION(ClearPixelLocalStorageuiEXT)(GLsizei offset, GLsizei n, const GLuint* values) +{ + typedef void (GL_APIENTRY * PFNGLCLEARPIXELLOCALSTORAGEUIPROC)(GLsizei offset, GLsizei n, const 
GLuint * values); + PFNGLCLEARPIXELLOCALSTORAGEUIPROC _ClearPixelLocalStorageui = (PFNGLCLEARPIXELLOCALSTORAGEUIPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ClearPixelLocalStorageuiEXT); + return _ClearPixelLocalStorageui(offset, n, values); +} +inline void DYNAMICGLES_FUNCTION(GetFramebufferPixelLocalStorageSizeEXT)(GLuint target) +{ + typedef void (GL_APIENTRY * PFNGLGETFRAMEBUFFERPIXELLOCALSTORAGESIZEPROC)(GLuint target); + PFNGLGETFRAMEBUFFERPIXELLOCALSTORAGESIZEPROC _GetFramebufferPixelLocalStorageSize = (PFNGLGETFRAMEBUFFERPIXELLOCALSTORAGESIZEPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::GetFramebufferPixelLocalStorageSizeEXT); + return _GetFramebufferPixelLocalStorageSize(target); +} +inline void DYNAMICGLES_FUNCTION(BufferStorageEXT)(GLenum target, GLsizei size, const void* data, GLbitfield flags) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLBUFFERSTORAGEEXTPROC _BufferStorageEXT = (PFNGLBUFFERSTORAGEEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::BufferStorageEXT); + return _BufferStorageEXT(target, size, data, flags); +#endif +} +inline void DYNAMICGLES_FUNCTION(ClearTexImageEXT)(GLuint texture, GLint level, GLenum format, GLenum type, const GLvoid* data) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLCLEARTEXIMAGEEXTPROC _ClearTexImageEXT = (PFNGLCLEARTEXIMAGEEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ClearTexImageEXT); + return _ClearTexImageEXT(texture, level, format, type, data); +#endif +} +inline void DYNAMICGLES_FUNCTION(ClearTexSubImageEXT)(GLuint texture, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const GLvoid* data) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLCLEARTEXSUBIMAGEEXTPROC _ClearTexSubImageEXT = (PFNGLCLEARTEXSUBIMAGEEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ClearTexSubImageEXT); + return _ClearTexSubImageEXT(texture, level, xoffset, yoffset, zoffset, width, height, depth, format, type, data); +#endif +} +inline void DYNAMICGLES_FUNCTION(ClearTexSubImageIMG)(GLuint texture, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const GLvoid* data) +{ + typedef void (GL_APIENTRY * PFNGLCLEARTEXSUBIMAGEIMGPROC)(GLuint texture, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const void* data); + PFNGLCLEARTEXSUBIMAGEIMGPROC _ClearTexSubImageIMG = (PFNGLCLEARTEXSUBIMAGEIMGPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ClearTexSubImageIMG); + return _ClearTexSubImageIMG(texture, level, xoffset, yoffset, zoffset, width, height, depth, format, type, data); +} +inline void DYNAMICGLES_FUNCTION(FramebufferTexture2DDownsampleIMG)(GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLuint xscale, GLuint yscale) +{ +#if TARGET_OS_IPHONE + assert(0); +#else + PFNGLFRAMEBUFFERTEXTURE2DDOWNSAMPLEIMGPROC _FramebufferTexture2DDownsampleIMG = (PFNGLFRAMEBUFFERTEXTURE2DDOWNSAMPLEIMGPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::FramebufferTexture2DDownsampleIMG); + return _FramebufferTexture2DDownsampleIMG(target, attachment, textarget, texture, level, xscale, yscale); +#endif +} +inline void DYNAMICGLES_FUNCTION(FramebufferTextureLayerDownsampleIMG)(GLenum target, GLenum attachment, GLuint texture, GLint level, 
GLint layer, GLuint xscale, GLuint yscale)
+{
+#if TARGET_OS_IPHONE
+	assert(0);
+#else
+	PFNGLFRAMEBUFFERTEXTURELAYERDOWNSAMPLEIMGPROC _FramebufferTextureLayerDownsampleIMG = (PFNGLFRAMEBUFFERTEXTURELAYERDOWNSAMPLEIMGPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::FramebufferTextureLayerDownsampleIMG);
+	return _FramebufferTextureLayerDownsampleIMG(target, attachment, texture, level, layer, xscale, yscale);
+#endif
+}
+inline void DYNAMICGLES_FUNCTION(PatchParameteriEXT)(GLenum pname, GLint val)
+{
+#if TARGET_OS_IPHONE
+	assert(0);
+#else
+	PFNGLPATCHPARAMETERIEXTPROC _PatchParameteriEXT = (PFNGLPATCHPARAMETERIEXTPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::PatchParameteriEXT);
+	return _PatchParameteriEXT(pname, val);
+#endif
+}
+
+#ifdef GL_IMG_bindless_texture
+
+inline GLuint64 DYNAMICGLES_FUNCTION(GetTextureHandleIMG)(GLuint texture)
+{
+#if TARGET_OS_IPHONE
+	typedef decltype(&glGetTextureHandleIMG) PFNGLGETTEXTUREHANDLEIMGPROC;
+#endif
+	PFNGLGETTEXTUREHANDLEIMGPROC _GetTextureHandleIMG = (PFNGLGETTEXTUREHANDLEIMGPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::GetTextureHandleIMG);
+	return _GetTextureHandleIMG(texture);
+}
+inline GLuint64 DYNAMICGLES_FUNCTION(GetTextureSamplerHandleIMG)(GLuint texture, GLuint sampler)
+{
+#if TARGET_OS_IPHONE
+	typedef decltype(&glGetTextureSamplerHandleIMG) PFNGLGETTEXTURESAMPLERHANDLEIMGPROC;
+#endif
+	PFNGLGETTEXTURESAMPLERHANDLEIMGPROC _GetTextureSamplerHandleIMG = (PFNGLGETTEXTURESAMPLERHANDLEIMGPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::GetTextureSamplerHandleIMG);
+	return _GetTextureSamplerHandleIMG(texture, sampler);
+}
+inline void DYNAMICGLES_FUNCTION(UniformHandleui64IMG)(GLint location, GLuint64 value)
+{
+#if TARGET_OS_IPHONE
+	assert(0);
+#else
+	PFNGLUNIFORMHANDLEUI64IMGPROC _UniformHandleui64IMG = (PFNGLUNIFORMHANDLEUI64IMGPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::UniformHandleui64IMG);
+	return _UniformHandleui64IMG(location, value);
+#endif
+}
+inline void DYNAMICGLES_FUNCTION(UniformHandleui64vIMG)(GLint location, GLsizei count, const GLuint64* value)
+{
+#if TARGET_OS_IPHONE
+	assert(0);
+#else
+	PFNGLUNIFORMHANDLEUI64VIMGPROC _UniformHandleui64vIMG = (PFNGLUNIFORMHANDLEUI64VIMGPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::UniformHandleui64vIMG);
+	return _UniformHandleui64vIMG(location, count, value);
+#endif
+}
+inline void DYNAMICGLES_FUNCTION(ProgramUniformHandleui64IMG)(GLuint program, GLint location, GLuint64 value)
+{
+#if TARGET_OS_IPHONE
+	assert(0);
+#else
+	PFNGLPROGRAMUNIFORMHANDLEUI64IMGPROC _ProgramUniformHandleui64IMG = (PFNGLPROGRAMUNIFORMHANDLEUI64IMGPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ProgramUniformHandleui64IMG);
+	return _ProgramUniformHandleui64IMG(program, location, value);
+#endif
+}
+inline void DYNAMICGLES_FUNCTION(ProgramUniformHandleui64vIMG)(GLuint program, GLint location, GLsizei count, const GLuint64* values)
+{
+#if TARGET_OS_IPHONE
+	assert(0);
+#else
+	PFNGLPROGRAMUNIFORMHANDLEUI64VIMGPROC _ProgramUniformHandleui64vIMG = (PFNGLPROGRAMUNIFORMHANDLEUI64VIMGPROC)gl::internals::getGlesExtFunction(gl::internals::GlExtFuncName::ProgramUniformHandleui64vIMG);
+	return _ProgramUniformHandleui64vIMG(program, location, count, values);
+#endif
+}
+#endif
+#ifndef DYNAMICGLES_NO_NAMESPACE
+}
+#elif TARGET_OS_IPHONE
+}
+}
+#endif
+
+inline bool isGlExtensionSupported(const char* extensionName, bool resetExtensionCache = false)
+{
+#ifndef 
DYNAMICGLES_NO_NAMESPACE
+	const unsigned char* extensionString = GetString(GL_EXTENSIONS);
+#else
+	const unsigned char* extensionString = glGetString(GL_EXTENSIONS);
+#endif
+	return gl::internals::isExtensionSupported(extensionString, extensionName);
+}
+
+#ifndef DYNAMICGLES_NO_NAMESPACE
+}
+#endif
diff --git a/sources/pvrsdk/Include/DynamicOCL.h b/sources/pvrsdk/Include/DynamicOCL.h
new file mode 100755
index 00000000..37704654
--- /dev/null
+++ b/sources/pvrsdk/Include/DynamicOCL.h
@@ -0,0 +1,1035 @@
+#pragma once
+#ifdef CL_PROTOTYPES
+#undef CL_PROTOTYPES
+#endif
+#ifndef CL_NO_PROTOTYPES
+#define CL_NO_PROTOTYPES
+#endif
+#if defined( _WIN32 )
+#ifndef WIN32_LEAN_AND_MEAN
+#define WIN32_LEAN_AND_MEAN
+#endif
+#ifndef NOMINMAX
+#define NOMINMAX
+#endif
+#include <Windows.h>
+#endif
+#include <stdio.h>
+#ifdef __APPLE__
+#include <OpenCL/opencl.h>
+#else
+#include <CL/opencl.h>
+#endif
+#include <assert.h>
+
+#include "pvr_openlib.h"
+
+#ifndef DYNAMICCL_NO_NAMESPACE
+#define DEFINE_CL_FUNCTION_NAME(name) CL_API_ENTRY name
+#else
+#define DEFINE_CL_FUNCTION_NAME(name) CL_API_ENTRY cl##name
+#endif
+
+/** DEFINE THE PLATFORM SPECIFIC LIBRARY NAME **/
+namespace cl {
+namespace internals {
+#ifdef _WIN32
+static const char* clLibName = "OpenCL.dll";
+#elif defined(TARGET_OS_MAC)
+static const char* clLibName = "/System/Library/Frameworks/OpenCL.framework/OpenCL";
+#else
+static const char* clLibName = "libOpenCL.so";
+#endif
+static const char* clLibAltName = "libPVROCL.so";
+}
+}
+
+namespace cl {
+namespace CLFunctions {
+enum Enum
+{
+	GetPlatformIDs, GetPlatformInfo, GetDeviceIDs, GetDeviceInfo, CreateSubDevices, RetainDevice, ReleaseDevice,
+	SetDefaultDeviceCommandQueue, GetDeviceAndHostTimer, GetHostTimer, CreateContext, CreateContextFromType, RetainContext,
+	ReleaseContext, GetContextInfo, CreateCommandQueueWithProperties, RetainCommandQueue, ReleaseCommandQueue,
+	GetCommandQueueInfo, CreateBuffer, CreateSubBuffer, CreateImage, CreatePipe, RetainMemObject, ReleaseMemObject,
+	GetSupportedImageFormats, GetMemObjectInfo, GetImageInfo, GetPipeInfo, SetMemObjectDestructorCallback, SVMAlloc, SVMFree,
+	CreateSamplerWithProperties, RetainSampler, ReleaseSampler, GetSamplerInfo, CreateProgramWithSource, CreateProgramWithBinary,
+	CreateProgramWithBuiltInKernels, CreateProgramWithIL, RetainProgram, ReleaseProgram, BuildProgram, CompileProgram,
+	LinkProgram, SetProgramReleaseCallback, SetProgramSpecializationConstant, UnloadPlatformCompiler,
+	GetProgramInfo, GetProgramBuildInfo, CreateKernel, CreateKernelsInProgram, CloneKernel, RetainKernel, ReleaseKernel,
+	SetKernelArg, SetKernelArgSVMPointer, SetKernelExecInfo, GetKernelInfo, GetKernelArgInfo, GetKernelWorkGroupInfo,
+	GetKernelSubGroupInfo, WaitForEvents, GetEventInfo, CreateUserEvent, RetainEvent, ReleaseEvent, SetUserEventStatus,
+	SetEventCallback, GetEventProfilingInfo, Flush, Finish, EnqueueReadBuffer, EnqueueReadBufferRect, EnqueueWriteBuffer,
+	EnqueueWriteBufferRect, EnqueueFillBuffer, EnqueueCopyBuffer, EnqueueCopyBufferRect, EnqueueReadImage,
+	EnqueueWriteImage, EnqueueFillImage, EnqueueCopyImage, EnqueueCopyImageToBuffer, EnqueueCopyBufferToImage, EnqueueMapBuffer,
+	EnqueueMapImage, EnqueueUnmapMemObject, EnqueueMigrateMemObjects, EnqueueNDRangeKernel, EnqueueNativeKernel,
+	EnqueueMarkerWithWaitList, EnqueueBarrierWithWaitList, EnqueueSVMFree, EnqueueSVMMemcpy, EnqueueSVMMemFill,
+	EnqueueSVMMap, EnqueueSVMUnmap, EnqueueSVMMigrateMem, GetExtensionFunctionAddressForPlatform, CreateImage2D, CreateImage3D,
+	EnqueueMarker, EnqueueWaitForEvents, EnqueueBarrier, 
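/* deprecated OpenCL 1.x entry points and the cl_khr_gl_sharing interop set follow: */ 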
UnloadCompiler, GetExtensionFunctionAddress, CreateCommandQueue, + CreateSampler, EnqueueTask, + CreateFromGLBuffer, CreateFromGLTexture, CreateFromGLRenderbuffer, GetGLObjectInfo, GetGLTextureInfo, EnqueueAcquireGLObjects, + EnqueueReleaseGLObjects, CreateFromGLTexture2D, CreateFromGLTexture3D, GetGLContextInfoKHR, + NUMBER_OF_CL_FUNCTIONS +}; +} + +namespace internals { +inline void* getClFunction(CLFunctions::Enum func) +{ + static void* CLFunctionTable[CLFunctions::NUMBER_OF_CL_FUNCTIONS]; + + if (CLFunctionTable[0] == NULL) + { + pvr::lib::LIBTYPE lib = pvr::lib::openlib(clLibName); + if (lib) + { + Log_Info("OpenCL Bindings: Successfully loaded library %s for OpenCL", clLibName); + } + else + { + lib = pvr::lib::openlib(clLibAltName); + if (lib) + { + Log_Info("OpenCL Bindings: Successfully loaded alternative library %s for OpenCL, after %s failed", clLibAltName, clLibName); + } + else + { + Log_Error("OpenCL Bindings: Failed to open library %s", clLibName); + return nullptr; + } + } + + CLFunctionTable[CLFunctions::GetPlatformIDs] = pvr::lib::getLibFunctionChecked(lib, "clGetPlatformIDs"); + CLFunctionTable[CLFunctions::GetPlatformInfo] = pvr::lib::getLibFunctionChecked(lib, "clGetPlatformInfo"); + CLFunctionTable[CLFunctions::GetDeviceIDs] = pvr::lib::getLibFunctionChecked(lib, "clGetDeviceIDs"); + CLFunctionTable[CLFunctions::GetDeviceInfo] = pvr::lib::getLibFunctionChecked(lib, "clGetDeviceInfo"); + CLFunctionTable[CLFunctions::CreateSubDevices] = pvr::lib::getLibFunctionChecked(lib, "clCreateSubDevices"); + CLFunctionTable[CLFunctions::RetainDevice] = pvr::lib::getLibFunctionChecked(lib, "clRetainDevice"); + CLFunctionTable[CLFunctions::ReleaseDevice] = pvr::lib::getLibFunctionChecked(lib, "clReleaseDevice"); + CLFunctionTable[CLFunctions::SetDefaultDeviceCommandQueue] = pvr::lib::getLibFunctionChecked(lib, "clSetDefaultDeviceCommandQueue"); + CLFunctionTable[CLFunctions::GetDeviceAndHostTimer] = pvr::lib::getLibFunctionChecked(lib, "clGetDeviceAndHostTimer"); + CLFunctionTable[CLFunctions::GetHostTimer] = pvr::lib::getLibFunctionChecked(lib, "clGetHostTimer"); + CLFunctionTable[CLFunctions::CreateContext] = pvr::lib::getLibFunctionChecked(lib, "clCreateContext"); + CLFunctionTable[CLFunctions::CreateContextFromType] = pvr::lib::getLibFunctionChecked(lib, "clCreateContextFromType"); + CLFunctionTable[CLFunctions::RetainContext] = pvr::lib::getLibFunctionChecked(lib, "clRetainContext"); + CLFunctionTable[CLFunctions::ReleaseContext] = pvr::lib::getLibFunctionChecked(lib, "clReleaseContext"); + CLFunctionTable[CLFunctions::GetContextInfo] = pvr::lib::getLibFunctionChecked(lib, "clGetContextInfo"); + CLFunctionTable[CLFunctions::CreateCommandQueueWithProperties] = pvr::lib::getLibFunctionChecked(lib, "clCreateCommandQueueWithProperties"); + CLFunctionTable[CLFunctions::RetainCommandQueue] = pvr::lib::getLibFunctionChecked(lib, "clRetainCommandQueue"); + CLFunctionTable[CLFunctions::ReleaseCommandQueue] = pvr::lib::getLibFunctionChecked(lib, "clReleaseCommandQueue"); + CLFunctionTable[CLFunctions::GetCommandQueueInfo] = pvr::lib::getLibFunctionChecked(lib, "clGetCommandQueueInfo"); + CLFunctionTable[CLFunctions::CreateBuffer] = pvr::lib::getLibFunctionChecked(lib, "clCreateBuffer"); + CLFunctionTable[CLFunctions::CreateSubBuffer] = pvr::lib::getLibFunctionChecked(lib, "clCreateSubBuffer"); + CLFunctionTable[CLFunctions::CreateImage] = pvr::lib::getLibFunctionChecked(lib, "clCreateImage"); + CLFunctionTable[CLFunctions::CreatePipe] = pvr::lib::getLibFunctionChecked(lib, 
"clCreatePipe"); + CLFunctionTable[CLFunctions::RetainMemObject] = pvr::lib::getLibFunctionChecked(lib, "clRetainMemObject"); + CLFunctionTable[CLFunctions::ReleaseMemObject] = pvr::lib::getLibFunctionChecked(lib, "clReleaseMemObject"); + CLFunctionTable[CLFunctions::GetSupportedImageFormats] = pvr::lib::getLibFunctionChecked(lib, "clGetSupportedImageFormats"); + CLFunctionTable[CLFunctions::GetMemObjectInfo] = pvr::lib::getLibFunctionChecked(lib, "clGetMemObjectInfo"); + CLFunctionTable[CLFunctions::GetImageInfo] = pvr::lib::getLibFunctionChecked(lib, "clGetImageInfo"); + CLFunctionTable[CLFunctions::GetPipeInfo] = pvr::lib::getLibFunctionChecked(lib, "clGetPipeInfo"); + CLFunctionTable[CLFunctions::SetMemObjectDestructorCallback] = pvr::lib::getLibFunctionChecked(lib, "clSetMemObjectDestructorCallback"); + CLFunctionTable[CLFunctions::SVMAlloc] = pvr::lib::getLibFunctionChecked(lib, "clSVMAlloc"); + CLFunctionTable[CLFunctions::SVMFree] = pvr::lib::getLibFunctionChecked(lib, "clSVMFree"); + CLFunctionTable[CLFunctions::CreateSamplerWithProperties] = pvr::lib::getLibFunctionChecked(lib, "clCreateSamplerWithProperties"); + CLFunctionTable[CLFunctions::RetainSampler] = pvr::lib::getLibFunctionChecked(lib, "clRetainSampler"); + CLFunctionTable[CLFunctions::ReleaseSampler] = pvr::lib::getLibFunctionChecked(lib, "clReleaseSampler"); + CLFunctionTable[CLFunctions::GetSamplerInfo] = pvr::lib::getLibFunctionChecked(lib, "clGetSamplerInfo"); + CLFunctionTable[CLFunctions::CreateProgramWithSource] = pvr::lib::getLibFunctionChecked(lib, "clCreateProgramWithSource"); + CLFunctionTable[CLFunctions::CreateProgramWithBinary] = pvr::lib::getLibFunctionChecked(lib, "clCreateProgramWithBinary"); + CLFunctionTable[CLFunctions::CreateProgramWithBuiltInKernels] = pvr::lib::getLibFunctionChecked(lib, "clCreateProgramWithBuiltInKernels"); + CLFunctionTable[CLFunctions::CreateProgramWithIL] = pvr::lib::getLibFunctionChecked(lib, "clCreateProgramWithIL"); + CLFunctionTable[CLFunctions::RetainProgram] = pvr::lib::getLibFunctionChecked(lib, "clRetainProgram"); + CLFunctionTable[CLFunctions::ReleaseProgram] = pvr::lib::getLibFunctionChecked(lib, "clReleaseProgram"); + CLFunctionTable[CLFunctions::BuildProgram] = pvr::lib::getLibFunctionChecked(lib, "clBuildProgram"); + CLFunctionTable[CLFunctions::CompileProgram] = pvr::lib::getLibFunctionChecked(lib, "clCompileProgram"); + CLFunctionTable[CLFunctions::LinkProgram] = pvr::lib::getLibFunctionChecked(lib, "clLinkProgram"); + CLFunctionTable[CLFunctions::SetProgramReleaseCallback] = pvr::lib::getLibFunctionChecked(lib, "clSetProgramReleaseCallback"); + CLFunctionTable[CLFunctions::SetProgramSpecializationConstant] = pvr::lib::getLibFunctionChecked(lib, "clSetProgramSpecializationConstant"); + CLFunctionTable[CLFunctions::UnloadPlatformCompiler] = pvr::lib::getLibFunctionChecked(lib, "clUnloadPlatformCompiler"); + CLFunctionTable[CLFunctions::GetProgramInfo] = pvr::lib::getLibFunctionChecked(lib, "clGetProgramInfo"); + CLFunctionTable[CLFunctions::GetProgramBuildInfo] = pvr::lib::getLibFunctionChecked(lib, "clGetProgramBuildInfo"); + CLFunctionTable[CLFunctions::CreateKernel] = pvr::lib::getLibFunctionChecked(lib, "clCreateKernel"); + CLFunctionTable[CLFunctions::CreateKernelsInProgram] = pvr::lib::getLibFunctionChecked(lib, "clCreateKernelsInProgram"); + CLFunctionTable[CLFunctions::CloneKernel] = pvr::lib::getLibFunctionChecked(lib, "clCloneKernel"); + CLFunctionTable[CLFunctions::RetainKernel] = pvr::lib::getLibFunctionChecked(lib, "clRetainKernel"); + 
CLFunctionTable[CLFunctions::ReleaseKernel] = pvr::lib::getLibFunctionChecked(lib, "clReleaseKernel"); + CLFunctionTable[CLFunctions::SetKernelArg] = pvr::lib::getLibFunctionChecked(lib, "clSetKernelArg"); + CLFunctionTable[CLFunctions::SetKernelArgSVMPointer] = pvr::lib::getLibFunctionChecked(lib, "clSetKernelArgSVMPointer"); + CLFunctionTable[CLFunctions::SetKernelExecInfo] = pvr::lib::getLibFunctionChecked(lib, "clSetKernelExecInfo"); + CLFunctionTable[CLFunctions::GetKernelInfo] = pvr::lib::getLibFunctionChecked(lib, "clGetKernelInfo"); + CLFunctionTable[CLFunctions::GetKernelArgInfo] = pvr::lib::getLibFunctionChecked(lib, "clGetKernelArgInfo"); + CLFunctionTable[CLFunctions::GetKernelWorkGroupInfo] = pvr::lib::getLibFunctionChecked(lib, "clGetKernelWorkGroupInfo"); + CLFunctionTable[CLFunctions::GetKernelSubGroupInfo] = pvr::lib::getLibFunctionChecked(lib, "clGetKernelSubGroupInfo"); + CLFunctionTable[CLFunctions::WaitForEvents] = pvr::lib::getLibFunctionChecked(lib, "clWaitForEvents"); + CLFunctionTable[CLFunctions::GetEventInfo] = pvr::lib::getLibFunctionChecked(lib, "clGetEventInfo"); + CLFunctionTable[CLFunctions::CreateUserEvent] = pvr::lib::getLibFunctionChecked(lib, "clCreateUserEvent"); + CLFunctionTable[CLFunctions::RetainEvent] = pvr::lib::getLibFunctionChecked(lib, "clRetainEvent"); + CLFunctionTable[CLFunctions::ReleaseEvent] = pvr::lib::getLibFunctionChecked(lib, "clReleaseEvent"); + CLFunctionTable[CLFunctions::SetUserEventStatus] = pvr::lib::getLibFunctionChecked(lib, "clSetUserEventStatus"); + CLFunctionTable[CLFunctions::SetEventCallback] = pvr::lib::getLibFunctionChecked(lib, "clSetEventCallback"); + CLFunctionTable[CLFunctions::GetEventProfilingInfo] = pvr::lib::getLibFunctionChecked(lib, "clGetEventProfilingInfo"); + CLFunctionTable[CLFunctions::Flush] = pvr::lib::getLibFunctionChecked(lib, "clFlush"); + CLFunctionTable[CLFunctions::Finish] = pvr::lib::getLibFunctionChecked(lib, "clFinish"); + CLFunctionTable[CLFunctions::EnqueueReadBuffer] = pvr::lib::getLibFunctionChecked(lib, "clEnqueueReadBuffer"); + CLFunctionTable[CLFunctions::EnqueueReadBufferRect] = pvr::lib::getLibFunctionChecked(lib, "clEnqueueReadBufferRect"); + CLFunctionTable[CLFunctions::EnqueueWriteBuffer] = pvr::lib::getLibFunctionChecked(lib, "clEnqueueWriteBuffer"); + CLFunctionTable[CLFunctions::EnqueueWriteBufferRect] = pvr::lib::getLibFunctionChecked(lib, "clEnqueueWriteBufferRect"); + CLFunctionTable[CLFunctions::EnqueueFillBuffer] = pvr::lib::getLibFunctionChecked(lib, "clEnqueueFillBuffer"); + CLFunctionTable[CLFunctions::EnqueueCopyBuffer] = pvr::lib::getLibFunctionChecked(lib, "clEnqueueCopyBuffer"); + CLFunctionTable[CLFunctions::EnqueueCopyBufferRect] = pvr::lib::getLibFunctionChecked(lib, "clEnqueueCopyBufferRect"); + CLFunctionTable[CLFunctions::EnqueueReadImage] = pvr::lib::getLibFunctionChecked(lib, "clEnqueueReadImage"); + CLFunctionTable[CLFunctions::EnqueueWriteImage] = pvr::lib::getLibFunctionChecked(lib, "clEnqueueWriteImage"); + CLFunctionTable[CLFunctions::EnqueueFillImage] = pvr::lib::getLibFunctionChecked(lib, "clEnqueueFillImage"); + CLFunctionTable[CLFunctions::EnqueueCopyImage] = pvr::lib::getLibFunctionChecked(lib, "clEnqueueCopyImage"); + CLFunctionTable[CLFunctions::EnqueueCopyImageToBuffer] = pvr::lib::getLibFunctionChecked(lib, "clEnqueueCopyImageToBuffer"); + CLFunctionTable[CLFunctions::EnqueueCopyBufferToImage] = pvr::lib::getLibFunctionChecked(lib, "clEnqueueCopyBufferToImage"); + CLFunctionTable[CLFunctions::EnqueueMapBuffer] = 
pvr::lib::getLibFunctionChecked(lib, "clEnqueueMapBuffer"); + CLFunctionTable[CLFunctions::EnqueueMapImage] = pvr::lib::getLibFunctionChecked(lib, "clEnqueueMapImage"); + CLFunctionTable[CLFunctions::EnqueueUnmapMemObject] = pvr::lib::getLibFunctionChecked(lib, "clEnqueueUnmapMemObject"); + CLFunctionTable[CLFunctions::EnqueueMigrateMemObjects] = pvr::lib::getLibFunctionChecked(lib, "clEnqueueMigrateMemObjects"); + CLFunctionTable[CLFunctions::EnqueueNDRangeKernel] = pvr::lib::getLibFunctionChecked(lib, "clEnqueueNDRangeKernel"); + CLFunctionTable[CLFunctions::EnqueueNativeKernel] = pvr::lib::getLibFunctionChecked(lib, "clEnqueueNativeKernel"); + CLFunctionTable[CLFunctions::EnqueueMarkerWithWaitList] = pvr::lib::getLibFunctionChecked(lib, "clEnqueueMarkerWithWaitList"); + CLFunctionTable[CLFunctions::EnqueueBarrierWithWaitList] = pvr::lib::getLibFunctionChecked(lib, "clEnqueueBarrierWithWaitList"); + CLFunctionTable[CLFunctions::EnqueueSVMFree] = pvr::lib::getLibFunctionChecked(lib, "clEnqueueSVMFree"); + CLFunctionTable[CLFunctions::EnqueueSVMMemcpy] = pvr::lib::getLibFunctionChecked(lib, "clEnqueueSVMMemcpy"); + CLFunctionTable[CLFunctions::EnqueueSVMMemFill] = pvr::lib::getLibFunctionChecked(lib, "clEnqueueSVMMemFill"); + CLFunctionTable[CLFunctions::EnqueueSVMMap] = pvr::lib::getLibFunctionChecked(lib, "clEnqueueSVMMap"); + CLFunctionTable[CLFunctions::EnqueueSVMUnmap] = pvr::lib::getLibFunctionChecked(lib, "clEnqueueSVMUnmap"); + CLFunctionTable[CLFunctions::EnqueueSVMMigrateMem] = pvr::lib::getLibFunctionChecked(lib, "clEnqueueSVMMigrateMem"); + CLFunctionTable[CLFunctions::GetExtensionFunctionAddressForPlatform] = pvr::lib::getLibFunctionChecked(lib, "clGetExtensionFunctionAddressForPlatform"); + CLFunctionTable[CLFunctions::CreateImage2D] = pvr::lib::getLibFunctionChecked(lib, "clCreateImage2D"); + CLFunctionTable[CLFunctions::CreateImage3D] = pvr::lib::getLibFunctionChecked(lib, "clCreateImage3D"); + CLFunctionTable[CLFunctions::EnqueueMarker] = pvr::lib::getLibFunctionChecked(lib, "clEnqueueMarker"); + CLFunctionTable[CLFunctions::EnqueueWaitForEvents] = pvr::lib::getLibFunctionChecked(lib, "clEnqueueWaitForEvents"); + CLFunctionTable[CLFunctions::EnqueueBarrier] = pvr::lib::getLibFunctionChecked(lib, "clEnqueueBarrier"); + CLFunctionTable[CLFunctions::UnloadCompiler] = pvr::lib::getLibFunctionChecked(lib, "clUnloadCompiler"); + CLFunctionTable[CLFunctions::GetExtensionFunctionAddress] = pvr::lib::getLibFunctionChecked(lib, "clGetExtensionFunctionAddress"); + CLFunctionTable[CLFunctions::CreateCommandQueue] = pvr::lib::getLibFunctionChecked(lib, "clCreateCommandQueue"); + CLFunctionTable[CLFunctions::CreateSampler] = pvr::lib::getLibFunctionChecked(lib, "clCreateSampler"); + CLFunctionTable[CLFunctions::EnqueueTask] = pvr::lib::getLibFunctionChecked(lib, "clEnqueueTask"); + CLFunctionTable[CLFunctions::CreateFromGLBuffer] = pvr::lib::getLibFunctionChecked(lib, "clCreateFromGLBuffer"); + CLFunctionTable[CLFunctions::CreateFromGLTexture] = pvr::lib::getLibFunctionChecked(lib, "clCreateFromGLTexture"); + CLFunctionTable[CLFunctions::CreateFromGLRenderbuffer] = pvr::lib::getLibFunctionChecked(lib, "clCreateFromGLRenderbuffer"); + CLFunctionTable[CLFunctions::GetGLObjectInfo] = pvr::lib::getLibFunctionChecked(lib, "clGetGLObjectInfo"); + CLFunctionTable[CLFunctions::GetGLTextureInfo] = pvr::lib::getLibFunctionChecked(lib, "clGetGLTextureInfo"); + CLFunctionTable[CLFunctions::EnqueueAcquireGLObjects] = pvr::lib::getLibFunctionChecked(lib, "clEnqueueAcquireGLObjects"); + 
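// The entries from CreateFromGLBuffer onward come from the CL/GL interop
+		// extension (cl_khr_gl_sharing); CreateFromGLTexture2D/3D are the deprecated
+		// pre-OpenCL-1.2 forms of CreateFromGLTexture.
+		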
CLFunctionTable[CLFunctions::EnqueueReleaseGLObjects] = pvr::lib::getLibFunctionChecked(lib, "clEnqueueReleaseGLObjects");
+		CLFunctionTable[CLFunctions::CreateFromGLTexture2D] = pvr::lib::getLibFunctionChecked(lib, "clCreateFromGLTexture2D");
+		CLFunctionTable[CLFunctions::CreateFromGLTexture3D] = pvr::lib::getLibFunctionChecked(lib, "clCreateFromGLTexture3D");
+		CLFunctionTable[CLFunctions::GetGLContextInfoKHR] = pvr::lib::getLibFunctionChecked(lib, "clGetGLContextInfoKHR");
+	}
+	return CLFunctionTable[func];
+}
+}
+
+inline bool testFunctionExists(CLFunctions::Enum function)
+{
+	return internals::getClFunction(function) != 0;
+}
+
+}
+#ifndef DYNAMICCL_NO_NAMESPACE
+namespace cl {
+#endif
+inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(GetPlatformIDs)(cl_uint num_entries, cl_platform_id* platforms, cl_uint* num_platforms) CL_API_SUFFIX__VERSION_1_0
+{
+	typedef cl_int(CL_API_CALL * PFNclGetPlatformIDs)(cl_uint num_entries, cl_platform_id * platforms, cl_uint * num_platforms);
+	static PFNclGetPlatformIDs _clGetPlatformIDs = (PFNclGetPlatformIDs)cl::internals::getClFunction(cl::CLFunctions::GetPlatformIDs);
+	return _clGetPlatformIDs(num_entries, platforms, num_platforms);
+}
+inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(GetPlatformInfo)(cl_platform_id platform, cl_platform_info param_name, size_t param_value_size, void* param_value, size_t* param_value_size_ret) CL_API_SUFFIX__VERSION_1_0
+{
+	typedef cl_int(CL_API_CALL * PFNclGetPlatformInfo)(cl_platform_id platform, cl_platform_info param_name, size_t param_value_size, void* param_value, size_t* param_value_size_ret);
+	static PFNclGetPlatformInfo _clGetPlatformInfo = (PFNclGetPlatformInfo)cl::internals::getClFunction(cl::CLFunctions::GetPlatformInfo);
+	return _clGetPlatformInfo(platform, param_name, param_value_size, param_value, param_value_size_ret);
+}
+inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(GetDeviceIDs)(cl_platform_id platform, cl_device_type device_type, cl_uint num_entries, cl_device_id* devices, cl_uint* num_devices) CL_API_SUFFIX__VERSION_1_0
+{
+	typedef cl_int(CL_API_CALL * PFNclGetDeviceIDs)(cl_platform_id platform, cl_device_type device_type, cl_uint num_entries, cl_device_id * devices, cl_uint * num_devices);
+	static PFNclGetDeviceIDs _clGetDeviceIDs = (PFNclGetDeviceIDs)cl::internals::getClFunction(cl::CLFunctions::GetDeviceIDs);
+	return _clGetDeviceIDs(platform, device_type, num_entries, devices, num_devices);
+}
+inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(GetDeviceInfo)(cl_device_id device, cl_device_info param_name, size_t param_value_size, void* param_value, size_t* param_value_size_ret) CL_API_SUFFIX__VERSION_1_0
+{
+	typedef cl_int(CL_API_CALL * PFNclGetDeviceInfo)(cl_device_id device, cl_device_info param_name, size_t param_value_size, void* param_value, size_t* param_value_size_ret);
+	static PFNclGetDeviceInfo _clGetDeviceInfo = (PFNclGetDeviceInfo)cl::internals::getClFunction(cl::CLFunctions::GetDeviceInfo);
+	return _clGetDeviceInfo(device, param_name, param_value_size, param_value, param_value_size_ret);
+}
+inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(CreateSubDevices)(cl_device_id in_device, const cl_device_partition_property* properties, cl_uint num_devices, cl_device_id* out_devices, cl_uint* num_devices_ret) CL_API_SUFFIX__VERSION_1_2
+{
+	typedef cl_int(CL_API_CALL * PFNclCreateSubDevices)(cl_device_id in_device, const cl_device_partition_property * properties, cl_uint num_devices, cl_device_id * out_devices, cl_uint * num_devices_ret);
+	static PFNclCreateSubDevices 
_clCreateSubDevices = (PFNclCreateSubDevices)cl::internals::getClFunction(cl::CLFunctions::CreateSubDevices); + return _clCreateSubDevices(in_device, properties, num_devices, out_devices, num_devices_ret); +} +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(RetainDevice)(cl_device_id device) CL_API_SUFFIX__VERSION_1_2 +{ + typedef cl_int(CL_API_CALL * PFNclRetainDevice)(cl_device_id device); + static PFNclRetainDevice _clRetainDevice = (PFNclRetainDevice)cl::internals::getClFunction(cl::CLFunctions::RetainDevice); + return _clRetainDevice(device); +} +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(ReleaseDevice)(cl_device_id device) CL_API_SUFFIX__VERSION_1_2 +{ + typedef cl_int(CL_API_CALL * PFNclReleaseDevice)(cl_device_id device); + static PFNclReleaseDevice _clReleaseDevice = (PFNclReleaseDevice)cl::internals::getClFunction(cl::CLFunctions::ReleaseDevice); + return _clReleaseDevice(device); +} +#ifdef CL_API_SUFFIX__VERSION_2_1 +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(SetDefaultDeviceCommandQueue)(cl_context context, cl_device_id device, cl_command_queue command_queue) CL_API_SUFFIX__VERSION_2_1 +{ + typedef cl_int(CL_API_CALL * PFNclSetDefaultDeviceCommandQueue)(cl_context context, cl_device_id device, cl_command_queue command_queue); + static PFNclSetDefaultDeviceCommandQueue _clSetDefaultDeviceCommandQueue = (PFNclSetDefaultDeviceCommandQueue)cl::internals::getClFunction(cl::CLFunctions::SetDefaultDeviceCommandQueue); + return _clSetDefaultDeviceCommandQueue(context, device, command_queue); +} +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(GetDeviceAndHostTimer)(cl_device_id device, cl_ulong* device_timestamp, cl_ulong* host_timestamp) CL_API_SUFFIX__VERSION_2_1 +{ + typedef cl_int(CL_API_CALL * PFNclGetDeviceAndHostTimer)(cl_device_id device, cl_ulong * device_timestamp, cl_ulong * host_timestamp); + static PFNclGetDeviceAndHostTimer _clGetDeviceAndHostTimer = (PFNclGetDeviceAndHostTimer)cl::internals::getClFunction(cl::CLFunctions::GetDeviceAndHostTimer); + return _clGetDeviceAndHostTimer(device, device_timestamp, host_timestamp); +} +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(GetHostTimer)(cl_device_id device, cl_ulong* host_timestamp) CL_API_SUFFIX__VERSION_2_1 +{ + typedef cl_int(CL_API_CALL * PFNclGetHostTimer)(cl_device_id device, cl_ulong * host_timestamp); + static PFNclGetHostTimer _clGetHostTimer = (PFNclGetHostTimer)cl::internals::getClFunction(cl::CLFunctions::GetHostTimer); + return _clGetHostTimer(device, host_timestamp); +} +#endif +inline cl_context CL_API_CALL DEFINE_CL_FUNCTION_NAME(CreateContext)(const cl_context_properties* properties, cl_uint num_devices, const cl_device_id* devices, void (CL_CALLBACK* pfn_notify)(const char*, const void*, size_t, void*), void* user_data, cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_0 +{ + typedef cl_context(CL_API_CALL * PFNclCreateContext)(const cl_context_properties * properties, cl_uint num_devices, const cl_device_id * devices, void (CL_CALLBACK*)(const char*, const void*, size_t, void*), void* user_data, cl_int * errcode_ret); + static PFNclCreateContext _clCreateContext = (PFNclCreateContext)cl::internals::getClFunction(cl::CLFunctions::CreateContext); + return _clCreateContext(properties, num_devices, devices, pfn_notify, user_data, errcode_ret); +} +inline cl_context CL_API_CALL DEFINE_CL_FUNCTION_NAME(CreateContextFromType)(const cl_context_properties* properties, cl_device_type device_type, void (CL_CALLBACK* pfn_notify)(const char*, const void*, size_t, void*), void* user_data, cl_int* 
errcode_ret) CL_API_SUFFIX__VERSION_1_0 +{ + typedef cl_context(CL_API_CALL * PFNclCreateContextFromType)(const cl_context_properties * properties, cl_device_type device_type, void (CL_CALLBACK*)(const char*, const void*, size_t, void*), void* user_data, cl_int * errcode_ret); + static PFNclCreateContextFromType _clCreateContextFromType = (PFNclCreateContextFromType)cl::internals::getClFunction(cl::CLFunctions::CreateContextFromType); + return _clCreateContextFromType(properties, device_type, pfn_notify, user_data, errcode_ret); +} +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(RetainContext)(cl_context context) CL_API_SUFFIX__VERSION_1_0 +{ + typedef cl_int(CL_API_CALL * PFNclRetainContext)(cl_context context); + static PFNclRetainContext _clRetainContext = (PFNclRetainContext)cl::internals::getClFunction(cl::CLFunctions::RetainContext); + return _clRetainContext(context); +} +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(ReleaseContext)(cl_context context) CL_API_SUFFIX__VERSION_1_0 +{ + typedef cl_int(CL_API_CALL * PFNclReleaseContext)(cl_context context); + static PFNclReleaseContext _clReleaseContext = (PFNclReleaseContext)cl::internals::getClFunction(cl::CLFunctions::ReleaseContext); + return _clReleaseContext(context); +} +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(GetContextInfo)(cl_context context, cl_context_info param_name, size_t param_value_size, void* param_value, size_t* param_value_size_ret) CL_API_SUFFIX__VERSION_1_0 +{ + typedef cl_int(CL_API_CALL * PFNclGetContextInfo)(cl_context context, cl_context_info param_name, size_t param_value_size, void* param_value, size_t* param_value_size_ret); + static PFNclGetContextInfo _clGetContextInfo = (PFNclGetContextInfo)cl::internals::getClFunction(cl::CLFunctions::GetContextInfo); + return _clGetContextInfo(context, param_name, param_value_size, param_value, param_value_size_ret); +} +#ifdef CL_API_SUFFIX__VERSION_2_0 +inline cl_command_queue CL_API_CALL DEFINE_CL_FUNCTION_NAME(CreateCommandQueueWithProperties)(cl_context context, cl_device_id device, const cl_queue_properties* properties, cl_int* errcode_ret) CL_API_SUFFIX__VERSION_2_0 +{ + typedef cl_command_queue(CL_API_CALL * PFNclCreateCommandQueueWithProperties)(cl_context context, cl_device_id device, const cl_queue_properties * properties, cl_int * errcode_ret); + static PFNclCreateCommandQueueWithProperties _clCreateCommandQueueWithProperties = (PFNclCreateCommandQueueWithProperties)cl::internals::getClFunction(cl::CLFunctions::CreateCommandQueueWithProperties); + return _clCreateCommandQueueWithProperties(context, device, properties, errcode_ret); +} +#endif +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(RetainCommandQueue)(cl_command_queue command_queue) CL_API_SUFFIX__VERSION_1_0 +{ + typedef cl_int(CL_API_CALL * PFNclRetainCommandQueue)(cl_command_queue command_queue); + static PFNclRetainCommandQueue _clRetainCommandQueue = (PFNclRetainCommandQueue)cl::internals::getClFunction(cl::CLFunctions::RetainCommandQueue); + return _clRetainCommandQueue(command_queue); +} +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(ReleaseCommandQueue)(cl_command_queue command_queue) CL_API_SUFFIX__VERSION_1_0 +{ + typedef cl_int(CL_API_CALL * PFNclReleaseCommandQueue)(cl_command_queue command_queue); + static PFNclReleaseCommandQueue _clReleaseCommandQueue = (PFNclReleaseCommandQueue)cl::internals::getClFunction(cl::CLFunctions::ReleaseCommandQueue); + return _clReleaseCommandQueue(command_queue); +} +inline cl_int CL_API_CALL 
DEFINE_CL_FUNCTION_NAME(GetCommandQueueInfo)(cl_command_queue command_queue, cl_command_queue_info param_name, size_t param_value_size, void* param_value, size_t* param_value_size_ret) CL_API_SUFFIX__VERSION_1_0 +{ + typedef cl_int(CL_API_CALL * PFNclGetCommandQueueInfo)(cl_command_queue command_queue, cl_command_queue_info param_name, size_t param_value_size, void* param_value, size_t* param_value_size_ret); + static PFNclGetCommandQueueInfo _clGetCommandQueueInfo = (PFNclGetCommandQueueInfo)cl::internals::getClFunction(cl::CLFunctions::GetCommandQueueInfo); + return _clGetCommandQueueInfo(command_queue, param_name, param_value_size, param_value, param_value_size_ret); +} +inline cl_mem CL_API_CALL DEFINE_CL_FUNCTION_NAME(CreateBuffer)(cl_context context, cl_mem_flags flags, size_t size, void* host_ptr, cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_0 +{ + typedef cl_mem(CL_API_CALL * PFNclCreateBuffer)(cl_context context, cl_mem_flags flags, size_t size, void* host_ptr, cl_int * errcode_ret); + static PFNclCreateBuffer _clCreateBuffer = (PFNclCreateBuffer)cl::internals::getClFunction(cl::CLFunctions::CreateBuffer); + return _clCreateBuffer(context, flags, size, host_ptr, errcode_ret); +} +inline cl_mem CL_API_CALL DEFINE_CL_FUNCTION_NAME(CreateSubBuffer)(cl_mem buffer, cl_mem_flags flags, cl_buffer_create_type buffer_create_type, const void* buffer_create_info, cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_1 +{ + typedef cl_mem(CL_API_CALL * PFNclCreateSubBuffer)(cl_mem buffer, cl_mem_flags flags, cl_buffer_create_type buffer_create_type, const void* buffer_create_info, cl_int * errcode_ret); + static PFNclCreateSubBuffer _clCreateSubBuffer = (PFNclCreateSubBuffer)cl::internals::getClFunction(cl::CLFunctions::CreateSubBuffer); + return _clCreateSubBuffer(buffer, flags, buffer_create_type, buffer_create_info, errcode_ret); +} +inline cl_mem CL_API_CALL DEFINE_CL_FUNCTION_NAME(CreateImage)(cl_context context, cl_mem_flags flags, const cl_image_format* image_format, const cl_image_desc* image_desc, void* host_ptr, cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_2 +{ + typedef cl_mem(CL_API_CALL * PFNclCreateImage)(cl_context context, cl_mem_flags flags, const cl_image_format * image_format, const cl_image_desc * image_desc, void* host_ptr, cl_int * errcode_ret); + static PFNclCreateImage _clCreateImage = (PFNclCreateImage)cl::internals::getClFunction(cl::CLFunctions::CreateImage); + return _clCreateImage(context, flags, image_format, image_desc, host_ptr, errcode_ret); +} +#ifdef CL_API_SUFFIX__VERSION_2_0 +inline cl_mem CL_API_CALL DEFINE_CL_FUNCTION_NAME(CreatePipe)(cl_context context, cl_mem_flags flags, cl_uint pipe_packet_size, cl_uint pipe_max_packets, const cl_pipe_properties* properties, cl_int* errcode_ret) CL_API_SUFFIX__VERSION_2_0 +{ + typedef cl_mem(CL_API_CALL * PFNclCreatePipe)(cl_context context, cl_mem_flags flags, cl_uint pipe_packet_size, cl_uint pipe_max_packets, const cl_pipe_properties * properties, cl_int * errcode_ret); + static PFNclCreatePipe _clCreatePipe = (PFNclCreatePipe)cl::internals::getClFunction(cl::CLFunctions::CreatePipe); + return _clCreatePipe(context, flags, pipe_packet_size, pipe_max_packets, properties, errcode_ret); +} +#endif +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(RetainMemObject)(cl_mem memobj) CL_API_SUFFIX__VERSION_1_0 +{ + typedef cl_int(CL_API_CALL * PFNclRetainMemObject)(cl_mem memobj); + static PFNclRetainMemObject _clRetainMemObject = (PFNclRetainMemObject)cl::internals::getClFunction(cl::CLFunctions::RetainMemObject); + return 
_clRetainMemObject(memobj); +} +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(ReleaseMemObject)(cl_mem memobj) CL_API_SUFFIX__VERSION_1_0 +{ + typedef cl_int(CL_API_CALL * PFNclReleaseMemObject)(cl_mem memobj); + static PFNclReleaseMemObject _clReleaseMemObject = (PFNclReleaseMemObject)cl::internals::getClFunction(cl::CLFunctions::ReleaseMemObject); + return _clReleaseMemObject(memobj); +} +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(GetSupportedImageFormats)(cl_context context, cl_mem_flags flags, cl_mem_object_type image_type, cl_uint num_entries, cl_image_format* image_formats, cl_uint* num_image_formats) CL_API_SUFFIX__VERSION_1_0 +{ + typedef cl_int(CL_API_CALL * PFNclGetSupportedImageFormats)(cl_context context, cl_mem_flags flags, cl_mem_object_type image_type, cl_uint num_entries, cl_image_format * image_formats, cl_uint * num_image_formats); + static PFNclGetSupportedImageFormats _clGetSupportedImageFormats = (PFNclGetSupportedImageFormats)cl::internals::getClFunction(cl::CLFunctions::GetSupportedImageFormats); + return _clGetSupportedImageFormats(context, flags, image_type, num_entries, image_formats, num_image_formats); +} +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(GetMemObjectInfo)(cl_mem memobj, cl_mem_info param_name, size_t param_value_size, void* param_value, size_t* param_value_size_ret) CL_API_SUFFIX__VERSION_1_0 +{ + typedef cl_int(CL_API_CALL * PFNclGetMemObjectInfo)(cl_mem memobj, cl_mem_info param_name, size_t param_value_size, void* param_value, size_t* param_value_size_ret); + static PFNclGetMemObjectInfo _clGetMemObjectInfo = (PFNclGetMemObjectInfo)cl::internals::getClFunction(cl::CLFunctions::GetMemObjectInfo); + return _clGetMemObjectInfo(memobj, param_name, param_value_size, param_value, param_value_size_ret); +} +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(GetImageInfo)(cl_mem image, cl_image_info param_name, size_t param_value_size, void* param_value, size_t* param_value_size_ret) CL_API_SUFFIX__VERSION_1_0 +{ + typedef cl_int(CL_API_CALL * PFNclGetImageInfo)(cl_mem image, cl_image_info param_name, size_t param_value_size, void* param_value, size_t* param_value_size_ret); + static PFNclGetImageInfo _clGetImageInfo = (PFNclGetImageInfo)cl::internals::getClFunction(cl::CLFunctions::GetImageInfo); + return _clGetImageInfo(image, param_name, param_value_size, param_value, param_value_size_ret); +} +#ifdef CL_API_SUFFIX__VERSION_2_0 +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(GetPipeInfo)(cl_mem pipe, cl_pipe_info param_name, size_t param_value_size, void* param_value, size_t* param_value_size_ret) CL_API_SUFFIX__VERSION_2_0 +{ + typedef cl_int(CL_API_CALL * PFNclGetPipeInfo)(cl_mem pipe, cl_pipe_info param_name, size_t param_value_size, void* param_value, size_t* param_value_size_ret); + static PFNclGetPipeInfo _clGetPipeInfo = (PFNclGetPipeInfo)cl::internals::getClFunction(cl::CLFunctions::GetPipeInfo); + return _clGetPipeInfo(pipe, param_name, param_value_size, param_value, param_value_size_ret); +} +inline void* CL_API_CALL DEFINE_CL_FUNCTION_NAME(SVMAlloc)(cl_context context, cl_svm_mem_flags flags, size_t size, cl_uint alignment) CL_API_SUFFIX__VERSION_2_0 +{ + typedef void* (CL_API_CALL * PFNclSVMAlloc)(cl_context context, cl_svm_mem_flags flags, size_t size, cl_uint alignment); + PFNclSVMAlloc _clSVMAlloc = (PFNclSVMAlloc)cl::internals::getClFunction(cl::CLFunctions::SVMAlloc); + return _clSVMAlloc(context, flags, size, alignment); +} +inline void CL_API_CALL DEFINE_CL_FUNCTION_NAME(SVMFree)(cl_context context, void* 
svm_pointer) CL_API_SUFFIX__VERSION_2_0 +{ + typedef void(CL_API_CALL * PFNclSVMFree)(cl_context context, void* svm_pointer); + PFNclSVMFree _clSVMFree = (PFNclSVMFree)cl::internals::getClFunction(cl::CLFunctions::SVMFree); + return _clSVMFree(context, svm_pointer); +} +inline cl_sampler CL_API_CALL DEFINE_CL_FUNCTION_NAME(CreateSamplerWithProperties)(cl_context context, const cl_sampler_properties* normalized_coords, cl_int* errcode_ret) CL_API_SUFFIX__VERSION_2_0 +{ + typedef cl_sampler(CL_API_CALL * PFNclCreateSamplerWithProperties)(cl_context context, const cl_sampler_properties * normalized_coords, cl_int * errcode_ret); + PFNclCreateSamplerWithProperties _clCreateSamplerWithProperties = (PFNclCreateSamplerWithProperties)cl::internals::getClFunction(cl::CLFunctions::CreateSamplerWithProperties); + return _clCreateSamplerWithProperties(context, normalized_coords, errcode_ret); +} +#endif +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(SetMemObjectDestructorCallback)(cl_mem memobj, void (CL_CALLBACK* pfn_notify)(cl_mem memobj, void* user_data), void* user_data) CL_API_SUFFIX__VERSION_1_1 +{ + typedef cl_int(CL_API_CALL * PFNclSetMemObjectDestructorCallback)(cl_mem memobj, void (CL_CALLBACK * pfn_notify)(cl_mem memobj, void* user_data), void* user_data); + static PFNclSetMemObjectDestructorCallback _clSetMemObjectDestructorCallback = (PFNclSetMemObjectDestructorCallback)cl::internals::getClFunction(cl::CLFunctions::SetMemObjectDestructorCallback); + return _clSetMemObjectDestructorCallback(memobj, pfn_notify, user_data); +} + +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(RetainSampler)(cl_sampler sampler) CL_API_SUFFIX__VERSION_1_0 +{ + typedef cl_int(CL_API_CALL * PFNclRetainSampler)(cl_sampler sampler); + static PFNclRetainSampler _clRetainSampler = (PFNclRetainSampler)cl::internals::getClFunction(cl::CLFunctions::RetainSampler); + return _clRetainSampler(sampler); +} +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(ReleaseSampler)(cl_sampler sampler) CL_API_SUFFIX__VERSION_1_0 +{ + typedef cl_int(CL_API_CALL * PFNclReleaseSampler)(cl_sampler sampler); + static PFNclReleaseSampler _clReleaseSampler = (PFNclReleaseSampler)cl::internals::getClFunction(cl::CLFunctions::ReleaseSampler); + return _clReleaseSampler(sampler); +} +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(GetSamplerInfo)(cl_sampler sampler, cl_sampler_info param_name, size_t param_value_size, void* param_value, size_t* param_value_size_ret) CL_API_SUFFIX__VERSION_1_0 +{ + typedef cl_int(CL_API_CALL * PFNclGetSamplerInfo)(cl_sampler sampler, cl_sampler_info param_name, size_t param_value_size, void* param_value, size_t* param_value_size_ret); + static PFNclGetSamplerInfo _clGetSamplerInfo = (PFNclGetSamplerInfo)cl::internals::getClFunction(cl::CLFunctions::GetSamplerInfo); + return _clGetSamplerInfo(sampler, param_name, param_value_size, param_value, param_value_size_ret); +} +inline cl_program CL_API_CALL DEFINE_CL_FUNCTION_NAME(CreateProgramWithSource)(cl_context context, cl_uint count, const char** strings, const size_t* lengths, cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_0 +{ + typedef cl_program(CL_API_CALL * PFNclCreateProgramWithSource)(cl_context context, cl_uint count, const char** strings, const size_t* lengths, cl_int * errcode_ret); + static PFNclCreateProgramWithSource _clCreateProgramWithSource = (PFNclCreateProgramWithSource)cl::internals::getClFunction(cl::CLFunctions::CreateProgramWithSource); + return _clCreateProgramWithSource(context, count, strings, lengths, errcode_ret); +} +inline 
cl_program CL_API_CALL DEFINE_CL_FUNCTION_NAME(CreateProgramWithBinary)(cl_context context, cl_uint num_devices, const cl_device_id* device_list, const size_t* lengths, const unsigned char** binaries, cl_int* binary_status, cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_0 +{ + typedef cl_program(CL_API_CALL * PFNclCreateProgramWithBinary)(cl_context context, cl_uint num_devices, const cl_device_id * device_list, const size_t* lengths, const unsigned char** binaries, cl_int * binary_status, cl_int * errcode_ret); + static PFNclCreateProgramWithBinary _clCreateProgramWithBinary = (PFNclCreateProgramWithBinary)cl::internals::getClFunction(cl::CLFunctions::CreateProgramWithBinary); + return _clCreateProgramWithBinary(context, num_devices, device_list, lengths, binaries, binary_status, errcode_ret); +} +inline cl_program CL_API_CALL DEFINE_CL_FUNCTION_NAME(CreateProgramWithBuiltInKernels)(cl_context context, cl_uint num_devices, const cl_device_id* device_list, const char* kernel_names, cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_2 +{ + typedef cl_program(CL_API_CALL * PFNclCreateProgramWithBuiltInKernels)(cl_context context, cl_uint num_devices, const cl_device_id * device_list, const char* kernel_names, cl_int * errcode_ret); + static PFNclCreateProgramWithBuiltInKernels _clCreateProgramWithBuiltInKernels = (PFNclCreateProgramWithBuiltInKernels)cl::internals::getClFunction(cl::CLFunctions::CreateProgramWithBuiltInKernels); + return _clCreateProgramWithBuiltInKernels(context, num_devices, device_list, kernel_names, errcode_ret); +} +#ifdef CL_API_SUFFIX__VERSION_2_1 +inline cl_program CL_API_CALL DEFINE_CL_FUNCTION_NAME(CreateProgramWithIL)(cl_context context, const void* il, size_t length, cl_int* errcode_ret) CL_API_SUFFIX__VERSION_2_1 +{ + typedef cl_program(CL_API_CALL * PFNclCreateProgramWithIL)(cl_context context, const void* il, size_t length, cl_int * errcode_ret); + static PFNclCreateProgramWithIL _clCreateProgramWithIL = (PFNclCreateProgramWithIL)cl::internals::getClFunction(cl::CLFunctions::CreateProgramWithIL); + return _clCreateProgramWithIL(context, il, length, errcode_ret); +} +#endif +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(RetainProgram)(cl_program program) CL_API_SUFFIX__VERSION_1_0 +{ + typedef cl_int(CL_API_CALL * PFNclRetainProgram)(cl_program program); + static PFNclRetainProgram _clRetainProgram = (PFNclRetainProgram)cl::internals::getClFunction(cl::CLFunctions::RetainProgram); + return _clRetainProgram(program); +} +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(ReleaseProgram)(cl_program program) CL_API_SUFFIX__VERSION_1_0 +{ + typedef cl_int(CL_API_CALL * PFNclReleaseProgram)(cl_program program); + static PFNclReleaseProgram _clReleaseProgram = (PFNclReleaseProgram)cl::internals::getClFunction(cl::CLFunctions::ReleaseProgram); + return _clReleaseProgram(program); +} +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(BuildProgram)(cl_program program, cl_uint num_devices, const cl_device_id* device_list, const char* options, void (CL_CALLBACK* pfn_notify)(cl_program program, void* user_data), void* user_data) CL_API_SUFFIX__VERSION_1_0 +{ + typedef cl_int(CL_API_CALL * PFNclBuildProgram)(cl_program program, cl_uint num_devices, const cl_device_id * device_list, const char* options, void (CL_CALLBACK * pfn_notify)(cl_program program, void* user_data), void* user_data); + static PFNclBuildProgram _clBuildProgram = (PFNclBuildProgram)cl::internals::getClFunction(cl::CLFunctions::BuildProgram); + return _clBuildProgram(program, num_devices, device_list, 
options, pfn_notify, user_data); +} +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(CompileProgram)(cl_program program, cl_uint num_devices, const cl_device_id* device_list, const char* options, cl_uint num_input_headers, const cl_program* input_headers, const char** header_include_names, void (CL_CALLBACK* pfn_notify)(cl_program program, void* user_data), void* user_data) CL_API_SUFFIX__VERSION_1_2 +{ + typedef cl_int(CL_API_CALL * PFNclCompileProgram)(cl_program program, cl_uint num_devices, const cl_device_id * device_list, const char* options, cl_uint num_input_headers, const cl_program * input_headers, const char** header_include_names, void (CL_CALLBACK * pfn_notify)(cl_program program, void* user_data), void* user_data); + static PFNclCompileProgram _clCompileProgram = (PFNclCompileProgram)cl::internals::getClFunction(cl::CLFunctions::CompileProgram); + return _clCompileProgram(program, num_devices, device_list, options, num_input_headers, input_headers, header_include_names, pfn_notify, user_data); +} +inline cl_program CL_API_CALL DEFINE_CL_FUNCTION_NAME(LinkProgram)(cl_context context, cl_uint num_devices, const cl_device_id* device_list, const char* options, cl_uint num_input_programs, const cl_program* input_programs, void (CL_CALLBACK* pfn_notify)(cl_program program, void* user_data), void* user_data, cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_2 +{ + typedef cl_program(CL_API_CALL * PFNclLinkProgram)(cl_context context, cl_uint num_devices, const cl_device_id * device_list, const char* options, cl_uint num_input_programs, const cl_program * input_programs, void (CL_CALLBACK * pfn_notify)(cl_program program, void* user_data), void* user_data, cl_int * errcode_ret); + static PFNclLinkProgram _clLinkProgram = (PFNclLinkProgram)cl::internals::getClFunction(cl::CLFunctions::LinkProgram); + return _clLinkProgram(context, num_devices, device_list, options, num_input_programs, input_programs, pfn_notify, user_data, errcode_ret); +} +#ifdef CL_API_SUFFIX__VERSION_2_2 +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(SetProgramReleaseCallback)(cl_program program, void (CL_CALLBACK* pfn_notify)(cl_program program, void* user_data), void* user_data) CL_API_SUFFIX__VERSION_2_2 +{ + typedef cl_int(CL_API_CALL * PFNclSetProgramReleaseCallback)(cl_program program, void (CL_CALLBACK * pfn_notify)(cl_program program, void* user_data), void* user_data); + static PFNclSetProgramReleaseCallback _clSetProgramReleaseCallback = (PFNclSetProgramReleaseCallback)cl::internals::getClFunction(cl::CLFunctions::SetProgramReleaseCallback); + return _clSetProgramReleaseCallback(program, pfn_notify, user_data); +} +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(SetProgramSpecializationConstant)(cl_program program, cl_uint spec_id, size_t spec_size, const void* spec_value) CL_API_SUFFIX__VERSION_2_2 +{ + typedef cl_int(CL_API_CALL * PFNclSetProgramSpecializationConstant)(cl_program program, cl_uint spec_id, size_t spec_size, const void* spec_value); + static PFNclSetProgramSpecializationConstant _clSetProgramSpecializationConstant = (PFNclSetProgramSpecializationConstant)cl::internals::getClFunction(cl::CLFunctions::SetProgramSpecializationConstant); + return _clSetProgramSpecializationConstant(program, spec_id, spec_size, spec_value); +} +#endif +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(UnloadPlatformCompiler)(cl_platform_id platform) CL_API_SUFFIX__VERSION_1_2 +{ + typedef cl_int(CL_API_CALL * PFNclUnloadPlatformCompiler)(cl_platform_id platform); + static PFNclUnloadPlatformCompiler 
_clUnloadPlatformCompiler = (PFNclUnloadPlatformCompiler)cl::internals::getClFunction(cl::CLFunctions::UnloadPlatformCompiler); + return _clUnloadPlatformCompiler(platform); +} +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(GetProgramInfo)(cl_program program, cl_program_info param_name, size_t param_value_size, void* param_value, size_t* param_value_size_ret) CL_API_SUFFIX__VERSION_1_0 +{ + typedef cl_int(CL_API_CALL * PFNclGetProgramInfo)(cl_program program, cl_program_info param_name, size_t param_value_size, void* param_value, size_t* param_value_size_ret); + static PFNclGetProgramInfo _clGetProgramInfo = (PFNclGetProgramInfo)cl::internals::getClFunction(cl::CLFunctions::GetProgramInfo); + return _clGetProgramInfo(program, param_name, param_value_size, param_value, param_value_size_ret); +} +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(GetProgramBuildInfo)(cl_program program, cl_device_id device, cl_program_build_info param_name, size_t param_value_size, void* param_value, size_t* param_value_size_ret) CL_API_SUFFIX__VERSION_1_0 +{ + typedef cl_int(CL_API_CALL * PFNclGetProgramBuildInfo)(cl_program program, cl_device_id device, cl_program_build_info param_name, size_t param_value_size, void* param_value, size_t* param_value_size_ret); + static PFNclGetProgramBuildInfo _clGetProgramBuildInfo = (PFNclGetProgramBuildInfo)cl::internals::getClFunction(cl::CLFunctions::GetProgramBuildInfo); + return _clGetProgramBuildInfo(program, device, param_name, param_value_size, param_value, param_value_size_ret); +} +inline cl_kernel CL_API_CALL DEFINE_CL_FUNCTION_NAME(CreateKernel)(cl_program program, const char* kernel_name, cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_0 +{ + typedef cl_kernel(CL_API_CALL * PFNclCreateKernel)(cl_program program, const char* kernel_name, cl_int * errcode_ret); + static PFNclCreateKernel _clCreateKernel = (PFNclCreateKernel)cl::internals::getClFunction(cl::CLFunctions::CreateKernel); + return _clCreateKernel(program, kernel_name, errcode_ret); +} +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(CreateKernelsInProgram)(cl_program program, cl_uint num_kernels, cl_kernel* kernels, cl_uint* num_kernels_ret) CL_API_SUFFIX__VERSION_1_0 +{ + typedef cl_int(CL_API_CALL * PFNclCreateKernelsInProgram)(cl_program program, cl_uint num_kernels, cl_kernel * kernels, cl_uint * num_kernels_ret); + static PFNclCreateKernelsInProgram _clCreateKernelsInProgram = (PFNclCreateKernelsInProgram)cl::internals::getClFunction(cl::CLFunctions::CreateKernelsInProgram); + return _clCreateKernelsInProgram(program, num_kernels, kernels, num_kernels_ret); +} +#ifdef CL_API_SUFFIX__VERSION_2_1 +inline cl_kernel CL_API_CALL DEFINE_CL_FUNCTION_NAME(CloneKernel)(cl_kernel source_kernel, cl_int* errcode_ret) CL_API_SUFFIX__VERSION_2_1 +{ + typedef cl_kernel(CL_API_CALL * PFNclCloneKernel)(cl_kernel source_kernel, cl_int * errcode_ret); + static PFNclCloneKernel _clCloneKernel = (PFNclCloneKernel)cl::internals::getClFunction(cl::CLFunctions::CloneKernel); + return _clCloneKernel(source_kernel, errcode_ret); +} +#endif +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(RetainKernel)(cl_kernel kernel) CL_API_SUFFIX__VERSION_1_0 +{ + typedef cl_int(CL_API_CALL * PFNclRetainKernel)(cl_kernel kernel); + static PFNclRetainKernel _clRetainKernel = (PFNclRetainKernel)cl::internals::getClFunction(cl::CLFunctions::RetainKernel); + return _clRetainKernel(kernel); +} +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(ReleaseKernel)(cl_kernel kernel) CL_API_SUFFIX__VERSION_1_0 +{ + typedef 
cl_int(CL_API_CALL * PFNclReleaseKernel)(cl_kernel kernel); + static PFNclReleaseKernel _clReleaseKernel = (PFNclReleaseKernel)cl::internals::getClFunction(cl::CLFunctions::ReleaseKernel); + return _clReleaseKernel(kernel); +} +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(SetKernelArg)(cl_kernel kernel, cl_uint arg_index, size_t arg_size, const void* arg_value) CL_API_SUFFIX__VERSION_1_0 +{ + typedef cl_int(CL_API_CALL * PFNclSetKernelArg)(cl_kernel kernel, cl_uint arg_index, size_t arg_size, const void* arg_value); + static PFNclSetKernelArg _clSetKernelArg = (PFNclSetKernelArg)cl::internals::getClFunction(cl::CLFunctions::SetKernelArg); + return _clSetKernelArg(kernel, arg_index, arg_size, arg_value); +} +#ifdef CL_API_SUFFIX__VERSION_2_0 +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(SetKernelArgSVMPointer)(cl_kernel kernel, cl_uint arg_index, const void* arg_value) CL_API_SUFFIX__VERSION_2_0 +{ + typedef cl_int(CL_API_CALL * PFNclSetKernelArgSVMPointer)(cl_kernel kernel, cl_uint arg_index, const void* arg_value); + static PFNclSetKernelArgSVMPointer _clSetKernelArgSVMPointer = (PFNclSetKernelArgSVMPointer)cl::internals::getClFunction(cl::CLFunctions::SetKernelArgSVMPointer); + return _clSetKernelArgSVMPointer(kernel, arg_index, arg_value); +} +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(SetKernelExecInfo)(cl_kernel kernel, cl_kernel_exec_info param_name, size_t param_value_size, const void* param_value) CL_API_SUFFIX__VERSION_2_0 +{ + typedef cl_int(CL_API_CALL * PFNclSetKernelExecInfo)(cl_kernel kernel, cl_kernel_exec_info param_name, size_t param_value_size, const void* param_value); + static PFNclSetKernelExecInfo _clSetKernelExecInfo = (PFNclSetKernelExecInfo)cl::internals::getClFunction(cl::CLFunctions::SetKernelExecInfo); + return _clSetKernelExecInfo(kernel, param_name, param_value_size, param_value); +} +#endif +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(GetKernelInfo)(cl_kernel kernel, cl_kernel_info param_name, size_t param_value_size, void* param_value, size_t* param_value_size_ret) CL_API_SUFFIX__VERSION_1_0 +{ + typedef cl_int(CL_API_CALL * PFNclGetKernelInfo)(cl_kernel kernel, cl_kernel_info param_name, size_t param_value_size, void* param_value, size_t* param_value_size_ret); + static PFNclGetKernelInfo _clGetKernelInfo = (PFNclGetKernelInfo)cl::internals::getClFunction(cl::CLFunctions::GetKernelInfo); + return _clGetKernelInfo(kernel, param_name, param_value_size, param_value, param_value_size_ret); +} +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(GetKernelArgInfo)(cl_kernel kernel, cl_uint arg_indx, cl_kernel_arg_info param_name, size_t param_value_size, void* param_value, size_t* param_value_size_ret) CL_API_SUFFIX__VERSION_1_2 +{ + typedef cl_int(CL_API_CALL * PFNclGetKernelArgInfo)(cl_kernel kernel, cl_uint arg_indx, cl_kernel_arg_info param_name, size_t param_value_size, void* param_value, size_t* param_value_size_ret); + static PFNclGetKernelArgInfo _clGetKernelArgInfo = (PFNclGetKernelArgInfo)cl::internals::getClFunction(cl::CLFunctions::GetKernelArgInfo); + return _clGetKernelArgInfo(kernel, arg_indx, param_name, param_value_size, param_value, param_value_size_ret); +} +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(GetKernelWorkGroupInfo)(cl_kernel kernel, cl_device_id device, cl_kernel_work_group_info param_name, size_t param_value_size, void* param_value, size_t* param_value_size_ret) CL_API_SUFFIX__VERSION_1_0 +{ + typedef cl_int(CL_API_CALL * PFNclGetKernelWorkGroupInfo)(cl_kernel kernel, cl_device_id device, 
cl_kernel_work_group_info param_name, size_t param_value_size, void* param_value, size_t* param_value_size_ret); + static PFNclGetKernelWorkGroupInfo _clGetKernelWorkGroupInfo = (PFNclGetKernelWorkGroupInfo)cl::internals::getClFunction(cl::CLFunctions::GetKernelWorkGroupInfo); + return _clGetKernelWorkGroupInfo(kernel, device, param_name, param_value_size, param_value, param_value_size_ret); +} +#ifdef CL_API_SUFFIX__VERSION_2_1 +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(GetKernelSubGroupInfo)(cl_kernel kernel, cl_device_id device, cl_kernel_sub_group_info param_name, size_t input_value_size, const void* input_value, size_t param_value_size, void* param_value, size_t* param_value_size_ret) CL_API_SUFFIX__VERSION_2_1 +{ + typedef cl_int(CL_API_CALL * PFNclGetKernelSubGroupInfo)(cl_kernel kernel, cl_device_id device, cl_kernel_sub_group_info param_name, size_t input_value_size, const void* input_value, size_t param_value_size, void* param_value, size_t* param_value_size_ret); + static PFNclGetKernelSubGroupInfo _clGetKernelSubGroupInfo = (PFNclGetKernelSubGroupInfo)cl::internals::getClFunction(cl::CLFunctions::GetKernelSubGroupInfo); + return _clGetKernelSubGroupInfo(kernel, device, param_name, input_value_size, input_value, param_value_size, param_value, param_value_size_ret); +} +#endif +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(WaitForEvents)(cl_uint num_events, const cl_event* event_list) CL_API_SUFFIX__VERSION_1_0 +{ + typedef cl_int(CL_API_CALL * PFNclWaitForEvents)(cl_uint num_events, const cl_event * event_list); + static PFNclWaitForEvents _clWaitForEvents = (PFNclWaitForEvents)cl::internals::getClFunction(cl::CLFunctions::WaitForEvents); + return _clWaitForEvents(num_events, event_list); +} +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(GetEventInfo)(cl_event event, cl_event_info param_name, size_t param_value_size, void* param_value, size_t* param_value_size_ret) CL_API_SUFFIX__VERSION_1_0 +{ + typedef cl_int(CL_API_CALL * PFNclGetEventInfo)(cl_event event, cl_event_info param_name, size_t param_value_size, void* param_value, size_t* param_value_size_ret); + static PFNclGetEventInfo _clGetEventInfo = (PFNclGetEventInfo)cl::internals::getClFunction(cl::CLFunctions::GetEventInfo); + return _clGetEventInfo(event, param_name, param_value_size, param_value, param_value_size_ret); +} +inline cl_event CL_API_CALL DEFINE_CL_FUNCTION_NAME(CreateUserEvent)(cl_context context, cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_1 +{ + typedef cl_event(CL_API_CALL * PFNclCreateUserEvent)(cl_context context, cl_int * errcode_ret); + static PFNclCreateUserEvent _clCreateUserEvent = (PFNclCreateUserEvent)cl::internals::getClFunction(cl::CLFunctions::CreateUserEvent); + return _clCreateUserEvent(context, errcode_ret); +} +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(RetainEvent)(cl_event event) CL_API_SUFFIX__VERSION_1_0 +{ + typedef cl_int(CL_API_CALL * PFNclRetainEvent)(cl_event event); + static PFNclRetainEvent _clRetainEvent = (PFNclRetainEvent)cl::internals::getClFunction(cl::CLFunctions::RetainEvent); + return _clRetainEvent(event); +} +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(ReleaseEvent)(cl_event event) CL_API_SUFFIX__VERSION_1_0 +{ + typedef cl_int(CL_API_CALL * PFNclReleaseEvent)(cl_event event); + static PFNclReleaseEvent _clReleaseEvent = (PFNclReleaseEvent)cl::internals::getClFunction(cl::CLFunctions::ReleaseEvent); + return _clReleaseEvent(event); +} +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(SetUserEventStatus)(cl_event event, cl_int 
execution_status) CL_API_SUFFIX__VERSION_1_1 +{ + typedef cl_int(CL_API_CALL * PFNclSetUserEventStatus)(cl_event event, cl_int execution_status); + static PFNclSetUserEventStatus _clSetUserEventStatus = (PFNclSetUserEventStatus)cl::internals::getClFunction(cl::CLFunctions::SetUserEventStatus); + return _clSetUserEventStatus(event, execution_status); +} +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(SetEventCallback)(cl_event event, cl_int command_exec_callback_type, void (CL_CALLBACK* pfn_notify)(cl_event, cl_int, void*), void* user_data) CL_API_SUFFIX__VERSION_1_1 +{ + typedef cl_int(CL_API_CALL * PFNclSetEventCallback)(cl_event event, cl_int command_exec_callback_type, void (CL_CALLBACK * pfn_notify)(cl_event, cl_int, void*), void* user_data); + static PFNclSetEventCallback _clSetEventCallback = (PFNclSetEventCallback)cl::internals::getClFunction(cl::CLFunctions::SetEventCallback); + return _clSetEventCallback(event, command_exec_callback_type, pfn_notify, user_data); +} +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(GetEventProfilingInfo)(cl_event event, cl_profiling_info param_name, size_t param_value_size, void* param_value, size_t* param_value_size_ret) CL_API_SUFFIX__VERSION_1_0 +{ + typedef cl_int(CL_API_CALL * PFNclGetEventProfilingInfo)(cl_event event, cl_profiling_info param_name, size_t param_value_size, void* param_value, size_t* param_value_size_ret); + static PFNclGetEventProfilingInfo _clGetEventProfilingInfo = (PFNclGetEventProfilingInfo)cl::internals::getClFunction(cl::CLFunctions::GetEventProfilingInfo); + return _clGetEventProfilingInfo(event, param_name, param_value_size, param_value, param_value_size_ret); +} +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(Flush)(cl_command_queue command_queue) CL_API_SUFFIX__VERSION_1_0 +{ + typedef cl_int(CL_API_CALL * PFNclFlush)(cl_command_queue command_queue); + static PFNclFlush _clFlush = (PFNclFlush)cl::internals::getClFunction(cl::CLFunctions::Flush); + return _clFlush(command_queue); +} +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(Finish)(cl_command_queue command_queue) CL_API_SUFFIX__VERSION_1_0 +{ + typedef cl_int(CL_API_CALL * PFNclFinish)(cl_command_queue command_queue); + static PFNclFinish _clFinish = (PFNclFinish)cl::internals::getClFunction(cl::CLFunctions::Finish); + return _clFinish(command_queue); +} +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(EnqueueReadBuffer)(cl_command_queue command_queue, cl_mem buffer, cl_bool blocking_read, size_t offset, size_t size, void* ptr, cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event) CL_API_SUFFIX__VERSION_1_0 +{ + typedef cl_int(CL_API_CALL * PFNclEnqueueReadBuffer)(cl_command_queue command_queue, cl_mem buffer, cl_bool blocking_read, size_t offset, size_t size, void* ptr, cl_uint num_events_in_wait_list, const cl_event * event_wait_list, cl_event * event); + static PFNclEnqueueReadBuffer _clEnqueueReadBuffer = (PFNclEnqueueReadBuffer)cl::internals::getClFunction(cl::CLFunctions::EnqueueReadBuffer); + return _clEnqueueReadBuffer(command_queue, buffer, blocking_read, offset, size, ptr, num_events_in_wait_list, event_wait_list, event); +} +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(EnqueueReadBufferRect)(cl_command_queue command_queue, cl_mem buffer, cl_bool blocking_read, const size_t* buffer_offset, const size_t* host_offset, const size_t* region, size_t buffer_row_pitch, size_t buffer_slice_pitch, size_t host_row_pitch, size_t host_slice_pitch, void* ptr, cl_uint num_events_in_wait_list, const cl_event* 
event_wait_list, cl_event* event) CL_API_SUFFIX__VERSION_1_1 +{ + typedef cl_int(CL_API_CALL * PFNclEnqueueReadBufferRect)(cl_command_queue command_queue, cl_mem buffer, cl_bool blocking_read, const size_t* buffer_offset, const size_t* host_offset, const size_t* region, size_t buffer_row_pitch, size_t buffer_slice_pitch, size_t host_row_pitch, size_t host_slice_pitch, void* ptr, cl_uint num_events_in_wait_list, const cl_event * event_wait_list, cl_event * event); + static PFNclEnqueueReadBufferRect _clEnqueueReadBufferRect = (PFNclEnqueueReadBufferRect)cl::internals::getClFunction(cl::CLFunctions::EnqueueReadBufferRect); + return _clEnqueueReadBufferRect(command_queue, buffer, blocking_read, buffer_offset, host_offset, region, buffer_row_pitch, buffer_slice_pitch, host_row_pitch, host_slice_pitch, ptr, num_events_in_wait_list, event_wait_list, event); +} +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(EnqueueWriteBuffer)(cl_command_queue command_queue, cl_mem buffer, cl_bool blocking_write, size_t offset, size_t size, const void* ptr, cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event) CL_API_SUFFIX__VERSION_1_0 +{ + typedef cl_int(CL_API_CALL * PFNclEnqueueWriteBuffer)(cl_command_queue command_queue, cl_mem buffer, cl_bool blocking_write, size_t offset, size_t size, const void* ptr, cl_uint num_events_in_wait_list, const cl_event * event_wait_list, cl_event * event); + static PFNclEnqueueWriteBuffer _clEnqueueWriteBuffer = (PFNclEnqueueWriteBuffer)cl::internals::getClFunction(cl::CLFunctions::EnqueueWriteBuffer); + return _clEnqueueWriteBuffer(command_queue, buffer, blocking_write, offset, size, ptr, num_events_in_wait_list, event_wait_list, event); +} +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(EnqueueWriteBufferRect)(cl_command_queue command_queue, cl_mem buffer, cl_bool blocking_write, const size_t* buffer_offset, const size_t* host_offset, const size_t* region, size_t buffer_row_pitch, size_t buffer_slice_pitch, size_t host_row_pitch, size_t host_slice_pitch, const void* ptr, cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event) CL_API_SUFFIX__VERSION_1_1 +{ + typedef cl_int(CL_API_CALL * PFNclEnqueueWriteBufferRect)(cl_command_queue command_queue, cl_mem buffer, cl_bool blocking_write, const size_t* buffer_offset, const size_t* host_offset, const size_t* region, size_t buffer_row_pitch, size_t buffer_slice_pitch, size_t host_row_pitch, size_t host_slice_pitch, const void* ptr, cl_uint num_events_in_wait_list, const cl_event * event_wait_list, cl_event * event); + static PFNclEnqueueWriteBufferRect _clEnqueueWriteBufferRect = (PFNclEnqueueWriteBufferRect)cl::internals::getClFunction(cl::CLFunctions::EnqueueWriteBufferRect); + return _clEnqueueWriteBufferRect(command_queue, buffer, blocking_write, buffer_offset, host_offset, region, buffer_row_pitch, buffer_slice_pitch, host_row_pitch, host_slice_pitch, ptr, num_events_in_wait_list, event_wait_list, event); +} +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(EnqueueFillBuffer)(cl_command_queue command_queue, cl_mem buffer, const void* pattern, size_t pattern_size, size_t offset, size_t size, cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event) CL_API_SUFFIX__VERSION_1_2 +{ + typedef cl_int(CL_API_CALL * PFNclEnqueueFillBuffer)(cl_command_queue command_queue, cl_mem buffer, const void* pattern, size_t pattern_size, size_t offset, size_t size, cl_uint num_events_in_wait_list, const cl_event * event_wait_list, cl_event * event); + static 
PFNclEnqueueFillBuffer _clEnqueueFillBuffer = (PFNclEnqueueFillBuffer)cl::internals::getClFunction(cl::CLFunctions::EnqueueFillBuffer); + return _clEnqueueFillBuffer(command_queue, buffer, pattern, pattern_size, offset, size, num_events_in_wait_list, event_wait_list, event); +} +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(EnqueueCopyBuffer)(cl_command_queue command_queue, cl_mem src_buffer, cl_mem dst_buffer, size_t src_offset, size_t dst_offset, size_t size, cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event) CL_API_SUFFIX__VERSION_1_0 +{ + typedef cl_int(CL_API_CALL * PFNclEnqueueCopyBuffer)(cl_command_queue command_queue, cl_mem src_buffer, cl_mem dst_buffer, size_t src_offset, size_t dst_offset, size_t size, cl_uint num_events_in_wait_list, const cl_event * event_wait_list, cl_event * event); + static PFNclEnqueueCopyBuffer _clEnqueueCopyBuffer = (PFNclEnqueueCopyBuffer)cl::internals::getClFunction(cl::CLFunctions::EnqueueCopyBuffer); + return _clEnqueueCopyBuffer(command_queue, src_buffer, dst_buffer, src_offset, dst_offset, size, num_events_in_wait_list, event_wait_list, event); +} +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(EnqueueCopyBufferRect)(cl_command_queue command_queue, cl_mem src_buffer, cl_mem dst_buffer, const size_t* src_origin, const size_t* dst_origin, const size_t* region, size_t src_row_pitch, size_t src_slice_pitch, size_t dst_row_pitch, size_t dst_slice_pitch, cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event) CL_API_SUFFIX__VERSION_1_1 +{ + typedef cl_int(CL_API_CALL * PFNclEnqueueCopyBufferRect)(cl_command_queue command_queue, cl_mem src_buffer, cl_mem dst_buffer, const size_t* src_origin, const size_t* dst_origin, const size_t* region, size_t src_row_pitch, size_t src_slice_pitch, size_t dst_row_pitch, size_t dst_slice_pitch, cl_uint num_events_in_wait_list, const cl_event * event_wait_list, cl_event * event); + static PFNclEnqueueCopyBufferRect _clEnqueueCopyBufferRect = (PFNclEnqueueCopyBufferRect)cl::internals::getClFunction(cl::CLFunctions::EnqueueCopyBufferRect); + return _clEnqueueCopyBufferRect(command_queue, src_buffer, dst_buffer, src_origin, dst_origin, region, src_row_pitch, src_slice_pitch, dst_row_pitch, dst_slice_pitch, num_events_in_wait_list, event_wait_list, event); +} +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(EnqueueReadImage)(cl_command_queue command_queue, cl_mem image, cl_bool blocking_read, const size_t* origin, const size_t* region, size_t row_pitch, size_t slice_pitch, void* ptr, cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event) CL_API_SUFFIX__VERSION_1_0 +{ + typedef cl_int(CL_API_CALL * PFNclEnqueueReadImage)(cl_command_queue command_queue, cl_mem image, cl_bool blocking_read, const size_t* origin, const size_t* region, size_t row_pitch, size_t slice_pitch, void* ptr, cl_uint num_events_in_wait_list, const cl_event * event_wait_list, cl_event * event); + static PFNclEnqueueReadImage _clEnqueueReadImage = (PFNclEnqueueReadImage)cl::internals::getClFunction(cl::CLFunctions::EnqueueReadImage); + return _clEnqueueReadImage(command_queue, image, blocking_read, origin, region, row_pitch, slice_pitch, ptr, num_events_in_wait_list, event_wait_list, event); +} +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(EnqueueWriteImage)(cl_command_queue command_queue, cl_mem image, cl_bool blocking_write, const size_t* origin, const size_t* region, size_t input_row_pitch, size_t input_slice_pitch, const void* ptr, cl_uint 
num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event) CL_API_SUFFIX__VERSION_1_0 +{ + typedef cl_int(CL_API_CALL * PFNclEnqueueWriteImage)(cl_command_queue command_queue, cl_mem image, cl_bool blocking_write, const size_t* origin, const size_t* region, size_t input_row_pitch, size_t input_slice_pitch, const void* ptr, cl_uint num_events_in_wait_list, const cl_event * event_wait_list, cl_event * event); + static PFNclEnqueueWriteImage _clEnqueueWriteImage = (PFNclEnqueueWriteImage)cl::internals::getClFunction(cl::CLFunctions::EnqueueWriteImage); + return _clEnqueueWriteImage(command_queue, image, blocking_write, origin, region, input_row_pitch, input_slice_pitch, ptr, num_events_in_wait_list, event_wait_list, event); +} +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(EnqueueFillImage)(cl_command_queue command_queue, cl_mem image, const void* fill_color, const size_t* origin, const size_t* region, cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event) CL_API_SUFFIX__VERSION_1_2 +{ + typedef cl_int(CL_API_CALL * PFNclEnqueueFillImage)(cl_command_queue command_queue, cl_mem image, const void* fill_color, const size_t* origin, const size_t* region, cl_uint num_events_in_wait_list, const cl_event * event_wait_list, cl_event * event); + static PFNclEnqueueFillImage _clEnqueueFillImage = (PFNclEnqueueFillImage)cl::internals::getClFunction(cl::CLFunctions::EnqueueFillImage); + return _clEnqueueFillImage(command_queue, image, fill_color, origin, region, num_events_in_wait_list, event_wait_list, event); +} +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(EnqueueCopyImage)(cl_command_queue command_queue, cl_mem src_image, cl_mem dst_image, const size_t* src_origin, const size_t* dst_origin, const size_t* region, cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event) CL_API_SUFFIX__VERSION_1_0 +{ + typedef cl_int(CL_API_CALL * PFNclEnqueueCopyImage)(cl_command_queue command_queue, cl_mem src_image, cl_mem dst_image, const size_t* src_origin, const size_t* dst_origin, const size_t* region, cl_uint num_events_in_wait_list, const cl_event * event_wait_list, cl_event * event); + static PFNclEnqueueCopyImage _clEnqueueCopyImage = (PFNclEnqueueCopyImage)cl::internals::getClFunction(cl::CLFunctions::EnqueueCopyImage); + return _clEnqueueCopyImage(command_queue, src_image, dst_image, src_origin, dst_origin, region, num_events_in_wait_list, event_wait_list, event); +} +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(EnqueueCopyImageToBuffer)(cl_command_queue command_queue, cl_mem src_image, cl_mem dst_buffer, const size_t* src_origin, const size_t* region, size_t dst_offset, cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event) CL_API_SUFFIX__VERSION_1_0 +{ + typedef cl_int(CL_API_CALL * PFNclEnqueueCopyImageToBuffer)(cl_command_queue command_queue, cl_mem src_image, cl_mem dst_buffer, const size_t* src_origin, const size_t* region, size_t dst_offset, cl_uint num_events_in_wait_list, const cl_event * event_wait_list, cl_event * event); + static PFNclEnqueueCopyImageToBuffer _clEnqueueCopyImageToBuffer = (PFNclEnqueueCopyImageToBuffer)cl::internals::getClFunction(cl::CLFunctions::EnqueueCopyImageToBuffer); + return _clEnqueueCopyImageToBuffer(command_queue, src_image, dst_buffer, src_origin, region, dst_offset, num_events_in_wait_list, event_wait_list, event); +} +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(EnqueueCopyBufferToImage)(cl_command_queue command_queue, cl_mem src_buffer, cl_mem 
dst_image, size_t src_offset, const size_t* dst_origin, const size_t* region, cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event) CL_API_SUFFIX__VERSION_1_0 +{ + typedef cl_int(CL_API_CALL * PFNclEnqueueCopyBufferToImage)(cl_command_queue command_queue, cl_mem src_buffer, cl_mem dst_image, size_t src_offset, const size_t* dst_origin, const size_t* region, cl_uint num_events_in_wait_list, const cl_event * event_wait_list, cl_event * event); + static PFNclEnqueueCopyBufferToImage _clEnqueueCopyBufferToImage = (PFNclEnqueueCopyBufferToImage)cl::internals::getClFunction(cl::CLFunctions::EnqueueCopyBufferToImage); + return _clEnqueueCopyBufferToImage(command_queue, src_buffer, dst_image, src_offset, dst_origin, region, num_events_in_wait_list, event_wait_list, event); +} +inline void* CL_API_CALL DEFINE_CL_FUNCTION_NAME(EnqueueMapBuffer)(cl_command_queue command_queue, cl_mem buffer, cl_bool blocking_map, cl_map_flags map_flags, size_t offset, size_t size, cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event, cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_0 +{ + typedef void* (CL_API_CALL * PFNclEnqueueMapBuffer)(cl_command_queue command_queue, cl_mem buffer, cl_bool blocking_map, cl_map_flags map_flags, size_t offset, size_t size, cl_uint num_events_in_wait_list, const cl_event * event_wait_list, cl_event * event, cl_int * errcode_ret); + static PFNclEnqueueMapBuffer _clEnqueueMapBuffer = (PFNclEnqueueMapBuffer)cl::internals::getClFunction(cl::CLFunctions::EnqueueMapBuffer); + return _clEnqueueMapBuffer(command_queue, buffer, blocking_map, map_flags, offset, size, num_events_in_wait_list, event_wait_list, event, errcode_ret); +} +inline void* CL_API_CALL DEFINE_CL_FUNCTION_NAME(EnqueueMapImage)(cl_command_queue command_queue, cl_mem image, cl_bool blocking_map, cl_map_flags map_flags, const size_t* origin, const size_t* region, size_t* image_row_pitch, size_t* image_slice_pitch, cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event, cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_0 +{ + typedef void* (CL_API_CALL * PFNclEnqueueMapImage)(cl_command_queue command_queue, cl_mem image, cl_bool blocking_map, cl_map_flags map_flags, const size_t* origin, const size_t* region, size_t* image_row_pitch, size_t* image_slice_pitch, cl_uint num_events_in_wait_list, const cl_event * event_wait_list, cl_event * event, cl_int * errcode_ret); + static PFNclEnqueueMapImage _clEnqueueMapImage = (PFNclEnqueueMapImage)cl::internals::getClFunction(cl::CLFunctions::EnqueueMapImage); + return _clEnqueueMapImage(command_queue, image, blocking_map, map_flags, origin, region, image_row_pitch, image_slice_pitch, num_events_in_wait_list, event_wait_list, event, errcode_ret); +} +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(EnqueueUnmapMemObject)(cl_command_queue command_queue, cl_mem memobj, void* mapped_ptr, cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event) CL_API_SUFFIX__VERSION_1_0 +{ + typedef cl_int(CL_API_CALL * PFNclEnqueueUnmapMemObject)(cl_command_queue command_queue, cl_mem memobj, void* mapped_ptr, cl_uint num_events_in_wait_list, const cl_event * event_wait_list, cl_event * event); + static PFNclEnqueueUnmapMemObject _clEnqueueUnmapMemObject = (PFNclEnqueueUnmapMemObject)cl::internals::getClFunction(cl::CLFunctions::EnqueueUnmapMemObject); + return _clEnqueueUnmapMemObject(command_queue, memobj, mapped_ptr, num_events_in_wait_list, event_wait_list, event); +} +inline cl_int CL_API_CALL 
DEFINE_CL_FUNCTION_NAME(EnqueueMigrateMemObjects)(cl_command_queue command_queue, cl_uint num_mem_objects, const cl_mem* mem_objects, cl_mem_migration_flags flags, cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event) CL_API_SUFFIX__VERSION_1_2 +{ + typedef cl_int(CL_API_CALL * PFNclEnqueueMigrateMemObjects)(cl_command_queue command_queue, cl_uint num_mem_objects, const cl_mem * mem_objects, cl_mem_migration_flags flags, cl_uint num_events_in_wait_list, const cl_event * event_wait_list, cl_event * event); + static PFNclEnqueueMigrateMemObjects _clEnqueueMigrateMemObjects = (PFNclEnqueueMigrateMemObjects)cl::internals::getClFunction(cl::CLFunctions::EnqueueMigrateMemObjects); + return _clEnqueueMigrateMemObjects(command_queue, num_mem_objects, mem_objects, flags, num_events_in_wait_list, event_wait_list, event); +} +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(EnqueueNDRangeKernel)(cl_command_queue command_queue, cl_kernel kernel, cl_uint work_dim, const size_t* global_work_offset, const size_t* global_work_size, const size_t* local_work_size, cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event) CL_API_SUFFIX__VERSION_1_0 +{ + typedef cl_int(CL_API_CALL * PFNclEnqueueNDRangeKernel)(cl_command_queue command_queue, cl_kernel kernel, cl_uint work_dim, const size_t* global_work_offset, const size_t* global_work_size, const size_t* local_work_size, cl_uint num_events_in_wait_list, const cl_event * event_wait_list, cl_event * event); + static PFNclEnqueueNDRangeKernel _clEnqueueNDRangeKernel = (PFNclEnqueueNDRangeKernel)cl::internals::getClFunction(cl::CLFunctions::EnqueueNDRangeKernel); + return _clEnqueueNDRangeKernel(command_queue, kernel, work_dim, global_work_offset, global_work_size, local_work_size, num_events_in_wait_list, event_wait_list, event); +} +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(EnqueueNativeKernel)(cl_command_queue command_queue, void (CL_CALLBACK* user_func)(void*), void* args, size_t cb_args, cl_uint num_mem_objects, const cl_mem* mem_list, const void** args_mem_loc, cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event) CL_API_SUFFIX__VERSION_1_0 +{ + typedef cl_int(CL_API_CALL * PFNclEnqueueNativeKernel)(cl_command_queue command_queue, void (CL_CALLBACK * user_func)(void*), void* args, size_t cb_args, cl_uint num_mem_objects, const cl_mem * mem_list, const void** args_mem_loc, cl_uint num_events_in_wait_list, const cl_event * event_wait_list, cl_event * event); + static PFNclEnqueueNativeKernel _clEnqueueNativeKernel = (PFNclEnqueueNativeKernel)cl::internals::getClFunction(cl::CLFunctions::EnqueueNativeKernel); + return _clEnqueueNativeKernel(command_queue, user_func, args, cb_args, num_mem_objects, mem_list, args_mem_loc, num_events_in_wait_list, event_wait_list, event); +} +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(EnqueueMarkerWithWaitList)(cl_command_queue command_queue, cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event) CL_API_SUFFIX__VERSION_1_2 +{ + typedef cl_int(CL_API_CALL * PFNclEnqueueMarkerWithWaitList)(cl_command_queue command_queue, cl_uint num_events_in_wait_list, const cl_event * event_wait_list, cl_event * event); + static PFNclEnqueueMarkerWithWaitList _clEnqueueMarkerWithWaitList = (PFNclEnqueueMarkerWithWaitList)cl::internals::getClFunction(cl::CLFunctions::EnqueueMarkerWithWaitList); + return _clEnqueueMarkerWithWaitList(command_queue, num_events_in_wait_list, event_wait_list, event); +} +inline cl_int 
CL_API_CALL DEFINE_CL_FUNCTION_NAME(EnqueueBarrierWithWaitList)(cl_command_queue command_queue, cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event) CL_API_SUFFIX__VERSION_1_2 +{ + typedef cl_int(CL_API_CALL * PFNclEnqueueBarrierWithWaitList)(cl_command_queue command_queue, cl_uint num_events_in_wait_list, const cl_event * event_wait_list, cl_event * event); + static PFNclEnqueueBarrierWithWaitList _clEnqueueBarrierWithWaitList = (PFNclEnqueueBarrierWithWaitList)cl::internals::getClFunction(cl::CLFunctions::EnqueueBarrierWithWaitList); + return _clEnqueueBarrierWithWaitList(command_queue, num_events_in_wait_list, event_wait_list, event); +} +#ifdef CL_API_SUFFIX__VERSION_2_0 +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(EnqueueSVMFree)(cl_command_queue command_queue, cl_uint num_svm_pointers, void* svm_pointers[], void (CL_CALLBACK* pfn_free_func)(cl_command_queue queue, cl_uint num_svm_pointers, void* svm_pointers[], void* user_data), void* user_data, cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event) CL_API_SUFFIX__VERSION_2_0 +{ + typedef cl_int(CL_API_CALL * PFNclEnqueueSVMFree)(cl_command_queue command_queue, cl_uint num_svm_pointers, void* svm_pointers[], void (CL_CALLBACK * pfn_free_func)(cl_command_queue queue, cl_uint num_svm_pointers, void* svm_pointers[], void* user_data), void* user_data, cl_uint num_events_in_wait_list, const cl_event * event_wait_list, cl_event * event); + static PFNclEnqueueSVMFree _clEnqueueSVMFree = (PFNclEnqueueSVMFree)cl::internals::getClFunction(cl::CLFunctions::EnqueueSVMFree); + return _clEnqueueSVMFree(command_queue, num_svm_pointers, svm_pointers, pfn_free_func, user_data, num_events_in_wait_list, event_wait_list, event); +} +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(EnqueueSVMMemcpy)(cl_command_queue command_queue, cl_bool blocking_copy, void* dst_ptr, const void* src_ptr, size_t size, cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event) CL_API_SUFFIX__VERSION_2_0 +{ + typedef cl_int(CL_API_CALL * PFNclEnqueueSVMMemcpy)(cl_command_queue command_queue, cl_bool blocking_copy, void* dst_ptr, const void* src_ptr, size_t size, cl_uint num_events_in_wait_list, const cl_event * event_wait_list, cl_event * event); + static PFNclEnqueueSVMMemcpy _clEnqueueSVMMemcpy = (PFNclEnqueueSVMMemcpy)cl::internals::getClFunction(cl::CLFunctions::EnqueueSVMMemcpy); + return _clEnqueueSVMMemcpy(command_queue, blocking_copy, dst_ptr, src_ptr, size, num_events_in_wait_list, event_wait_list, event); +} +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(EnqueueSVMMemFill)(cl_command_queue command_queue, void* svm_ptr, const void* pattern, size_t pattern_size, size_t size, cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event) CL_API_SUFFIX__VERSION_2_0 +{ + typedef cl_int(CL_API_CALL * PFNclEnqueueSVMMemFill)(cl_command_queue command_queue, void* svm_ptr, const void* pattern, size_t pattern_size, size_t size, cl_uint num_events_in_wait_list, const cl_event * event_wait_list, cl_event * event); + static PFNclEnqueueSVMMemFill _clEnqueueSVMMemFill = (PFNclEnqueueSVMMemFill)cl::internals::getClFunction(cl::CLFunctions::EnqueueSVMMemFill); + return _clEnqueueSVMMemFill(command_queue, svm_ptr, pattern, pattern_size, size, num_events_in_wait_list, event_wait_list, event); +} +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(EnqueueSVMMap)(cl_command_queue command_queue, cl_bool blocking_map, cl_map_flags flags, void* svm_ptr, size_t size, cl_uint 
num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event) CL_API_SUFFIX__VERSION_2_0 +{ + typedef cl_int(CL_API_CALL * PFNclEnqueueSVMMap)(cl_command_queue command_queue, cl_bool blocking_map, cl_map_flags flags, void* svm_ptr, size_t size, cl_uint num_events_in_wait_list, const cl_event * event_wait_list, cl_event * event); + static PFNclEnqueueSVMMap _clEnqueueSVMMap = (PFNclEnqueueSVMMap)cl::internals::getClFunction(cl::CLFunctions::EnqueueSVMMap); + return _clEnqueueSVMMap(command_queue, blocking_map, flags, svm_ptr, size, num_events_in_wait_list, event_wait_list, event); +} +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(EnqueueSVMUnmap)(cl_command_queue command_queue, void* svm_ptr, cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event) CL_API_SUFFIX__VERSION_2_0 +{ + typedef cl_int(CL_API_CALL * PFNclEnqueueSVMUnmap)(cl_command_queue command_queue, void* svm_ptr, cl_uint num_events_in_wait_list, const cl_event * event_wait_list, cl_event * event); + static PFNclEnqueueSVMUnmap _clEnqueueSVMUnmap = (PFNclEnqueueSVMUnmap)cl::internals::getClFunction(cl::CLFunctions::EnqueueSVMUnmap); + return _clEnqueueSVMUnmap(command_queue, svm_ptr, num_events_in_wait_list, event_wait_list, event); +} +#endif +#ifdef CL_API_SUFFIX__VERSION_2_1 +inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(EnqueueSVMMigrateMem)(cl_command_queue command_queue, cl_uint num_svm_pointers, const void** svm_pointers, const size_t* sizes, cl_mem_migration_flags flags, cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event) CL_API_SUFFIX__VERSION_2_1 +{ + typedef cl_int(CL_API_CALL * PFNclEnqueueSVMMigrateMem)(cl_command_queue command_queue, cl_uint num_svm_pointers, const void** svm_pointers, const size_t* sizes, cl_mem_migration_flags flags, cl_uint num_events_in_wait_list, const cl_event * event_wait_list, cl_event * event); + static PFNclEnqueueSVMMigrateMem _clEnqueueSVMMigrateMem = (PFNclEnqueueSVMMigrateMem)cl::internals::getClFunction(cl::CLFunctions::EnqueueSVMMigrateMem); + return _clEnqueueSVMMigrateMem(command_queue, num_svm_pointers, svm_pointers, sizes, flags, num_events_in_wait_list, event_wait_list, event); + +} +#endif +inline void* CL_API_CALL DEFINE_CL_FUNCTION_NAME(GetExtensionFunctionAddressForPlatform)(cl_platform_id platform, const char* func_name) CL_API_SUFFIX__VERSION_1_2 +{ + typedef void* (CL_API_CALL * PFNclGetExtensionFunctionAddressForPlatform)(cl_platform_id platform, const char* func_name); + static PFNclGetExtensionFunctionAddressForPlatform _clGetExtensionFunctionAddressForPlatform = (PFNclGetExtensionFunctionAddressForPlatform)cl::internals::getClFunction(cl::CLFunctions::GetExtensionFunctionAddressForPlatform); + return _clGetExtensionFunctionAddressForPlatform(platform, func_name); +} +CL_EXT_PREFIX__VERSION_1_1_DEPRECATED inline cl_mem CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED CL_API_CALL DEFINE_CL_FUNCTION_NAME(CreateImage2D)(cl_context context, cl_mem_flags flags, const cl_image_format* image_format, size_t image_width, size_t image_height, size_t image_row_pitch, void* host_ptr, cl_int* errcode_ret) +{ + typedef cl_mem(CL_API_CALL * PFNclCreateImage2D)(cl_context context, cl_mem_flags flags, const cl_image_format * image_format, size_t image_width, size_t image_height, size_t image_row_pitch, void* host_ptr, cl_int * errcode_ret); + static PFNclCreateImage2D _clCreateImage2D = (PFNclCreateImage2D)cl::internals::getClFunction(cl::CLFunctions::CreateImage2D); + return _clCreateImage2D(context, flags, 
image_format, image_width, image_height, image_row_pitch, host_ptr, errcode_ret); +} +CL_EXT_PREFIX__VERSION_1_1_DEPRECATED inline cl_mem CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED CL_API_CALL DEFINE_CL_FUNCTION_NAME(CreateImage3D)(cl_context context, cl_mem_flags flags, const cl_image_format* image_format, size_t image_width, size_t image_height, size_t image_depth, size_t image_row_pitch, size_t image_slice_pitch, void* host_ptr, cl_int* errcode_ret) +{ + typedef cl_mem(CL_API_CALL * PFNclCreateImage3D)(cl_context context, cl_mem_flags flags, const cl_image_format * image_format, size_t image_width, size_t image_height, size_t image_depth, size_t image_row_pitch, size_t image_slice_pitch, void* host_ptr, cl_int * errcode_ret); + static PFNclCreateImage3D _clCreateImage3D = (PFNclCreateImage3D)cl::internals::getClFunction(cl::CLFunctions::CreateImage3D); + return _clCreateImage3D(context, flags, image_format, image_width, image_height, image_depth, image_row_pitch, image_slice_pitch, host_ptr, errcode_ret); +} +CL_EXT_PREFIX__VERSION_1_1_DEPRECATED inline cl_int CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED CL_API_CALL DEFINE_CL_FUNCTION_NAME(EnqueueMarker)(cl_command_queue command_queue, cl_event* event) +{ + typedef cl_int(CL_API_CALL * PFNclEnqueueMarker)(cl_command_queue command_queue, cl_event * event); + static PFNclEnqueueMarker _clEnqueueMarker = (PFNclEnqueueMarker)cl::internals::getClFunction(cl::CLFunctions::EnqueueMarker); + return _clEnqueueMarker(command_queue, event); +} +CL_EXT_PREFIX__VERSION_1_1_DEPRECATED inline cl_int CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED CL_API_CALL DEFINE_CL_FUNCTION_NAME(EnqueueWaitForEvents)(cl_command_queue command_queue, cl_uint num_events, const cl_event* event_list) +{ + typedef cl_int(CL_API_CALL * PFNclEnqueueWaitForEvents)(cl_command_queue command_queue, cl_uint num_events, const cl_event * event_list); + static PFNclEnqueueWaitForEvents _clEnqueueWaitForEvents = (PFNclEnqueueWaitForEvents)cl::internals::getClFunction(cl::CLFunctions::EnqueueWaitForEvents); + return _clEnqueueWaitForEvents(command_queue, num_events, event_list); +} +CL_EXT_PREFIX__VERSION_1_1_DEPRECATED inline cl_int CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED CL_API_CALL DEFINE_CL_FUNCTION_NAME(EnqueueBarrier)(cl_command_queue command_queue) +{ + typedef cl_int(CL_API_CALL * PFNclEnqueueBarrier)(cl_command_queue command_queue); + static PFNclEnqueueBarrier _clEnqueueBarrier = (PFNclEnqueueBarrier)cl::internals::getClFunction(cl::CLFunctions::EnqueueBarrier); + return _clEnqueueBarrier(command_queue); +} +CL_EXT_PREFIX__VERSION_1_1_DEPRECATED inline cl_int CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED CL_API_CALL DEFINE_CL_FUNCTION_NAME(UnloadCompiler)(void) +{ + typedef cl_int(CL_API_CALL * PFNclUnloadCompiler)(void); + static PFNclUnloadCompiler _clUnloadCompiler = (PFNclUnloadCompiler)cl::internals::getClFunction(cl::CLFunctions::UnloadCompiler); + return _clUnloadCompiler(); +} +CL_EXT_PREFIX__VERSION_1_1_DEPRECATED inline void* CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED CL_API_CALL DEFINE_CL_FUNCTION_NAME(GetExtensionFunctionAddress)(const char* func_name) +{ + typedef void* (CL_API_CALL * PFNclGetExtensionFunctionAddress)(const char* func_name); + static PFNclGetExtensionFunctionAddress _clGetExtensionFunctionAddress = (PFNclGetExtensionFunctionAddress)cl::internals::getClFunction(cl::CLFunctions::GetExtensionFunctionAddress); + return _clGetExtensionFunctionAddress(func_name); +} +#ifdef CL_EXT_SUFFIX__VERSION_1_2_DEPRECATED +CL_EXT_PREFIX__VERSION_1_2_DEPRECATED inline cl_command_queue 
CL_EXT_SUFFIX__VERSION_1_2_DEPRECATED CL_API_CALL DEFINE_CL_FUNCTION_NAME(CreateCommandQueue)(cl_context context, cl_device_id device, cl_command_queue_properties properties, cl_int* errcode_ret)
+{
+ typedef cl_command_queue(CL_API_CALL * PFNclCreateCommandQueue)(cl_context context, cl_device_id device, cl_command_queue_properties properties, cl_int * errcode_ret);
+ static PFNclCreateCommandQueue _clCreateCommandQueue = (PFNclCreateCommandQueue)cl::internals::getClFunction(cl::CLFunctions::CreateCommandQueue);
+ return _clCreateCommandQueue(context, device, properties, errcode_ret);
+}
+CL_EXT_PREFIX__VERSION_1_2_DEPRECATED inline cl_sampler CL_EXT_SUFFIX__VERSION_1_2_DEPRECATED CL_API_CALL DEFINE_CL_FUNCTION_NAME(CreateSampler)(cl_context context, cl_bool normalized_coords, cl_addressing_mode addressing_mode, cl_filter_mode filter_mode, cl_int* errcode_ret)
+{
+ typedef cl_sampler(CL_API_CALL * PFNclCreateSampler)(cl_context context, cl_bool normalized_coords, cl_addressing_mode addressing_mode, cl_filter_mode filter_mode, cl_int * errcode_ret);
+ static PFNclCreateSampler _clCreateSampler = (PFNclCreateSampler)cl::internals::getClFunction(cl::CLFunctions::CreateSampler);
+ return _clCreateSampler(context, normalized_coords, addressing_mode, filter_mode, errcode_ret);
+}
+CL_EXT_PREFIX__VERSION_1_2_DEPRECATED inline cl_int CL_EXT_SUFFIX__VERSION_1_2_DEPRECATED CL_API_CALL DEFINE_CL_FUNCTION_NAME(EnqueueTask)(cl_command_queue command_queue, cl_kernel kernel, cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event)
+{
+ typedef cl_int(CL_API_CALL * PFNclEnqueueTask)(cl_command_queue command_queue, cl_kernel kernel, cl_uint num_events_in_wait_list, const cl_event * event_wait_list, cl_event * event);
+ static PFNclEnqueueTask _clEnqueueTask = (PFNclEnqueueTask)cl::internals::getClFunction(cl::CLFunctions::EnqueueTask);
+ return _clEnqueueTask(command_queue, kernel, num_events_in_wait_list, event_wait_list, event);
+}
+#endif
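+/*
+ * Illustrative note (not part of the original header): the CL/GL interop
+ * wrappers below resolve their entry points lazily through
+ * cl::internals::getClFunction, exactly like the core wrappers above. A
+ * typical interop round-trip, assuming a context `ctx`, a queue `queue` and
+ * a GL buffer object id `glBuf` created elsewhere, is sketched as:
+ *
+ *   cl_int err = CL_SUCCESS;
+ *   cl_mem buf = clCreateFromGLBuffer(ctx, CL_MEM_READ_WRITE, glBuf, &err);
+ *   clEnqueueAcquireGLObjects(queue, 1, &buf, 0, NULL, NULL);
+ *   // ... enqueue kernels that read/write buf ...
+ *   clEnqueueReleaseGLObjects(queue, 1, &buf, 0, NULL, NULL);
+ *
+ * GL work touching glBuf must be finished (e.g. via glFinish) before the
+ * acquire, and the release must complete before GL uses the buffer again.
+ */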
+/* CL_GL */
+inline cl_mem CL_API_CALL DEFINE_CL_FUNCTION_NAME(CreateFromGLBuffer)(cl_context context, cl_mem_flags flags, cl_GLuint bufobj, int* errcode_ret) CL_API_SUFFIX__VERSION_1_0
+{
+ typedef cl_mem(CL_API_CALL * PFN_clCreateFromGLBuffer)(cl_context, cl_mem_flags, cl_GLuint, int*);
+ static PFN_clCreateFromGLBuffer _CreateFromGLBuffer = (PFN_clCreateFromGLBuffer)cl::internals::getClFunction(cl::CLFunctions::CreateFromGLBuffer);
+ return _CreateFromGLBuffer(context, flags, bufobj, errcode_ret);
+}
+inline cl_mem CL_API_CALL DEFINE_CL_FUNCTION_NAME(CreateFromGLTexture)(cl_context context, cl_mem_flags flags, cl_GLenum target, cl_GLint miplevel, cl_GLuint texture, cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_2
+{
+ typedef cl_mem(CL_API_CALL * PFN_clCreateFromGLTexture)(cl_context, cl_mem_flags, cl_GLenum, cl_GLint, cl_GLuint, cl_int*);
+ static PFN_clCreateFromGLTexture _CreateFromGLTexture = (PFN_clCreateFromGLTexture)cl::internals::getClFunction(cl::CLFunctions::CreateFromGLTexture);
+ return _CreateFromGLTexture(context, flags, target, miplevel, texture, errcode_ret);
+}
+inline cl_mem CL_API_CALL DEFINE_CL_FUNCTION_NAME(CreateFromGLRenderbuffer)(cl_context context, cl_mem_flags flags, cl_GLuint renderbuffer, cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_0
+{
+ typedef cl_mem(CL_API_CALL * PFN_clCreateFromGLRenderbuffer)(cl_context context, cl_mem_flags flags, cl_GLuint renderbuffer, cl_int * errcode_ret);
+ static PFN_clCreateFromGLRenderbuffer _CreateFromGLRenderbuffer = (PFN_clCreateFromGLRenderbuffer)cl::internals::getClFunction(cl::CLFunctions::CreateFromGLRenderbuffer);
+ return _CreateFromGLRenderbuffer(context, flags, renderbuffer, errcode_ret);
+}
+inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(GetGLObjectInfo)(cl_mem memobj, cl_gl_object_type* gl_object_type, cl_GLuint* gl_object_name) CL_API_SUFFIX__VERSION_1_0
+{
+ typedef cl_int(CL_API_CALL * PFN_clGetGLObjectInfo)(cl_mem memobj, cl_gl_object_type*, cl_GLuint*);
+ static PFN_clGetGLObjectInfo _GetGLObjectInfo = (PFN_clGetGLObjectInfo)cl::internals::getClFunction(cl::CLFunctions::GetGLObjectInfo);
+ return _GetGLObjectInfo(memobj, gl_object_type, gl_object_name);
+}
+inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(GetGLTextureInfo)(cl_mem memobj, cl_gl_texture_info param_name, size_t param_value_size, void* param_value, size_t* param_value_size_ret) CL_API_SUFFIX__VERSION_1_0
+{
+ typedef cl_int(CL_API_CALL * PFN_clGetGLTextureInfo)(cl_mem, cl_gl_texture_info, size_t, void*, size_t*);
+ static PFN_clGetGLTextureInfo _GetGLTextureInfo = (PFN_clGetGLTextureInfo)cl::internals::getClFunction(cl::CLFunctions::GetGLTextureInfo);
+ return _GetGLTextureInfo(memobj, param_name, param_value_size, param_value, param_value_size_ret);
+}
+inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(EnqueueAcquireGLObjects)(cl_command_queue command_queue, cl_uint num_objects, const cl_mem* mem_objects, cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event) CL_API_SUFFIX__VERSION_1_0
+{
+ typedef cl_int(CL_API_CALL * PFN_clEnqueueAcquireGLObjects)(cl_command_queue, cl_uint, const cl_mem*, cl_uint, const cl_event*, cl_event * event);
+ static PFN_clEnqueueAcquireGLObjects _EnqueueAcquireGLObjects = (PFN_clEnqueueAcquireGLObjects)cl::internals::getClFunction(cl::CLFunctions::EnqueueAcquireGLObjects);
+ return _EnqueueAcquireGLObjects(command_queue, num_objects, mem_objects, num_events_in_wait_list, event_wait_list, event);
+}
+inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(EnqueueReleaseGLObjects)(cl_command_queue command_queue, cl_uint num_objects, const cl_mem* mem_objects, cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event) CL_API_SUFFIX__VERSION_1_0
+{
+ typedef cl_int(CL_API_CALL * PFN_clEnqueueReleaseGLObjects)(cl_command_queue, cl_uint, const cl_mem*, cl_uint, const cl_event*, cl_event * event);
+ static PFN_clEnqueueReleaseGLObjects _EnqueueReleaseGLObjects = (PFN_clEnqueueReleaseGLObjects)cl::internals::getClFunction(cl::CLFunctions::EnqueueReleaseGLObjects);
+ return _EnqueueReleaseGLObjects(command_queue, num_objects, mem_objects, num_events_in_wait_list, event_wait_list, event);
+}
+inline CL_EXT_PREFIX__VERSION_1_1_DEPRECATED cl_mem CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED CL_API_CALL DEFINE_CL_FUNCTION_NAME(CreateFromGLTexture2D)(cl_context context, cl_mem_flags flags, cl_GLenum target, cl_GLint miplevel, cl_GLuint texture, cl_int* errcode_ret)
+{
+ typedef cl_mem(CL_API_CALL * PFN_clCreateFromGLTexture2D)(cl_context, cl_mem_flags, cl_GLenum, cl_GLint, cl_GLuint, cl_int*);
+ static PFN_clCreateFromGLTexture2D _CreateFromGLTexture2D = (PFN_clCreateFromGLTexture2D)cl::internals::getClFunction(cl::CLFunctions::CreateFromGLTexture2D);
+ return _CreateFromGLTexture2D(context, flags, target, miplevel, texture, errcode_ret);
+}
+inline CL_EXT_PREFIX__VERSION_1_1_DEPRECATED cl_mem CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED CL_API_CALL DEFINE_CL_FUNCTION_NAME(CreateFromGLTexture3D)(cl_context context, cl_mem_flags flags, cl_GLenum target, cl_GLint miplevel, cl_GLuint texture, cl_int* errcode_ret)
+{
+ typedef cl_mem(CL_API_CALL * PFN_clCreateFromGLTexture3D)(cl_context, cl_mem_flags, cl_GLenum, cl_GLint, cl_GLuint, cl_int*);
+ static PFN_clCreateFromGLTexture3D _CreateFromGLTexture3D = (PFN_clCreateFromGLTexture3D)cl::internals::getClFunction(cl::CLFunctions::CreateFromGLTexture3D);
+ return _CreateFromGLTexture3D(context, flags, target, miplevel, texture, errcode_ret);
+}
+#ifdef cl_gl_context_info
+inline cl_int CL_API_CALL DEFINE_CL_FUNCTION_NAME(GetGLContextInfoKHR)(const cl_context_properties* properties, cl_gl_context_info param_name, size_t param_value_size, void* param_value, size_t* param_value_size_ret) CL_API_SUFFIX__VERSION_1_0
+{
+ typedef cl_int(CL_API_CALL * PFN_clGetGLContextInfoKHR)(const cl_context_properties*, cl_gl_context_info, size_t, void*, size_t*);
+ static PFN_clGetGLContextInfoKHR _GetGLContextInfoKHR = (PFN_clGetGLContextInfoKHR)cl::internals::getClFunction(cl::CLFunctions::GetGLContextInfoKHR);
+ return _GetGLContextInfoKHR(properties, param_name, param_value_size, param_value, param_value_size_ret);
+}
+#endif
+#ifndef DYNAMICCL_NO_NAMESPACE
+}
+#endif
+
+namespace cl {
+namespace internals {
+namespace CLExtFunctions {
+enum Enum
+{
+ CreateFromEGLImageKHR, EnqueueAcquireEGLObjectsKHR, EnqueueReleaseEGLObjectsKHR, CreateEventFromEGLSyncKHR, NUMBER_OF_CL_EXT_FUNCTIONS
+};
+
+static const std::pair<Enum, const char*> OpenCLExtFunctionNamePairs[] =
+{
+ { CreateFromEGLImageKHR, "clCreateFromEGLImageKHR" },
+ { EnqueueAcquireEGLObjectsKHR, "clEnqueueAcquireEGLObjectsKHR" },
+ { EnqueueReleaseEGLObjectsKHR, "clEnqueueReleaseEGLObjectsKHR" },
+ { CreateEventFromEGLSyncKHR, "clCreateEventFromEGLSyncKHR" },
+};
+}
+
+static inline void* GetClExtensionFunction(cl_platform_id platform, const char* funcName)
+{
+ return (void*)clGetExtensionFunctionAddressForPlatform(platform, funcName);
+}
+
+inline void* getClExtFunction(cl_platform_id platform, CLExtFunctions::Enum func, bool reset = false)
+{
+ // One extra slot at the end of the table serves as an "initialized" sentinel.
+ static void* CLFunctionTable[CLExtFunctions::NUMBER_OF_CL_EXT_FUNCTIONS + 1];
+
+ // Lazily query the extension entry points on first use, or again when a reset is requested.
+ if (!CLFunctionTable[CLExtFunctions::NUMBER_OF_CL_EXT_FUNCTIONS] || reset)
+ {
+ uint32_t numExtensionStrings = sizeof(CLExtFunctions::OpenCLExtFunctionNamePairs) / sizeof(CLExtFunctions::OpenCLExtFunctionNamePairs[0]);
+ uint32_t numExtensionEnums = CLExtFunctions::NUMBER_OF_CL_EXT_FUNCTIONS;
+
+ assert(numExtensionStrings == numExtensionEnums);
+
+ for (uint32_t i = 0; i < numExtensionStrings; i++)
+ {
+ assert(CLFunctionTable[i] == 0);
+ CLFunctionTable[i] = GetClExtensionFunction(platform, CLExtFunctions::OpenCLExtFunctionNamePairs[i].second);
+ }
+ // Mark the table as initialized by setting the sentinel entry.
+ CLFunctionTable[CLExtFunctions::NUMBER_OF_CL_EXT_FUNCTIONS] = (void*)1;
+ }
+ return CLFunctionTable[func];
+}
+}
+}
diff --git a/sources/pvrsdk/Include/EGL/egl.h b/sources/pvrsdk/Include/EGL/egl.h
new file mode 100755
index 00000000..bf94f61b
--- /dev/null
+++ b/sources/pvrsdk/Include/EGL/egl.h
@@ -0,0 +1,313 @@
+#ifndef __egl_h_
+#define __egl_h_ 1
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+** Copyright (c) 2013-2017 The Khronos Group Inc.
+** +** Permission is hereby granted, free of charge, to any person obtaining a +** copy of this software and/or associated documentation files (the +** "Materials"), to deal in the Materials without restriction, including +** without limitation the rights to use, copy, modify, merge, publish, +** distribute, sublicense, and/or sell copies of the Materials, and to +** permit persons to whom the Materials are furnished to do so, subject to +** the following conditions: +** +** The above copyright notice and this permission notice shall be included +** in all copies or substantial portions of the Materials. +** +** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +** MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS. +*/ +/* +** This header is generated from the Khronos OpenGL / OpenGL ES XML +** API Registry. The current version of the Registry, generator scripts +** used to make the header, and the header can be found at +** http://www.khronos.org/registry/egl +** +** Khronos $Git commit SHA1: a732b061e7 $ on $Git commit date: 2017-06-17 23:27:53 +0100 $ +*/ + +#include "EGL/eglplatform.h" + +/* Generated on date 20170627 */ + +/* Generated C header for: + * API: egl + * Versions considered: .* + * Versions emitted: .* + * Default extensions included: None + * Additional extensions included: _nomatch_^ + * Extensions removed: _nomatch_^ + */ + +#ifndef EGL_VERSION_1_0 +#define EGL_VERSION_1_0 1 +typedef unsigned int EGLBoolean; +typedef void *EGLDisplay; +#include "KHR/khrplatform.h" +#include "EGL/eglplatform.h" +typedef void *EGLConfig; +typedef void *EGLSurface; +typedef void *EGLContext; +typedef void (*__eglMustCastToProperFunctionPointerType)(void); +#define EGL_ALPHA_SIZE 0x3021 +#define EGL_BAD_ACCESS 0x3002 +#define EGL_BAD_ALLOC 0x3003 +#define EGL_BAD_ATTRIBUTE 0x3004 +#define EGL_BAD_CONFIG 0x3005 +#define EGL_BAD_CONTEXT 0x3006 +#define EGL_BAD_CURRENT_SURFACE 0x3007 +#define EGL_BAD_DISPLAY 0x3008 +#define EGL_BAD_MATCH 0x3009 +#define EGL_BAD_NATIVE_PIXMAP 0x300A +#define EGL_BAD_NATIVE_WINDOW 0x300B +#define EGL_BAD_PARAMETER 0x300C +#define EGL_BAD_SURFACE 0x300D +#define EGL_BLUE_SIZE 0x3022 +#define EGL_BUFFER_SIZE 0x3020 +#define EGL_CONFIG_CAVEAT 0x3027 +#define EGL_CONFIG_ID 0x3028 +#define EGL_CORE_NATIVE_ENGINE 0x305B +#define EGL_DEPTH_SIZE 0x3025 +#define EGL_DONT_CARE EGL_CAST(EGLint,-1) +#define EGL_DRAW 0x3059 +#define EGL_EXTENSIONS 0x3055 +#define EGL_FALSE 0 +#define EGL_GREEN_SIZE 0x3023 +#define EGL_HEIGHT 0x3056 +#define EGL_LARGEST_PBUFFER 0x3058 +#define EGL_LEVEL 0x3029 +#define EGL_MAX_PBUFFER_HEIGHT 0x302A +#define EGL_MAX_PBUFFER_PIXELS 0x302B +#define EGL_MAX_PBUFFER_WIDTH 0x302C +#define EGL_NATIVE_RENDERABLE 0x302D +#define EGL_NATIVE_VISUAL_ID 0x302E +#define EGL_NATIVE_VISUAL_TYPE 0x302F +#define EGL_NONE 0x3038 +#define EGL_NON_CONFORMANT_CONFIG 0x3051 +#define EGL_NOT_INITIALIZED 0x3001 +#define EGL_NO_CONTEXT EGL_CAST(EGLContext,0) +#define EGL_NO_DISPLAY EGL_CAST(EGLDisplay,0) +#define EGL_NO_SURFACE EGL_CAST(EGLSurface,0) +#define EGL_PBUFFER_BIT 0x0001 +#define EGL_PIXMAP_BIT 0x0002 +#define EGL_READ 0x305A +#define EGL_RED_SIZE 0x3024 +#define EGL_SAMPLES 
0x3031 +#define EGL_SAMPLE_BUFFERS 0x3032 +#define EGL_SLOW_CONFIG 0x3050 +#define EGL_STENCIL_SIZE 0x3026 +#define EGL_SUCCESS 0x3000 +#define EGL_SURFACE_TYPE 0x3033 +#define EGL_TRANSPARENT_BLUE_VALUE 0x3035 +#define EGL_TRANSPARENT_GREEN_VALUE 0x3036 +#define EGL_TRANSPARENT_RED_VALUE 0x3037 +#define EGL_TRANSPARENT_RGB 0x3052 +#define EGL_TRANSPARENT_TYPE 0x3034 +#define EGL_TRUE 1 +#define EGL_VENDOR 0x3053 +#define EGL_VERSION 0x3054 +#define EGL_WIDTH 0x3057 +#define EGL_WINDOW_BIT 0x0004 +#ifndef EGL_NO_PROTOTYPES +EGLAPI EGLBoolean EGLAPIENTRY eglChooseConfig (EGLDisplay dpy, const EGLint *attrib_list, EGLConfig *configs, EGLint config_size, EGLint *num_config); +EGLAPI EGLBoolean EGLAPIENTRY eglCopyBuffers (EGLDisplay dpy, EGLSurface surface, EGLNativePixmapType target); +EGLAPI EGLContext EGLAPIENTRY eglCreateContext (EGLDisplay dpy, EGLConfig config, EGLContext share_context, const EGLint *attrib_list); +EGLAPI EGLSurface EGLAPIENTRY eglCreatePbufferSurface (EGLDisplay dpy, EGLConfig config, const EGLint *attrib_list); +EGLAPI EGLSurface EGLAPIENTRY eglCreatePixmapSurface (EGLDisplay dpy, EGLConfig config, EGLNativePixmapType pixmap, const EGLint *attrib_list); +EGLAPI EGLSurface EGLAPIENTRY eglCreateWindowSurface (EGLDisplay dpy, EGLConfig config, EGLNativeWindowType win, const EGLint *attrib_list); +EGLAPI EGLBoolean EGLAPIENTRY eglDestroyContext (EGLDisplay dpy, EGLContext ctx); +EGLAPI EGLBoolean EGLAPIENTRY eglDestroySurface (EGLDisplay dpy, EGLSurface surface); +EGLAPI EGLBoolean EGLAPIENTRY eglGetConfigAttrib (EGLDisplay dpy, EGLConfig config, EGLint attribute, EGLint *value); +EGLAPI EGLBoolean EGLAPIENTRY eglGetConfigs (EGLDisplay dpy, EGLConfig *configs, EGLint config_size, EGLint *num_config); +EGLAPI EGLDisplay EGLAPIENTRY eglGetCurrentDisplay (void); +EGLAPI EGLSurface EGLAPIENTRY eglGetCurrentSurface (EGLint readdraw); +EGLAPI EGLDisplay EGLAPIENTRY eglGetDisplay (EGLNativeDisplayType display_id); +EGLAPI EGLint EGLAPIENTRY eglGetError (void); +EGLAPI __eglMustCastToProperFunctionPointerType EGLAPIENTRY eglGetProcAddress (const char *procname); +EGLAPI EGLBoolean EGLAPIENTRY eglInitialize (EGLDisplay dpy, EGLint *major, EGLint *minor); +EGLAPI EGLBoolean EGLAPIENTRY eglMakeCurrent (EGLDisplay dpy, EGLSurface draw, EGLSurface read, EGLContext ctx); +EGLAPI EGLBoolean EGLAPIENTRY eglQueryContext (EGLDisplay dpy, EGLContext ctx, EGLint attribute, EGLint *value); +EGLAPI const char *EGLAPIENTRY eglQueryString (EGLDisplay dpy, EGLint name); +EGLAPI EGLBoolean EGLAPIENTRY eglQuerySurface (EGLDisplay dpy, EGLSurface surface, EGLint attribute, EGLint *value); +EGLAPI EGLBoolean EGLAPIENTRY eglSwapBuffers (EGLDisplay dpy, EGLSurface surface); +EGLAPI EGLBoolean EGLAPIENTRY eglTerminate (EGLDisplay dpy); +EGLAPI EGLBoolean EGLAPIENTRY eglWaitGL (void); +EGLAPI EGLBoolean EGLAPIENTRY eglWaitNative (EGLint engine); +#endif +#endif /* EGL_VERSION_1_0 */ + +#ifndef EGL_VERSION_1_1 +#define EGL_VERSION_1_1 1 +#define EGL_BACK_BUFFER 0x3084 +#define EGL_BIND_TO_TEXTURE_RGB 0x3039 +#define EGL_BIND_TO_TEXTURE_RGBA 0x303A +#define EGL_CONTEXT_LOST 0x300E +#define EGL_MIN_SWAP_INTERVAL 0x303B +#define EGL_MAX_SWAP_INTERVAL 0x303C +#define EGL_MIPMAP_TEXTURE 0x3082 +#define EGL_MIPMAP_LEVEL 0x3083 +#define EGL_NO_TEXTURE 0x305C +#define EGL_TEXTURE_2D 0x305F +#define EGL_TEXTURE_FORMAT 0x3080 +#define EGL_TEXTURE_RGB 0x305D +#define EGL_TEXTURE_RGBA 0x305E +#define EGL_TEXTURE_TARGET 0x3081 +#ifndef EGL_NO_PROTOTYPES +EGLAPI EGLBoolean EGLAPIENTRY eglBindTexImage (EGLDisplay dpy, 
EGLSurface surface, EGLint buffer); +EGLAPI EGLBoolean EGLAPIENTRY eglReleaseTexImage (EGLDisplay dpy, EGLSurface surface, EGLint buffer); +EGLAPI EGLBoolean EGLAPIENTRY eglSurfaceAttrib (EGLDisplay dpy, EGLSurface surface, EGLint attribute, EGLint value); +EGLAPI EGLBoolean EGLAPIENTRY eglSwapInterval (EGLDisplay dpy, EGLint interval); +#endif +#endif /* EGL_VERSION_1_1 */ + +#ifndef EGL_VERSION_1_2 +#define EGL_VERSION_1_2 1 +typedef unsigned int EGLenum; +typedef void *EGLClientBuffer; +#define EGL_ALPHA_FORMAT 0x3088 +#define EGL_ALPHA_FORMAT_NONPRE 0x308B +#define EGL_ALPHA_FORMAT_PRE 0x308C +#define EGL_ALPHA_MASK_SIZE 0x303E +#define EGL_BUFFER_PRESERVED 0x3094 +#define EGL_BUFFER_DESTROYED 0x3095 +#define EGL_CLIENT_APIS 0x308D +#define EGL_COLORSPACE 0x3087 +#define EGL_COLORSPACE_sRGB 0x3089 +#define EGL_COLORSPACE_LINEAR 0x308A +#define EGL_COLOR_BUFFER_TYPE 0x303F +#define EGL_CONTEXT_CLIENT_TYPE 0x3097 +#define EGL_DISPLAY_SCALING 10000 +#define EGL_HORIZONTAL_RESOLUTION 0x3090 +#define EGL_LUMINANCE_BUFFER 0x308F +#define EGL_LUMINANCE_SIZE 0x303D +#define EGL_OPENGL_ES_BIT 0x0001 +#define EGL_OPENVG_BIT 0x0002 +#define EGL_OPENGL_ES_API 0x30A0 +#define EGL_OPENVG_API 0x30A1 +#define EGL_OPENVG_IMAGE 0x3096 +#define EGL_PIXEL_ASPECT_RATIO 0x3092 +#define EGL_RENDERABLE_TYPE 0x3040 +#define EGL_RENDER_BUFFER 0x3086 +#define EGL_RGB_BUFFER 0x308E +#define EGL_SINGLE_BUFFER 0x3085 +#define EGL_SWAP_BEHAVIOR 0x3093 +#define EGL_UNKNOWN EGL_CAST(EGLint,-1) +#define EGL_VERTICAL_RESOLUTION 0x3091 +#ifndef EGL_NO_PROTOTYPES +EGLAPI EGLBoolean EGLAPIENTRY eglBindAPI (EGLenum api); +EGLAPI EGLenum EGLAPIENTRY eglQueryAPI (void); +EGLAPI EGLSurface EGLAPIENTRY eglCreatePbufferFromClientBuffer (EGLDisplay dpy, EGLenum buftype, EGLClientBuffer buffer, EGLConfig config, const EGLint *attrib_list); +EGLAPI EGLBoolean EGLAPIENTRY eglReleaseThread (void); +EGLAPI EGLBoolean EGLAPIENTRY eglWaitClient (void); +#endif +#endif /* EGL_VERSION_1_2 */ + +#ifndef EGL_VERSION_1_3 +#define EGL_VERSION_1_3 1 +#define EGL_CONFORMANT 0x3042 +#define EGL_CONTEXT_CLIENT_VERSION 0x3098 +#define EGL_MATCH_NATIVE_PIXMAP 0x3041 +#define EGL_OPENGL_ES2_BIT 0x0004 +#define EGL_VG_ALPHA_FORMAT 0x3088 +#define EGL_VG_ALPHA_FORMAT_NONPRE 0x308B +#define EGL_VG_ALPHA_FORMAT_PRE 0x308C +#define EGL_VG_ALPHA_FORMAT_PRE_BIT 0x0040 +#define EGL_VG_COLORSPACE 0x3087 +#define EGL_VG_COLORSPACE_sRGB 0x3089 +#define EGL_VG_COLORSPACE_LINEAR 0x308A +#define EGL_VG_COLORSPACE_LINEAR_BIT 0x0020 +#endif /* EGL_VERSION_1_3 */ + +#ifndef EGL_VERSION_1_4 +#define EGL_VERSION_1_4 1 +#define EGL_DEFAULT_DISPLAY EGL_CAST(EGLNativeDisplayType,0) +#define EGL_MULTISAMPLE_RESOLVE_BOX_BIT 0x0200 +#define EGL_MULTISAMPLE_RESOLVE 0x3099 +#define EGL_MULTISAMPLE_RESOLVE_DEFAULT 0x309A +#define EGL_MULTISAMPLE_RESOLVE_BOX 0x309B +#define EGL_OPENGL_API 0x30A2 +#define EGL_OPENGL_BIT 0x0008 +#define EGL_SWAP_BEHAVIOR_PRESERVED_BIT 0x0400 +#ifndef EGL_NO_PROTOTYPES +EGLAPI EGLContext EGLAPIENTRY eglGetCurrentContext (void); +#endif +#endif /* EGL_VERSION_1_4 */ + +#ifndef EGL_VERSION_1_5 +#define EGL_VERSION_1_5 1 +typedef void *EGLSync; +typedef intptr_t EGLAttrib; +typedef khronos_utime_nanoseconds_t EGLTime; +typedef void *EGLImage; +#define EGL_CONTEXT_MAJOR_VERSION 0x3098 +#define EGL_CONTEXT_MINOR_VERSION 0x30FB +#define EGL_CONTEXT_OPENGL_PROFILE_MASK 0x30FD +#define EGL_CONTEXT_OPENGL_RESET_NOTIFICATION_STRATEGY 0x31BD +#define EGL_NO_RESET_NOTIFICATION 0x31BE +#define EGL_LOSE_CONTEXT_ON_RESET 0x31BF +#define 
EGL_CONTEXT_OPENGL_CORE_PROFILE_BIT 0x00000001 +#define EGL_CONTEXT_OPENGL_COMPATIBILITY_PROFILE_BIT 0x00000002 +#define EGL_CONTEXT_OPENGL_DEBUG 0x31B0 +#define EGL_CONTEXT_OPENGL_FORWARD_COMPATIBLE 0x31B1 +#define EGL_CONTEXT_OPENGL_ROBUST_ACCESS 0x31B2 +#define EGL_OPENGL_ES3_BIT 0x00000040 +#define EGL_CL_EVENT_HANDLE 0x309C +#define EGL_SYNC_CL_EVENT 0x30FE +#define EGL_SYNC_CL_EVENT_COMPLETE 0x30FF +#define EGL_SYNC_PRIOR_COMMANDS_COMPLETE 0x30F0 +#define EGL_SYNC_TYPE 0x30F7 +#define EGL_SYNC_STATUS 0x30F1 +#define EGL_SYNC_CONDITION 0x30F8 +#define EGL_SIGNALED 0x30F2 +#define EGL_UNSIGNALED 0x30F3 +#define EGL_SYNC_FLUSH_COMMANDS_BIT 0x0001 +#define EGL_FOREVER 0xFFFFFFFFFFFFFFFFull +#define EGL_TIMEOUT_EXPIRED 0x30F5 +#define EGL_CONDITION_SATISFIED 0x30F6 +#define EGL_NO_SYNC EGL_CAST(EGLSync,0) +#define EGL_SYNC_FENCE 0x30F9 +#define EGL_GL_COLORSPACE 0x309D +#define EGL_GL_COLORSPACE_SRGB 0x3089 +#define EGL_GL_COLORSPACE_LINEAR 0x308A +#define EGL_GL_RENDERBUFFER 0x30B9 +#define EGL_GL_TEXTURE_2D 0x30B1 +#define EGL_GL_TEXTURE_LEVEL 0x30BC +#define EGL_GL_TEXTURE_3D 0x30B2 +#define EGL_GL_TEXTURE_ZOFFSET 0x30BD +#define EGL_GL_TEXTURE_CUBE_MAP_POSITIVE_X 0x30B3 +#define EGL_GL_TEXTURE_CUBE_MAP_NEGATIVE_X 0x30B4 +#define EGL_GL_TEXTURE_CUBE_MAP_POSITIVE_Y 0x30B5 +#define EGL_GL_TEXTURE_CUBE_MAP_NEGATIVE_Y 0x30B6 +#define EGL_GL_TEXTURE_CUBE_MAP_POSITIVE_Z 0x30B7 +#define EGL_GL_TEXTURE_CUBE_MAP_NEGATIVE_Z 0x30B8 +#define EGL_IMAGE_PRESERVED 0x30D2 +#define EGL_NO_IMAGE EGL_CAST(EGLImage,0) +#ifndef EGL_NO_PROTOTYPES +EGLAPI EGLSync EGLAPIENTRY eglCreateSync (EGLDisplay dpy, EGLenum type, const EGLAttrib *attrib_list); +EGLAPI EGLBoolean EGLAPIENTRY eglDestroySync (EGLDisplay dpy, EGLSync sync); +EGLAPI EGLint EGLAPIENTRY eglClientWaitSync (EGLDisplay dpy, EGLSync sync, EGLint flags, EGLTime timeout); +EGLAPI EGLBoolean EGLAPIENTRY eglGetSyncAttrib (EGLDisplay dpy, EGLSync sync, EGLint attribute, EGLAttrib *value); +EGLAPI EGLImage EGLAPIENTRY eglCreateImage (EGLDisplay dpy, EGLContext ctx, EGLenum target, EGLClientBuffer buffer, const EGLAttrib *attrib_list); +EGLAPI EGLBoolean EGLAPIENTRY eglDestroyImage (EGLDisplay dpy, EGLImage image); +EGLAPI EGLDisplay EGLAPIENTRY eglGetPlatformDisplay (EGLenum platform, void *native_display, const EGLAttrib *attrib_list); +EGLAPI EGLSurface EGLAPIENTRY eglCreatePlatformWindowSurface (EGLDisplay dpy, EGLConfig config, void *native_window, const EGLAttrib *attrib_list); +EGLAPI EGLSurface EGLAPIENTRY eglCreatePlatformPixmapSurface (EGLDisplay dpy, EGLConfig config, void *native_pixmap, const EGLAttrib *attrib_list); +EGLAPI EGLBoolean EGLAPIENTRY eglWaitSync (EGLDisplay dpy, EGLSync sync, EGLint flags); +#endif +#endif /* EGL_VERSION_1_5 */ + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/sources/pvrsdk/Include/EGL/eglext.h b/sources/pvrsdk/Include/EGL/eglext.h new file mode 100755 index 00000000..ded794a2 --- /dev/null +++ b/sources/pvrsdk/Include/EGL/eglext.h @@ -0,0 +1,1241 @@ +#ifndef __eglext_h_ +#define __eglext_h_ 1 + +#ifdef __cplusplus +extern "C" { +#endif + +/* +** Copyright (c) 2013-2017 The Khronos Group Inc. 
+** +** Permission is hereby granted, free of charge, to any person obtaining a +** copy of this software and/or associated documentation files (the +** "Materials"), to deal in the Materials without restriction, including +** without limitation the rights to use, copy, modify, merge, publish, +** distribute, sublicense, and/or sell copies of the Materials, and to +** permit persons to whom the Materials are furnished to do so, subject to +** the following conditions: +** +** The above copyright notice and this permission notice shall be included +** in all copies or substantial portions of the Materials. +** +** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +** MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS. +*/ +/* +** This header is generated from the Khronos OpenGL / OpenGL ES XML +** API Registry. The current version of the Registry, generator scripts +** used to make the header, and the header can be found at +** http://www.khronos.org/registry/egl +** +** Khronos $Git commit SHA1: a732b061e7 $ on $Git commit date: 2017-06-17 23:27:53 +0100 $ +*/ + +#include "EGL/eglplatform.h" + +#define EGL_EGLEXT_VERSION 20170627 + +/* Generated C header for: + * API: egl + * Versions considered: .* + * Versions emitted: _nomatch_^ + * Default extensions included: egl + * Additional extensions included: _nomatch_^ + * Extensions removed: _nomatch_^ + */ + +#ifndef EGL_KHR_cl_event +#define EGL_KHR_cl_event 1 +#define EGL_CL_EVENT_HANDLE_KHR 0x309C +#define EGL_SYNC_CL_EVENT_KHR 0x30FE +#define EGL_SYNC_CL_EVENT_COMPLETE_KHR 0x30FF +#endif /* EGL_KHR_cl_event */ + +#ifndef EGL_KHR_cl_event2 +#define EGL_KHR_cl_event2 1 +typedef void *EGLSyncKHR; +typedef intptr_t EGLAttribKHR; +typedef EGLSyncKHR (EGLAPIENTRYP PFNEGLCREATESYNC64KHRPROC) (EGLDisplay dpy, EGLenum type, const EGLAttribKHR *attrib_list); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLSyncKHR EGLAPIENTRY eglCreateSync64KHR (EGLDisplay dpy, EGLenum type, const EGLAttribKHR *attrib_list); +#endif +#endif /* EGL_KHR_cl_event2 */ + +#ifndef EGL_KHR_client_get_all_proc_addresses +#define EGL_KHR_client_get_all_proc_addresses 1 +#endif /* EGL_KHR_client_get_all_proc_addresses */ + +#ifndef EGL_KHR_config_attribs +#define EGL_KHR_config_attribs 1 +#define EGL_CONFORMANT_KHR 0x3042 +#define EGL_VG_COLORSPACE_LINEAR_BIT_KHR 0x0020 +#define EGL_VG_ALPHA_FORMAT_PRE_BIT_KHR 0x0040 +#endif /* EGL_KHR_config_attribs */ + +#ifndef EGL_KHR_context_flush_control +#define EGL_KHR_context_flush_control 1 +#define EGL_CONTEXT_RELEASE_BEHAVIOR_NONE_KHR 0 +#define EGL_CONTEXT_RELEASE_BEHAVIOR_KHR 0x2097 +#define EGL_CONTEXT_RELEASE_BEHAVIOR_FLUSH_KHR 0x2098 +#endif /* EGL_KHR_context_flush_control */ + +#ifndef EGL_KHR_create_context +#define EGL_KHR_create_context 1 +#define EGL_CONTEXT_MAJOR_VERSION_KHR 0x3098 +#define EGL_CONTEXT_MINOR_VERSION_KHR 0x30FB +#define EGL_CONTEXT_FLAGS_KHR 0x30FC +#define EGL_CONTEXT_OPENGL_PROFILE_MASK_KHR 0x30FD +#define EGL_CONTEXT_OPENGL_RESET_NOTIFICATION_STRATEGY_KHR 0x31BD +#define EGL_NO_RESET_NOTIFICATION_KHR 0x31BE +#define EGL_LOSE_CONTEXT_ON_RESET_KHR 0x31BF +#define EGL_CONTEXT_OPENGL_DEBUG_BIT_KHR 0x00000001 +#define 
EGL_CONTEXT_OPENGL_FORWARD_COMPATIBLE_BIT_KHR 0x00000002 +#define EGL_CONTEXT_OPENGL_ROBUST_ACCESS_BIT_KHR 0x00000004 +#define EGL_CONTEXT_OPENGL_CORE_PROFILE_BIT_KHR 0x00000001 +#define EGL_CONTEXT_OPENGL_COMPATIBILITY_PROFILE_BIT_KHR 0x00000002 +#define EGL_OPENGL_ES3_BIT_KHR 0x00000040 +#endif /* EGL_KHR_create_context */ + +#ifndef EGL_KHR_create_context_no_error +#define EGL_KHR_create_context_no_error 1 +#define EGL_CONTEXT_OPENGL_NO_ERROR_KHR 0x31B3 +#endif /* EGL_KHR_create_context_no_error */ + +#ifndef EGL_KHR_debug +#define EGL_KHR_debug 1 +typedef void *EGLLabelKHR; +typedef void *EGLObjectKHR; +typedef void (EGLAPIENTRY *EGLDEBUGPROCKHR)(EGLenum error,const char *command,EGLint messageType,EGLLabelKHR threadLabel,EGLLabelKHR objectLabel,const char* message); +#define EGL_OBJECT_THREAD_KHR 0x33B0 +#define EGL_OBJECT_DISPLAY_KHR 0x33B1 +#define EGL_OBJECT_CONTEXT_KHR 0x33B2 +#define EGL_OBJECT_SURFACE_KHR 0x33B3 +#define EGL_OBJECT_IMAGE_KHR 0x33B4 +#define EGL_OBJECT_SYNC_KHR 0x33B5 +#define EGL_OBJECT_STREAM_KHR 0x33B6 +#define EGL_DEBUG_MSG_CRITICAL_KHR 0x33B9 +#define EGL_DEBUG_MSG_ERROR_KHR 0x33BA +#define EGL_DEBUG_MSG_WARN_KHR 0x33BB +#define EGL_DEBUG_MSG_INFO_KHR 0x33BC +#define EGL_DEBUG_CALLBACK_KHR 0x33B8 +typedef EGLint (EGLAPIENTRYP PFNEGLDEBUGMESSAGECONTROLKHRPROC) (EGLDEBUGPROCKHR callback, const EGLAttrib *attrib_list); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLQUERYDEBUGKHRPROC) (EGLint attribute, EGLAttrib *value); +typedef EGLint (EGLAPIENTRYP PFNEGLLABELOBJECTKHRPROC) (EGLDisplay display, EGLenum objectType, EGLObjectKHR object, EGLLabelKHR label); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLint EGLAPIENTRY eglDebugMessageControlKHR (EGLDEBUGPROCKHR callback, const EGLAttrib *attrib_list); +EGLAPI EGLBoolean EGLAPIENTRY eglQueryDebugKHR (EGLint attribute, EGLAttrib *value); +EGLAPI EGLint EGLAPIENTRY eglLabelObjectKHR (EGLDisplay display, EGLenum objectType, EGLObjectKHR object, EGLLabelKHR label); +#endif +#endif /* EGL_KHR_debug */ + +#ifndef EGL_KHR_display_reference +#define EGL_KHR_display_reference 1 +#define EGL_TRACK_REFERENCES_KHR 0x3352 +typedef EGLBoolean (EGLAPIENTRYP PFNEGLQUERYDISPLAYATTRIBKHRPROC) (EGLDisplay dpy, EGLint name, EGLAttrib *value); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLBoolean EGLAPIENTRY eglQueryDisplayAttribKHR (EGLDisplay dpy, EGLint name, EGLAttrib *value); +#endif +#endif /* EGL_KHR_display_reference */ + +#ifndef EGL_KHR_fence_sync +#define EGL_KHR_fence_sync 1 +typedef khronos_utime_nanoseconds_t EGLTimeKHR; +#ifdef KHRONOS_SUPPORT_INT64 +#define EGL_SYNC_PRIOR_COMMANDS_COMPLETE_KHR 0x30F0 +#define EGL_SYNC_CONDITION_KHR 0x30F8 +#define EGL_SYNC_FENCE_KHR 0x30F9 +typedef EGLSyncKHR (EGLAPIENTRYP PFNEGLCREATESYNCKHRPROC) (EGLDisplay dpy, EGLenum type, const EGLint *attrib_list); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLDESTROYSYNCKHRPROC) (EGLDisplay dpy, EGLSyncKHR sync); +typedef EGLint (EGLAPIENTRYP PFNEGLCLIENTWAITSYNCKHRPROC) (EGLDisplay dpy, EGLSyncKHR sync, EGLint flags, EGLTimeKHR timeout); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLGETSYNCATTRIBKHRPROC) (EGLDisplay dpy, EGLSyncKHR sync, EGLint attribute, EGLint *value); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLSyncKHR EGLAPIENTRY eglCreateSyncKHR (EGLDisplay dpy, EGLenum type, const EGLint *attrib_list); +EGLAPI EGLBoolean EGLAPIENTRY eglDestroySyncKHR (EGLDisplay dpy, EGLSyncKHR sync); +EGLAPI EGLint EGLAPIENTRY eglClientWaitSyncKHR (EGLDisplay dpy, EGLSyncKHR sync, EGLint flags, EGLTimeKHR timeout); +EGLAPI EGLBoolean EGLAPIENTRY eglGetSyncAttribKHR (EGLDisplay dpy, 
EGLSyncKHR sync, EGLint attribute, EGLint *value); +#endif +#endif /* KHRONOS_SUPPORT_INT64 */ +#endif /* EGL_KHR_fence_sync */ + +#ifndef EGL_KHR_get_all_proc_addresses +#define EGL_KHR_get_all_proc_addresses 1 +#endif /* EGL_KHR_get_all_proc_addresses */ + +#ifndef EGL_KHR_gl_colorspace +#define EGL_KHR_gl_colorspace 1 +#define EGL_GL_COLORSPACE_KHR 0x309D +#define EGL_GL_COLORSPACE_SRGB_KHR 0x3089 +#define EGL_GL_COLORSPACE_LINEAR_KHR 0x308A +#endif /* EGL_KHR_gl_colorspace */ + +#ifndef EGL_KHR_gl_renderbuffer_image +#define EGL_KHR_gl_renderbuffer_image 1 +#define EGL_GL_RENDERBUFFER_KHR 0x30B9 +#endif /* EGL_KHR_gl_renderbuffer_image */ + +#ifndef EGL_KHR_gl_texture_2D_image +#define EGL_KHR_gl_texture_2D_image 1 +#define EGL_GL_TEXTURE_2D_KHR 0x30B1 +#define EGL_GL_TEXTURE_LEVEL_KHR 0x30BC +#endif /* EGL_KHR_gl_texture_2D_image */ + +#ifndef EGL_KHR_gl_texture_3D_image +#define EGL_KHR_gl_texture_3D_image 1 +#define EGL_GL_TEXTURE_3D_KHR 0x30B2 +#define EGL_GL_TEXTURE_ZOFFSET_KHR 0x30BD +#endif /* EGL_KHR_gl_texture_3D_image */ + +#ifndef EGL_KHR_gl_texture_cubemap_image +#define EGL_KHR_gl_texture_cubemap_image 1 +#define EGL_GL_TEXTURE_CUBE_MAP_POSITIVE_X_KHR 0x30B3 +#define EGL_GL_TEXTURE_CUBE_MAP_NEGATIVE_X_KHR 0x30B4 +#define EGL_GL_TEXTURE_CUBE_MAP_POSITIVE_Y_KHR 0x30B5 +#define EGL_GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_KHR 0x30B6 +#define EGL_GL_TEXTURE_CUBE_MAP_POSITIVE_Z_KHR 0x30B7 +#define EGL_GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_KHR 0x30B8 +#endif /* EGL_KHR_gl_texture_cubemap_image */ + +#ifndef EGL_KHR_image +#define EGL_KHR_image 1 +typedef void *EGLImageKHR; +#define EGL_NATIVE_PIXMAP_KHR 0x30B0 +#define EGL_NO_IMAGE_KHR EGL_CAST(EGLImageKHR,0) +typedef EGLImageKHR (EGLAPIENTRYP PFNEGLCREATEIMAGEKHRPROC) (EGLDisplay dpy, EGLContext ctx, EGLenum target, EGLClientBuffer buffer, const EGLint *attrib_list); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLDESTROYIMAGEKHRPROC) (EGLDisplay dpy, EGLImageKHR image); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLImageKHR EGLAPIENTRY eglCreateImageKHR (EGLDisplay dpy, EGLContext ctx, EGLenum target, EGLClientBuffer buffer, const EGLint *attrib_list); +EGLAPI EGLBoolean EGLAPIENTRY eglDestroyImageKHR (EGLDisplay dpy, EGLImageKHR image); +#endif +#endif /* EGL_KHR_image */ + +#ifndef EGL_KHR_image_base +#define EGL_KHR_image_base 1 +#define EGL_IMAGE_PRESERVED_KHR 0x30D2 +#endif /* EGL_KHR_image_base */ + +#ifndef EGL_KHR_image_pixmap +#define EGL_KHR_image_pixmap 1 +#endif /* EGL_KHR_image_pixmap */ + +#ifndef EGL_KHR_lock_surface +#define EGL_KHR_lock_surface 1 +#define EGL_READ_SURFACE_BIT_KHR 0x0001 +#define EGL_WRITE_SURFACE_BIT_KHR 0x0002 +#define EGL_LOCK_SURFACE_BIT_KHR 0x0080 +#define EGL_OPTIMAL_FORMAT_BIT_KHR 0x0100 +#define EGL_MATCH_FORMAT_KHR 0x3043 +#define EGL_FORMAT_RGB_565_EXACT_KHR 0x30C0 +#define EGL_FORMAT_RGB_565_KHR 0x30C1 +#define EGL_FORMAT_RGBA_8888_EXACT_KHR 0x30C2 +#define EGL_FORMAT_RGBA_8888_KHR 0x30C3 +#define EGL_MAP_PRESERVE_PIXELS_KHR 0x30C4 +#define EGL_LOCK_USAGE_HINT_KHR 0x30C5 +#define EGL_BITMAP_POINTER_KHR 0x30C6 +#define EGL_BITMAP_PITCH_KHR 0x30C7 +#define EGL_BITMAP_ORIGIN_KHR 0x30C8 +#define EGL_BITMAP_PIXEL_RED_OFFSET_KHR 0x30C9 +#define EGL_BITMAP_PIXEL_GREEN_OFFSET_KHR 0x30CA +#define EGL_BITMAP_PIXEL_BLUE_OFFSET_KHR 0x30CB +#define EGL_BITMAP_PIXEL_ALPHA_OFFSET_KHR 0x30CC +#define EGL_BITMAP_PIXEL_LUMINANCE_OFFSET_KHR 0x30CD +#define EGL_LOWER_LEFT_KHR 0x30CE +#define EGL_UPPER_LEFT_KHR 0x30CF +typedef EGLBoolean (EGLAPIENTRYP PFNEGLLOCKSURFACEKHRPROC) (EGLDisplay dpy, EGLSurface surface, const EGLint 
*attrib_list); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLUNLOCKSURFACEKHRPROC) (EGLDisplay dpy, EGLSurface surface); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLBoolean EGLAPIENTRY eglLockSurfaceKHR (EGLDisplay dpy, EGLSurface surface, const EGLint *attrib_list); +EGLAPI EGLBoolean EGLAPIENTRY eglUnlockSurfaceKHR (EGLDisplay dpy, EGLSurface surface); +#endif +#endif /* EGL_KHR_lock_surface */ + +#ifndef EGL_KHR_lock_surface2 +#define EGL_KHR_lock_surface2 1 +#define EGL_BITMAP_PIXEL_SIZE_KHR 0x3110 +#endif /* EGL_KHR_lock_surface2 */ + +#ifndef EGL_KHR_lock_surface3 +#define EGL_KHR_lock_surface3 1 +typedef EGLBoolean (EGLAPIENTRYP PFNEGLQUERYSURFACE64KHRPROC) (EGLDisplay dpy, EGLSurface surface, EGLint attribute, EGLAttribKHR *value); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLBoolean EGLAPIENTRY eglQuerySurface64KHR (EGLDisplay dpy, EGLSurface surface, EGLint attribute, EGLAttribKHR *value); +#endif +#endif /* EGL_KHR_lock_surface3 */ + +#ifndef EGL_KHR_mutable_render_buffer +#define EGL_KHR_mutable_render_buffer 1 +#define EGL_MUTABLE_RENDER_BUFFER_BIT_KHR 0x1000 +#endif /* EGL_KHR_mutable_render_buffer */ + +#ifndef EGL_KHR_no_config_context +#define EGL_KHR_no_config_context 1 +#define EGL_NO_CONFIG_KHR EGL_CAST(EGLConfig,0) +#endif /* EGL_KHR_no_config_context */ + +#ifndef EGL_KHR_partial_update +#define EGL_KHR_partial_update 1 +#define EGL_BUFFER_AGE_KHR 0x313D +typedef EGLBoolean (EGLAPIENTRYP PFNEGLSETDAMAGEREGIONKHRPROC) (EGLDisplay dpy, EGLSurface surface, EGLint *rects, EGLint n_rects); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLBoolean EGLAPIENTRY eglSetDamageRegionKHR (EGLDisplay dpy, EGLSurface surface, EGLint *rects, EGLint n_rects); +#endif +#endif /* EGL_KHR_partial_update */ + +#ifndef EGL_KHR_platform_android +#define EGL_KHR_platform_android 1 +#define EGL_PLATFORM_ANDROID_KHR 0x3141 +#endif /* EGL_KHR_platform_android */ + +#ifndef EGL_KHR_platform_gbm +#define EGL_KHR_platform_gbm 1 +#define EGL_PLATFORM_GBM_KHR 0x31D7 +#endif /* EGL_KHR_platform_gbm */ + +#ifndef EGL_KHR_platform_wayland +#define EGL_KHR_platform_wayland 1 +#define EGL_PLATFORM_WAYLAND_KHR 0x31D8 +#endif /* EGL_KHR_platform_wayland */ + +#ifndef EGL_KHR_platform_x11 +#define EGL_KHR_platform_x11 1 +#define EGL_PLATFORM_X11_KHR 0x31D5 +#define EGL_PLATFORM_X11_SCREEN_KHR 0x31D6 +#endif /* EGL_KHR_platform_x11 */ + +#ifndef EGL_KHR_reusable_sync +#define EGL_KHR_reusable_sync 1 +#ifdef KHRONOS_SUPPORT_INT64 +#define EGL_SYNC_STATUS_KHR 0x30F1 +#define EGL_SIGNALED_KHR 0x30F2 +#define EGL_UNSIGNALED_KHR 0x30F3 +#define EGL_TIMEOUT_EXPIRED_KHR 0x30F5 +#define EGL_CONDITION_SATISFIED_KHR 0x30F6 +#define EGL_SYNC_TYPE_KHR 0x30F7 +#define EGL_SYNC_REUSABLE_KHR 0x30FA +#define EGL_SYNC_FLUSH_COMMANDS_BIT_KHR 0x0001 +#define EGL_FOREVER_KHR 0xFFFFFFFFFFFFFFFFull +#define EGL_NO_SYNC_KHR EGL_CAST(EGLSyncKHR,0) +typedef EGLBoolean (EGLAPIENTRYP PFNEGLSIGNALSYNCKHRPROC) (EGLDisplay dpy, EGLSyncKHR sync, EGLenum mode); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLBoolean EGLAPIENTRY eglSignalSyncKHR (EGLDisplay dpy, EGLSyncKHR sync, EGLenum mode); +#endif +#endif /* KHRONOS_SUPPORT_INT64 */ +#endif /* EGL_KHR_reusable_sync */ + +#ifndef EGL_KHR_stream +#define EGL_KHR_stream 1 +typedef void *EGLStreamKHR; +typedef khronos_uint64_t EGLuint64KHR; +#ifdef KHRONOS_SUPPORT_INT64 +#define EGL_NO_STREAM_KHR EGL_CAST(EGLStreamKHR,0) +#define EGL_CONSUMER_LATENCY_USEC_KHR 0x3210 +#define EGL_PRODUCER_FRAME_KHR 0x3212 +#define EGL_CONSUMER_FRAME_KHR 0x3213 +#define EGL_STREAM_STATE_KHR 0x3214 +#define EGL_STREAM_STATE_CREATED_KHR 
0x3215 +#define EGL_STREAM_STATE_CONNECTING_KHR 0x3216 +#define EGL_STREAM_STATE_EMPTY_KHR 0x3217 +#define EGL_STREAM_STATE_NEW_FRAME_AVAILABLE_KHR 0x3218 +#define EGL_STREAM_STATE_OLD_FRAME_AVAILABLE_KHR 0x3219 +#define EGL_STREAM_STATE_DISCONNECTED_KHR 0x321A +#define EGL_BAD_STREAM_KHR 0x321B +#define EGL_BAD_STATE_KHR 0x321C +typedef EGLStreamKHR (EGLAPIENTRYP PFNEGLCREATESTREAMKHRPROC) (EGLDisplay dpy, const EGLint *attrib_list); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLDESTROYSTREAMKHRPROC) (EGLDisplay dpy, EGLStreamKHR stream); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLSTREAMATTRIBKHRPROC) (EGLDisplay dpy, EGLStreamKHR stream, EGLenum attribute, EGLint value); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLQUERYSTREAMKHRPROC) (EGLDisplay dpy, EGLStreamKHR stream, EGLenum attribute, EGLint *value); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLQUERYSTREAMU64KHRPROC) (EGLDisplay dpy, EGLStreamKHR stream, EGLenum attribute, EGLuint64KHR *value); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLStreamKHR EGLAPIENTRY eglCreateStreamKHR (EGLDisplay dpy, const EGLint *attrib_list); +EGLAPI EGLBoolean EGLAPIENTRY eglDestroyStreamKHR (EGLDisplay dpy, EGLStreamKHR stream); +EGLAPI EGLBoolean EGLAPIENTRY eglStreamAttribKHR (EGLDisplay dpy, EGLStreamKHR stream, EGLenum attribute, EGLint value); +EGLAPI EGLBoolean EGLAPIENTRY eglQueryStreamKHR (EGLDisplay dpy, EGLStreamKHR stream, EGLenum attribute, EGLint *value); +EGLAPI EGLBoolean EGLAPIENTRY eglQueryStreamu64KHR (EGLDisplay dpy, EGLStreamKHR stream, EGLenum attribute, EGLuint64KHR *value); +#endif +#endif /* KHRONOS_SUPPORT_INT64 */ +#endif /* EGL_KHR_stream */ + +#ifndef EGL_KHR_stream_attrib +#define EGL_KHR_stream_attrib 1 +#ifdef KHRONOS_SUPPORT_INT64 +typedef EGLStreamKHR (EGLAPIENTRYP PFNEGLCREATESTREAMATTRIBKHRPROC) (EGLDisplay dpy, const EGLAttrib *attrib_list); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLSETSTREAMATTRIBKHRPROC) (EGLDisplay dpy, EGLStreamKHR stream, EGLenum attribute, EGLAttrib value); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLQUERYSTREAMATTRIBKHRPROC) (EGLDisplay dpy, EGLStreamKHR stream, EGLenum attribute, EGLAttrib *value); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLSTREAMCONSUMERACQUIREATTRIBKHRPROC) (EGLDisplay dpy, EGLStreamKHR stream, const EGLAttrib *attrib_list); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLSTREAMCONSUMERRELEASEATTRIBKHRPROC) (EGLDisplay dpy, EGLStreamKHR stream, const EGLAttrib *attrib_list); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLStreamKHR EGLAPIENTRY eglCreateStreamAttribKHR (EGLDisplay dpy, const EGLAttrib *attrib_list); +EGLAPI EGLBoolean EGLAPIENTRY eglSetStreamAttribKHR (EGLDisplay dpy, EGLStreamKHR stream, EGLenum attribute, EGLAttrib value); +EGLAPI EGLBoolean EGLAPIENTRY eglQueryStreamAttribKHR (EGLDisplay dpy, EGLStreamKHR stream, EGLenum attribute, EGLAttrib *value); +EGLAPI EGLBoolean EGLAPIENTRY eglStreamConsumerAcquireAttribKHR (EGLDisplay dpy, EGLStreamKHR stream, const EGLAttrib *attrib_list); +EGLAPI EGLBoolean EGLAPIENTRY eglStreamConsumerReleaseAttribKHR (EGLDisplay dpy, EGLStreamKHR stream, const EGLAttrib *attrib_list); +#endif +#endif /* KHRONOS_SUPPORT_INT64 */ +#endif /* EGL_KHR_stream_attrib */ + +#ifndef EGL_KHR_stream_consumer_gltexture +#define EGL_KHR_stream_consumer_gltexture 1 +#ifdef EGL_KHR_stream +#define EGL_CONSUMER_ACQUIRE_TIMEOUT_USEC_KHR 0x321E +typedef EGLBoolean (EGLAPIENTRYP PFNEGLSTREAMCONSUMERGLTEXTUREEXTERNALKHRPROC) (EGLDisplay dpy, EGLStreamKHR stream); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLSTREAMCONSUMERACQUIREKHRPROC) (EGLDisplay dpy, EGLStreamKHR stream); +typedef EGLBoolean 
(EGLAPIENTRYP PFNEGLSTREAMCONSUMERRELEASEKHRPROC) (EGLDisplay dpy, EGLStreamKHR stream); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLBoolean EGLAPIENTRY eglStreamConsumerGLTextureExternalKHR (EGLDisplay dpy, EGLStreamKHR stream); +EGLAPI EGLBoolean EGLAPIENTRY eglStreamConsumerAcquireKHR (EGLDisplay dpy, EGLStreamKHR stream); +EGLAPI EGLBoolean EGLAPIENTRY eglStreamConsumerReleaseKHR (EGLDisplay dpy, EGLStreamKHR stream); +#endif +#endif /* EGL_KHR_stream */ +#endif /* EGL_KHR_stream_consumer_gltexture */ + +#ifndef EGL_KHR_stream_cross_process_fd +#define EGL_KHR_stream_cross_process_fd 1 +typedef int EGLNativeFileDescriptorKHR; +#ifdef EGL_KHR_stream +#define EGL_NO_FILE_DESCRIPTOR_KHR EGL_CAST(EGLNativeFileDescriptorKHR,-1) +typedef EGLNativeFileDescriptorKHR (EGLAPIENTRYP PFNEGLGETSTREAMFILEDESCRIPTORKHRPROC) (EGLDisplay dpy, EGLStreamKHR stream); +typedef EGLStreamKHR (EGLAPIENTRYP PFNEGLCREATESTREAMFROMFILEDESCRIPTORKHRPROC) (EGLDisplay dpy, EGLNativeFileDescriptorKHR file_descriptor); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLNativeFileDescriptorKHR EGLAPIENTRY eglGetStreamFileDescriptorKHR (EGLDisplay dpy, EGLStreamKHR stream); +EGLAPI EGLStreamKHR EGLAPIENTRY eglCreateStreamFromFileDescriptorKHR (EGLDisplay dpy, EGLNativeFileDescriptorKHR file_descriptor); +#endif +#endif /* EGL_KHR_stream */ +#endif /* EGL_KHR_stream_cross_process_fd */ + +#ifndef EGL_KHR_stream_fifo +#define EGL_KHR_stream_fifo 1 +#ifdef EGL_KHR_stream +#define EGL_STREAM_FIFO_LENGTH_KHR 0x31FC +#define EGL_STREAM_TIME_NOW_KHR 0x31FD +#define EGL_STREAM_TIME_CONSUMER_KHR 0x31FE +#define EGL_STREAM_TIME_PRODUCER_KHR 0x31FF +typedef EGLBoolean (EGLAPIENTRYP PFNEGLQUERYSTREAMTIMEKHRPROC) (EGLDisplay dpy, EGLStreamKHR stream, EGLenum attribute, EGLTimeKHR *value); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLBoolean EGLAPIENTRY eglQueryStreamTimeKHR (EGLDisplay dpy, EGLStreamKHR stream, EGLenum attribute, EGLTimeKHR *value); +#endif +#endif /* EGL_KHR_stream */ +#endif /* EGL_KHR_stream_fifo */ + +#ifndef EGL_KHR_stream_producer_aldatalocator +#define EGL_KHR_stream_producer_aldatalocator 1 +#ifdef EGL_KHR_stream +#endif /* EGL_KHR_stream */ +#endif /* EGL_KHR_stream_producer_aldatalocator */ + +#ifndef EGL_KHR_stream_producer_eglsurface +#define EGL_KHR_stream_producer_eglsurface 1 +#ifdef EGL_KHR_stream +#define EGL_STREAM_BIT_KHR 0x0800 +typedef EGLSurface (EGLAPIENTRYP PFNEGLCREATESTREAMPRODUCERSURFACEKHRPROC) (EGLDisplay dpy, EGLConfig config, EGLStreamKHR stream, const EGLint *attrib_list); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLSurface EGLAPIENTRY eglCreateStreamProducerSurfaceKHR (EGLDisplay dpy, EGLConfig config, EGLStreamKHR stream, const EGLint *attrib_list); +#endif +#endif /* EGL_KHR_stream */ +#endif /* EGL_KHR_stream_producer_eglsurface */ + +#ifndef EGL_KHR_surfaceless_context +#define EGL_KHR_surfaceless_context 1 +#endif /* EGL_KHR_surfaceless_context */ + +#ifndef EGL_KHR_swap_buffers_with_damage +#define EGL_KHR_swap_buffers_with_damage 1 +typedef EGLBoolean (EGLAPIENTRYP PFNEGLSWAPBUFFERSWITHDAMAGEKHRPROC) (EGLDisplay dpy, EGLSurface surface, EGLint *rects, EGLint n_rects); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLBoolean EGLAPIENTRY eglSwapBuffersWithDamageKHR (EGLDisplay dpy, EGLSurface surface, EGLint *rects, EGLint n_rects); +#endif +#endif /* EGL_KHR_swap_buffers_with_damage */ + +#ifndef EGL_KHR_vg_parent_image +#define EGL_KHR_vg_parent_image 1 +#define EGL_VG_PARENT_IMAGE_KHR 0x30BA +#endif /* EGL_KHR_vg_parent_image */ + +#ifndef EGL_KHR_wait_sync +#define EGL_KHR_wait_sync 1 +typedef EGLint 
(EGLAPIENTRYP PFNEGLWAITSYNCKHRPROC) (EGLDisplay dpy, EGLSyncKHR sync, EGLint flags); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLint EGLAPIENTRY eglWaitSyncKHR (EGLDisplay dpy, EGLSyncKHR sync, EGLint flags); +#endif +#endif /* EGL_KHR_wait_sync */ + +#ifndef EGL_ANDROID_blob_cache +#define EGL_ANDROID_blob_cache 1 +typedef khronos_ssize_t EGLsizeiANDROID; +typedef void (*EGLSetBlobFuncANDROID) (const void *key, EGLsizeiANDROID keySize, const void *value, EGLsizeiANDROID valueSize); +typedef EGLsizeiANDROID (*EGLGetBlobFuncANDROID) (const void *key, EGLsizeiANDROID keySize, void *value, EGLsizeiANDROID valueSize); +typedef void (EGLAPIENTRYP PFNEGLSETBLOBCACHEFUNCSANDROIDPROC) (EGLDisplay dpy, EGLSetBlobFuncANDROID set, EGLGetBlobFuncANDROID get); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI void EGLAPIENTRY eglSetBlobCacheFuncsANDROID (EGLDisplay dpy, EGLSetBlobFuncANDROID set, EGLGetBlobFuncANDROID get); +#endif +#endif /* EGL_ANDROID_blob_cache */ + +#ifndef EGL_ANDROID_create_native_client_buffer +#define EGL_ANDROID_create_native_client_buffer 1 +#define EGL_NATIVE_BUFFER_USAGE_ANDROID 0x3143 +#define EGL_NATIVE_BUFFER_USAGE_PROTECTED_BIT_ANDROID 0x00000001 +#define EGL_NATIVE_BUFFER_USAGE_RENDERBUFFER_BIT_ANDROID 0x00000002 +#define EGL_NATIVE_BUFFER_USAGE_TEXTURE_BIT_ANDROID 0x00000004 +typedef EGLClientBuffer (EGLAPIENTRYP PFNEGLCREATENATIVECLIENTBUFFERANDROIDPROC) (const EGLint *attrib_list); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLClientBuffer EGLAPIENTRY eglCreateNativeClientBufferANDROID (const EGLint *attrib_list); +#endif +#endif /* EGL_ANDROID_create_native_client_buffer */ + +#ifndef EGL_ANDROID_framebuffer_target +#define EGL_ANDROID_framebuffer_target 1 +#define EGL_FRAMEBUFFER_TARGET_ANDROID 0x3147 +#endif /* EGL_ANDROID_framebuffer_target */ + +#ifndef EGL_ANDROID_front_buffer_auto_refresh +#define EGL_ANDROID_front_buffer_auto_refresh 1 +#define EGL_FRONT_BUFFER_AUTO_REFRESH_ANDROID 0x314C +#endif /* EGL_ANDROID_front_buffer_auto_refresh */ + +#ifndef EGL_ANDROID_image_native_buffer +#define EGL_ANDROID_image_native_buffer 1 +#define EGL_NATIVE_BUFFER_ANDROID 0x3140 +#endif /* EGL_ANDROID_image_native_buffer */ + +#ifndef EGL_ANDROID_native_fence_sync +#define EGL_ANDROID_native_fence_sync 1 +#define EGL_SYNC_NATIVE_FENCE_ANDROID 0x3144 +#define EGL_SYNC_NATIVE_FENCE_FD_ANDROID 0x3145 +#define EGL_SYNC_NATIVE_FENCE_SIGNALED_ANDROID 0x3146 +#define EGL_NO_NATIVE_FENCE_FD_ANDROID -1 +typedef EGLint (EGLAPIENTRYP PFNEGLDUPNATIVEFENCEFDANDROIDPROC) (EGLDisplay dpy, EGLSyncKHR sync); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLint EGLAPIENTRY eglDupNativeFenceFDANDROID (EGLDisplay dpy, EGLSyncKHR sync); +#endif +#endif /* EGL_ANDROID_native_fence_sync */ + +#ifndef EGL_ANDROID_presentation_time +#define EGL_ANDROID_presentation_time 1 +typedef khronos_stime_nanoseconds_t EGLnsecsANDROID; +typedef EGLBoolean (EGLAPIENTRYP PFNEGLPRESENTATIONTIMEANDROIDPROC) (EGLDisplay dpy, EGLSurface surface, EGLnsecsANDROID time); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLBoolean EGLAPIENTRY eglPresentationTimeANDROID (EGLDisplay dpy, EGLSurface surface, EGLnsecsANDROID time); +#endif +#endif /* EGL_ANDROID_presentation_time */ + +#ifndef EGL_ANDROID_recordable +#define EGL_ANDROID_recordable 1 +#define EGL_RECORDABLE_ANDROID 0x3142 +#endif /* EGL_ANDROID_recordable */ + +#ifndef EGL_ANGLE_d3d_share_handle_client_buffer +#define EGL_ANGLE_d3d_share_handle_client_buffer 1 +#define EGL_D3D_TEXTURE_2D_SHARE_HANDLE_ANGLE 0x3200 +#endif /* EGL_ANGLE_d3d_share_handle_client_buffer */ + +#ifndef 
EGL_ANGLE_device_d3d +#define EGL_ANGLE_device_d3d 1 +#define EGL_D3D9_DEVICE_ANGLE 0x33A0 +#define EGL_D3D11_DEVICE_ANGLE 0x33A1 +#endif /* EGL_ANGLE_device_d3d */ + +#ifndef EGL_ANGLE_query_surface_pointer +#define EGL_ANGLE_query_surface_pointer 1 +typedef EGLBoolean (EGLAPIENTRYP PFNEGLQUERYSURFACEPOINTERANGLEPROC) (EGLDisplay dpy, EGLSurface surface, EGLint attribute, void **value); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLBoolean EGLAPIENTRY eglQuerySurfacePointerANGLE (EGLDisplay dpy, EGLSurface surface, EGLint attribute, void **value); +#endif +#endif /* EGL_ANGLE_query_surface_pointer */ + +#ifndef EGL_ANGLE_surface_d3d_texture_2d_share_handle +#define EGL_ANGLE_surface_d3d_texture_2d_share_handle 1 +#endif /* EGL_ANGLE_surface_d3d_texture_2d_share_handle */ + +#ifndef EGL_ANGLE_window_fixed_size +#define EGL_ANGLE_window_fixed_size 1 +#define EGL_FIXED_SIZE_ANGLE 0x3201 +#endif /* EGL_ANGLE_window_fixed_size */ + +#ifndef EGL_ARM_implicit_external_sync +#define EGL_ARM_implicit_external_sync 1 +#define EGL_SYNC_PRIOR_COMMANDS_IMPLICIT_EXTERNAL_ARM 0x328A +#endif /* EGL_ARM_implicit_external_sync */ + +#ifndef EGL_ARM_pixmap_multisample_discard +#define EGL_ARM_pixmap_multisample_discard 1 +#define EGL_DISCARD_SAMPLES_ARM 0x3286 +#endif /* EGL_ARM_pixmap_multisample_discard */ + +#ifndef EGL_EXT_bind_to_front +#define EGL_EXT_bind_to_front 1 +#define EGL_FRONT_BUFFER_EXT 0x3464 +#endif /* EGL_EXT_bind_to_front */ + +#ifndef EGL_EXT_buffer_age +#define EGL_EXT_buffer_age 1 +#define EGL_BUFFER_AGE_EXT 0x313D +#endif /* EGL_EXT_buffer_age */ + +#ifndef EGL_EXT_client_extensions +#define EGL_EXT_client_extensions 1 +#endif /* EGL_EXT_client_extensions */ + +#ifndef EGL_EXT_compositor +#define EGL_EXT_compositor 1 +#define EGL_PRIMARY_COMPOSITOR_CONTEXT_EXT 0x3460 +#define EGL_EXTERNAL_REF_ID_EXT 0x3461 +#define EGL_COMPOSITOR_DROP_NEWEST_FRAME_EXT 0x3462 +#define EGL_COMPOSITOR_KEEP_NEWEST_FRAME_EXT 0x3463 +typedef EGLBoolean (EGLAPIENTRYP PFNEGLCOMPOSITORSETCONTEXTLISTEXTPROC) (const EGLint *external_ref_ids, EGLint num_entries); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLCOMPOSITORSETCONTEXTATTRIBUTESEXTPROC) (EGLint external_ref_id, const EGLint *context_attributes, EGLint num_entries); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLCOMPOSITORSETWINDOWLISTEXTPROC) (EGLint external_ref_id, const EGLint *external_win_ids, EGLint num_entries); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLCOMPOSITORSETWINDOWATTRIBUTESEXTPROC) (EGLint external_win_id, const EGLint *window_attributes, EGLint num_entries); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLCOMPOSITORBINDTEXWINDOWEXTPROC) (EGLint external_win_id); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLCOMPOSITORSETSIZEEXTPROC) (EGLint external_win_id, EGLint width, EGLint height); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLCOMPOSITORSWAPPOLICYEXTPROC) (EGLint external_win_id, EGLint policy); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLBoolean EGLAPIENTRY eglCompositorSetContextListEXT (const EGLint *external_ref_ids, EGLint num_entries); +EGLAPI EGLBoolean EGLAPIENTRY eglCompositorSetContextAttributesEXT (EGLint external_ref_id, const EGLint *context_attributes, EGLint num_entries); +EGLAPI EGLBoolean EGLAPIENTRY eglCompositorSetWindowListEXT (EGLint external_ref_id, const EGLint *external_win_ids, EGLint num_entries); +EGLAPI EGLBoolean EGLAPIENTRY eglCompositorSetWindowAttributesEXT (EGLint external_win_id, const EGLint *window_attributes, EGLint num_entries); +EGLAPI EGLBoolean EGLAPIENTRY eglCompositorBindTexWindowEXT (EGLint external_win_id); +EGLAPI EGLBoolean 
EGLAPIENTRY eglCompositorSetSizeEXT (EGLint external_win_id, EGLint width, EGLint height); +EGLAPI EGLBoolean EGLAPIENTRY eglCompositorSwapPolicyEXT (EGLint external_win_id, EGLint policy); +#endif +#endif /* EGL_EXT_compositor */ + +#ifndef EGL_EXT_create_context_robustness +#define EGL_EXT_create_context_robustness 1 +#define EGL_CONTEXT_OPENGL_ROBUST_ACCESS_EXT 0x30BF +#define EGL_CONTEXT_OPENGL_RESET_NOTIFICATION_STRATEGY_EXT 0x3138 +#define EGL_NO_RESET_NOTIFICATION_EXT 0x31BE +#define EGL_LOSE_CONTEXT_ON_RESET_EXT 0x31BF +#endif /* EGL_EXT_create_context_robustness */ + +#ifndef EGL_EXT_device_base +#define EGL_EXT_device_base 1 +typedef void *EGLDeviceEXT; +#define EGL_NO_DEVICE_EXT EGL_CAST(EGLDeviceEXT,0) +#define EGL_BAD_DEVICE_EXT 0x322B +#define EGL_DEVICE_EXT 0x322C +typedef EGLBoolean (EGLAPIENTRYP PFNEGLQUERYDEVICEATTRIBEXTPROC) (EGLDeviceEXT device, EGLint attribute, EGLAttrib *value); +typedef const char *(EGLAPIENTRYP PFNEGLQUERYDEVICESTRINGEXTPROC) (EGLDeviceEXT device, EGLint name); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLQUERYDEVICESEXTPROC) (EGLint max_devices, EGLDeviceEXT *devices, EGLint *num_devices); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLQUERYDISPLAYATTRIBEXTPROC) (EGLDisplay dpy, EGLint attribute, EGLAttrib *value); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLBoolean EGLAPIENTRY eglQueryDeviceAttribEXT (EGLDeviceEXT device, EGLint attribute, EGLAttrib *value); +EGLAPI const char *EGLAPIENTRY eglQueryDeviceStringEXT (EGLDeviceEXT device, EGLint name); +EGLAPI EGLBoolean EGLAPIENTRY eglQueryDevicesEXT (EGLint max_devices, EGLDeviceEXT *devices, EGLint *num_devices); +EGLAPI EGLBoolean EGLAPIENTRY eglQueryDisplayAttribEXT (EGLDisplay dpy, EGLint attribute, EGLAttrib *value); +#endif +#endif /* EGL_EXT_device_base */ + +#ifndef EGL_EXT_device_drm +#define EGL_EXT_device_drm 1 +#define EGL_DRM_DEVICE_FILE_EXT 0x3233 +#endif /* EGL_EXT_device_drm */ + +#ifndef EGL_EXT_device_enumeration +#define EGL_EXT_device_enumeration 1 +#endif /* EGL_EXT_device_enumeration */ + +#ifndef EGL_EXT_device_openwf +#define EGL_EXT_device_openwf 1 +#define EGL_OPENWF_DEVICE_ID_EXT 0x3237 +#endif /* EGL_EXT_device_openwf */ + +#ifndef EGL_EXT_device_query +#define EGL_EXT_device_query 1 +#endif /* EGL_EXT_device_query */ + +#ifndef EGL_EXT_gl_colorspace_bt2020_linear +#define EGL_EXT_gl_colorspace_bt2020_linear 1 +#define EGL_GL_COLORSPACE_BT2020_LINEAR_EXT 0x333F +#endif /* EGL_EXT_gl_colorspace_bt2020_linear */ + +#ifndef EGL_EXT_gl_colorspace_bt2020_pq +#define EGL_EXT_gl_colorspace_bt2020_pq 1 +#define EGL_GL_COLORSPACE_BT2020_PQ_EXT 0x3340 +#endif /* EGL_EXT_gl_colorspace_bt2020_pq */ + +#ifndef EGL_EXT_gl_colorspace_display_p3 +#define EGL_EXT_gl_colorspace_display_p3 1 +#define EGL_GL_COLORSPACE_DISPLAY_P3_EXT 0x3363 +#endif /* EGL_EXT_gl_colorspace_display_p3 */ + +#ifndef EGL_EXT_gl_colorspace_display_p3_linear +#define EGL_EXT_gl_colorspace_display_p3_linear 1 +#define EGL_GL_COLORSPACE_DISPLAY_P3_LINEAR_EXT 0x3362 +#endif /* EGL_EXT_gl_colorspace_display_p3_linear */ + +#ifndef EGL_EXT_gl_colorspace_scrgb +#define EGL_EXT_gl_colorspace_scrgb 1 +#define EGL_GL_COLORSPACE_SCRGB_EXT 0x3351 +#endif /* EGL_EXT_gl_colorspace_scrgb */ + +#ifndef EGL_EXT_gl_colorspace_scrgb_linear +#define EGL_EXT_gl_colorspace_scrgb_linear 1 +#define EGL_GL_COLORSPACE_SCRGB_LINEAR_EXT 0x3350 +#endif /* EGL_EXT_gl_colorspace_scrgb_linear */ + +#ifndef EGL_EXT_image_dma_buf_import +#define EGL_EXT_image_dma_buf_import 1 +#define EGL_LINUX_DMA_BUF_EXT 0x3270 +#define EGL_LINUX_DRM_FOURCC_EXT 0x3271 
+#define EGL_DMA_BUF_PLANE0_FD_EXT 0x3272 +#define EGL_DMA_BUF_PLANE0_OFFSET_EXT 0x3273 +#define EGL_DMA_BUF_PLANE0_PITCH_EXT 0x3274 +#define EGL_DMA_BUF_PLANE1_FD_EXT 0x3275 +#define EGL_DMA_BUF_PLANE1_OFFSET_EXT 0x3276 +#define EGL_DMA_BUF_PLANE1_PITCH_EXT 0x3277 +#define EGL_DMA_BUF_PLANE2_FD_EXT 0x3278 +#define EGL_DMA_BUF_PLANE2_OFFSET_EXT 0x3279 +#define EGL_DMA_BUF_PLANE2_PITCH_EXT 0x327A +#define EGL_YUV_COLOR_SPACE_HINT_EXT 0x327B +#define EGL_SAMPLE_RANGE_HINT_EXT 0x327C +#define EGL_YUV_CHROMA_HORIZONTAL_SITING_HINT_EXT 0x327D +#define EGL_YUV_CHROMA_VERTICAL_SITING_HINT_EXT 0x327E +#define EGL_ITU_REC601_EXT 0x327F +#define EGL_ITU_REC709_EXT 0x3280 +#define EGL_ITU_REC2020_EXT 0x3281 +#define EGL_YUV_FULL_RANGE_EXT 0x3282 +#define EGL_YUV_NARROW_RANGE_EXT 0x3283 +#define EGL_YUV_CHROMA_SITING_0_EXT 0x3284 +#define EGL_YUV_CHROMA_SITING_0_5_EXT 0x3285 +#endif /* EGL_EXT_image_dma_buf_import */ + +#ifndef EGL_EXT_image_dma_buf_import_modifiers +#define EGL_EXT_image_dma_buf_import_modifiers 1 +#define EGL_DMA_BUF_PLANE3_FD_EXT 0x3440 +#define EGL_DMA_BUF_PLANE3_OFFSET_EXT 0x3441 +#define EGL_DMA_BUF_PLANE3_PITCH_EXT 0x3442 +#define EGL_DMA_BUF_PLANE0_MODIFIER_LO_EXT 0x3443 +#define EGL_DMA_BUF_PLANE0_MODIFIER_HI_EXT 0x3444 +#define EGL_DMA_BUF_PLANE1_MODIFIER_LO_EXT 0x3445 +#define EGL_DMA_BUF_PLANE1_MODIFIER_HI_EXT 0x3446 +#define EGL_DMA_BUF_PLANE2_MODIFIER_LO_EXT 0x3447 +#define EGL_DMA_BUF_PLANE2_MODIFIER_HI_EXT 0x3448 +#define EGL_DMA_BUF_PLANE3_MODIFIER_LO_EXT 0x3449 +#define EGL_DMA_BUF_PLANE3_MODIFIER_HI_EXT 0x344A +typedef EGLBoolean (EGLAPIENTRYP PFNEGLQUERYDMABUFFORMATSEXTPROC) (EGLDisplay dpy, EGLint max_formats, EGLint *formats, EGLint *num_formats); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLQUERYDMABUFMODIFIERSEXTPROC) (EGLDisplay dpy, EGLint format, EGLint max_modifiers, EGLuint64KHR *modifiers, EGLBoolean *external_only, EGLint *num_modifiers); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLBoolean EGLAPIENTRY eglQueryDmaBufFormatsEXT (EGLDisplay dpy, EGLint max_formats, EGLint *formats, EGLint *num_formats); +EGLAPI EGLBoolean EGLAPIENTRY eglQueryDmaBufModifiersEXT (EGLDisplay dpy, EGLint format, EGLint max_modifiers, EGLuint64KHR *modifiers, EGLBoolean *external_only, EGLint *num_modifiers); +#endif +#endif /* EGL_EXT_image_dma_buf_import_modifiers */ + +#ifndef EGL_EXT_image_implicit_sync_control +#define EGL_EXT_image_implicit_sync_control 1 +#define EGL_IMPORT_SYNC_TYPE_EXT 0x3470 +#define EGL_IMPORT_IMPLICIT_SYNC_EXT 0x3471 +#define EGL_IMPORT_EXPLICIT_SYNC_EXT 0x3472 +#endif /* EGL_EXT_image_implicit_sync_control */ + +#ifndef EGL_EXT_multiview_window +#define EGL_EXT_multiview_window 1 +#define EGL_MULTIVIEW_VIEW_COUNT_EXT 0x3134 +#endif /* EGL_EXT_multiview_window */ + +#ifndef EGL_EXT_output_base +#define EGL_EXT_output_base 1 +typedef void *EGLOutputLayerEXT; +typedef void *EGLOutputPortEXT; +#define EGL_NO_OUTPUT_LAYER_EXT EGL_CAST(EGLOutputLayerEXT,0) +#define EGL_NO_OUTPUT_PORT_EXT EGL_CAST(EGLOutputPortEXT,0) +#define EGL_BAD_OUTPUT_LAYER_EXT 0x322D +#define EGL_BAD_OUTPUT_PORT_EXT 0x322E +#define EGL_SWAP_INTERVAL_EXT 0x322F +typedef EGLBoolean (EGLAPIENTRYP PFNEGLGETOUTPUTLAYERSEXTPROC) (EGLDisplay dpy, const EGLAttrib *attrib_list, EGLOutputLayerEXT *layers, EGLint max_layers, EGLint *num_layers); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLGETOUTPUTPORTSEXTPROC) (EGLDisplay dpy, const EGLAttrib *attrib_list, EGLOutputPortEXT *ports, EGLint max_ports, EGLint *num_ports); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLOUTPUTLAYERATTRIBEXTPROC) (EGLDisplay dpy, 
EGLOutputLayerEXT layer, EGLint attribute, EGLAttrib value); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLQUERYOUTPUTLAYERATTRIBEXTPROC) (EGLDisplay dpy, EGLOutputLayerEXT layer, EGLint attribute, EGLAttrib *value); +typedef const char *(EGLAPIENTRYP PFNEGLQUERYOUTPUTLAYERSTRINGEXTPROC) (EGLDisplay dpy, EGLOutputLayerEXT layer, EGLint name); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLOUTPUTPORTATTRIBEXTPROC) (EGLDisplay dpy, EGLOutputPortEXT port, EGLint attribute, EGLAttrib value); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLQUERYOUTPUTPORTATTRIBEXTPROC) (EGLDisplay dpy, EGLOutputPortEXT port, EGLint attribute, EGLAttrib *value); +typedef const char *(EGLAPIENTRYP PFNEGLQUERYOUTPUTPORTSTRINGEXTPROC) (EGLDisplay dpy, EGLOutputPortEXT port, EGLint name); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLBoolean EGLAPIENTRY eglGetOutputLayersEXT (EGLDisplay dpy, const EGLAttrib *attrib_list, EGLOutputLayerEXT *layers, EGLint max_layers, EGLint *num_layers); +EGLAPI EGLBoolean EGLAPIENTRY eglGetOutputPortsEXT (EGLDisplay dpy, const EGLAttrib *attrib_list, EGLOutputPortEXT *ports, EGLint max_ports, EGLint *num_ports); +EGLAPI EGLBoolean EGLAPIENTRY eglOutputLayerAttribEXT (EGLDisplay dpy, EGLOutputLayerEXT layer, EGLint attribute, EGLAttrib value); +EGLAPI EGLBoolean EGLAPIENTRY eglQueryOutputLayerAttribEXT (EGLDisplay dpy, EGLOutputLayerEXT layer, EGLint attribute, EGLAttrib *value); +EGLAPI const char *EGLAPIENTRY eglQueryOutputLayerStringEXT (EGLDisplay dpy, EGLOutputLayerEXT layer, EGLint name); +EGLAPI EGLBoolean EGLAPIENTRY eglOutputPortAttribEXT (EGLDisplay dpy, EGLOutputPortEXT port, EGLint attribute, EGLAttrib value); +EGLAPI EGLBoolean EGLAPIENTRY eglQueryOutputPortAttribEXT (EGLDisplay dpy, EGLOutputPortEXT port, EGLint attribute, EGLAttrib *value); +EGLAPI const char *EGLAPIENTRY eglQueryOutputPortStringEXT (EGLDisplay dpy, EGLOutputPortEXT port, EGLint name); +#endif +#endif /* EGL_EXT_output_base */ + +#ifndef EGL_EXT_output_drm +#define EGL_EXT_output_drm 1 +#define EGL_DRM_CRTC_EXT 0x3234 +#define EGL_DRM_PLANE_EXT 0x3235 +#define EGL_DRM_CONNECTOR_EXT 0x3236 +#endif /* EGL_EXT_output_drm */ + +#ifndef EGL_EXT_output_openwf +#define EGL_EXT_output_openwf 1 +#define EGL_OPENWF_PIPELINE_ID_EXT 0x3238 +#define EGL_OPENWF_PORT_ID_EXT 0x3239 +#endif /* EGL_EXT_output_openwf */ + +#ifndef EGL_EXT_pixel_format_float +#define EGL_EXT_pixel_format_float 1 +#define EGL_COLOR_COMPONENT_TYPE_EXT 0x3339 +#define EGL_COLOR_COMPONENT_TYPE_FIXED_EXT 0x333A +#define EGL_COLOR_COMPONENT_TYPE_FLOAT_EXT 0x333B +#endif /* EGL_EXT_pixel_format_float */ + +#ifndef EGL_EXT_platform_base +#define EGL_EXT_platform_base 1 +typedef EGLDisplay (EGLAPIENTRYP PFNEGLGETPLATFORMDISPLAYEXTPROC) (EGLenum platform, void *native_display, const EGLint *attrib_list); +typedef EGLSurface (EGLAPIENTRYP PFNEGLCREATEPLATFORMWINDOWSURFACEEXTPROC) (EGLDisplay dpy, EGLConfig config, void *native_window, const EGLint *attrib_list); +typedef EGLSurface (EGLAPIENTRYP PFNEGLCREATEPLATFORMPIXMAPSURFACEEXTPROC) (EGLDisplay dpy, EGLConfig config, void *native_pixmap, const EGLint *attrib_list); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLDisplay EGLAPIENTRY eglGetPlatformDisplayEXT (EGLenum platform, void *native_display, const EGLint *attrib_list); +EGLAPI EGLSurface EGLAPIENTRY eglCreatePlatformWindowSurfaceEXT (EGLDisplay dpy, EGLConfig config, void *native_window, const EGLint *attrib_list); +EGLAPI EGLSurface EGLAPIENTRY eglCreatePlatformPixmapSurfaceEXT (EGLDisplay dpy, EGLConfig config, void *native_pixmap, const EGLint *attrib_list); +#endif 
+#endif /* EGL_EXT_platform_base */ + +#ifndef EGL_EXT_platform_device +#define EGL_EXT_platform_device 1 +#define EGL_PLATFORM_DEVICE_EXT 0x313F +#endif /* EGL_EXT_platform_device */ + +#ifndef EGL_EXT_platform_wayland +#define EGL_EXT_platform_wayland 1 +#define EGL_PLATFORM_WAYLAND_EXT 0x31D8 +#endif /* EGL_EXT_platform_wayland */ + +#ifndef EGL_EXT_platform_x11 +#define EGL_EXT_platform_x11 1 +#define EGL_PLATFORM_X11_EXT 0x31D5 +#define EGL_PLATFORM_X11_SCREEN_EXT 0x31D6 +#endif /* EGL_EXT_platform_x11 */ + +#ifndef EGL_EXT_protected_content +#define EGL_EXT_protected_content 1 +#define EGL_PROTECTED_CONTENT_EXT 0x32C0 +#endif /* EGL_EXT_protected_content */ + +#ifndef EGL_EXT_protected_surface +#define EGL_EXT_protected_surface 1 +#endif /* EGL_EXT_protected_surface */ + +#ifndef EGL_EXT_stream_consumer_egloutput +#define EGL_EXT_stream_consumer_egloutput 1 +typedef EGLBoolean (EGLAPIENTRYP PFNEGLSTREAMCONSUMEROUTPUTEXTPROC) (EGLDisplay dpy, EGLStreamKHR stream, EGLOutputLayerEXT layer); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLBoolean EGLAPIENTRY eglStreamConsumerOutputEXT (EGLDisplay dpy, EGLStreamKHR stream, EGLOutputLayerEXT layer); +#endif +#endif /* EGL_EXT_stream_consumer_egloutput */ + +#ifndef EGL_EXT_surface_CTA861_3_metadata +#define EGL_EXT_surface_CTA861_3_metadata 1 +#define EGL_CTA861_3_MAX_CONTENT_LIGHT_LEVEL_EXT 0x3360 +#define EGL_CTA861_3_MAX_FRAME_AVERAGE_LEVEL_EXT 0x3361 +#endif /* EGL_EXT_surface_CTA861_3_metadata */ + +#ifndef EGL_EXT_surface_SMPTE2086_metadata +#define EGL_EXT_surface_SMPTE2086_metadata 1 +#define EGL_SMPTE2086_DISPLAY_PRIMARY_RX_EXT 0x3341 +#define EGL_SMPTE2086_DISPLAY_PRIMARY_RY_EXT 0x3342 +#define EGL_SMPTE2086_DISPLAY_PRIMARY_GX_EXT 0x3343 +#define EGL_SMPTE2086_DISPLAY_PRIMARY_GY_EXT 0x3344 +#define EGL_SMPTE2086_DISPLAY_PRIMARY_BX_EXT 0x3345 +#define EGL_SMPTE2086_DISPLAY_PRIMARY_BY_EXT 0x3346 +#define EGL_SMPTE2086_WHITE_POINT_X_EXT 0x3347 +#define EGL_SMPTE2086_WHITE_POINT_Y_EXT 0x3348 +#define EGL_SMPTE2086_MAX_LUMINANCE_EXT 0x3349 +#define EGL_SMPTE2086_MIN_LUMINANCE_EXT 0x334A +#define EGL_METADATA_SCALING_EXT 50000 +#endif /* EGL_EXT_surface_SMPTE2086_metadata */ + +#ifndef EGL_EXT_swap_buffers_with_damage +#define EGL_EXT_swap_buffers_with_damage 1 +typedef EGLBoolean (EGLAPIENTRYP PFNEGLSWAPBUFFERSWITHDAMAGEEXTPROC) (EGLDisplay dpy, EGLSurface surface, EGLint *rects, EGLint n_rects); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLBoolean EGLAPIENTRY eglSwapBuffersWithDamageEXT (EGLDisplay dpy, EGLSurface surface, EGLint *rects, EGLint n_rects); +#endif +#endif /* EGL_EXT_swap_buffers_with_damage */ + +#ifndef EGL_EXT_yuv_surface +#define EGL_EXT_yuv_surface 1 +#define EGL_YUV_ORDER_EXT 0x3301 +#define EGL_YUV_NUMBER_OF_PLANES_EXT 0x3311 +#define EGL_YUV_SUBSAMPLE_EXT 0x3312 +#define EGL_YUV_DEPTH_RANGE_EXT 0x3317 +#define EGL_YUV_CSC_STANDARD_EXT 0x330A +#define EGL_YUV_PLANE_BPP_EXT 0x331A +#define EGL_YUV_BUFFER_EXT 0x3300 +#define EGL_YUV_ORDER_YUV_EXT 0x3302 +#define EGL_YUV_ORDER_YVU_EXT 0x3303 +#define EGL_YUV_ORDER_YUYV_EXT 0x3304 +#define EGL_YUV_ORDER_UYVY_EXT 0x3305 +#define EGL_YUV_ORDER_YVYU_EXT 0x3306 +#define EGL_YUV_ORDER_VYUY_EXT 0x3307 +#define EGL_YUV_ORDER_AYUV_EXT 0x3308 +#define EGL_YUV_SUBSAMPLE_4_2_0_EXT 0x3313 +#define EGL_YUV_SUBSAMPLE_4_2_2_EXT 0x3314 +#define EGL_YUV_SUBSAMPLE_4_4_4_EXT 0x3315 +#define EGL_YUV_DEPTH_RANGE_LIMITED_EXT 0x3318 +#define EGL_YUV_DEPTH_RANGE_FULL_EXT 0x3319 +#define EGL_YUV_CSC_STANDARD_601_EXT 0x330B +#define EGL_YUV_CSC_STANDARD_709_EXT 0x330C +#define 
EGL_YUV_CSC_STANDARD_2020_EXT 0x330D +#define EGL_YUV_PLANE_BPP_0_EXT 0x331B +#define EGL_YUV_PLANE_BPP_8_EXT 0x331C +#define EGL_YUV_PLANE_BPP_10_EXT 0x331D +#endif /* EGL_EXT_yuv_surface */ + +#ifndef EGL_HI_clientpixmap +#define EGL_HI_clientpixmap 1 +struct EGLClientPixmapHI { + void *pData; + EGLint iWidth; + EGLint iHeight; + EGLint iStride; +}; +#define EGL_CLIENT_PIXMAP_POINTER_HI 0x8F74 +typedef EGLSurface (EGLAPIENTRYP PFNEGLCREATEPIXMAPSURFACEHIPROC) (EGLDisplay dpy, EGLConfig config, struct EGLClientPixmapHI *pixmap); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLSurface EGLAPIENTRY eglCreatePixmapSurfaceHI (EGLDisplay dpy, EGLConfig config, struct EGLClientPixmapHI *pixmap); +#endif +#endif /* EGL_HI_clientpixmap */ + +#ifndef EGL_HI_colorformats +#define EGL_HI_colorformats 1 +#define EGL_COLOR_FORMAT_HI 0x8F70 +#define EGL_COLOR_RGB_HI 0x8F71 +#define EGL_COLOR_RGBA_HI 0x8F72 +#define EGL_COLOR_ARGB_HI 0x8F73 +#endif /* EGL_HI_colorformats */ + +#ifndef EGL_IMG_context_priority +#define EGL_IMG_context_priority 1 +#define EGL_CONTEXT_PRIORITY_LEVEL_IMG 0x3100 +#define EGL_CONTEXT_PRIORITY_HIGH_IMG 0x3101 +#define EGL_CONTEXT_PRIORITY_MEDIUM_IMG 0x3102 +#define EGL_CONTEXT_PRIORITY_LOW_IMG 0x3103 +#endif /* EGL_IMG_context_priority */ + +#ifndef EGL_IMG_image_plane_attribs +#define EGL_IMG_image_plane_attribs 1 +#define EGL_NATIVE_BUFFER_MULTIPLANE_SEPARATE_IMG 0x3105 +#define EGL_NATIVE_BUFFER_PLANE_OFFSET_IMG 0x3106 +#endif /* EGL_IMG_image_plane_attribs */ + +#ifndef EGL_MESA_drm_image +#define EGL_MESA_drm_image 1 +#define EGL_DRM_BUFFER_FORMAT_MESA 0x31D0 +#define EGL_DRM_BUFFER_USE_MESA 0x31D1 +#define EGL_DRM_BUFFER_FORMAT_ARGB32_MESA 0x31D2 +#define EGL_DRM_BUFFER_MESA 0x31D3 +#define EGL_DRM_BUFFER_STRIDE_MESA 0x31D4 +#define EGL_DRM_BUFFER_USE_SCANOUT_MESA 0x00000001 +#define EGL_DRM_BUFFER_USE_SHARE_MESA 0x00000002 +typedef EGLImageKHR (EGLAPIENTRYP PFNEGLCREATEDRMIMAGEMESAPROC) (EGLDisplay dpy, const EGLint *attrib_list); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLEXPORTDRMIMAGEMESAPROC) (EGLDisplay dpy, EGLImageKHR image, EGLint *name, EGLint *handle, EGLint *stride); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLImageKHR EGLAPIENTRY eglCreateDRMImageMESA (EGLDisplay dpy, const EGLint *attrib_list); +EGLAPI EGLBoolean EGLAPIENTRY eglExportDRMImageMESA (EGLDisplay dpy, EGLImageKHR image, EGLint *name, EGLint *handle, EGLint *stride); +#endif +#endif /* EGL_MESA_drm_image */ + +#ifndef EGL_MESA_image_dma_buf_export +#define EGL_MESA_image_dma_buf_export 1 +typedef EGLBoolean (EGLAPIENTRYP PFNEGLEXPORTDMABUFIMAGEQUERYMESAPROC) (EGLDisplay dpy, EGLImageKHR image, int *fourcc, int *num_planes, EGLuint64KHR *modifiers); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLEXPORTDMABUFIMAGEMESAPROC) (EGLDisplay dpy, EGLImageKHR image, int *fds, EGLint *strides, EGLint *offsets); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLBoolean EGLAPIENTRY eglExportDMABUFImageQueryMESA (EGLDisplay dpy, EGLImageKHR image, int *fourcc, int *num_planes, EGLuint64KHR *modifiers); +EGLAPI EGLBoolean EGLAPIENTRY eglExportDMABUFImageMESA (EGLDisplay dpy, EGLImageKHR image, int *fds, EGLint *strides, EGLint *offsets); +#endif +#endif /* EGL_MESA_image_dma_buf_export */ + +#ifndef EGL_MESA_platform_gbm +#define EGL_MESA_platform_gbm 1 +#define EGL_PLATFORM_GBM_MESA 0x31D7 +#endif /* EGL_MESA_platform_gbm */ + +#ifndef EGL_MESA_platform_surfaceless +#define EGL_MESA_platform_surfaceless 1 +#define EGL_PLATFORM_SURFACELESS_MESA 0x31DD +#endif /* EGL_MESA_platform_surfaceless */ + +#ifndef EGL_NOK_swap_region +#define 
EGL_NOK_swap_region 1 +typedef EGLBoolean (EGLAPIENTRYP PFNEGLSWAPBUFFERSREGIONNOKPROC) (EGLDisplay dpy, EGLSurface surface, EGLint numRects, const EGLint *rects); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLBoolean EGLAPIENTRY eglSwapBuffersRegionNOK (EGLDisplay dpy, EGLSurface surface, EGLint numRects, const EGLint *rects); +#endif +#endif /* EGL_NOK_swap_region */ + +#ifndef EGL_NOK_swap_region2 +#define EGL_NOK_swap_region2 1 +typedef EGLBoolean (EGLAPIENTRYP PFNEGLSWAPBUFFERSREGION2NOKPROC) (EGLDisplay dpy, EGLSurface surface, EGLint numRects, const EGLint *rects); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLBoolean EGLAPIENTRY eglSwapBuffersRegion2NOK (EGLDisplay dpy, EGLSurface surface, EGLint numRects, const EGLint *rects); +#endif +#endif /* EGL_NOK_swap_region2 */ + +#ifndef EGL_NOK_texture_from_pixmap +#define EGL_NOK_texture_from_pixmap 1 +#define EGL_Y_INVERTED_NOK 0x307F +#endif /* EGL_NOK_texture_from_pixmap */ + +#ifndef EGL_NV_3dvision_surface +#define EGL_NV_3dvision_surface 1 +#define EGL_AUTO_STEREO_NV 0x3136 +#endif /* EGL_NV_3dvision_surface */ + +#ifndef EGL_NV_coverage_sample +#define EGL_NV_coverage_sample 1 +#define EGL_COVERAGE_BUFFERS_NV 0x30E0 +#define EGL_COVERAGE_SAMPLES_NV 0x30E1 +#endif /* EGL_NV_coverage_sample */ + +#ifndef EGL_NV_coverage_sample_resolve +#define EGL_NV_coverage_sample_resolve 1 +#define EGL_COVERAGE_SAMPLE_RESOLVE_NV 0x3131 +#define EGL_COVERAGE_SAMPLE_RESOLVE_DEFAULT_NV 0x3132 +#define EGL_COVERAGE_SAMPLE_RESOLVE_NONE_NV 0x3133 +#endif /* EGL_NV_coverage_sample_resolve */ + +#ifndef EGL_NV_cuda_event +#define EGL_NV_cuda_event 1 +#define EGL_CUDA_EVENT_HANDLE_NV 0x323B +#define EGL_SYNC_CUDA_EVENT_NV 0x323C +#define EGL_SYNC_CUDA_EVENT_COMPLETE_NV 0x323D +#endif /* EGL_NV_cuda_event */ + +#ifndef EGL_NV_depth_nonlinear +#define EGL_NV_depth_nonlinear 1 +#define EGL_DEPTH_ENCODING_NV 0x30E2 +#define EGL_DEPTH_ENCODING_NONE_NV 0 +#define EGL_DEPTH_ENCODING_NONLINEAR_NV 0x30E3 +#endif /* EGL_NV_depth_nonlinear */ + +#ifndef EGL_NV_device_cuda +#define EGL_NV_device_cuda 1 +#define EGL_CUDA_DEVICE_NV 0x323A +#endif /* EGL_NV_device_cuda */ + +#ifndef EGL_NV_native_query +#define EGL_NV_native_query 1 +typedef EGLBoolean (EGLAPIENTRYP PFNEGLQUERYNATIVEDISPLAYNVPROC) (EGLDisplay dpy, EGLNativeDisplayType *display_id); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLQUERYNATIVEWINDOWNVPROC) (EGLDisplay dpy, EGLSurface surf, EGLNativeWindowType *window); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLQUERYNATIVEPIXMAPNVPROC) (EGLDisplay dpy, EGLSurface surf, EGLNativePixmapType *pixmap); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLBoolean EGLAPIENTRY eglQueryNativeDisplayNV (EGLDisplay dpy, EGLNativeDisplayType *display_id); +EGLAPI EGLBoolean EGLAPIENTRY eglQueryNativeWindowNV (EGLDisplay dpy, EGLSurface surf, EGLNativeWindowType *window); +EGLAPI EGLBoolean EGLAPIENTRY eglQueryNativePixmapNV (EGLDisplay dpy, EGLSurface surf, EGLNativePixmapType *pixmap); +#endif +#endif /* EGL_NV_native_query */ + +#ifndef EGL_NV_post_convert_rounding +#define EGL_NV_post_convert_rounding 1 +#endif /* EGL_NV_post_convert_rounding */ + +#ifndef EGL_NV_post_sub_buffer +#define EGL_NV_post_sub_buffer 1 +#define EGL_POST_SUB_BUFFER_SUPPORTED_NV 0x30BE +typedef EGLBoolean (EGLAPIENTRYP PFNEGLPOSTSUBBUFFERNVPROC) (EGLDisplay dpy, EGLSurface surface, EGLint x, EGLint y, EGLint width, EGLint height); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLBoolean EGLAPIENTRY eglPostSubBufferNV (EGLDisplay dpy, EGLSurface surface, EGLint x, EGLint y, EGLint width, EGLint height); +#endif +#endif /* 
EGL_NV_post_sub_buffer */ + +#ifndef EGL_NV_robustness_video_memory_purge +#define EGL_NV_robustness_video_memory_purge 1 +#define EGL_GENERATE_RESET_ON_VIDEO_MEMORY_PURGE_NV 0x334C +#endif /* EGL_NV_robustness_video_memory_purge */ + +#ifndef EGL_NV_stream_consumer_gltexture_yuv +#define EGL_NV_stream_consumer_gltexture_yuv 1 +#define EGL_YUV_PLANE0_TEXTURE_UNIT_NV 0x332C +#define EGL_YUV_PLANE1_TEXTURE_UNIT_NV 0x332D +#define EGL_YUV_PLANE2_TEXTURE_UNIT_NV 0x332E +typedef EGLBoolean (EGLAPIENTRYP PFNEGLSTREAMCONSUMERGLTEXTUREEXTERNALATTRIBSNVPROC) (EGLDisplay dpy, EGLStreamKHR stream, EGLAttrib *attrib_list); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLBoolean EGLAPIENTRY eglStreamConsumerGLTextureExternalAttribsNV (EGLDisplay dpy, EGLStreamKHR stream, EGLAttrib *attrib_list); +#endif +#endif /* EGL_NV_stream_consumer_gltexture_yuv */ + +#ifndef EGL_NV_stream_cross_display +#define EGL_NV_stream_cross_display 1 +#define EGL_STREAM_CROSS_DISPLAY_NV 0x334E +#endif /* EGL_NV_stream_cross_display */ + +#ifndef EGL_NV_stream_cross_object +#define EGL_NV_stream_cross_object 1 +#define EGL_STREAM_CROSS_OBJECT_NV 0x334D +#endif /* EGL_NV_stream_cross_object */ + +#ifndef EGL_NV_stream_cross_partition +#define EGL_NV_stream_cross_partition 1 +#define EGL_STREAM_CROSS_PARTITION_NV 0x323F +#endif /* EGL_NV_stream_cross_partition */ + +#ifndef EGL_NV_stream_cross_process +#define EGL_NV_stream_cross_process 1 +#define EGL_STREAM_CROSS_PROCESS_NV 0x3245 +#endif /* EGL_NV_stream_cross_process */ + +#ifndef EGL_NV_stream_cross_system +#define EGL_NV_stream_cross_system 1 +#define EGL_STREAM_CROSS_SYSTEM_NV 0x334F +#endif /* EGL_NV_stream_cross_system */ + +#ifndef EGL_NV_stream_fifo_next +#define EGL_NV_stream_fifo_next 1 +#define EGL_PENDING_FRAME_NV 0x3329 +#define EGL_STREAM_TIME_PENDING_NV 0x332A +#endif /* EGL_NV_stream_fifo_next */ + +#ifndef EGL_NV_stream_fifo_synchronous +#define EGL_NV_stream_fifo_synchronous 1 +#define EGL_STREAM_FIFO_SYNCHRONOUS_NV 0x3336 +#endif /* EGL_NV_stream_fifo_synchronous */ + +#ifndef EGL_NV_stream_frame_limits +#define EGL_NV_stream_frame_limits 1 +#define EGL_PRODUCER_MAX_FRAME_HINT_NV 0x3337 +#define EGL_CONSUMER_MAX_FRAME_HINT_NV 0x3338 +#endif /* EGL_NV_stream_frame_limits */ + +#ifndef EGL_NV_stream_metadata +#define EGL_NV_stream_metadata 1 +#define EGL_MAX_STREAM_METADATA_BLOCKS_NV 0x3250 +#define EGL_MAX_STREAM_METADATA_BLOCK_SIZE_NV 0x3251 +#define EGL_MAX_STREAM_METADATA_TOTAL_SIZE_NV 0x3252 +#define EGL_PRODUCER_METADATA_NV 0x3253 +#define EGL_CONSUMER_METADATA_NV 0x3254 +#define EGL_PENDING_METADATA_NV 0x3328 +#define EGL_METADATA0_SIZE_NV 0x3255 +#define EGL_METADATA1_SIZE_NV 0x3256 +#define EGL_METADATA2_SIZE_NV 0x3257 +#define EGL_METADATA3_SIZE_NV 0x3258 +#define EGL_METADATA0_TYPE_NV 0x3259 +#define EGL_METADATA1_TYPE_NV 0x325A +#define EGL_METADATA2_TYPE_NV 0x325B +#define EGL_METADATA3_TYPE_NV 0x325C +typedef EGLBoolean (EGLAPIENTRYP PFNEGLQUERYDISPLAYATTRIBNVPROC) (EGLDisplay dpy, EGLint attribute, EGLAttrib *value); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLSETSTREAMMETADATANVPROC) (EGLDisplay dpy, EGLStreamKHR stream, EGLint n, EGLint offset, EGLint size, const void *data); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLQUERYSTREAMMETADATANVPROC) (EGLDisplay dpy, EGLStreamKHR stream, EGLenum name, EGLint n, EGLint offset, EGLint size, void *data); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLBoolean EGLAPIENTRY eglQueryDisplayAttribNV (EGLDisplay dpy, EGLint attribute, EGLAttrib *value); +EGLAPI EGLBoolean EGLAPIENTRY eglSetStreamMetadataNV (EGLDisplay dpy, 
EGLStreamKHR stream, EGLint n, EGLint offset, EGLint size, const void *data); +EGLAPI EGLBoolean EGLAPIENTRY eglQueryStreamMetadataNV (EGLDisplay dpy, EGLStreamKHR stream, EGLenum name, EGLint n, EGLint offset, EGLint size, void *data); +#endif +#endif /* EGL_NV_stream_metadata */ + +#ifndef EGL_NV_stream_remote +#define EGL_NV_stream_remote 1 +#define EGL_STREAM_STATE_INITIALIZING_NV 0x3240 +#define EGL_STREAM_TYPE_NV 0x3241 +#define EGL_STREAM_PROTOCOL_NV 0x3242 +#define EGL_STREAM_ENDPOINT_NV 0x3243 +#define EGL_STREAM_LOCAL_NV 0x3244 +#define EGL_STREAM_PRODUCER_NV 0x3247 +#define EGL_STREAM_CONSUMER_NV 0x3248 +#define EGL_STREAM_PROTOCOL_FD_NV 0x3246 +#endif /* EGL_NV_stream_remote */ + +#ifndef EGL_NV_stream_reset +#define EGL_NV_stream_reset 1 +#define EGL_SUPPORT_RESET_NV 0x3334 +#define EGL_SUPPORT_REUSE_NV 0x3335 +typedef EGLBoolean (EGLAPIENTRYP PFNEGLRESETSTREAMNVPROC) (EGLDisplay dpy, EGLStreamKHR stream); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLBoolean EGLAPIENTRY eglResetStreamNV (EGLDisplay dpy, EGLStreamKHR stream); +#endif +#endif /* EGL_NV_stream_reset */ + +#ifndef EGL_NV_stream_socket +#define EGL_NV_stream_socket 1 +#define EGL_STREAM_PROTOCOL_SOCKET_NV 0x324B +#define EGL_SOCKET_HANDLE_NV 0x324C +#define EGL_SOCKET_TYPE_NV 0x324D +#endif /* EGL_NV_stream_socket */ + +#ifndef EGL_NV_stream_socket_inet +#define EGL_NV_stream_socket_inet 1 +#define EGL_SOCKET_TYPE_INET_NV 0x324F +#endif /* EGL_NV_stream_socket_inet */ + +#ifndef EGL_NV_stream_socket_unix +#define EGL_NV_stream_socket_unix 1 +#define EGL_SOCKET_TYPE_UNIX_NV 0x324E +#endif /* EGL_NV_stream_socket_unix */ + +#ifndef EGL_NV_stream_sync +#define EGL_NV_stream_sync 1 +#define EGL_SYNC_NEW_FRAME_NV 0x321F +typedef EGLSyncKHR (EGLAPIENTRYP PFNEGLCREATESTREAMSYNCNVPROC) (EGLDisplay dpy, EGLStreamKHR stream, EGLenum type, const EGLint *attrib_list); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLSyncKHR EGLAPIENTRY eglCreateStreamSyncNV (EGLDisplay dpy, EGLStreamKHR stream, EGLenum type, const EGLint *attrib_list); +#endif +#endif /* EGL_NV_stream_sync */ + +#ifndef EGL_NV_sync +#define EGL_NV_sync 1 +typedef void *EGLSyncNV; +typedef khronos_utime_nanoseconds_t EGLTimeNV; +#ifdef KHRONOS_SUPPORT_INT64 +#define EGL_SYNC_PRIOR_COMMANDS_COMPLETE_NV 0x30E6 +#define EGL_SYNC_STATUS_NV 0x30E7 +#define EGL_SIGNALED_NV 0x30E8 +#define EGL_UNSIGNALED_NV 0x30E9 +#define EGL_SYNC_FLUSH_COMMANDS_BIT_NV 0x0001 +#define EGL_FOREVER_NV 0xFFFFFFFFFFFFFFFFull +#define EGL_ALREADY_SIGNALED_NV 0x30EA +#define EGL_TIMEOUT_EXPIRED_NV 0x30EB +#define EGL_CONDITION_SATISFIED_NV 0x30EC +#define EGL_SYNC_TYPE_NV 0x30ED +#define EGL_SYNC_CONDITION_NV 0x30EE +#define EGL_SYNC_FENCE_NV 0x30EF +#define EGL_NO_SYNC_NV EGL_CAST(EGLSyncNV,0) +typedef EGLSyncNV (EGLAPIENTRYP PFNEGLCREATEFENCESYNCNVPROC) (EGLDisplay dpy, EGLenum condition, const EGLint *attrib_list); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLDESTROYSYNCNVPROC) (EGLSyncNV sync); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLFENCENVPROC) (EGLSyncNV sync); +typedef EGLint (EGLAPIENTRYP PFNEGLCLIENTWAITSYNCNVPROC) (EGLSyncNV sync, EGLint flags, EGLTimeNV timeout); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLSIGNALSYNCNVPROC) (EGLSyncNV sync, EGLenum mode); +typedef EGLBoolean (EGLAPIENTRYP PFNEGLGETSYNCATTRIBNVPROC) (EGLSyncNV sync, EGLint attribute, EGLint *value); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLSyncNV EGLAPIENTRY eglCreateFenceSyncNV (EGLDisplay dpy, EGLenum condition, const EGLint *attrib_list); +EGLAPI EGLBoolean EGLAPIENTRY eglDestroySyncNV (EGLSyncNV sync); +EGLAPI EGLBoolean 
EGLAPIENTRY eglFenceNV (EGLSyncNV sync); +EGLAPI EGLint EGLAPIENTRY eglClientWaitSyncNV (EGLSyncNV sync, EGLint flags, EGLTimeNV timeout); +EGLAPI EGLBoolean EGLAPIENTRY eglSignalSyncNV (EGLSyncNV sync, EGLenum mode); +EGLAPI EGLBoolean EGLAPIENTRY eglGetSyncAttribNV (EGLSyncNV sync, EGLint attribute, EGLint *value); +#endif +#endif /* KHRONOS_SUPPORT_INT64 */ +#endif /* EGL_NV_sync */ + +#ifndef EGL_NV_system_time +#define EGL_NV_system_time 1 +typedef khronos_utime_nanoseconds_t EGLuint64NV; +#ifdef KHRONOS_SUPPORT_INT64 +typedef EGLuint64NV (EGLAPIENTRYP PFNEGLGETSYSTEMTIMEFREQUENCYNVPROC) (void); +typedef EGLuint64NV (EGLAPIENTRYP PFNEGLGETSYSTEMTIMENVPROC) (void); +#ifdef EGL_EGLEXT_PROTOTYPES +EGLAPI EGLuint64NV EGLAPIENTRY eglGetSystemTimeFrequencyNV (void); +EGLAPI EGLuint64NV EGLAPIENTRY eglGetSystemTimeNV (void); +#endif +#endif /* KHRONOS_SUPPORT_INT64 */ +#endif /* EGL_NV_system_time */ + +#ifndef EGL_TIZEN_image_native_buffer +#define EGL_TIZEN_image_native_buffer 1 +#define EGL_NATIVE_BUFFER_TIZEN 0x32A0 +#endif /* EGL_TIZEN_image_native_buffer */ + +#ifndef EGL_TIZEN_image_native_surface +#define EGL_TIZEN_image_native_surface 1 +#define EGL_NATIVE_SURFACE_TIZEN 0x32A1 +#endif /* EGL_TIZEN_image_native_surface */ + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/sources/pvrsdk/Include/EGL/eglplatform.h b/sources/pvrsdk/Include/EGL/eglplatform.h new file mode 100755 index 00000000..4fd279dd --- /dev/null +++ b/sources/pvrsdk/Include/EGL/eglplatform.h @@ -0,0 +1,143 @@ +#ifndef __eglplatform_h_ +#define __eglplatform_h_ + +/* +** Copyright (c) 2007-2016 The Khronos Group Inc. +** +** Permission is hereby granted, free of charge, to any person obtaining a +** copy of this software and/or associated documentation files (the +** "Materials"), to deal in the Materials without restriction, including +** without limitation the rights to use, copy, modify, merge, publish, +** distribute, sublicense, and/or sell copies of the Materials, and to +** permit persons to whom the Materials are furnished to do so, subject to +** the following conditions: +** +** The above copyright notice and this permission notice shall be included +** in all copies or substantial portions of the Materials. +** +** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +** MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS. +*/ + +/* Platform-specific types and definitions for egl.h + * $Revision: 30994 $ on $Date: 2015-04-30 13:36:48 -0700 (Thu, 30 Apr 2015) $ + * + * Adopters may modify khrplatform.h and this file to suit their platform. + * You are encouraged to submit all modifications to the Khronos group so that + * they can be included in future versions of this file. Please submit changes + * by sending them to the public Khronos Bugzilla (http://khronos.org/bugzilla) + * by filing a bug against product "EGL" component "Registry". + */ + +#include "KHR/khrplatform.h" + +/* Macros used in EGL function prototype declarations. 
+ * + * EGL functions should be prototyped as: + * + * EGLAPI return-type EGLAPIENTRY eglFunction(arguments); + * typedef return-type (EGLAPIENTRYP PFNEGLFUNCTIONPROC) (arguments); + * + * KHRONOS_APICALL and KHRONOS_APIENTRY are defined in KHR/khrplatform.h + */ + +#ifndef EGLAPI +#define EGLAPI KHRONOS_APICALL +#endif + +#ifndef EGLAPIENTRY +#define EGLAPIENTRY KHRONOS_APIENTRY +#endif +#define EGLAPIENTRYP EGLAPIENTRY* + +/* The types NativeDisplayType, NativeWindowType, and NativePixmapType + * are aliases of window-system-dependent types, such as X Display * or + * Windows Device Context. They must be defined in platform-specific + * code below. The EGL-prefixed versions of Native*Type are the same + * types, renamed in EGL 1.3 so all types in the API start with "EGL". + * + * Khronos STRONGLY RECOMMENDS that you use the default definitions + * provided below, since these changes affect both binary and source + * portability of applications using EGL running on different EGL + * implementations. + */ + +#if defined(_WIN32) || defined(__VC32__) && !defined(__CYGWIN__) && !defined(__SCITECH_SNAP__) /* Win32 and WinCE */ +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN 1 +#endif +#include <windows.h> + +typedef HDC EGLNativeDisplayType; +typedef HBITMAP EGLNativePixmapType; +typedef HWND EGLNativeWindowType; + +#elif defined(__APPLE__) || defined(__WINSCW__) || defined(__SYMBIAN32__) /* Symbian */ + +typedef int EGLNativeDisplayType; +typedef void *EGLNativeWindowType; +typedef void *EGLNativePixmapType; + +#elif defined(__ANDROID__) || defined(ANDROID) + +#include <android/native_window.h> + +struct egl_native_pixmap_t; + +typedef struct ANativeWindow* EGLNativeWindowType; +typedef struct egl_native_pixmap_t* EGLNativePixmapType; +typedef void* EGLNativeDisplayType; + +#elif defined(SUPPORT_X11) + +/* X11 (tentative) */ +#include <X11/Xlib.h> +#include <X11/Xutil.h> + +typedef Display *EGLNativeDisplayType; +typedef Pixmap EGLNativePixmapType; +typedef Window EGLNativeWindowType; + +#else + +#if defined(_WIN64) || __WORDSIZE == 64 +typedef khronos_int64_t EGLNativeDisplayType; +#else +typedef int EGLNativeDisplayType; +#endif + +typedef void *EGLNativeWindowType; +typedef void *EGLNativePixmapType; + + +#endif + +/* EGL 1.2 types, renamed for consistency in EGL 1.3 */ +typedef EGLNativeDisplayType NativeDisplayType; +typedef EGLNativePixmapType NativePixmapType; +typedef EGLNativeWindowType NativeWindowType; + + +/* Define EGLint. This must be a signed integral type large enough to contain + * all legal attribute names and values passed into and out of EGL, whether + * their type is boolean, bitmask, enumerant (symbolic constant), integer, + * handle, or other. While in general a 32-bit integer will suffice, if + * handles are 64 bit types, then EGLint should be defined as a signed 64-bit + * integer type. + */ +typedef khronos_int32_t EGLint; + + +/* C++ / C typecast macros for special EGL handle values */ +#if defined(__cplusplus) +#define EGL_CAST(type, value) (static_cast<type>(value)) +#else +#define EGL_CAST(type, value) ((type) (value)) +#endif + +#endif /* __eglplatform_h */
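For illustration, a minimal sketch (not part of the vendored headers) of what the Native*Type aliases above buy an application: the same eglCreateWindowSurface call compiles whether EGLNativeWindowType resolves to an HWND, an ANativeWindow *, or an X11 Window. The helper name and the native_win parameter are placeholders; only eglCreateWindowSurface and the typedefs come from the headers themselves.

#include <EGL/egl.h>

/* native_win stands in for whatever handle the window system provides;
 * eglplatform.h makes the EGL call signature identical on every platform. */
static EGLSurface create_window_surface(EGLDisplay dpy, EGLConfig cfg,
                                        EGLNativeWindowType native_win)
{
    /* attrib_list may be NULL when no surface attributes are needed */
    return eglCreateWindowSurface(dpy, cfg, native_win, NULL);
}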
diff --git a/sources/pvrsdk/Include/GLES/egl.h b/sources/pvrsdk/Include/GLES/egl.h new file mode 100755 index 00000000..86f644c9 --- /dev/null +++ b/sources/pvrsdk/Include/GLES/egl.h @@ -0,0 +1,29 @@ +/* +** Copyright (c) 2008-2017 The Khronos Group Inc. +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +/* + * Skeleton egl.h to provide compatibility for early GLES 1.0 + * applications. Several early implementations included gl.h + * in egl.h leading applications to include only egl.h + */ + +#ifndef __legacy_egl_h_ +#define __legacy_egl_h_ + +#include <GLES/gl.h> +#include <GLES/glext.h> + +#endif /* __legacy_egl_h_ */
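A quick sketch (assumed usage, not SDK code) of the compatibility this skeleton preserves: a legacy GLES 1.0 source file that includes only GLES/egl.h still sees the core GL entry points, because the shim pulls in gl.h and glext.h. The clear_frame helper is illustrative; glClearColor and glClear are declared by the gl.h added below.

#include <GLES/egl.h>   /* legacy umbrella header: pulls in GLES/gl.h and GLES/glext.h */

/* Early-style GLES 1.0 code that never included gl.h directly keeps compiling. */
static void clear_frame(void)
{
    glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
}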
diff --git a/sources/pvrsdk/Include/GLES/gl.h b/sources/pvrsdk/Include/GLES/gl.h new file mode 100755 index 00000000..df2cc4f9 --- /dev/null +++ b/sources/pvrsdk/Include/GLES/gl.h @@ -0,0 +1,586 @@ +#ifndef __gl_h_ +#define __gl_h_ 1 + +#ifdef __cplusplus +extern "C" { +#endif + +/* +** Copyright (c) 2013-2017 The Khronos Group Inc. +** +** Permission is hereby granted, free of charge, to any person obtaining a +** copy of this software and/or associated documentation files (the +** "Materials"), to deal in the Materials without restriction, including +** without limitation the rights to use, copy, modify, merge, publish, +** distribute, sublicense, and/or sell copies of the Materials, and to +** permit persons to whom the Materials are furnished to do so, subject to +** the following conditions: +** +** The above copyright notice and this permission notice shall be included +** in all copies or substantial portions of the Materials. +** +** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +** MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS. +*/ +/* +** This header is generated from the Khronos OpenGL / OpenGL ES XML +** API Registry. The current version of the Registry, generator scripts +** used to make the header, and the header can be found at +** https://github.com/KhronosGroup/OpenGL-Registry +*/ + +#include <GLES/glplatform.h> + +/* Generated on date 20170209 */ + +/* Generated C header for: + * API: gles1 + * Profile: common + * Versions considered: .* + * Versions emitted: .* + * Default extensions included: None + * Additional extensions included: ^(GL_OES_read_format|GL_OES_compressed_paletted_texture|GL_OES_point_size_array|GL_OES_point_sprite)$ + * Extensions removed: _nomatch_^ + */ + +#ifndef GL_VERSION_ES_CM_1_0 +#define GL_VERSION_ES_CM_1_0 1 +typedef void GLvoid; +typedef unsigned int GLenum; +#include <KHR/khrplatform.h> +typedef khronos_float_t GLfloat; +typedef khronos_int32_t GLfixed; +typedef unsigned int GLuint; +typedef khronos_ssize_t GLsizeiptr; +typedef khronos_intptr_t GLintptr; +typedef unsigned int GLbitfield; +typedef int GLint; +typedef khronos_uint8_t GLubyte; +typedef unsigned char GLboolean; +typedef int GLsizei; +typedef khronos_int32_t GLclampx; +#define GL_VERSION_ES_CL_1_0 1 +#define GL_VERSION_ES_CM_1_1 1 +#define GL_VERSION_ES_CL_1_1 1 +#define GL_DEPTH_BUFFER_BIT 0x00000100 +#define GL_STENCIL_BUFFER_BIT 0x00000400 +#define GL_COLOR_BUFFER_BIT 0x00004000 +#define GL_FALSE 0 +#define GL_TRUE 1 +#define GL_POINTS 0x0000 +#define GL_LINES 0x0001 +#define GL_LINE_LOOP 0x0002 +#define GL_LINE_STRIP 0x0003 +#define GL_TRIANGLES 0x0004 +#define GL_TRIANGLE_STRIP 0x0005 +#define GL_TRIANGLE_FAN 0x0006 +#define GL_NEVER 0x0200 +#define GL_LESS 0x0201 +#define GL_EQUAL 0x0202 +#define GL_LEQUAL 0x0203 +#define GL_GREATER 0x0204 +#define GL_NOTEQUAL 0x0205 +#define GL_GEQUAL 0x0206 +#define GL_ALWAYS 0x0207 +#define GL_ZERO 0 +#define GL_ONE 1 +#define GL_SRC_COLOR 0x0300 +#define GL_ONE_MINUS_SRC_COLOR 0x0301 +#define GL_SRC_ALPHA 0x0302 +#define GL_ONE_MINUS_SRC_ALPHA 0x0303 +#define GL_DST_ALPHA 0x0304 +#define GL_ONE_MINUS_DST_ALPHA 0x0305 +#define GL_DST_COLOR 0x0306 +#define GL_ONE_MINUS_DST_COLOR 0x0307 +#define GL_SRC_ALPHA_SATURATE 0x0308 +#define GL_CLIP_PLANE0 0x3000 +#define GL_CLIP_PLANE1 0x3001 +#define GL_CLIP_PLANE2 0x3002 +#define GL_CLIP_PLANE3 0x3003 +#define GL_CLIP_PLANE4 0x3004 +#define GL_CLIP_PLANE5 0x3005 +#define GL_FRONT 0x0404 +#define GL_BACK 0x0405 +#define GL_FRONT_AND_BACK 0x0408 +#define GL_FOG 0x0B60 +#define GL_LIGHTING 0x0B50 +#define GL_TEXTURE_2D 0x0DE1 +#define GL_CULL_FACE 0x0B44 +#define GL_ALPHA_TEST 0x0BC0 +#define GL_BLEND 0x0BE2 +#define GL_COLOR_LOGIC_OP 0x0BF2 +#define GL_DITHER 0x0BD0 +#define GL_STENCIL_TEST 0x0B90 +#define GL_DEPTH_TEST 0x0B71 +#define GL_POINT_SMOOTH 0x0B10 +#define GL_LINE_SMOOTH 0x0B20 +#define GL_SCISSOR_TEST 0x0C11 +#define GL_COLOR_MATERIAL 0x0B57 +#define GL_NORMALIZE 0x0BA1 +#define GL_RESCALE_NORMAL 0x803A +#define GL_VERTEX_ARRAY 0x8074 +#define GL_NORMAL_ARRAY 0x8075 +#define GL_COLOR_ARRAY 0x8076 +#define GL_TEXTURE_COORD_ARRAY 0x8078 +#define GL_MULTISAMPLE 0x809D +#define GL_SAMPLE_ALPHA_TO_COVERAGE 0x809E +#define GL_SAMPLE_ALPHA_TO_ONE 0x809F +#define GL_SAMPLE_COVERAGE 0x80A0 +#define GL_NO_ERROR 0 +#define GL_INVALID_ENUM 0x0500 +#define GL_INVALID_VALUE 0x0501 +#define GL_INVALID_OPERATION 0x0502 +#define GL_STACK_OVERFLOW 0x0503 +#define GL_STACK_UNDERFLOW 0x0504 +#define GL_OUT_OF_MEMORY 0x0505 +#define GL_EXP 0x0800 +#define GL_EXP2 0x0801 +#define GL_FOG_DENSITY 0x0B62 +#define GL_FOG_START 0x0B63 +#define GL_FOG_END 0x0B64 +#define GL_FOG_MODE 0x0B65 +#define GL_FOG_COLOR 0x0B66 +#define GL_CW 0x0900 +#define GL_CCW 0x0901 +#define 
GL_CURRENT_COLOR 0x0B00 +#define GL_CURRENT_NORMAL 0x0B02 +#define GL_CURRENT_TEXTURE_COORDS 0x0B03 +#define GL_POINT_SIZE 0x0B11 +#define GL_POINT_SIZE_MIN 0x8126 +#define GL_POINT_SIZE_MAX 0x8127 +#define GL_POINT_FADE_THRESHOLD_SIZE 0x8128 +#define GL_POINT_DISTANCE_ATTENUATION 0x8129 +#define GL_SMOOTH_POINT_SIZE_RANGE 0x0B12 +#define GL_LINE_WIDTH 0x0B21 +#define GL_SMOOTH_LINE_WIDTH_RANGE 0x0B22 +#define GL_ALIASED_POINT_SIZE_RANGE 0x846D +#define GL_ALIASED_LINE_WIDTH_RANGE 0x846E +#define GL_CULL_FACE_MODE 0x0B45 +#define GL_FRONT_FACE 0x0B46 +#define GL_SHADE_MODEL 0x0B54 +#define GL_DEPTH_RANGE 0x0B70 +#define GL_DEPTH_WRITEMASK 0x0B72 +#define GL_DEPTH_CLEAR_VALUE 0x0B73 +#define GL_DEPTH_FUNC 0x0B74 +#define GL_STENCIL_CLEAR_VALUE 0x0B91 +#define GL_STENCIL_FUNC 0x0B92 +#define GL_STENCIL_VALUE_MASK 0x0B93 +#define GL_STENCIL_FAIL 0x0B94 +#define GL_STENCIL_PASS_DEPTH_FAIL 0x0B95 +#define GL_STENCIL_PASS_DEPTH_PASS 0x0B96 +#define GL_STENCIL_REF 0x0B97 +#define GL_STENCIL_WRITEMASK 0x0B98 +#define GL_MATRIX_MODE 0x0BA0 +#define GL_VIEWPORT 0x0BA2 +#define GL_MODELVIEW_STACK_DEPTH 0x0BA3 +#define GL_PROJECTION_STACK_DEPTH 0x0BA4 +#define GL_TEXTURE_STACK_DEPTH 0x0BA5 +#define GL_MODELVIEW_MATRIX 0x0BA6 +#define GL_PROJECTION_MATRIX 0x0BA7 +#define GL_TEXTURE_MATRIX 0x0BA8 +#define GL_ALPHA_TEST_FUNC 0x0BC1 +#define GL_ALPHA_TEST_REF 0x0BC2 +#define GL_BLEND_DST 0x0BE0 +#define GL_BLEND_SRC 0x0BE1 +#define GL_LOGIC_OP_MODE 0x0BF0 +#define GL_SCISSOR_BOX 0x0C10 +#define GL_COLOR_CLEAR_VALUE 0x0C22 +#define GL_COLOR_WRITEMASK 0x0C23 +#define GL_MAX_LIGHTS 0x0D31 +#define GL_MAX_CLIP_PLANES 0x0D32 +#define GL_MAX_TEXTURE_SIZE 0x0D33 +#define GL_MAX_MODELVIEW_STACK_DEPTH 0x0D36 +#define GL_MAX_PROJECTION_STACK_DEPTH 0x0D38 +#define GL_MAX_TEXTURE_STACK_DEPTH 0x0D39 +#define GL_MAX_VIEWPORT_DIMS 0x0D3A +#define GL_MAX_TEXTURE_UNITS 0x84E2 +#define GL_SUBPIXEL_BITS 0x0D50 +#define GL_RED_BITS 0x0D52 +#define GL_GREEN_BITS 0x0D53 +#define GL_BLUE_BITS 0x0D54 +#define GL_ALPHA_BITS 0x0D55 +#define GL_DEPTH_BITS 0x0D56 +#define GL_STENCIL_BITS 0x0D57 +#define GL_POLYGON_OFFSET_UNITS 0x2A00 +#define GL_POLYGON_OFFSET_FILL 0x8037 +#define GL_POLYGON_OFFSET_FACTOR 0x8038 +#define GL_TEXTURE_BINDING_2D 0x8069 +#define GL_VERTEX_ARRAY_SIZE 0x807A +#define GL_VERTEX_ARRAY_TYPE 0x807B +#define GL_VERTEX_ARRAY_STRIDE 0x807C +#define GL_NORMAL_ARRAY_TYPE 0x807E +#define GL_NORMAL_ARRAY_STRIDE 0x807F +#define GL_COLOR_ARRAY_SIZE 0x8081 +#define GL_COLOR_ARRAY_TYPE 0x8082 +#define GL_COLOR_ARRAY_STRIDE 0x8083 +#define GL_TEXTURE_COORD_ARRAY_SIZE 0x8088 +#define GL_TEXTURE_COORD_ARRAY_TYPE 0x8089 +#define GL_TEXTURE_COORD_ARRAY_STRIDE 0x808A +#define GL_VERTEX_ARRAY_POINTER 0x808E +#define GL_NORMAL_ARRAY_POINTER 0x808F +#define GL_COLOR_ARRAY_POINTER 0x8090 +#define GL_TEXTURE_COORD_ARRAY_POINTER 0x8092 +#define GL_SAMPLE_BUFFERS 0x80A8 +#define GL_SAMPLES 0x80A9 +#define GL_SAMPLE_COVERAGE_VALUE 0x80AA +#define GL_SAMPLE_COVERAGE_INVERT 0x80AB +#define GL_NUM_COMPRESSED_TEXTURE_FORMATS 0x86A2 +#define GL_COMPRESSED_TEXTURE_FORMATS 0x86A3 +#define GL_DONT_CARE 0x1100 +#define GL_FASTEST 0x1101 +#define GL_NICEST 0x1102 +#define GL_PERSPECTIVE_CORRECTION_HINT 0x0C50 +#define GL_POINT_SMOOTH_HINT 0x0C51 +#define GL_LINE_SMOOTH_HINT 0x0C52 +#define GL_FOG_HINT 0x0C54 +#define GL_GENERATE_MIPMAP_HINT 0x8192 +#define GL_LIGHT_MODEL_AMBIENT 0x0B53 +#define GL_LIGHT_MODEL_TWO_SIDE 0x0B52 +#define GL_AMBIENT 0x1200 +#define GL_DIFFUSE 0x1201 +#define GL_SPECULAR 0x1202 +#define GL_POSITION 0x1203 +#define 
GL_SPOT_DIRECTION 0x1204 +#define GL_SPOT_EXPONENT 0x1205 +#define GL_SPOT_CUTOFF 0x1206 +#define GL_CONSTANT_ATTENUATION 0x1207 +#define GL_LINEAR_ATTENUATION 0x1208 +#define GL_QUADRATIC_ATTENUATION 0x1209 +#define GL_BYTE 0x1400 +#define GL_UNSIGNED_BYTE 0x1401 +#define GL_SHORT 0x1402 +#define GL_UNSIGNED_SHORT 0x1403 +#define GL_FLOAT 0x1406 +#define GL_FIXED 0x140C +#define GL_CLEAR 0x1500 +#define GL_AND 0x1501 +#define GL_AND_REVERSE 0x1502 +#define GL_COPY 0x1503 +#define GL_AND_INVERTED 0x1504 +#define GL_NOOP 0x1505 +#define GL_XOR 0x1506 +#define GL_OR 0x1507 +#define GL_NOR 0x1508 +#define GL_EQUIV 0x1509 +#define GL_INVERT 0x150A +#define GL_OR_REVERSE 0x150B +#define GL_COPY_INVERTED 0x150C +#define GL_OR_INVERTED 0x150D +#define GL_NAND 0x150E +#define GL_SET 0x150F +#define GL_EMISSION 0x1600 +#define GL_SHININESS 0x1601 +#define GL_AMBIENT_AND_DIFFUSE 0x1602 +#define GL_MODELVIEW 0x1700 +#define GL_PROJECTION 0x1701 +#define GL_TEXTURE 0x1702 +#define GL_ALPHA 0x1906 +#define GL_RGB 0x1907 +#define GL_RGBA 0x1908 +#define GL_LUMINANCE 0x1909 +#define GL_LUMINANCE_ALPHA 0x190A +#define GL_UNPACK_ALIGNMENT 0x0CF5 +#define GL_PACK_ALIGNMENT 0x0D05 +#define GL_UNSIGNED_SHORT_4_4_4_4 0x8033 +#define GL_UNSIGNED_SHORT_5_5_5_1 0x8034 +#define GL_UNSIGNED_SHORT_5_6_5 0x8363 +#define GL_FLAT 0x1D00 +#define GL_SMOOTH 0x1D01 +#define GL_KEEP 0x1E00 +#define GL_REPLACE 0x1E01 +#define GL_INCR 0x1E02 +#define GL_DECR 0x1E03 +#define GL_VENDOR 0x1F00 +#define GL_RENDERER 0x1F01 +#define GL_VERSION 0x1F02 +#define GL_EXTENSIONS 0x1F03 +#define GL_MODULATE 0x2100 +#define GL_DECAL 0x2101 +#define GL_ADD 0x0104 +#define GL_TEXTURE_ENV_MODE 0x2200 +#define GL_TEXTURE_ENV_COLOR 0x2201 +#define GL_TEXTURE_ENV 0x2300 +#define GL_NEAREST 0x2600 +#define GL_LINEAR 0x2601 +#define GL_NEAREST_MIPMAP_NEAREST 0x2700 +#define GL_LINEAR_MIPMAP_NEAREST 0x2701 +#define GL_NEAREST_MIPMAP_LINEAR 0x2702 +#define GL_LINEAR_MIPMAP_LINEAR 0x2703 +#define GL_TEXTURE_MAG_FILTER 0x2800 +#define GL_TEXTURE_MIN_FILTER 0x2801 +#define GL_TEXTURE_WRAP_S 0x2802 +#define GL_TEXTURE_WRAP_T 0x2803 +#define GL_GENERATE_MIPMAP 0x8191 +#define GL_TEXTURE0 0x84C0 +#define GL_TEXTURE1 0x84C1 +#define GL_TEXTURE2 0x84C2 +#define GL_TEXTURE3 0x84C3 +#define GL_TEXTURE4 0x84C4 +#define GL_TEXTURE5 0x84C5 +#define GL_TEXTURE6 0x84C6 +#define GL_TEXTURE7 0x84C7 +#define GL_TEXTURE8 0x84C8 +#define GL_TEXTURE9 0x84C9 +#define GL_TEXTURE10 0x84CA +#define GL_TEXTURE11 0x84CB +#define GL_TEXTURE12 0x84CC +#define GL_TEXTURE13 0x84CD +#define GL_TEXTURE14 0x84CE +#define GL_TEXTURE15 0x84CF +#define GL_TEXTURE16 0x84D0 +#define GL_TEXTURE17 0x84D1 +#define GL_TEXTURE18 0x84D2 +#define GL_TEXTURE19 0x84D3 +#define GL_TEXTURE20 0x84D4 +#define GL_TEXTURE21 0x84D5 +#define GL_TEXTURE22 0x84D6 +#define GL_TEXTURE23 0x84D7 +#define GL_TEXTURE24 0x84D8 +#define GL_TEXTURE25 0x84D9 +#define GL_TEXTURE26 0x84DA +#define GL_TEXTURE27 0x84DB +#define GL_TEXTURE28 0x84DC +#define GL_TEXTURE29 0x84DD +#define GL_TEXTURE30 0x84DE +#define GL_TEXTURE31 0x84DF +#define GL_ACTIVE_TEXTURE 0x84E0 +#define GL_CLIENT_ACTIVE_TEXTURE 0x84E1 +#define GL_REPEAT 0x2901 +#define GL_CLAMP_TO_EDGE 0x812F +#define GL_LIGHT0 0x4000 +#define GL_LIGHT1 0x4001 +#define GL_LIGHT2 0x4002 +#define GL_LIGHT3 0x4003 +#define GL_LIGHT4 0x4004 +#define GL_LIGHT5 0x4005 +#define GL_LIGHT6 0x4006 +#define GL_LIGHT7 0x4007 +#define GL_ARRAY_BUFFER 0x8892 +#define GL_ELEMENT_ARRAY_BUFFER 0x8893 +#define GL_ARRAY_BUFFER_BINDING 0x8894 +#define GL_ELEMENT_ARRAY_BUFFER_BINDING 
0x8895 +#define GL_VERTEX_ARRAY_BUFFER_BINDING 0x8896 +#define GL_NORMAL_ARRAY_BUFFER_BINDING 0x8897 +#define GL_COLOR_ARRAY_BUFFER_BINDING 0x8898 +#define GL_TEXTURE_COORD_ARRAY_BUFFER_BINDING 0x889A +#define GL_STATIC_DRAW 0x88E4 +#define GL_DYNAMIC_DRAW 0x88E8 +#define GL_BUFFER_SIZE 0x8764 +#define GL_BUFFER_USAGE 0x8765 +#define GL_SUBTRACT 0x84E7 +#define GL_COMBINE 0x8570 +#define GL_COMBINE_RGB 0x8571 +#define GL_COMBINE_ALPHA 0x8572 +#define GL_RGB_SCALE 0x8573 +#define GL_ADD_SIGNED 0x8574 +#define GL_INTERPOLATE 0x8575 +#define GL_CONSTANT 0x8576 +#define GL_PRIMARY_COLOR 0x8577 +#define GL_PREVIOUS 0x8578 +#define GL_OPERAND0_RGB 0x8590 +#define GL_OPERAND1_RGB 0x8591 +#define GL_OPERAND2_RGB 0x8592 +#define GL_OPERAND0_ALPHA 0x8598 +#define GL_OPERAND1_ALPHA 0x8599 +#define GL_OPERAND2_ALPHA 0x859A +#define GL_ALPHA_SCALE 0x0D1C +#define GL_SRC0_RGB 0x8580 +#define GL_SRC1_RGB 0x8581 +#define GL_SRC2_RGB 0x8582 +#define GL_SRC0_ALPHA 0x8588 +#define GL_SRC1_ALPHA 0x8589 +#define GL_SRC2_ALPHA 0x858A +#define GL_DOT3_RGB 0x86AE +#define GL_DOT3_RGBA 0x86AF +GL_API void GL_APIENTRY glAlphaFunc (GLenum func, GLfloat ref); +GL_API void GL_APIENTRY glClearColor (GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha); +GL_API void GL_APIENTRY glClearDepthf (GLfloat d); +GL_API void GL_APIENTRY glClipPlanef (GLenum p, const GLfloat *eqn); +GL_API void GL_APIENTRY glColor4f (GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha); +GL_API void GL_APIENTRY glDepthRangef (GLfloat n, GLfloat f); +GL_API void GL_APIENTRY glFogf (GLenum pname, GLfloat param); +GL_API void GL_APIENTRY glFogfv (GLenum pname, const GLfloat *params); +GL_API void GL_APIENTRY glFrustumf (GLfloat l, GLfloat r, GLfloat b, GLfloat t, GLfloat n, GLfloat f); +GL_API void GL_APIENTRY glGetClipPlanef (GLenum plane, GLfloat *equation); +GL_API void GL_APIENTRY glGetFloatv (GLenum pname, GLfloat *data); +GL_API void GL_APIENTRY glGetLightfv (GLenum light, GLenum pname, GLfloat *params); +GL_API void GL_APIENTRY glGetMaterialfv (GLenum face, GLenum pname, GLfloat *params); +GL_API void GL_APIENTRY glGetTexEnvfv (GLenum target, GLenum pname, GLfloat *params); +GL_API void GL_APIENTRY glGetTexParameterfv (GLenum target, GLenum pname, GLfloat *params); +GL_API void GL_APIENTRY glLightModelf (GLenum pname, GLfloat param); +GL_API void GL_APIENTRY glLightModelfv (GLenum pname, const GLfloat *params); +GL_API void GL_APIENTRY glLightf (GLenum light, GLenum pname, GLfloat param); +GL_API void GL_APIENTRY glLightfv (GLenum light, GLenum pname, const GLfloat *params); +GL_API void GL_APIENTRY glLineWidth (GLfloat width); +GL_API void GL_APIENTRY glLoadMatrixf (const GLfloat *m); +GL_API void GL_APIENTRY glMaterialf (GLenum face, GLenum pname, GLfloat param); +GL_API void GL_APIENTRY glMaterialfv (GLenum face, GLenum pname, const GLfloat *params); +GL_API void GL_APIENTRY glMultMatrixf (const GLfloat *m); +GL_API void GL_APIENTRY glMultiTexCoord4f (GLenum target, GLfloat s, GLfloat t, GLfloat r, GLfloat q); +GL_API void GL_APIENTRY glNormal3f (GLfloat nx, GLfloat ny, GLfloat nz); +GL_API void GL_APIENTRY glOrthof (GLfloat l, GLfloat r, GLfloat b, GLfloat t, GLfloat n, GLfloat f); +GL_API void GL_APIENTRY glPointParameterf (GLenum pname, GLfloat param); +GL_API void GL_APIENTRY glPointParameterfv (GLenum pname, const GLfloat *params); +GL_API void GL_APIENTRY glPointSize (GLfloat size); +GL_API void GL_APIENTRY glPolygonOffset (GLfloat factor, GLfloat units); +GL_API void GL_APIENTRY glRotatef (GLfloat angle, GLfloat x, GLfloat 
y, GLfloat z); +GL_API void GL_APIENTRY glScalef (GLfloat x, GLfloat y, GLfloat z); +GL_API void GL_APIENTRY glTexEnvf (GLenum target, GLenum pname, GLfloat param); +GL_API void GL_APIENTRY glTexEnvfv (GLenum target, GLenum pname, const GLfloat *params); +GL_API void GL_APIENTRY glTexParameterf (GLenum target, GLenum pname, GLfloat param); +GL_API void GL_APIENTRY glTexParameterfv (GLenum target, GLenum pname, const GLfloat *params); +GL_API void GL_APIENTRY glTranslatef (GLfloat x, GLfloat y, GLfloat z); +GL_API void GL_APIENTRY glActiveTexture (GLenum texture); +GL_API void GL_APIENTRY glAlphaFuncx (GLenum func, GLfixed ref); +GL_API void GL_APIENTRY glBindBuffer (GLenum target, GLuint buffer); +GL_API void GL_APIENTRY glBindTexture (GLenum target, GLuint texture); +GL_API void GL_APIENTRY glBlendFunc (GLenum sfactor, GLenum dfactor); +GL_API void GL_APIENTRY glBufferData (GLenum target, GLsizeiptr size, const void *data, GLenum usage); +GL_API void GL_APIENTRY glBufferSubData (GLenum target, GLintptr offset, GLsizeiptr size, const void *data); +GL_API void GL_APIENTRY glClear (GLbitfield mask); +GL_API void GL_APIENTRY glClearColorx (GLfixed red, GLfixed green, GLfixed blue, GLfixed alpha); +GL_API void GL_APIENTRY glClearDepthx (GLfixed depth); +GL_API void GL_APIENTRY glClearStencil (GLint s); +GL_API void GL_APIENTRY glClientActiveTexture (GLenum texture); +GL_API void GL_APIENTRY glClipPlanex (GLenum plane, const GLfixed *equation); +GL_API void GL_APIENTRY glColor4ub (GLubyte red, GLubyte green, GLubyte blue, GLubyte alpha); +GL_API void GL_APIENTRY glColor4x (GLfixed red, GLfixed green, GLfixed blue, GLfixed alpha); +GL_API void GL_APIENTRY glColorMask (GLboolean red, GLboolean green, GLboolean blue, GLboolean alpha); +GL_API void GL_APIENTRY glColorPointer (GLint size, GLenum type, GLsizei stride, const void *pointer); +GL_API void GL_APIENTRY glCompressedTexImage2D (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLsizei imageSize, const void *data); +GL_API void GL_APIENTRY glCompressedTexSubImage2D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, const void *data); +GL_API void GL_APIENTRY glCopyTexImage2D (GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLsizei height, GLint border); +GL_API void GL_APIENTRY glCopyTexSubImage2D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height); +GL_API void GL_APIENTRY glCullFace (GLenum mode); +GL_API void GL_APIENTRY glDeleteBuffers (GLsizei n, const GLuint *buffers); +GL_API void GL_APIENTRY glDeleteTextures (GLsizei n, const GLuint *textures); +GL_API void GL_APIENTRY glDepthFunc (GLenum func); +GL_API void GL_APIENTRY glDepthMask (GLboolean flag); +GL_API void GL_APIENTRY glDepthRangex (GLfixed n, GLfixed f); +GL_API void GL_APIENTRY glDisable (GLenum cap); +GL_API void GL_APIENTRY glDisableClientState (GLenum array); +GL_API void GL_APIENTRY glDrawArrays (GLenum mode, GLint first, GLsizei count); +GL_API void GL_APIENTRY glDrawElements (GLenum mode, GLsizei count, GLenum type, const void *indices); +GL_API void GL_APIENTRY glEnable (GLenum cap); +GL_API void GL_APIENTRY glEnableClientState (GLenum array); +GL_API void GL_APIENTRY glFinish (void); +GL_API void GL_APIENTRY glFlush (void); +GL_API void GL_APIENTRY glFogx (GLenum pname, GLfixed param); +GL_API void GL_APIENTRY glFogxv (GLenum pname, const GLfixed *param); 
+GL_API void GL_APIENTRY glFrontFace (GLenum mode); +GL_API void GL_APIENTRY glFrustumx (GLfixed l, GLfixed r, GLfixed b, GLfixed t, GLfixed n, GLfixed f); +GL_API void GL_APIENTRY glGetBooleanv (GLenum pname, GLboolean *data); +GL_API void GL_APIENTRY glGetBufferParameteriv (GLenum target, GLenum pname, GLint *params); +GL_API void GL_APIENTRY glGetClipPlanex (GLenum plane, GLfixed *equation); +GL_API void GL_APIENTRY glGenBuffers (GLsizei n, GLuint *buffers); +GL_API void GL_APIENTRY glGenTextures (GLsizei n, GLuint *textures); +GL_API GLenum GL_APIENTRY glGetError (void); +GL_API void GL_APIENTRY glGetFixedv (GLenum pname, GLfixed *params); +GL_API void GL_APIENTRY glGetIntegerv (GLenum pname, GLint *data); +GL_API void GL_APIENTRY glGetLightxv (GLenum light, GLenum pname, GLfixed *params); +GL_API void GL_APIENTRY glGetMaterialxv (GLenum face, GLenum pname, GLfixed *params); +GL_API void GL_APIENTRY glGetPointerv (GLenum pname, void **params); +GL_API const GLubyte *GL_APIENTRY glGetString (GLenum name); +GL_API void GL_APIENTRY glGetTexEnviv (GLenum target, GLenum pname, GLint *params); +GL_API void GL_APIENTRY glGetTexEnvxv (GLenum target, GLenum pname, GLfixed *params); +GL_API void GL_APIENTRY glGetTexParameteriv (GLenum target, GLenum pname, GLint *params); +GL_API void GL_APIENTRY glGetTexParameterxv (GLenum target, GLenum pname, GLfixed *params); +GL_API void GL_APIENTRY glHint (GLenum target, GLenum mode); +GL_API GLboolean GL_APIENTRY glIsBuffer (GLuint buffer); +GL_API GLboolean GL_APIENTRY glIsEnabled (GLenum cap); +GL_API GLboolean GL_APIENTRY glIsTexture (GLuint texture); +GL_API void GL_APIENTRY glLightModelx (GLenum pname, GLfixed param); +GL_API void GL_APIENTRY glLightModelxv (GLenum pname, const GLfixed *param); +GL_API void GL_APIENTRY glLightx (GLenum light, GLenum pname, GLfixed param); +GL_API void GL_APIENTRY glLightxv (GLenum light, GLenum pname, const GLfixed *params); +GL_API void GL_APIENTRY glLineWidthx (GLfixed width); +GL_API void GL_APIENTRY glLoadIdentity (void); +GL_API void GL_APIENTRY glLoadMatrixx (const GLfixed *m); +GL_API void GL_APIENTRY glLogicOp (GLenum opcode); +GL_API void GL_APIENTRY glMaterialx (GLenum face, GLenum pname, GLfixed param); +GL_API void GL_APIENTRY glMaterialxv (GLenum face, GLenum pname, const GLfixed *param); +GL_API void GL_APIENTRY glMatrixMode (GLenum mode); +GL_API void GL_APIENTRY glMultMatrixx (const GLfixed *m); +GL_API void GL_APIENTRY glMultiTexCoord4x (GLenum texture, GLfixed s, GLfixed t, GLfixed r, GLfixed q); +GL_API void GL_APIENTRY glNormal3x (GLfixed nx, GLfixed ny, GLfixed nz); +GL_API void GL_APIENTRY glNormalPointer (GLenum type, GLsizei stride, const void *pointer); +GL_API void GL_APIENTRY glOrthox (GLfixed l, GLfixed r, GLfixed b, GLfixed t, GLfixed n, GLfixed f); +GL_API void GL_APIENTRY glPixelStorei (GLenum pname, GLint param); +GL_API void GL_APIENTRY glPointParameterx (GLenum pname, GLfixed param); +GL_API void GL_APIENTRY glPointParameterxv (GLenum pname, const GLfixed *params); +GL_API void GL_APIENTRY glPointSizex (GLfixed size); +GL_API void GL_APIENTRY glPolygonOffsetx (GLfixed factor, GLfixed units); +GL_API void GL_APIENTRY glPopMatrix (void); +GL_API void GL_APIENTRY glPushMatrix (void); +GL_API void GL_APIENTRY glReadPixels (GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, void *pixels); +GL_API void GL_APIENTRY glRotatex (GLfixed angle, GLfixed x, GLfixed y, GLfixed z); +GL_API void GL_APIENTRY glSampleCoverage (GLfloat value, GLboolean invert); +GL_API void 
GL_APIENTRY glSampleCoveragex (GLclampx value, GLboolean invert); +GL_API void GL_APIENTRY glScalex (GLfixed x, GLfixed y, GLfixed z); +GL_API void GL_APIENTRY glScissor (GLint x, GLint y, GLsizei width, GLsizei height); +GL_API void GL_APIENTRY glShadeModel (GLenum mode); +GL_API void GL_APIENTRY glStencilFunc (GLenum func, GLint ref, GLuint mask); +GL_API void GL_APIENTRY glStencilMask (GLuint mask); +GL_API void GL_APIENTRY glStencilOp (GLenum fail, GLenum zfail, GLenum zpass); +GL_API void GL_APIENTRY glTexCoordPointer (GLint size, GLenum type, GLsizei stride, const void *pointer); +GL_API void GL_APIENTRY glTexEnvi (GLenum target, GLenum pname, GLint param); +GL_API void GL_APIENTRY glTexEnvx (GLenum target, GLenum pname, GLfixed param); +GL_API void GL_APIENTRY glTexEnviv (GLenum target, GLenum pname, const GLint *params); +GL_API void GL_APIENTRY glTexEnvxv (GLenum target, GLenum pname, const GLfixed *params); +GL_API void GL_APIENTRY glTexImage2D (GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, const void *pixels); +GL_API void GL_APIENTRY glTexParameteri (GLenum target, GLenum pname, GLint param); +GL_API void GL_APIENTRY glTexParameterx (GLenum target, GLenum pname, GLfixed param); +GL_API void GL_APIENTRY glTexParameteriv (GLenum target, GLenum pname, const GLint *params); +GL_API void GL_APIENTRY glTexParameterxv (GLenum target, GLenum pname, const GLfixed *params); +GL_API void GL_APIENTRY glTexSubImage2D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, const void *pixels); +GL_API void GL_APIENTRY glTranslatex (GLfixed x, GLfixed y, GLfixed z); +GL_API void GL_APIENTRY glVertexPointer (GLint size, GLenum type, GLsizei stride, const void *pointer); +GL_API void GL_APIENTRY glViewport (GLint x, GLint y, GLsizei width, GLsizei height); +#endif /* GL_VERSION_ES_CM_1_0 */ + +#ifndef GL_OES_compressed_paletted_texture +#define GL_OES_compressed_paletted_texture 1 +#define GL_PALETTE4_RGB8_OES 0x8B90 +#define GL_PALETTE4_RGBA8_OES 0x8B91 +#define GL_PALETTE4_R5_G6_B5_OES 0x8B92 +#define GL_PALETTE4_RGBA4_OES 0x8B93 +#define GL_PALETTE4_RGB5_A1_OES 0x8B94 +#define GL_PALETTE8_RGB8_OES 0x8B95 +#define GL_PALETTE8_RGBA8_OES 0x8B96 +#define GL_PALETTE8_R5_G6_B5_OES 0x8B97 +#define GL_PALETTE8_RGBA4_OES 0x8B98 +#define GL_PALETTE8_RGB5_A1_OES 0x8B99 +#endif /* GL_OES_compressed_paletted_texture */ + +#ifndef GL_OES_point_size_array +#define GL_OES_point_size_array 1 +#define GL_POINT_SIZE_ARRAY_OES 0x8B9C +#define GL_POINT_SIZE_ARRAY_TYPE_OES 0x898A +#define GL_POINT_SIZE_ARRAY_STRIDE_OES 0x898B +#define GL_POINT_SIZE_ARRAY_POINTER_OES 0x898C +#define GL_POINT_SIZE_ARRAY_BUFFER_BINDING_OES 0x8B9F +GL_API void GL_APIENTRY glPointSizePointerOES (GLenum type, GLsizei stride, const void *pointer); +#endif /* GL_OES_point_size_array */ + +#ifndef GL_OES_point_sprite +#define GL_OES_point_sprite 1 +#define GL_POINT_SPRITE_OES 0x8861 +#define GL_COORD_REPLACE_OES 0x8862 +#endif /* GL_OES_point_sprite */ + +#ifndef GL_OES_read_format +#define GL_OES_read_format 1 +#define GL_IMPLEMENTATION_COLOR_READ_TYPE_OES 0x8B9A +#define GL_IMPLEMENTATION_COLOR_READ_FORMAT_OES 0x8B9B +#endif /* GL_OES_read_format */ + +#ifdef __cplusplus +} +#endif + +#endif
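The glext.h added next follows the same convention as eglext.h earlier in this patch: each extension entry point gets a PFNGL...PROC typedef, and actual prototypes are emitted only when GL_GLEXT_PROTOTYPES is defined, so portable code resolves the symbol at run time. A minimal sketch of that lookup, assuming a current GLES 1.x context; the helper name is invented, while eglGetProcAddress, PFNGLMAPBUFFEROESPROC, GL_ARRAY_BUFFER, and GL_WRITE_ONLY_OES come from the headers:

#include <EGL/egl.h>
#include <GLES/gl.h>
#include <GLES/glext.h>

/* Resolve GL_OES_mapbuffer's glMapBufferOES at run time; returns NULL when
 * the driver does not export the extension. */
static void *map_bound_array_buffer(void)
{
    PFNGLMAPBUFFEROESPROC pfnMapBufferOES =
        (PFNGLMAPBUFFEROESPROC) eglGetProcAddress("glMapBufferOES");
    if (pfnMapBufferOES == NULL)
        return NULL;  /* extension not available */
    return pfnMapBufferOES(GL_ARRAY_BUFFER, GL_WRITE_ONLY_OES);
}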
diff --git a/sources/pvrsdk/Include/GLES/glext.h b/sources/pvrsdk/Include/GLES/glext.h new file mode 100755 index 00000000..1026f29a --- /dev/null +++ b/sources/pvrsdk/Include/GLES/glext.h @@ -0,0 +1,951 @@ +#ifndef __glext_h_ +#define __glext_h_ 1 + +#ifdef __cplusplus +extern "C" { +#endif + +/* +** Copyright (c) 2013-2017 The Khronos Group Inc. +** +** Permission is hereby granted, free of charge, to any person obtaining a +** copy of this software and/or associated documentation files (the +** "Materials"), to deal in the Materials without restriction, including +** without limitation the rights to use, copy, modify, merge, publish, +** distribute, sublicense, and/or sell copies of the Materials, and to +** permit persons to whom the Materials are furnished to do so, subject to +** the following conditions: +** +** The above copyright notice and this permission notice shall be included +** in all copies or substantial portions of the Materials. +** +** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +** MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS. +*/ +/* +** This header is generated from the Khronos OpenGL / OpenGL ES XML +** API Registry. The current version of the Registry, generator scripts +** used to make the header, and the header can be found at +** https://github.com/KhronosGroup/OpenGL-Registry +*/ + +#ifndef GL_APIENTRYP +#define GL_APIENTRYP GL_APIENTRY* +#endif + +/* Generated on date 20170209 */ + +/* Generated C header for: + * API: gles1 + * Profile: common + * Versions considered: .* + * Versions emitted: _nomatch_^ + * Default extensions included: gles1 + * Additional extensions included: _nomatch_^ + * Extensions removed: ^(GL_OES_read_format|GL_OES_compressed_paletted_texture|GL_OES_point_size_array|GL_OES_point_sprite)$ + */ + +#ifndef GL_OES_EGL_image +#define GL_OES_EGL_image 1 +typedef void *GLeglImageOES; +typedef void (GL_APIENTRYP PFNGLEGLIMAGETARGETTEXTURE2DOESPROC) (GLenum target, GLeglImageOES image); +typedef void (GL_APIENTRYP PFNGLEGLIMAGETARGETRENDERBUFFERSTORAGEOESPROC) (GLenum target, GLeglImageOES image); +#ifdef GL_GLEXT_PROTOTYPES +GL_API void GL_APIENTRY glEGLImageTargetTexture2DOES (GLenum target, GLeglImageOES image); +GL_API void GL_APIENTRY glEGLImageTargetRenderbufferStorageOES (GLenum target, GLeglImageOES image); +#endif +#endif /* GL_OES_EGL_image */ + +#ifndef GL_OES_EGL_image_external +#define GL_OES_EGL_image_external 1 +#define GL_TEXTURE_EXTERNAL_OES 0x8D65 +#define GL_TEXTURE_BINDING_EXTERNAL_OES 0x8D67 +#define GL_REQUIRED_TEXTURE_IMAGE_UNITS_OES 0x8D68 +#endif /* GL_OES_EGL_image_external */ + +#ifndef GL_OES_blend_equation_separate +#define GL_OES_blend_equation_separate 1 +#define GL_BLEND_EQUATION_RGB_OES 0x8009 +#define GL_BLEND_EQUATION_ALPHA_OES 0x883D +typedef void (GL_APIENTRYP PFNGLBLENDEQUATIONSEPARATEOESPROC) (GLenum modeRGB, GLenum modeAlpha); +#ifdef GL_GLEXT_PROTOTYPES +GL_API void GL_APIENTRY glBlendEquationSeparateOES (GLenum modeRGB, GLenum modeAlpha); +#endif +#endif /* GL_OES_blend_equation_separate */ + +#ifndef GL_OES_blend_func_separate +#define GL_OES_blend_func_separate 1 +#define GL_BLEND_DST_RGB_OES 0x80C8 +#define GL_BLEND_SRC_RGB_OES 0x80C9 +#define GL_BLEND_DST_ALPHA_OES 0x80CA +#define GL_BLEND_SRC_ALPHA_OES 0x80CB +typedef void (GL_APIENTRYP PFNGLBLENDFUNCSEPARATEOESPROC) (GLenum srcRGB, GLenum dstRGB, GLenum srcAlpha, 
GLenum dstAlpha); +#ifdef GL_GLEXT_PROTOTYPES +GL_API void GL_APIENTRY glBlendFuncSeparateOES (GLenum srcRGB, GLenum dstRGB, GLenum srcAlpha, GLenum dstAlpha); +#endif +#endif /* GL_OES_blend_func_separate */ + +#ifndef GL_OES_blend_subtract +#define GL_OES_blend_subtract 1 +#define GL_BLEND_EQUATION_OES 0x8009 +#define GL_FUNC_ADD_OES 0x8006 +#define GL_FUNC_SUBTRACT_OES 0x800A +#define GL_FUNC_REVERSE_SUBTRACT_OES 0x800B +typedef void (GL_APIENTRYP PFNGLBLENDEQUATIONOESPROC) (GLenum mode); +#ifdef GL_GLEXT_PROTOTYPES +GL_API void GL_APIENTRY glBlendEquationOES (GLenum mode); +#endif +#endif /* GL_OES_blend_subtract */ + +#ifndef GL_OES_byte_coordinates +#define GL_OES_byte_coordinates 1 +typedef khronos_int8_t GLbyte; +#endif /* GL_OES_byte_coordinates */ + +#ifndef GL_OES_compressed_ETC1_RGB8_sub_texture +#define GL_OES_compressed_ETC1_RGB8_sub_texture 1 +#endif /* GL_OES_compressed_ETC1_RGB8_sub_texture */ + +#ifndef GL_OES_compressed_ETC1_RGB8_texture +#define GL_OES_compressed_ETC1_RGB8_texture 1 +#define GL_ETC1_RGB8_OES 0x8D64 +#endif /* GL_OES_compressed_ETC1_RGB8_texture */ + +#ifndef GL_OES_depth24 +#define GL_OES_depth24 1 +#define GL_DEPTH_COMPONENT24_OES 0x81A6 +#endif /* GL_OES_depth24 */ + +#ifndef GL_OES_depth32 +#define GL_OES_depth32 1 +#define GL_DEPTH_COMPONENT32_OES 0x81A7 +#endif /* GL_OES_depth32 */ + +#ifndef GL_OES_draw_texture +#define GL_OES_draw_texture 1 +typedef short GLshort; +#define GL_TEXTURE_CROP_RECT_OES 0x8B9D +typedef void (GL_APIENTRYP PFNGLDRAWTEXSOESPROC) (GLshort x, GLshort y, GLshort z, GLshort width, GLshort height); +typedef void (GL_APIENTRYP PFNGLDRAWTEXIOESPROC) (GLint x, GLint y, GLint z, GLint width, GLint height); +typedef void (GL_APIENTRYP PFNGLDRAWTEXXOESPROC) (GLfixed x, GLfixed y, GLfixed z, GLfixed width, GLfixed height); +typedef void (GL_APIENTRYP PFNGLDRAWTEXSVOESPROC) (const GLshort *coords); +typedef void (GL_APIENTRYP PFNGLDRAWTEXIVOESPROC) (const GLint *coords); +typedef void (GL_APIENTRYP PFNGLDRAWTEXXVOESPROC) (const GLfixed *coords); +typedef void (GL_APIENTRYP PFNGLDRAWTEXFOESPROC) (GLfloat x, GLfloat y, GLfloat z, GLfloat width, GLfloat height); +typedef void (GL_APIENTRYP PFNGLDRAWTEXFVOESPROC) (const GLfloat *coords); +#ifdef GL_GLEXT_PROTOTYPES +GL_API void GL_APIENTRY glDrawTexsOES (GLshort x, GLshort y, GLshort z, GLshort width, GLshort height); +GL_API void GL_APIENTRY glDrawTexiOES (GLint x, GLint y, GLint z, GLint width, GLint height); +GL_API void GL_APIENTRY glDrawTexxOES (GLfixed x, GLfixed y, GLfixed z, GLfixed width, GLfixed height); +GL_API void GL_APIENTRY glDrawTexsvOES (const GLshort *coords); +GL_API void GL_APIENTRY glDrawTexivOES (const GLint *coords); +GL_API void GL_APIENTRY glDrawTexxvOES (const GLfixed *coords); +GL_API void GL_APIENTRY glDrawTexfOES (GLfloat x, GLfloat y, GLfloat z, GLfloat width, GLfloat height); +GL_API void GL_APIENTRY glDrawTexfvOES (const GLfloat *coords); +#endif +#endif /* GL_OES_draw_texture */ + +#ifndef GL_OES_element_index_uint +#define GL_OES_element_index_uint 1 +#define GL_UNSIGNED_INT 0x1405 +#endif /* GL_OES_element_index_uint */ + +#ifndef GL_OES_extended_matrix_palette +#define GL_OES_extended_matrix_palette 1 +#endif /* GL_OES_extended_matrix_palette */ + +#ifndef GL_OES_fbo_render_mipmap +#define GL_OES_fbo_render_mipmap 1 +#endif /* GL_OES_fbo_render_mipmap */ + +#ifndef GL_OES_fixed_point +#define GL_OES_fixed_point 1 +#define GL_FIXED_OES 0x140C +typedef void (GL_APIENTRYP PFNGLALPHAFUNCXOESPROC) (GLenum func, GLfixed ref); +typedef void (GL_APIENTRYP 
PFNGLCLEARCOLORXOESPROC) (GLfixed red, GLfixed green, GLfixed blue, GLfixed alpha); +typedef void (GL_APIENTRYP PFNGLCLEARDEPTHXOESPROC) (GLfixed depth); +typedef void (GL_APIENTRYP PFNGLCLIPPLANEXOESPROC) (GLenum plane, const GLfixed *equation); +typedef void (GL_APIENTRYP PFNGLCOLOR4XOESPROC) (GLfixed red, GLfixed green, GLfixed blue, GLfixed alpha); +typedef void (GL_APIENTRYP PFNGLDEPTHRANGEXOESPROC) (GLfixed n, GLfixed f); +typedef void (GL_APIENTRYP PFNGLFOGXOESPROC) (GLenum pname, GLfixed param); +typedef void (GL_APIENTRYP PFNGLFOGXVOESPROC) (GLenum pname, const GLfixed *param); +typedef void (GL_APIENTRYP PFNGLFRUSTUMXOESPROC) (GLfixed l, GLfixed r, GLfixed b, GLfixed t, GLfixed n, GLfixed f); +typedef void (GL_APIENTRYP PFNGLGETCLIPPLANEXOESPROC) (GLenum plane, GLfixed *equation); +typedef void (GL_APIENTRYP PFNGLGETFIXEDVOESPROC) (GLenum pname, GLfixed *params); +typedef void (GL_APIENTRYP PFNGLGETTEXENVXVOESPROC) (GLenum target, GLenum pname, GLfixed *params); +typedef void (GL_APIENTRYP PFNGLGETTEXPARAMETERXVOESPROC) (GLenum target, GLenum pname, GLfixed *params); +typedef void (GL_APIENTRYP PFNGLLIGHTMODELXOESPROC) (GLenum pname, GLfixed param); +typedef void (GL_APIENTRYP PFNGLLIGHTMODELXVOESPROC) (GLenum pname, const GLfixed *param); +typedef void (GL_APIENTRYP PFNGLLIGHTXOESPROC) (GLenum light, GLenum pname, GLfixed param); +typedef void (GL_APIENTRYP PFNGLLIGHTXVOESPROC) (GLenum light, GLenum pname, const GLfixed *params); +typedef void (GL_APIENTRYP PFNGLLINEWIDTHXOESPROC) (GLfixed width); +typedef void (GL_APIENTRYP PFNGLLOADMATRIXXOESPROC) (const GLfixed *m); +typedef void (GL_APIENTRYP PFNGLMATERIALXOESPROC) (GLenum face, GLenum pname, GLfixed param); +typedef void (GL_APIENTRYP PFNGLMATERIALXVOESPROC) (GLenum face, GLenum pname, const GLfixed *param); +typedef void (GL_APIENTRYP PFNGLMULTMATRIXXOESPROC) (const GLfixed *m); +typedef void (GL_APIENTRYP PFNGLMULTITEXCOORD4XOESPROC) (GLenum texture, GLfixed s, GLfixed t, GLfixed r, GLfixed q); +typedef void (GL_APIENTRYP PFNGLNORMAL3XOESPROC) (GLfixed nx, GLfixed ny, GLfixed nz); +typedef void (GL_APIENTRYP PFNGLORTHOXOESPROC) (GLfixed l, GLfixed r, GLfixed b, GLfixed t, GLfixed n, GLfixed f); +typedef void (GL_APIENTRYP PFNGLPOINTPARAMETERXVOESPROC) (GLenum pname, const GLfixed *params); +typedef void (GL_APIENTRYP PFNGLPOINTSIZEXOESPROC) (GLfixed size); +typedef void (GL_APIENTRYP PFNGLPOLYGONOFFSETXOESPROC) (GLfixed factor, GLfixed units); +typedef void (GL_APIENTRYP PFNGLROTATEXOESPROC) (GLfixed angle, GLfixed x, GLfixed y, GLfixed z); +typedef void (GL_APIENTRYP PFNGLSCALEXOESPROC) (GLfixed x, GLfixed y, GLfixed z); +typedef void (GL_APIENTRYP PFNGLTEXENVXOESPROC) (GLenum target, GLenum pname, GLfixed param); +typedef void (GL_APIENTRYP PFNGLTEXENVXVOESPROC) (GLenum target, GLenum pname, const GLfixed *params); +typedef void (GL_APIENTRYP PFNGLTEXPARAMETERXOESPROC) (GLenum target, GLenum pname, GLfixed param); +typedef void (GL_APIENTRYP PFNGLTEXPARAMETERXVOESPROC) (GLenum target, GLenum pname, const GLfixed *params); +typedef void (GL_APIENTRYP PFNGLTRANSLATEXOESPROC) (GLfixed x, GLfixed y, GLfixed z); +typedef void (GL_APIENTRYP PFNGLGETLIGHTXVOESPROC) (GLenum light, GLenum pname, GLfixed *params); +typedef void (GL_APIENTRYP PFNGLGETMATERIALXVOESPROC) (GLenum face, GLenum pname, GLfixed *params); +typedef void (GL_APIENTRYP PFNGLPOINTPARAMETERXOESPROC) (GLenum pname, GLfixed param); +typedef void (GL_APIENTRYP PFNGLSAMPLECOVERAGEXOESPROC) (GLclampx value, GLboolean invert); +typedef void (GL_APIENTRYP 
PFNGLGETTEXGENXVOESPROC) (GLenum coord, GLenum pname, GLfixed *params); +typedef void (GL_APIENTRYP PFNGLTEXGENXOESPROC) (GLenum coord, GLenum pname, GLfixed param); +typedef void (GL_APIENTRYP PFNGLTEXGENXVOESPROC) (GLenum coord, GLenum pname, const GLfixed *params); +#ifdef GL_GLEXT_PROTOTYPES +GL_API void GL_APIENTRY glAlphaFuncxOES (GLenum func, GLfixed ref); +GL_API void GL_APIENTRY glClearColorxOES (GLfixed red, GLfixed green, GLfixed blue, GLfixed alpha); +GL_API void GL_APIENTRY glClearDepthxOES (GLfixed depth); +GL_API void GL_APIENTRY glClipPlanexOES (GLenum plane, const GLfixed *equation); +GL_API void GL_APIENTRY glColor4xOES (GLfixed red, GLfixed green, GLfixed blue, GLfixed alpha); +GL_API void GL_APIENTRY glDepthRangexOES (GLfixed n, GLfixed f); +GL_API void GL_APIENTRY glFogxOES (GLenum pname, GLfixed param); +GL_API void GL_APIENTRY glFogxvOES (GLenum pname, const GLfixed *param); +GL_API void GL_APIENTRY glFrustumxOES (GLfixed l, GLfixed r, GLfixed b, GLfixed t, GLfixed n, GLfixed f); +GL_API void GL_APIENTRY glGetClipPlanexOES (GLenum plane, GLfixed *equation); +GL_API void GL_APIENTRY glGetFixedvOES (GLenum pname, GLfixed *params); +GL_API void GL_APIENTRY glGetTexEnvxvOES (GLenum target, GLenum pname, GLfixed *params); +GL_API void GL_APIENTRY glGetTexParameterxvOES (GLenum target, GLenum pname, GLfixed *params); +GL_API void GL_APIENTRY glLightModelxOES (GLenum pname, GLfixed param); +GL_API void GL_APIENTRY glLightModelxvOES (GLenum pname, const GLfixed *param); +GL_API void GL_APIENTRY glLightxOES (GLenum light, GLenum pname, GLfixed param); +GL_API void GL_APIENTRY glLightxvOES (GLenum light, GLenum pname, const GLfixed *params); +GL_API void GL_APIENTRY glLineWidthxOES (GLfixed width); +GL_API void GL_APIENTRY glLoadMatrixxOES (const GLfixed *m); +GL_API void GL_APIENTRY glMaterialxOES (GLenum face, GLenum pname, GLfixed param); +GL_API void GL_APIENTRY glMaterialxvOES (GLenum face, GLenum pname, const GLfixed *param); +GL_API void GL_APIENTRY glMultMatrixxOES (const GLfixed *m); +GL_API void GL_APIENTRY glMultiTexCoord4xOES (GLenum texture, GLfixed s, GLfixed t, GLfixed r, GLfixed q); +GL_API void GL_APIENTRY glNormal3xOES (GLfixed nx, GLfixed ny, GLfixed nz); +GL_API void GL_APIENTRY glOrthoxOES (GLfixed l, GLfixed r, GLfixed b, GLfixed t, GLfixed n, GLfixed f); +GL_API void GL_APIENTRY glPointParameterxvOES (GLenum pname, const GLfixed *params); +GL_API void GL_APIENTRY glPointSizexOES (GLfixed size); +GL_API void GL_APIENTRY glPolygonOffsetxOES (GLfixed factor, GLfixed units); +GL_API void GL_APIENTRY glRotatexOES (GLfixed angle, GLfixed x, GLfixed y, GLfixed z); +GL_API void GL_APIENTRY glScalexOES (GLfixed x, GLfixed y, GLfixed z); +GL_API void GL_APIENTRY glTexEnvxOES (GLenum target, GLenum pname, GLfixed param); +GL_API void GL_APIENTRY glTexEnvxvOES (GLenum target, GLenum pname, const GLfixed *params); +GL_API void GL_APIENTRY glTexParameterxOES (GLenum target, GLenum pname, GLfixed param); +GL_API void GL_APIENTRY glTexParameterxvOES (GLenum target, GLenum pname, const GLfixed *params); +GL_API void GL_APIENTRY glTranslatexOES (GLfixed x, GLfixed y, GLfixed z); +GL_API void GL_APIENTRY glGetLightxvOES (GLenum light, GLenum pname, GLfixed *params); +GL_API void GL_APIENTRY glGetMaterialxvOES (GLenum face, GLenum pname, GLfixed *params); +GL_API void GL_APIENTRY glPointParameterxOES (GLenum pname, GLfixed param); +GL_API void GL_APIENTRY glSampleCoveragexOES (GLclampx value, GLboolean invert); +GL_API void GL_APIENTRY glGetTexGenxvOES (GLenum coord, GLenum 
pname, GLfixed *params); +GL_API void GL_APIENTRY glTexGenxOES (GLenum coord, GLenum pname, GLfixed param); +GL_API void GL_APIENTRY glTexGenxvOES (GLenum coord, GLenum pname, const GLfixed *params); +#endif +#endif /* GL_OES_fixed_point */ + +#ifndef GL_OES_framebuffer_object +#define GL_OES_framebuffer_object 1 +#define GL_NONE_OES 0 +#define GL_FRAMEBUFFER_OES 0x8D40 +#define GL_RENDERBUFFER_OES 0x8D41 +#define GL_RGBA4_OES 0x8056 +#define GL_RGB5_A1_OES 0x8057 +#define GL_RGB565_OES 0x8D62 +#define GL_DEPTH_COMPONENT16_OES 0x81A5 +#define GL_RENDERBUFFER_WIDTH_OES 0x8D42 +#define GL_RENDERBUFFER_HEIGHT_OES 0x8D43 +#define GL_RENDERBUFFER_INTERNAL_FORMAT_OES 0x8D44 +#define GL_RENDERBUFFER_RED_SIZE_OES 0x8D50 +#define GL_RENDERBUFFER_GREEN_SIZE_OES 0x8D51 +#define GL_RENDERBUFFER_BLUE_SIZE_OES 0x8D52 +#define GL_RENDERBUFFER_ALPHA_SIZE_OES 0x8D53 +#define GL_RENDERBUFFER_DEPTH_SIZE_OES 0x8D54 +#define GL_RENDERBUFFER_STENCIL_SIZE_OES 0x8D55 +#define GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE_OES 0x8CD0 +#define GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME_OES 0x8CD1 +#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL_OES 0x8CD2 +#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE_OES 0x8CD3 +#define GL_COLOR_ATTACHMENT0_OES 0x8CE0 +#define GL_DEPTH_ATTACHMENT_OES 0x8D00 +#define GL_STENCIL_ATTACHMENT_OES 0x8D20 +#define GL_FRAMEBUFFER_COMPLETE_OES 0x8CD5 +#define GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT_OES 0x8CD6 +#define GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT_OES 0x8CD7 +#define GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS_OES 0x8CD9 +#define GL_FRAMEBUFFER_INCOMPLETE_FORMATS_OES 0x8CDA +#define GL_FRAMEBUFFER_UNSUPPORTED_OES 0x8CDD +#define GL_FRAMEBUFFER_BINDING_OES 0x8CA6 +#define GL_RENDERBUFFER_BINDING_OES 0x8CA7 +#define GL_MAX_RENDERBUFFER_SIZE_OES 0x84E8 +#define GL_INVALID_FRAMEBUFFER_OPERATION_OES 0x0506 +typedef GLboolean (GL_APIENTRYP PFNGLISRENDERBUFFEROESPROC) (GLuint renderbuffer); +typedef void (GL_APIENTRYP PFNGLBINDRENDERBUFFEROESPROC) (GLenum target, GLuint renderbuffer); +typedef void (GL_APIENTRYP PFNGLDELETERENDERBUFFERSOESPROC) (GLsizei n, const GLuint *renderbuffers); +typedef void (GL_APIENTRYP PFNGLGENRENDERBUFFERSOESPROC) (GLsizei n, GLuint *renderbuffers); +typedef void (GL_APIENTRYP PFNGLRENDERBUFFERSTORAGEOESPROC) (GLenum target, GLenum internalformat, GLsizei width, GLsizei height); +typedef void (GL_APIENTRYP PFNGLGETRENDERBUFFERPARAMETERIVOESPROC) (GLenum target, GLenum pname, GLint *params); +typedef GLboolean (GL_APIENTRYP PFNGLISFRAMEBUFFEROESPROC) (GLuint framebuffer); +typedef void (GL_APIENTRYP PFNGLBINDFRAMEBUFFEROESPROC) (GLenum target, GLuint framebuffer); +typedef void (GL_APIENTRYP PFNGLDELETEFRAMEBUFFERSOESPROC) (GLsizei n, const GLuint *framebuffers); +typedef void (GL_APIENTRYP PFNGLGENFRAMEBUFFERSOESPROC) (GLsizei n, GLuint *framebuffers); +typedef GLenum (GL_APIENTRYP PFNGLCHECKFRAMEBUFFERSTATUSOESPROC) (GLenum target); +typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERRENDERBUFFEROESPROC) (GLenum target, GLenum attachment, GLenum renderbuffertarget, GLuint renderbuffer); +typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERTEXTURE2DOESPROC) (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level); +typedef void (GL_APIENTRYP PFNGLGETFRAMEBUFFERATTACHMENTPARAMETERIVOESPROC) (GLenum target, GLenum attachment, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGENERATEMIPMAPOESPROC) (GLenum target); +#ifdef GL_GLEXT_PROTOTYPES +GL_API GLboolean GL_APIENTRY glIsRenderbufferOES (GLuint renderbuffer); +GL_API void GL_APIENTRY 
glBindRenderbufferOES (GLenum target, GLuint renderbuffer); +GL_API void GL_APIENTRY glDeleteRenderbuffersOES (GLsizei n, const GLuint *renderbuffers); +GL_API void GL_APIENTRY glGenRenderbuffersOES (GLsizei n, GLuint *renderbuffers); +GL_API void GL_APIENTRY glRenderbufferStorageOES (GLenum target, GLenum internalformat, GLsizei width, GLsizei height); +GL_API void GL_APIENTRY glGetRenderbufferParameterivOES (GLenum target, GLenum pname, GLint *params); +GL_API GLboolean GL_APIENTRY glIsFramebufferOES (GLuint framebuffer); +GL_API void GL_APIENTRY glBindFramebufferOES (GLenum target, GLuint framebuffer); +GL_API void GL_APIENTRY glDeleteFramebuffersOES (GLsizei n, const GLuint *framebuffers); +GL_API void GL_APIENTRY glGenFramebuffersOES (GLsizei n, GLuint *framebuffers); +GL_API GLenum GL_APIENTRY glCheckFramebufferStatusOES (GLenum target); +GL_API void GL_APIENTRY glFramebufferRenderbufferOES (GLenum target, GLenum attachment, GLenum renderbuffertarget, GLuint renderbuffer); +GL_API void GL_APIENTRY glFramebufferTexture2DOES (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level); +GL_API void GL_APIENTRY glGetFramebufferAttachmentParameterivOES (GLenum target, GLenum attachment, GLenum pname, GLint *params); +GL_API void GL_APIENTRY glGenerateMipmapOES (GLenum target); +#endif +#endif /* GL_OES_framebuffer_object */ + +#ifndef GL_OES_mapbuffer +#define GL_OES_mapbuffer 1 +#define GL_WRITE_ONLY_OES 0x88B9 +#define GL_BUFFER_ACCESS_OES 0x88BB +#define GL_BUFFER_MAPPED_OES 0x88BC +#define GL_BUFFER_MAP_POINTER_OES 0x88BD +typedef void *(GL_APIENTRYP PFNGLMAPBUFFEROESPROC) (GLenum target, GLenum access); +typedef GLboolean (GL_APIENTRYP PFNGLUNMAPBUFFEROESPROC) (GLenum target); +typedef void (GL_APIENTRYP PFNGLGETBUFFERPOINTERVOESPROC) (GLenum target, GLenum pname, void **params); +#ifdef GL_GLEXT_PROTOTYPES +GL_API void *GL_APIENTRY glMapBufferOES (GLenum target, GLenum access); +GL_API GLboolean GL_APIENTRY glUnmapBufferOES (GLenum target); +GL_API void GL_APIENTRY glGetBufferPointervOES (GLenum target, GLenum pname, void **params); +#endif +#endif /* GL_OES_mapbuffer */ + +#ifndef GL_OES_matrix_get +#define GL_OES_matrix_get 1 +#define GL_MODELVIEW_MATRIX_FLOAT_AS_INT_BITS_OES 0x898D +#define GL_PROJECTION_MATRIX_FLOAT_AS_INT_BITS_OES 0x898E +#define GL_TEXTURE_MATRIX_FLOAT_AS_INT_BITS_OES 0x898F +#endif /* GL_OES_matrix_get */ + +#ifndef GL_OES_matrix_palette +#define GL_OES_matrix_palette 1 +#define GL_MAX_VERTEX_UNITS_OES 0x86A4 +#define GL_MAX_PALETTE_MATRICES_OES 0x8842 +#define GL_MATRIX_PALETTE_OES 0x8840 +#define GL_MATRIX_INDEX_ARRAY_OES 0x8844 +#define GL_WEIGHT_ARRAY_OES 0x86AD +#define GL_CURRENT_PALETTE_MATRIX_OES 0x8843 +#define GL_MATRIX_INDEX_ARRAY_SIZE_OES 0x8846 +#define GL_MATRIX_INDEX_ARRAY_TYPE_OES 0x8847 +#define GL_MATRIX_INDEX_ARRAY_STRIDE_OES 0x8848 +#define GL_MATRIX_INDEX_ARRAY_POINTER_OES 0x8849 +#define GL_MATRIX_INDEX_ARRAY_BUFFER_BINDING_OES 0x8B9E +#define GL_WEIGHT_ARRAY_SIZE_OES 0x86AB +#define GL_WEIGHT_ARRAY_TYPE_OES 0x86A9 +#define GL_WEIGHT_ARRAY_STRIDE_OES 0x86AA +#define GL_WEIGHT_ARRAY_POINTER_OES 0x86AC +#define GL_WEIGHT_ARRAY_BUFFER_BINDING_OES 0x889E +typedef void (GL_APIENTRYP PFNGLCURRENTPALETTEMATRIXOESPROC) (GLuint matrixpaletteindex); +typedef void (GL_APIENTRYP PFNGLLOADPALETTEFROMMODELVIEWMATRIXOESPROC) (void); +typedef void (GL_APIENTRYP PFNGLMATRIXINDEXPOINTEROESPROC) (GLint size, GLenum type, GLsizei stride, const void *pointer); +typedef void (GL_APIENTRYP PFNGLWEIGHTPOINTEROESPROC) (GLint size, GLenum 
type, GLsizei stride, const void *pointer); +#ifdef GL_GLEXT_PROTOTYPES +GL_API void GL_APIENTRY glCurrentPaletteMatrixOES (GLuint matrixpaletteindex); +GL_API void GL_APIENTRY glLoadPaletteFromModelViewMatrixOES (void); +GL_API void GL_APIENTRY glMatrixIndexPointerOES (GLint size, GLenum type, GLsizei stride, const void *pointer); +GL_API void GL_APIENTRY glWeightPointerOES (GLint size, GLenum type, GLsizei stride, const void *pointer); +#endif +#endif /* GL_OES_matrix_palette */ + +#ifndef GL_OES_packed_depth_stencil +#define GL_OES_packed_depth_stencil 1 +#define GL_DEPTH_STENCIL_OES 0x84F9 +#define GL_UNSIGNED_INT_24_8_OES 0x84FA +#define GL_DEPTH24_STENCIL8_OES 0x88F0 +#endif /* GL_OES_packed_depth_stencil */ + +#ifndef GL_OES_query_matrix +#define GL_OES_query_matrix 1 +typedef GLbitfield (GL_APIENTRYP PFNGLQUERYMATRIXXOESPROC) (GLfixed *mantissa, GLint *exponent); +#ifdef GL_GLEXT_PROTOTYPES +GL_API GLbitfield GL_APIENTRY glQueryMatrixxOES (GLfixed *mantissa, GLint *exponent); +#endif +#endif /* GL_OES_query_matrix */ + +#ifndef GL_OES_required_internalformat +#define GL_OES_required_internalformat 1 +#define GL_ALPHA8_OES 0x803C +#define GL_LUMINANCE4_ALPHA4_OES 0x8043 +#define GL_LUMINANCE8_ALPHA8_OES 0x8045 +#define GL_LUMINANCE8_OES 0x8040 +#define GL_RGB8_OES 0x8051 +#define GL_RGBA8_OES 0x8058 +#define GL_RGB10_EXT 0x8052 +#define GL_RGB10_A2_EXT 0x8059 +#endif /* GL_OES_required_internalformat */ + +#ifndef GL_OES_rgb8_rgba8 +#define GL_OES_rgb8_rgba8 1 +#endif /* GL_OES_rgb8_rgba8 */ + +#ifndef GL_OES_single_precision +#define GL_OES_single_precision 1 +typedef khronos_float_t GLclampf; +typedef void (GL_APIENTRYP PFNGLCLEARDEPTHFOESPROC) (GLclampf depth); +typedef void (GL_APIENTRYP PFNGLCLIPPLANEFOESPROC) (GLenum plane, const GLfloat *equation); +typedef void (GL_APIENTRYP PFNGLDEPTHRANGEFOESPROC) (GLclampf n, GLclampf f); +typedef void (GL_APIENTRYP PFNGLFRUSTUMFOESPROC) (GLfloat l, GLfloat r, GLfloat b, GLfloat t, GLfloat n, GLfloat f); +typedef void (GL_APIENTRYP PFNGLGETCLIPPLANEFOESPROC) (GLenum plane, GLfloat *equation); +typedef void (GL_APIENTRYP PFNGLORTHOFOESPROC) (GLfloat l, GLfloat r, GLfloat b, GLfloat t, GLfloat n, GLfloat f); +#ifdef GL_GLEXT_PROTOTYPES +GL_API void GL_APIENTRY glClearDepthfOES (GLclampf depth); +GL_API void GL_APIENTRY glClipPlanefOES (GLenum plane, const GLfloat *equation); +GL_API void GL_APIENTRY glDepthRangefOES (GLclampf n, GLclampf f); +GL_API void GL_APIENTRY glFrustumfOES (GLfloat l, GLfloat r, GLfloat b, GLfloat t, GLfloat n, GLfloat f); +GL_API void GL_APIENTRY glGetClipPlanefOES (GLenum plane, GLfloat *equation); +GL_API void GL_APIENTRY glOrthofOES (GLfloat l, GLfloat r, GLfloat b, GLfloat t, GLfloat n, GLfloat f); +#endif +#endif /* GL_OES_single_precision */ + +#ifndef GL_OES_stencil1 +#define GL_OES_stencil1 1 +#define GL_STENCIL_INDEX1_OES 0x8D46 +#endif /* GL_OES_stencil1 */ + +#ifndef GL_OES_stencil4 +#define GL_OES_stencil4 1 +#define GL_STENCIL_INDEX4_OES 0x8D47 +#endif /* GL_OES_stencil4 */ + +#ifndef GL_OES_stencil8 +#define GL_OES_stencil8 1 +#define GL_STENCIL_INDEX8_OES 0x8D48 +#endif /* GL_OES_stencil8 */ + +#ifndef GL_OES_stencil_wrap +#define GL_OES_stencil_wrap 1 +#define GL_INCR_WRAP_OES 0x8507 +#define GL_DECR_WRAP_OES 0x8508 +#endif /* GL_OES_stencil_wrap */ + +#ifndef GL_OES_texture_cube_map +#define GL_OES_texture_cube_map 1 +#define GL_NORMAL_MAP_OES 0x8511 +#define GL_REFLECTION_MAP_OES 0x8512 +#define GL_TEXTURE_CUBE_MAP_OES 0x8513 +#define GL_TEXTURE_BINDING_CUBE_MAP_OES 0x8514 +#define 
GL_TEXTURE_CUBE_MAP_POSITIVE_X_OES 0x8515 +#define GL_TEXTURE_CUBE_MAP_NEGATIVE_X_OES 0x8516 +#define GL_TEXTURE_CUBE_MAP_POSITIVE_Y_OES 0x8517 +#define GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_OES 0x8518 +#define GL_TEXTURE_CUBE_MAP_POSITIVE_Z_OES 0x8519 +#define GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_OES 0x851A +#define GL_MAX_CUBE_MAP_TEXTURE_SIZE_OES 0x851C +#define GL_TEXTURE_GEN_MODE_OES 0x2500 +#define GL_TEXTURE_GEN_STR_OES 0x8D60 +typedef void (GL_APIENTRYP PFNGLTEXGENFOESPROC) (GLenum coord, GLenum pname, GLfloat param); +typedef void (GL_APIENTRYP PFNGLTEXGENFVOESPROC) (GLenum coord, GLenum pname, const GLfloat *params); +typedef void (GL_APIENTRYP PFNGLTEXGENIOESPROC) (GLenum coord, GLenum pname, GLint param); +typedef void (GL_APIENTRYP PFNGLTEXGENIVOESPROC) (GLenum coord, GLenum pname, const GLint *params); +typedef void (GL_APIENTRYP PFNGLGETTEXGENFVOESPROC) (GLenum coord, GLenum pname, GLfloat *params); +typedef void (GL_APIENTRYP PFNGLGETTEXGENIVOESPROC) (GLenum coord, GLenum pname, GLint *params); +#ifdef GL_GLEXT_PROTOTYPES +GL_API void GL_APIENTRY glTexGenfOES (GLenum coord, GLenum pname, GLfloat param); +GL_API void GL_APIENTRY glTexGenfvOES (GLenum coord, GLenum pname, const GLfloat *params); +GL_API void GL_APIENTRY glTexGeniOES (GLenum coord, GLenum pname, GLint param); +GL_API void GL_APIENTRY glTexGenivOES (GLenum coord, GLenum pname, const GLint *params); +GL_API void GL_APIENTRY glGetTexGenfvOES (GLenum coord, GLenum pname, GLfloat *params); +GL_API void GL_APIENTRY glGetTexGenivOES (GLenum coord, GLenum pname, GLint *params); +#endif +#endif /* GL_OES_texture_cube_map */ + +#ifndef GL_OES_texture_env_crossbar +#define GL_OES_texture_env_crossbar 1 +#endif /* GL_OES_texture_env_crossbar */ + +#ifndef GL_OES_texture_mirrored_repeat +#define GL_OES_texture_mirrored_repeat 1 +#define GL_MIRRORED_REPEAT_OES 0x8370 +#endif /* GL_OES_texture_mirrored_repeat */ + +#ifndef GL_OES_vertex_array_object +#define GL_OES_vertex_array_object 1 +#define GL_VERTEX_ARRAY_BINDING_OES 0x85B5 +typedef void (GL_APIENTRYP PFNGLBINDVERTEXARRAYOESPROC) (GLuint array); +typedef void (GL_APIENTRYP PFNGLDELETEVERTEXARRAYSOESPROC) (GLsizei n, const GLuint *arrays); +typedef void (GL_APIENTRYP PFNGLGENVERTEXARRAYSOESPROC) (GLsizei n, GLuint *arrays); +typedef GLboolean (GL_APIENTRYP PFNGLISVERTEXARRAYOESPROC) (GLuint array); +#ifdef GL_GLEXT_PROTOTYPES +GL_API void GL_APIENTRY glBindVertexArrayOES (GLuint array); +GL_API void GL_APIENTRY glDeleteVertexArraysOES (GLsizei n, const GLuint *arrays); +GL_API void GL_APIENTRY glGenVertexArraysOES (GLsizei n, GLuint *arrays); +GL_API GLboolean GL_APIENTRY glIsVertexArrayOES (GLuint array); +#endif +#endif /* GL_OES_vertex_array_object */ + +#ifndef GL_AMD_compressed_3DC_texture +#define GL_AMD_compressed_3DC_texture 1 +#define GL_3DC_X_AMD 0x87F9 +#define GL_3DC_XY_AMD 0x87FA +#endif /* GL_AMD_compressed_3DC_texture */ + +#ifndef GL_AMD_compressed_ATC_texture +#define GL_AMD_compressed_ATC_texture 1 +#define GL_ATC_RGB_AMD 0x8C92 +#define GL_ATC_RGBA_EXPLICIT_ALPHA_AMD 0x8C93 +#define GL_ATC_RGBA_INTERPOLATED_ALPHA_AMD 0x87EE +#endif /* GL_AMD_compressed_ATC_texture */ + +#ifndef GL_APPLE_copy_texture_levels +#define GL_APPLE_copy_texture_levels 1 +typedef void (GL_APIENTRYP PFNGLCOPYTEXTURELEVELSAPPLEPROC) (GLuint destinationTexture, GLuint sourceTexture, GLint sourceBaseLevel, GLsizei sourceLevelCount); +#ifdef GL_GLEXT_PROTOTYPES +GL_API void GL_APIENTRY glCopyTextureLevelsAPPLE (GLuint destinationTexture, GLuint sourceTexture, GLint sourceBaseLevel, GLsizei 
sourceLevelCount); +#endif +#endif /* GL_APPLE_copy_texture_levels */ + +#ifndef GL_APPLE_framebuffer_multisample +#define GL_APPLE_framebuffer_multisample 1 +#define GL_RENDERBUFFER_SAMPLES_APPLE 0x8CAB +#define GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE_APPLE 0x8D56 +#define GL_MAX_SAMPLES_APPLE 0x8D57 +#define GL_READ_FRAMEBUFFER_APPLE 0x8CA8 +#define GL_DRAW_FRAMEBUFFER_APPLE 0x8CA9 +#define GL_DRAW_FRAMEBUFFER_BINDING_APPLE 0x8CA6 +#define GL_READ_FRAMEBUFFER_BINDING_APPLE 0x8CAA +typedef void (GL_APIENTRYP PFNGLRENDERBUFFERSTORAGEMULTISAMPLEAPPLEPROC) (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height); +typedef void (GL_APIENTRYP PFNGLRESOLVEMULTISAMPLEFRAMEBUFFERAPPLEPROC) (void); +#ifdef GL_GLEXT_PROTOTYPES +GL_API void GL_APIENTRY glRenderbufferStorageMultisampleAPPLE (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height); +GL_API void GL_APIENTRY glResolveMultisampleFramebufferAPPLE (void); +#endif +#endif /* GL_APPLE_framebuffer_multisample */ + +#ifndef GL_APPLE_sync +#define GL_APPLE_sync 1 +typedef struct __GLsync *GLsync; +typedef khronos_uint64_t GLuint64; +typedef khronos_int64_t GLint64; +#define GL_SYNC_OBJECT_APPLE 0x8A53 +#define GL_MAX_SERVER_WAIT_TIMEOUT_APPLE 0x9111 +#define GL_OBJECT_TYPE_APPLE 0x9112 +#define GL_SYNC_CONDITION_APPLE 0x9113 +#define GL_SYNC_STATUS_APPLE 0x9114 +#define GL_SYNC_FLAGS_APPLE 0x9115 +#define GL_SYNC_FENCE_APPLE 0x9116 +#define GL_SYNC_GPU_COMMANDS_COMPLETE_APPLE 0x9117 +#define GL_UNSIGNALED_APPLE 0x9118 +#define GL_SIGNALED_APPLE 0x9119 +#define GL_ALREADY_SIGNALED_APPLE 0x911A +#define GL_TIMEOUT_EXPIRED_APPLE 0x911B +#define GL_CONDITION_SATISFIED_APPLE 0x911C +#define GL_WAIT_FAILED_APPLE 0x911D +#define GL_SYNC_FLUSH_COMMANDS_BIT_APPLE 0x00000001 +#define GL_TIMEOUT_IGNORED_APPLE 0xFFFFFFFFFFFFFFFFull +typedef GLsync (GL_APIENTRYP PFNGLFENCESYNCAPPLEPROC) (GLenum condition, GLbitfield flags); +typedef GLboolean (GL_APIENTRYP PFNGLISSYNCAPPLEPROC) (GLsync sync); +typedef void (GL_APIENTRYP PFNGLDELETESYNCAPPLEPROC) (GLsync sync); +typedef GLenum (GL_APIENTRYP PFNGLCLIENTWAITSYNCAPPLEPROC) (GLsync sync, GLbitfield flags, GLuint64 timeout); +typedef void (GL_APIENTRYP PFNGLWAITSYNCAPPLEPROC) (GLsync sync, GLbitfield flags, GLuint64 timeout); +typedef void (GL_APIENTRYP PFNGLGETINTEGER64VAPPLEPROC) (GLenum pname, GLint64 *params); +typedef void (GL_APIENTRYP PFNGLGETSYNCIVAPPLEPROC) (GLsync sync, GLenum pname, GLsizei bufSize, GLsizei *length, GLint *values); +#ifdef GL_GLEXT_PROTOTYPES +GL_API GLsync GL_APIENTRY glFenceSyncAPPLE (GLenum condition, GLbitfield flags); +GL_API GLboolean GL_APIENTRY glIsSyncAPPLE (GLsync sync); +GL_API void GL_APIENTRY glDeleteSyncAPPLE (GLsync sync); +GL_API GLenum GL_APIENTRY glClientWaitSyncAPPLE (GLsync sync, GLbitfield flags, GLuint64 timeout); +GL_API void GL_APIENTRY glWaitSyncAPPLE (GLsync sync, GLbitfield flags, GLuint64 timeout); +GL_API void GL_APIENTRY glGetInteger64vAPPLE (GLenum pname, GLint64 *params); +GL_API void GL_APIENTRY glGetSyncivAPPLE (GLsync sync, GLenum pname, GLsizei bufSize, GLsizei *length, GLint *values); +#endif +#endif /* GL_APPLE_sync */ + +#ifndef GL_APPLE_texture_2D_limited_npot +#define GL_APPLE_texture_2D_limited_npot 1 +#endif /* GL_APPLE_texture_2D_limited_npot */ + +#ifndef GL_APPLE_texture_format_BGRA8888 +#define GL_APPLE_texture_format_BGRA8888 1 +#define GL_BGRA_EXT 0x80E1 +#define GL_BGRA8_EXT 0x93A1 +#endif /* GL_APPLE_texture_format_BGRA8888 */ + +#ifndef GL_APPLE_texture_max_level +#define 
GL_APPLE_texture_max_level 1 +#define GL_TEXTURE_MAX_LEVEL_APPLE 0x813D +#endif /* GL_APPLE_texture_max_level */ + +#ifndef GL_ARM_rgba8 +#define GL_ARM_rgba8 1 +#endif /* GL_ARM_rgba8 */ + +#ifndef GL_EXT_blend_minmax +#define GL_EXT_blend_minmax 1 +#define GL_MIN_EXT 0x8007 +#define GL_MAX_EXT 0x8008 +#endif /* GL_EXT_blend_minmax */ + +#ifndef GL_EXT_discard_framebuffer +#define GL_EXT_discard_framebuffer 1 +#define GL_COLOR_EXT 0x1800 +#define GL_DEPTH_EXT 0x1801 +#define GL_STENCIL_EXT 0x1802 +typedef void (GL_APIENTRYP PFNGLDISCARDFRAMEBUFFEREXTPROC) (GLenum target, GLsizei numAttachments, const GLenum *attachments); +#ifdef GL_GLEXT_PROTOTYPES +GL_API void GL_APIENTRY glDiscardFramebufferEXT (GLenum target, GLsizei numAttachments, const GLenum *attachments); +#endif +#endif /* GL_EXT_discard_framebuffer */ + +#ifndef GL_EXT_map_buffer_range +#define GL_EXT_map_buffer_range 1 +#define GL_MAP_READ_BIT_EXT 0x0001 +#define GL_MAP_WRITE_BIT_EXT 0x0002 +#define GL_MAP_INVALIDATE_RANGE_BIT_EXT 0x0004 +#define GL_MAP_INVALIDATE_BUFFER_BIT_EXT 0x0008 +#define GL_MAP_FLUSH_EXPLICIT_BIT_EXT 0x0010 +#define GL_MAP_UNSYNCHRONIZED_BIT_EXT 0x0020 +typedef void *(GL_APIENTRYP PFNGLMAPBUFFERRANGEEXTPROC) (GLenum target, GLintptr offset, GLsizeiptr length, GLbitfield access); +typedef void (GL_APIENTRYP PFNGLFLUSHMAPPEDBUFFERRANGEEXTPROC) (GLenum target, GLintptr offset, GLsizeiptr length); +#ifdef GL_GLEXT_PROTOTYPES +GL_API void *GL_APIENTRY glMapBufferRangeEXT (GLenum target, GLintptr offset, GLsizeiptr length, GLbitfield access); +GL_API void GL_APIENTRY glFlushMappedBufferRangeEXT (GLenum target, GLintptr offset, GLsizeiptr length); +#endif +#endif /* GL_EXT_map_buffer_range */ + +#ifndef GL_EXT_multi_draw_arrays +#define GL_EXT_multi_draw_arrays 1 +typedef void (GL_APIENTRYP PFNGLMULTIDRAWARRAYSEXTPROC) (GLenum mode, const GLint *first, const GLsizei *count, GLsizei primcount); +typedef void (GL_APIENTRYP PFNGLMULTIDRAWELEMENTSEXTPROC) (GLenum mode, const GLsizei *count, GLenum type, const void *const*indices, GLsizei primcount); +#ifdef GL_GLEXT_PROTOTYPES +GL_API void GL_APIENTRY glMultiDrawArraysEXT (GLenum mode, const GLint *first, const GLsizei *count, GLsizei primcount); +GL_API void GL_APIENTRY glMultiDrawElementsEXT (GLenum mode, const GLsizei *count, GLenum type, const void *const*indices, GLsizei primcount); +#endif +#endif /* GL_EXT_multi_draw_arrays */ + +#ifndef GL_EXT_multisampled_render_to_texture +#define GL_EXT_multisampled_render_to_texture 1 +#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_SAMPLES_EXT 0x8D6C +#define GL_RENDERBUFFER_SAMPLES_EXT 0x8CAB +#define GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE_EXT 0x8D56 +#define GL_MAX_SAMPLES_EXT 0x8D57 +typedef void (GL_APIENTRYP PFNGLRENDERBUFFERSTORAGEMULTISAMPLEEXTPROC) (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height); +typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERTEXTURE2DMULTISAMPLEEXTPROC) (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLsizei samples); +#ifdef GL_GLEXT_PROTOTYPES +GL_API void GL_APIENTRY glRenderbufferStorageMultisampleEXT (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height); +GL_API void GL_APIENTRY glFramebufferTexture2DMultisampleEXT (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLsizei samples); +#endif +#endif /* GL_EXT_multisampled_render_to_texture */ + +#ifndef GL_EXT_read_format_bgra +#define GL_EXT_read_format_bgra 1 +#define GL_UNSIGNED_SHORT_4_4_4_4_REV_EXT 0x8365 
+#define GL_UNSIGNED_SHORT_1_5_5_5_REV_EXT 0x8366 +#endif /* GL_EXT_read_format_bgra */ + +#ifndef GL_EXT_robustness +#define GL_EXT_robustness 1 +#define GL_GUILTY_CONTEXT_RESET_EXT 0x8253 +#define GL_INNOCENT_CONTEXT_RESET_EXT 0x8254 +#define GL_UNKNOWN_CONTEXT_RESET_EXT 0x8255 +#define GL_CONTEXT_ROBUST_ACCESS_EXT 0x90F3 +#define GL_RESET_NOTIFICATION_STRATEGY_EXT 0x8256 +#define GL_LOSE_CONTEXT_ON_RESET_EXT 0x8252 +#define GL_NO_RESET_NOTIFICATION_EXT 0x8261 +typedef GLenum (GL_APIENTRYP PFNGLGETGRAPHICSRESETSTATUSEXTPROC) (void); +typedef void (GL_APIENTRYP PFNGLREADNPIXELSEXTPROC) (GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, GLsizei bufSize, void *data); +typedef void (GL_APIENTRYP PFNGLGETNUNIFORMFVEXTPROC) (GLuint program, GLint location, GLsizei bufSize, GLfloat *params); +typedef void (GL_APIENTRYP PFNGLGETNUNIFORMIVEXTPROC) (GLuint program, GLint location, GLsizei bufSize, GLint *params); +#ifdef GL_GLEXT_PROTOTYPES +GL_API GLenum GL_APIENTRY glGetGraphicsResetStatusEXT (void); +GL_API void GL_APIENTRY glReadnPixelsEXT (GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, GLsizei bufSize, void *data); +GL_API void GL_APIENTRY glGetnUniformfvEXT (GLuint program, GLint location, GLsizei bufSize, GLfloat *params); +GL_API void GL_APIENTRY glGetnUniformivEXT (GLuint program, GLint location, GLsizei bufSize, GLint *params); +#endif +#endif /* GL_EXT_robustness */ + +#ifndef GL_EXT_sRGB +#define GL_EXT_sRGB 1 +#define GL_SRGB_EXT 0x8C40 +#define GL_SRGB_ALPHA_EXT 0x8C42 +#define GL_SRGB8_ALPHA8_EXT 0x8C43 +#define GL_FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING_EXT 0x8210 +#endif /* GL_EXT_sRGB */ + +#ifndef GL_EXT_texture_compression_dxt1 +#define GL_EXT_texture_compression_dxt1 1 +#define GL_COMPRESSED_RGB_S3TC_DXT1_EXT 0x83F0 +#define GL_COMPRESSED_RGBA_S3TC_DXT1_EXT 0x83F1 +#endif /* GL_EXT_texture_compression_dxt1 */ + +#ifndef GL_EXT_texture_filter_anisotropic +#define GL_EXT_texture_filter_anisotropic 1 +#define GL_TEXTURE_MAX_ANISOTROPY_EXT 0x84FE +#define GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT 0x84FF +#endif /* GL_EXT_texture_filter_anisotropic */ + +#ifndef GL_EXT_texture_format_BGRA8888 +#define GL_EXT_texture_format_BGRA8888 1 +#endif /* GL_EXT_texture_format_BGRA8888 */ + +#ifndef GL_EXT_texture_lod_bias +#define GL_EXT_texture_lod_bias 1 +#define GL_MAX_TEXTURE_LOD_BIAS_EXT 0x84FD +#define GL_TEXTURE_FILTER_CONTROL_EXT 0x8500 +#define GL_TEXTURE_LOD_BIAS_EXT 0x8501 +#endif /* GL_EXT_texture_lod_bias */ + +#ifndef GL_EXT_texture_storage +#define GL_EXT_texture_storage 1 +#define GL_TEXTURE_IMMUTABLE_FORMAT_EXT 0x912F +#define GL_ALPHA8_EXT 0x803C +#define GL_LUMINANCE8_EXT 0x8040 +#define GL_LUMINANCE8_ALPHA8_EXT 0x8045 +#define GL_RGBA32F_EXT 0x8814 +#define GL_RGB32F_EXT 0x8815 +#define GL_ALPHA32F_EXT 0x8816 +#define GL_LUMINANCE32F_EXT 0x8818 +#define GL_LUMINANCE_ALPHA32F_EXT 0x8819 +#define GL_RGBA16F_EXT 0x881A +#define GL_RGB16F_EXT 0x881B +#define GL_ALPHA16F_EXT 0x881C +#define GL_LUMINANCE16F_EXT 0x881E +#define GL_LUMINANCE_ALPHA16F_EXT 0x881F +#define GL_R8_EXT 0x8229 +#define GL_RG8_EXT 0x822B +#define GL_R32F_EXT 0x822E +#define GL_RG32F_EXT 0x8230 +#define GL_R16F_EXT 0x822D +#define GL_RG16F_EXT 0x822F +typedef void (GL_APIENTRYP PFNGLTEXSTORAGE1DEXTPROC) (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width); +typedef void (GL_APIENTRYP PFNGLTEXSTORAGE2DEXTPROC) (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height); +typedef void (GL_APIENTRYP 
PFNGLTEXSTORAGE3DEXTPROC) (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth); +typedef void (GL_APIENTRYP PFNGLTEXTURESTORAGE1DEXTPROC) (GLuint texture, GLenum target, GLsizei levels, GLenum internalformat, GLsizei width); +typedef void (GL_APIENTRYP PFNGLTEXTURESTORAGE2DEXTPROC) (GLuint texture, GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height); +typedef void (GL_APIENTRYP PFNGLTEXTURESTORAGE3DEXTPROC) (GLuint texture, GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth); +#ifdef GL_GLEXT_PROTOTYPES +GL_API void GL_APIENTRY glTexStorage1DEXT (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width); +GL_API void GL_APIENTRY glTexStorage2DEXT (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height); +GL_API void GL_APIENTRY glTexStorage3DEXT (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth); +GL_API void GL_APIENTRY glTextureStorage1DEXT (GLuint texture, GLenum target, GLsizei levels, GLenum internalformat, GLsizei width); +GL_API void GL_APIENTRY glTextureStorage2DEXT (GLuint texture, GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height); +GL_API void GL_APIENTRY glTextureStorage3DEXT (GLuint texture, GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth); +#endif +#endif /* GL_EXT_texture_storage */ + +#ifndef GL_IMG_multisampled_render_to_texture +#define GL_IMG_multisampled_render_to_texture 1 +#define GL_RENDERBUFFER_SAMPLES_IMG 0x9133 +#define GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE_IMG 0x9134 +#define GL_MAX_SAMPLES_IMG 0x9135 +#define GL_TEXTURE_SAMPLES_IMG 0x9136 +typedef void (GL_APIENTRYP PFNGLRENDERBUFFERSTORAGEMULTISAMPLEIMGPROC) (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height); +typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERTEXTURE2DMULTISAMPLEIMGPROC) (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLsizei samples); +#ifdef GL_GLEXT_PROTOTYPES +GL_API void GL_APIENTRY glRenderbufferStorageMultisampleIMG (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height); +GL_API void GL_APIENTRY glFramebufferTexture2DMultisampleIMG (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLsizei samples); +#endif +#endif /* GL_IMG_multisampled_render_to_texture */ + +#ifndef GL_IMG_read_format +#define GL_IMG_read_format 1 +#define GL_BGRA_IMG 0x80E1 +#define GL_UNSIGNED_SHORT_4_4_4_4_REV_IMG 0x8365 +#endif /* GL_IMG_read_format */ + +#ifndef GL_IMG_texture_compression_pvrtc +#define GL_IMG_texture_compression_pvrtc 1 +#define GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG 0x8C00 +#define GL_COMPRESSED_RGB_PVRTC_2BPPV1_IMG 0x8C01 +#define GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG 0x8C02 +#define GL_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG 0x8C03 +#endif /* GL_IMG_texture_compression_pvrtc */ + +#ifndef GL_IMG_texture_env_enhanced_fixed_function +#define GL_IMG_texture_env_enhanced_fixed_function 1 +#define GL_MODULATE_COLOR_IMG 0x8C04 +#define GL_RECIP_ADD_SIGNED_ALPHA_IMG 0x8C05 +#define GL_TEXTURE_ALPHA_MODULATE_IMG 0x8C06 +#define GL_FACTOR_ALPHA_MODULATE_IMG 0x8C07 +#define GL_FRAGMENT_ALPHA_MODULATE_IMG 0x8C08 +#define GL_ADD_BLEND_IMG 0x8C09 +#define GL_DOT3_RGBA_IMG 0x86AF +#endif /* GL_IMG_texture_env_enhanced_fixed_function */ + +#ifndef GL_IMG_user_clip_plane +#define 
GL_IMG_user_clip_plane 1 +#define GL_CLIP_PLANE0_IMG 0x3000 +#define GL_CLIP_PLANE1_IMG 0x3001 +#define GL_CLIP_PLANE2_IMG 0x3002 +#define GL_CLIP_PLANE3_IMG 0x3003 +#define GL_CLIP_PLANE4_IMG 0x3004 +#define GL_CLIP_PLANE5_IMG 0x3005 +#define GL_MAX_CLIP_PLANES_IMG 0x0D32 +typedef void (GL_APIENTRYP PFNGLCLIPPLANEFIMGPROC) (GLenum p, const GLfloat *eqn); +typedef void (GL_APIENTRYP PFNGLCLIPPLANEXIMGPROC) (GLenum p, const GLfixed *eqn); +#ifdef GL_GLEXT_PROTOTYPES +GL_API void GL_APIENTRY glClipPlanefIMG (GLenum p, const GLfloat *eqn); +GL_API void GL_APIENTRY glClipPlanexIMG (GLenum p, const GLfixed *eqn); +#endif +#endif /* GL_IMG_user_clip_plane */ + +#ifndef GL_NV_fence +#define GL_NV_fence 1 +#define GL_ALL_COMPLETED_NV 0x84F2 +#define GL_FENCE_STATUS_NV 0x84F3 +#define GL_FENCE_CONDITION_NV 0x84F4 +typedef void (GL_APIENTRYP PFNGLDELETEFENCESNVPROC) (GLsizei n, const GLuint *fences); +typedef void (GL_APIENTRYP PFNGLGENFENCESNVPROC) (GLsizei n, GLuint *fences); +typedef GLboolean (GL_APIENTRYP PFNGLISFENCENVPROC) (GLuint fence); +typedef GLboolean (GL_APIENTRYP PFNGLTESTFENCENVPROC) (GLuint fence); +typedef void (GL_APIENTRYP PFNGLGETFENCEIVNVPROC) (GLuint fence, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLFINISHFENCENVPROC) (GLuint fence); +typedef void (GL_APIENTRYP PFNGLSETFENCENVPROC) (GLuint fence, GLenum condition); +#ifdef GL_GLEXT_PROTOTYPES +GL_API void GL_APIENTRY glDeleteFencesNV (GLsizei n, const GLuint *fences); +GL_API void GL_APIENTRY glGenFencesNV (GLsizei n, GLuint *fences); +GL_API GLboolean GL_APIENTRY glIsFenceNV (GLuint fence); +GL_API GLboolean GL_APIENTRY glTestFenceNV (GLuint fence); +GL_API void GL_APIENTRY glGetFenceivNV (GLuint fence, GLenum pname, GLint *params); +GL_API void GL_APIENTRY glFinishFenceNV (GLuint fence); +GL_API void GL_APIENTRY glSetFenceNV (GLuint fence, GLenum condition); +#endif +#endif /* GL_NV_fence */ + +#ifndef GL_QCOM_driver_control +#define GL_QCOM_driver_control 1 +typedef char GLchar; +typedef void (GL_APIENTRYP PFNGLGETDRIVERCONTROLSQCOMPROC) (GLint *num, GLsizei size, GLuint *driverControls); +typedef void (GL_APIENTRYP PFNGLGETDRIVERCONTROLSTRINGQCOMPROC) (GLuint driverControl, GLsizei bufSize, GLsizei *length, GLchar *driverControlString); +typedef void (GL_APIENTRYP PFNGLENABLEDRIVERCONTROLQCOMPROC) (GLuint driverControl); +typedef void (GL_APIENTRYP PFNGLDISABLEDRIVERCONTROLQCOMPROC) (GLuint driverControl); +#ifdef GL_GLEXT_PROTOTYPES +GL_API void GL_APIENTRY glGetDriverControlsQCOM (GLint *num, GLsizei size, GLuint *driverControls); +GL_API void GL_APIENTRY glGetDriverControlStringQCOM (GLuint driverControl, GLsizei bufSize, GLsizei *length, GLchar *driverControlString); +GL_API void GL_APIENTRY glEnableDriverControlQCOM (GLuint driverControl); +GL_API void GL_APIENTRY glDisableDriverControlQCOM (GLuint driverControl); +#endif +#endif /* GL_QCOM_driver_control */ + +#ifndef GL_QCOM_extended_get +#define GL_QCOM_extended_get 1 +#define GL_TEXTURE_WIDTH_QCOM 0x8BD2 +#define GL_TEXTURE_HEIGHT_QCOM 0x8BD3 +#define GL_TEXTURE_DEPTH_QCOM 0x8BD4 +#define GL_TEXTURE_INTERNAL_FORMAT_QCOM 0x8BD5 +#define GL_TEXTURE_FORMAT_QCOM 0x8BD6 +#define GL_TEXTURE_TYPE_QCOM 0x8BD7 +#define GL_TEXTURE_IMAGE_VALID_QCOM 0x8BD8 +#define GL_TEXTURE_NUM_LEVELS_QCOM 0x8BD9 +#define GL_TEXTURE_TARGET_QCOM 0x8BDA +#define GL_TEXTURE_OBJECT_VALID_QCOM 0x8BDB +#define GL_STATE_RESTORE 0x8BDC +typedef void (GL_APIENTRYP PFNGLEXTGETTEXTURESQCOMPROC) (GLuint *textures, GLint maxTextures, GLint *numTextures); +typedef void (GL_APIENTRYP 
PFNGLEXTGETBUFFERSQCOMPROC) (GLuint *buffers, GLint maxBuffers, GLint *numBuffers); +typedef void (GL_APIENTRYP PFNGLEXTGETRENDERBUFFERSQCOMPROC) (GLuint *renderbuffers, GLint maxRenderbuffers, GLint *numRenderbuffers); +typedef void (GL_APIENTRYP PFNGLEXTGETFRAMEBUFFERSQCOMPROC) (GLuint *framebuffers, GLint maxFramebuffers, GLint *numFramebuffers); +typedef void (GL_APIENTRYP PFNGLEXTGETTEXLEVELPARAMETERIVQCOMPROC) (GLuint texture, GLenum face, GLint level, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLEXTTEXOBJECTSTATEOVERRIDEIQCOMPROC) (GLenum target, GLenum pname, GLint param); +typedef void (GL_APIENTRYP PFNGLEXTGETTEXSUBIMAGEQCOMPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, void *texels); +typedef void (GL_APIENTRYP PFNGLEXTGETBUFFERPOINTERVQCOMPROC) (GLenum target, void **params); +#ifdef GL_GLEXT_PROTOTYPES +GL_API void GL_APIENTRY glExtGetTexturesQCOM (GLuint *textures, GLint maxTextures, GLint *numTextures); +GL_API void GL_APIENTRY glExtGetBuffersQCOM (GLuint *buffers, GLint maxBuffers, GLint *numBuffers); +GL_API void GL_APIENTRY glExtGetRenderbuffersQCOM (GLuint *renderbuffers, GLint maxRenderbuffers, GLint *numRenderbuffers); +GL_API void GL_APIENTRY glExtGetFramebuffersQCOM (GLuint *framebuffers, GLint maxFramebuffers, GLint *numFramebuffers); +GL_API void GL_APIENTRY glExtGetTexLevelParameterivQCOM (GLuint texture, GLenum face, GLint level, GLenum pname, GLint *params); +GL_API void GL_APIENTRY glExtTexObjectStateOverrideiQCOM (GLenum target, GLenum pname, GLint param); +GL_API void GL_APIENTRY glExtGetTexSubImageQCOM (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, void *texels); +GL_API void GL_APIENTRY glExtGetBufferPointervQCOM (GLenum target, void **params); +#endif +#endif /* GL_QCOM_extended_get */ + +#ifndef GL_QCOM_extended_get2 +#define GL_QCOM_extended_get2 1 +typedef void (GL_APIENTRYP PFNGLEXTGETSHADERSQCOMPROC) (GLuint *shaders, GLint maxShaders, GLint *numShaders); +typedef void (GL_APIENTRYP PFNGLEXTGETPROGRAMSQCOMPROC) (GLuint *programs, GLint maxPrograms, GLint *numPrograms); +typedef GLboolean (GL_APIENTRYP PFNGLEXTISPROGRAMBINARYQCOMPROC) (GLuint program); +typedef void (GL_APIENTRYP PFNGLEXTGETPROGRAMBINARYSOURCEQCOMPROC) (GLuint program, GLenum shadertype, GLchar *source, GLint *length); +#ifdef GL_GLEXT_PROTOTYPES +GL_API void GL_APIENTRY glExtGetShadersQCOM (GLuint *shaders, GLint maxShaders, GLint *numShaders); +GL_API void GL_APIENTRY glExtGetProgramsQCOM (GLuint *programs, GLint maxPrograms, GLint *numPrograms); +GL_API GLboolean GL_APIENTRY glExtIsProgramBinaryQCOM (GLuint program); +GL_API void GL_APIENTRY glExtGetProgramBinarySourceQCOM (GLuint program, GLenum shadertype, GLchar *source, GLint *length); +#endif +#endif /* GL_QCOM_extended_get2 */ + +#ifndef GL_QCOM_perfmon_global_mode +#define GL_QCOM_perfmon_global_mode 1 +#define GL_PERFMON_GLOBAL_MODE_QCOM 0x8FA0 +#endif /* GL_QCOM_perfmon_global_mode */ + +#ifndef GL_QCOM_tiled_rendering +#define GL_QCOM_tiled_rendering 1 +#define GL_COLOR_BUFFER_BIT0_QCOM 0x00000001 +#define GL_COLOR_BUFFER_BIT1_QCOM 0x00000002 +#define GL_COLOR_BUFFER_BIT2_QCOM 0x00000004 +#define GL_COLOR_BUFFER_BIT3_QCOM 0x00000008 +#define GL_COLOR_BUFFER_BIT4_QCOM 0x00000010 +#define GL_COLOR_BUFFER_BIT5_QCOM 0x00000020 +#define GL_COLOR_BUFFER_BIT6_QCOM 0x00000040 +#define 
GL_COLOR_BUFFER_BIT7_QCOM 0x00000080 +#define GL_DEPTH_BUFFER_BIT0_QCOM 0x00000100 +#define GL_DEPTH_BUFFER_BIT1_QCOM 0x00000200 +#define GL_DEPTH_BUFFER_BIT2_QCOM 0x00000400 +#define GL_DEPTH_BUFFER_BIT3_QCOM 0x00000800 +#define GL_DEPTH_BUFFER_BIT4_QCOM 0x00001000 +#define GL_DEPTH_BUFFER_BIT5_QCOM 0x00002000 +#define GL_DEPTH_BUFFER_BIT6_QCOM 0x00004000 +#define GL_DEPTH_BUFFER_BIT7_QCOM 0x00008000 +#define GL_STENCIL_BUFFER_BIT0_QCOM 0x00010000 +#define GL_STENCIL_BUFFER_BIT1_QCOM 0x00020000 +#define GL_STENCIL_BUFFER_BIT2_QCOM 0x00040000 +#define GL_STENCIL_BUFFER_BIT3_QCOM 0x00080000 +#define GL_STENCIL_BUFFER_BIT4_QCOM 0x00100000 +#define GL_STENCIL_BUFFER_BIT5_QCOM 0x00200000 +#define GL_STENCIL_BUFFER_BIT6_QCOM 0x00400000 +#define GL_STENCIL_BUFFER_BIT7_QCOM 0x00800000 +#define GL_MULTISAMPLE_BUFFER_BIT0_QCOM 0x01000000 +#define GL_MULTISAMPLE_BUFFER_BIT1_QCOM 0x02000000 +#define GL_MULTISAMPLE_BUFFER_BIT2_QCOM 0x04000000 +#define GL_MULTISAMPLE_BUFFER_BIT3_QCOM 0x08000000 +#define GL_MULTISAMPLE_BUFFER_BIT4_QCOM 0x10000000 +#define GL_MULTISAMPLE_BUFFER_BIT5_QCOM 0x20000000 +#define GL_MULTISAMPLE_BUFFER_BIT6_QCOM 0x40000000 +#define GL_MULTISAMPLE_BUFFER_BIT7_QCOM 0x80000000 +typedef void (GL_APIENTRYP PFNGLSTARTTILINGQCOMPROC) (GLuint x, GLuint y, GLuint width, GLuint height, GLbitfield preserveMask); +typedef void (GL_APIENTRYP PFNGLENDTILINGQCOMPROC) (GLbitfield preserveMask); +#ifdef GL_GLEXT_PROTOTYPES +GL_API void GL_APIENTRY glStartTilingQCOM (GLuint x, GLuint y, GLuint width, GLuint height, GLbitfield preserveMask); +GL_API void GL_APIENTRY glEndTilingQCOM (GLbitfield preserveMask); +#endif +#endif /* GL_QCOM_tiled_rendering */ + +#ifndef GL_QCOM_writeonly_rendering +#define GL_QCOM_writeonly_rendering 1 +#define GL_WRITEONLY_RENDERING_QCOM 0x8823 +#endif /* GL_QCOM_writeonly_rendering */ + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/sources/pvrsdk/Include/GLES/glplatform.h b/sources/pvrsdk/Include/GLES/glplatform.h new file mode 100755 index 00000000..16060a9a --- /dev/null +++ b/sources/pvrsdk/Include/GLES/glplatform.h @@ -0,0 +1,38 @@ +#ifndef __glplatform_h_ +#define __glplatform_h_ + +/* +** Copyright (c) 2017 The Khronos Group Inc. +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +/* Platform-specific types and definitions for OpenGL ES 1.X gl.h + * + * Adopters may modify khrplatform.h and this file to suit their platform. 
+ * Please contribute modifications back to Khronos as pull requests on the + * public github repository: + * https://github.com/KhronosGroup/OpenGL-Registry + */ + +#include <KHR/khrplatform.h> + +#ifndef GL_API +#define GL_API KHRONOS_APICALL +#endif + +#ifndef GL_APIENTRY +#define GL_APIENTRY KHRONOS_APIENTRY +#endif + +#endif /* __glplatform_h_ */ diff --git a/sources/pvrsdk/Include/GLES2/gl2.h b/sources/pvrsdk/Include/GLES2/gl2.h new file mode 100755 index 00000000..dabe010b --- /dev/null +++ b/sources/pvrsdk/Include/GLES2/gl2.h @@ -0,0 +1,675 @@ +#ifndef __gl2_h_ +#define __gl2_h_ 1 + +#ifdef __cplusplus +extern "C" { +#endif + +/* +** Copyright (c) 2013-2017 The Khronos Group Inc. +** +** Permission is hereby granted, free of charge, to any person obtaining a +** copy of this software and/or associated documentation files (the +** "Materials"), to deal in the Materials without restriction, including +** without limitation the rights to use, copy, modify, merge, publish, +** distribute, sublicense, and/or sell copies of the Materials, and to +** permit persons to whom the Materials are furnished to do so, subject to +** the following conditions: +** +** The above copyright notice and this permission notice shall be included +** in all copies or substantial portions of the Materials. +** +** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +** MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS. +*/ +/* +** This header is generated from the Khronos OpenGL / OpenGL ES XML +** API Registry.
The current version of the Registry, generator scripts +** used to make the header, and the header can be found at +** https://github.com/KhronosGroup/OpenGL-Registry +*/ + +#include "GLES2/gl2platform.h" + +#ifndef GL_APIENTRYP +#define GL_APIENTRYP GL_APIENTRY* +#endif + +#ifndef GL_GLES_PROTOTYPES +#define GL_GLES_PROTOTYPES 1 +#endif + +/* Generated on date 20170209 */ + +/* Generated C header for: + * API: gles2 + * Profile: common + * Versions considered: 2\.[0-9] + * Versions emitted: .* + * Default extensions included: None + * Additional extensions included: _nomatch_^ + * Extensions removed: _nomatch_^ + */ + +#ifndef GL_ES_VERSION_2_0 +#define GL_ES_VERSION_2_0 1 +#include "KHR/khrplatform.h" +typedef khronos_int8_t GLbyte; +typedef khronos_float_t GLclampf; +typedef khronos_int32_t GLfixed; +typedef short GLshort; +typedef unsigned short GLushort; +typedef void GLvoid; +typedef struct __GLsync *GLsync; +typedef khronos_int64_t GLint64; +typedef khronos_uint64_t GLuint64; +typedef unsigned int GLenum; +typedef unsigned int GLuint; +typedef char GLchar; +typedef khronos_float_t GLfloat; +typedef khronos_ssize_t GLsizeiptr; +typedef khronos_intptr_t GLintptr; +typedef unsigned int GLbitfield; +typedef int GLint; +typedef unsigned char GLboolean; +typedef int GLsizei; +typedef khronos_uint8_t GLubyte; +#define GL_DEPTH_BUFFER_BIT 0x00000100 +#define GL_STENCIL_BUFFER_BIT 0x00000400 +#define GL_COLOR_BUFFER_BIT 0x00004000 +#define GL_FALSE 0 +#define GL_TRUE 1 +#define GL_POINTS 0x0000 +#define GL_LINES 0x0001 +#define GL_LINE_LOOP 0x0002 +#define GL_LINE_STRIP 0x0003 +#define GL_TRIANGLES 0x0004 +#define GL_TRIANGLE_STRIP 0x0005 +#define GL_TRIANGLE_FAN 0x0006 +#define GL_ZERO 0 +#define GL_ONE 1 +#define GL_SRC_COLOR 0x0300 +#define GL_ONE_MINUS_SRC_COLOR 0x0301 +#define GL_SRC_ALPHA 0x0302 +#define GL_ONE_MINUS_SRC_ALPHA 0x0303 +#define GL_DST_ALPHA 0x0304 +#define GL_ONE_MINUS_DST_ALPHA 0x0305 +#define GL_DST_COLOR 0x0306 +#define GL_ONE_MINUS_DST_COLOR 0x0307 +#define GL_SRC_ALPHA_SATURATE 0x0308 +#define GL_FUNC_ADD 0x8006 +#define GL_BLEND_EQUATION 0x8009 +#define GL_BLEND_EQUATION_RGB 0x8009 +#define GL_BLEND_EQUATION_ALPHA 0x883D +#define GL_FUNC_SUBTRACT 0x800A +#define GL_FUNC_REVERSE_SUBTRACT 0x800B +#define GL_BLEND_DST_RGB 0x80C8 +#define GL_BLEND_SRC_RGB 0x80C9 +#define GL_BLEND_DST_ALPHA 0x80CA +#define GL_BLEND_SRC_ALPHA 0x80CB +#define GL_CONSTANT_COLOR 0x8001 +#define GL_ONE_MINUS_CONSTANT_COLOR 0x8002 +#define GL_CONSTANT_ALPHA 0x8003 +#define GL_ONE_MINUS_CONSTANT_ALPHA 0x8004 +#define GL_BLEND_COLOR 0x8005 +#define GL_ARRAY_BUFFER 0x8892 +#define GL_ELEMENT_ARRAY_BUFFER 0x8893 +#define GL_ARRAY_BUFFER_BINDING 0x8894 +#define GL_ELEMENT_ARRAY_BUFFER_BINDING 0x8895 +#define GL_STREAM_DRAW 0x88E0 +#define GL_STATIC_DRAW 0x88E4 +#define GL_DYNAMIC_DRAW 0x88E8 +#define GL_BUFFER_SIZE 0x8764 +#define GL_BUFFER_USAGE 0x8765 +#define GL_CURRENT_VERTEX_ATTRIB 0x8626 +#define GL_FRONT 0x0404 +#define GL_BACK 0x0405 +#define GL_FRONT_AND_BACK 0x0408 +#define GL_TEXTURE_2D 0x0DE1 +#define GL_CULL_FACE 0x0B44 +#define GL_BLEND 0x0BE2 +#define GL_DITHER 0x0BD0 +#define GL_STENCIL_TEST 0x0B90 +#define GL_DEPTH_TEST 0x0B71 +#define GL_SCISSOR_TEST 0x0C11 +#define GL_POLYGON_OFFSET_FILL 0x8037 +#define GL_SAMPLE_ALPHA_TO_COVERAGE 0x809E +#define GL_SAMPLE_COVERAGE 0x80A0 +#define GL_NO_ERROR 0 +#define GL_INVALID_ENUM 0x0500 +#define GL_INVALID_VALUE 0x0501 +#define GL_INVALID_OPERATION 0x0502 +#define GL_OUT_OF_MEMORY 0x0505 +#define GL_CW 0x0900 +#define GL_CCW 0x0901 
+#define GL_LINE_WIDTH 0x0B21 +#define GL_ALIASED_POINT_SIZE_RANGE 0x846D +#define GL_ALIASED_LINE_WIDTH_RANGE 0x846E +#define GL_CULL_FACE_MODE 0x0B45 +#define GL_FRONT_FACE 0x0B46 +#define GL_DEPTH_RANGE 0x0B70 +#define GL_DEPTH_WRITEMASK 0x0B72 +#define GL_DEPTH_CLEAR_VALUE 0x0B73 +#define GL_DEPTH_FUNC 0x0B74 +#define GL_STENCIL_CLEAR_VALUE 0x0B91 +#define GL_STENCIL_FUNC 0x0B92 +#define GL_STENCIL_FAIL 0x0B94 +#define GL_STENCIL_PASS_DEPTH_FAIL 0x0B95 +#define GL_STENCIL_PASS_DEPTH_PASS 0x0B96 +#define GL_STENCIL_REF 0x0B97 +#define GL_STENCIL_VALUE_MASK 0x0B93 +#define GL_STENCIL_WRITEMASK 0x0B98 +#define GL_STENCIL_BACK_FUNC 0x8800 +#define GL_STENCIL_BACK_FAIL 0x8801 +#define GL_STENCIL_BACK_PASS_DEPTH_FAIL 0x8802 +#define GL_STENCIL_BACK_PASS_DEPTH_PASS 0x8803 +#define GL_STENCIL_BACK_REF 0x8CA3 +#define GL_STENCIL_BACK_VALUE_MASK 0x8CA4 +#define GL_STENCIL_BACK_WRITEMASK 0x8CA5 +#define GL_VIEWPORT 0x0BA2 +#define GL_SCISSOR_BOX 0x0C10 +#define GL_COLOR_CLEAR_VALUE 0x0C22 +#define GL_COLOR_WRITEMASK 0x0C23 +#define GL_UNPACK_ALIGNMENT 0x0CF5 +#define GL_PACK_ALIGNMENT 0x0D05 +#define GL_MAX_TEXTURE_SIZE 0x0D33 +#define GL_MAX_VIEWPORT_DIMS 0x0D3A +#define GL_SUBPIXEL_BITS 0x0D50 +#define GL_RED_BITS 0x0D52 +#define GL_GREEN_BITS 0x0D53 +#define GL_BLUE_BITS 0x0D54 +#define GL_ALPHA_BITS 0x0D55 +#define GL_DEPTH_BITS 0x0D56 +#define GL_STENCIL_BITS 0x0D57 +#define GL_POLYGON_OFFSET_UNITS 0x2A00 +#define GL_POLYGON_OFFSET_FACTOR 0x8038 +#define GL_TEXTURE_BINDING_2D 0x8069 +#define GL_SAMPLE_BUFFERS 0x80A8 +#define GL_SAMPLES 0x80A9 +#define GL_SAMPLE_COVERAGE_VALUE 0x80AA +#define GL_SAMPLE_COVERAGE_INVERT 0x80AB +#define GL_NUM_COMPRESSED_TEXTURE_FORMATS 0x86A2 +#define GL_COMPRESSED_TEXTURE_FORMATS 0x86A3 +#define GL_DONT_CARE 0x1100 +#define GL_FASTEST 0x1101 +#define GL_NICEST 0x1102 +#define GL_GENERATE_MIPMAP_HINT 0x8192 +#define GL_BYTE 0x1400 +#define GL_UNSIGNED_BYTE 0x1401 +#define GL_SHORT 0x1402 +#define GL_UNSIGNED_SHORT 0x1403 +#define GL_INT 0x1404 +#define GL_UNSIGNED_INT 0x1405 +#define GL_FLOAT 0x1406 +#define GL_FIXED 0x140C +#define GL_DEPTH_COMPONENT 0x1902 +#define GL_ALPHA 0x1906 +#define GL_RGB 0x1907 +#define GL_RGBA 0x1908 +#define GL_LUMINANCE 0x1909 +#define GL_LUMINANCE_ALPHA 0x190A +#define GL_UNSIGNED_SHORT_4_4_4_4 0x8033 +#define GL_UNSIGNED_SHORT_5_5_5_1 0x8034 +#define GL_UNSIGNED_SHORT_5_6_5 0x8363 +#define GL_FRAGMENT_SHADER 0x8B30 +#define GL_VERTEX_SHADER 0x8B31 +#define GL_MAX_VERTEX_ATTRIBS 0x8869 +#define GL_MAX_VERTEX_UNIFORM_VECTORS 0x8DFB +#define GL_MAX_VARYING_VECTORS 0x8DFC +#define GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS 0x8B4D +#define GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS 0x8B4C +#define GL_MAX_TEXTURE_IMAGE_UNITS 0x8872 +#define GL_MAX_FRAGMENT_UNIFORM_VECTORS 0x8DFD +#define GL_SHADER_TYPE 0x8B4F +#define GL_DELETE_STATUS 0x8B80 +#define GL_LINK_STATUS 0x8B82 +#define GL_VALIDATE_STATUS 0x8B83 +#define GL_ATTACHED_SHADERS 0x8B85 +#define GL_ACTIVE_UNIFORMS 0x8B86 +#define GL_ACTIVE_UNIFORM_MAX_LENGTH 0x8B87 +#define GL_ACTIVE_ATTRIBUTES 0x8B89 +#define GL_ACTIVE_ATTRIBUTE_MAX_LENGTH 0x8B8A +#define GL_SHADING_LANGUAGE_VERSION 0x8B8C +#define GL_CURRENT_PROGRAM 0x8B8D +#define GL_NEVER 0x0200 +#define GL_LESS 0x0201 +#define GL_EQUAL 0x0202 +#define GL_LEQUAL 0x0203 +#define GL_GREATER 0x0204 +#define GL_NOTEQUAL 0x0205 +#define GL_GEQUAL 0x0206 +#define GL_ALWAYS 0x0207 +#define GL_KEEP 0x1E00 +#define GL_REPLACE 0x1E01 +#define GL_INCR 0x1E02 +#define GL_DECR 0x1E03 +#define GL_INVERT 0x150A +#define GL_INCR_WRAP 0x8507 +#define 
GL_DECR_WRAP 0x8508 +#define GL_VENDOR 0x1F00 +#define GL_RENDERER 0x1F01 +#define GL_VERSION 0x1F02 +#define GL_EXTENSIONS 0x1F03 +#define GL_NEAREST 0x2600 +#define GL_LINEAR 0x2601 +#define GL_NEAREST_MIPMAP_NEAREST 0x2700 +#define GL_LINEAR_MIPMAP_NEAREST 0x2701 +#define GL_NEAREST_MIPMAP_LINEAR 0x2702 +#define GL_LINEAR_MIPMAP_LINEAR 0x2703 +#define GL_TEXTURE_MAG_FILTER 0x2800 +#define GL_TEXTURE_MIN_FILTER 0x2801 +#define GL_TEXTURE_WRAP_S 0x2802 +#define GL_TEXTURE_WRAP_T 0x2803 +#define GL_TEXTURE 0x1702 +#define GL_TEXTURE_CUBE_MAP 0x8513 +#define GL_TEXTURE_BINDING_CUBE_MAP 0x8514 +#define GL_TEXTURE_CUBE_MAP_POSITIVE_X 0x8515 +#define GL_TEXTURE_CUBE_MAP_NEGATIVE_X 0x8516 +#define GL_TEXTURE_CUBE_MAP_POSITIVE_Y 0x8517 +#define GL_TEXTURE_CUBE_MAP_NEGATIVE_Y 0x8518 +#define GL_TEXTURE_CUBE_MAP_POSITIVE_Z 0x8519 +#define GL_TEXTURE_CUBE_MAP_NEGATIVE_Z 0x851A +#define GL_MAX_CUBE_MAP_TEXTURE_SIZE 0x851C +#define GL_TEXTURE0 0x84C0 +#define GL_TEXTURE1 0x84C1 +#define GL_TEXTURE2 0x84C2 +#define GL_TEXTURE3 0x84C3 +#define GL_TEXTURE4 0x84C4 +#define GL_TEXTURE5 0x84C5 +#define GL_TEXTURE6 0x84C6 +#define GL_TEXTURE7 0x84C7 +#define GL_TEXTURE8 0x84C8 +#define GL_TEXTURE9 0x84C9 +#define GL_TEXTURE10 0x84CA +#define GL_TEXTURE11 0x84CB +#define GL_TEXTURE12 0x84CC +#define GL_TEXTURE13 0x84CD +#define GL_TEXTURE14 0x84CE +#define GL_TEXTURE15 0x84CF +#define GL_TEXTURE16 0x84D0 +#define GL_TEXTURE17 0x84D1 +#define GL_TEXTURE18 0x84D2 +#define GL_TEXTURE19 0x84D3 +#define GL_TEXTURE20 0x84D4 +#define GL_TEXTURE21 0x84D5 +#define GL_TEXTURE22 0x84D6 +#define GL_TEXTURE23 0x84D7 +#define GL_TEXTURE24 0x84D8 +#define GL_TEXTURE25 0x84D9 +#define GL_TEXTURE26 0x84DA +#define GL_TEXTURE27 0x84DB +#define GL_TEXTURE28 0x84DC +#define GL_TEXTURE29 0x84DD +#define GL_TEXTURE30 0x84DE +#define GL_TEXTURE31 0x84DF +#define GL_ACTIVE_TEXTURE 0x84E0 +#define GL_REPEAT 0x2901 +#define GL_CLAMP_TO_EDGE 0x812F +#define GL_MIRRORED_REPEAT 0x8370 +#define GL_FLOAT_VEC2 0x8B50 +#define GL_FLOAT_VEC3 0x8B51 +#define GL_FLOAT_VEC4 0x8B52 +#define GL_INT_VEC2 0x8B53 +#define GL_INT_VEC3 0x8B54 +#define GL_INT_VEC4 0x8B55 +#define GL_BOOL 0x8B56 +#define GL_BOOL_VEC2 0x8B57 +#define GL_BOOL_VEC3 0x8B58 +#define GL_BOOL_VEC4 0x8B59 +#define GL_FLOAT_MAT2 0x8B5A +#define GL_FLOAT_MAT3 0x8B5B +#define GL_FLOAT_MAT4 0x8B5C +#define GL_SAMPLER_2D 0x8B5E +#define GL_SAMPLER_CUBE 0x8B60 +#define GL_VERTEX_ATTRIB_ARRAY_ENABLED 0x8622 +#define GL_VERTEX_ATTRIB_ARRAY_SIZE 0x8623 +#define GL_VERTEX_ATTRIB_ARRAY_STRIDE 0x8624 +#define GL_VERTEX_ATTRIB_ARRAY_TYPE 0x8625 +#define GL_VERTEX_ATTRIB_ARRAY_NORMALIZED 0x886A +#define GL_VERTEX_ATTRIB_ARRAY_POINTER 0x8645 +#define GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING 0x889F +#define GL_IMPLEMENTATION_COLOR_READ_TYPE 0x8B9A +#define GL_IMPLEMENTATION_COLOR_READ_FORMAT 0x8B9B +#define GL_COMPILE_STATUS 0x8B81 +#define GL_INFO_LOG_LENGTH 0x8B84 +#define GL_SHADER_SOURCE_LENGTH 0x8B88 +#define GL_SHADER_COMPILER 0x8DFA +#define GL_SHADER_BINARY_FORMATS 0x8DF8 +#define GL_NUM_SHADER_BINARY_FORMATS 0x8DF9 +#define GL_LOW_FLOAT 0x8DF0 +#define GL_MEDIUM_FLOAT 0x8DF1 +#define GL_HIGH_FLOAT 0x8DF2 +#define GL_LOW_INT 0x8DF3 +#define GL_MEDIUM_INT 0x8DF4 +#define GL_HIGH_INT 0x8DF5 +#define GL_FRAMEBUFFER 0x8D40 +#define GL_RENDERBUFFER 0x8D41 +#define GL_RGBA4 0x8056 +#define GL_RGB5_A1 0x8057 +#define GL_RGB565 0x8D62 +#define GL_DEPTH_COMPONENT16 0x81A5 +#define GL_STENCIL_INDEX8 0x8D48 +#define GL_RENDERBUFFER_WIDTH 0x8D42 +#define GL_RENDERBUFFER_HEIGHT 0x8D43 +#define 
GL_RENDERBUFFER_INTERNAL_FORMAT 0x8D44 +#define GL_RENDERBUFFER_RED_SIZE 0x8D50 +#define GL_RENDERBUFFER_GREEN_SIZE 0x8D51 +#define GL_RENDERBUFFER_BLUE_SIZE 0x8D52 +#define GL_RENDERBUFFER_ALPHA_SIZE 0x8D53 +#define GL_RENDERBUFFER_DEPTH_SIZE 0x8D54 +#define GL_RENDERBUFFER_STENCIL_SIZE 0x8D55 +#define GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE 0x8CD0 +#define GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME 0x8CD1 +#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL 0x8CD2 +#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE 0x8CD3 +#define GL_COLOR_ATTACHMENT0 0x8CE0 +#define GL_DEPTH_ATTACHMENT 0x8D00 +#define GL_STENCIL_ATTACHMENT 0x8D20 +#define GL_NONE 0 +#define GL_FRAMEBUFFER_COMPLETE 0x8CD5 +#define GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT 0x8CD6 +#define GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT 0x8CD7 +#define GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS 0x8CD9 +#define GL_FRAMEBUFFER_UNSUPPORTED 0x8CDD +#define GL_FRAMEBUFFER_BINDING 0x8CA6 +#define GL_RENDERBUFFER_BINDING 0x8CA7 +#define GL_MAX_RENDERBUFFER_SIZE 0x84E8 +#define GL_INVALID_FRAMEBUFFER_OPERATION 0x0506 +typedef void (GL_APIENTRYP PFNGLACTIVETEXTUREPROC) (GLenum texture); +typedef void (GL_APIENTRYP PFNGLATTACHSHADERPROC) (GLuint program, GLuint shader); +typedef void (GL_APIENTRYP PFNGLBINDATTRIBLOCATIONPROC) (GLuint program, GLuint index, const GLchar *name); +typedef void (GL_APIENTRYP PFNGLBINDBUFFERPROC) (GLenum target, GLuint buffer); +typedef void (GL_APIENTRYP PFNGLBINDFRAMEBUFFERPROC) (GLenum target, GLuint framebuffer); +typedef void (GL_APIENTRYP PFNGLBINDRENDERBUFFERPROC) (GLenum target, GLuint renderbuffer); +typedef void (GL_APIENTRYP PFNGLBINDTEXTUREPROC) (GLenum target, GLuint texture); +typedef void (GL_APIENTRYP PFNGLBLENDCOLORPROC) (GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha); +typedef void (GL_APIENTRYP PFNGLBLENDEQUATIONPROC) (GLenum mode); +typedef void (GL_APIENTRYP PFNGLBLENDEQUATIONSEPARATEPROC) (GLenum modeRGB, GLenum modeAlpha); +typedef void (GL_APIENTRYP PFNGLBLENDFUNCPROC) (GLenum sfactor, GLenum dfactor); +typedef void (GL_APIENTRYP PFNGLBLENDFUNCSEPARATEPROC) (GLenum sfactorRGB, GLenum dfactorRGB, GLenum sfactorAlpha, GLenum dfactorAlpha); +typedef void (GL_APIENTRYP PFNGLBUFFERDATAPROC) (GLenum target, GLsizeiptr size, const void *data, GLenum usage); +typedef void (GL_APIENTRYP PFNGLBUFFERSUBDATAPROC) (GLenum target, GLintptr offset, GLsizeiptr size, const void *data); +typedef GLenum (GL_APIENTRYP PFNGLCHECKFRAMEBUFFERSTATUSPROC) (GLenum target); +typedef void (GL_APIENTRYP PFNGLCLEARPROC) (GLbitfield mask); +typedef void (GL_APIENTRYP PFNGLCLEARCOLORPROC) (GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha); +typedef void (GL_APIENTRYP PFNGLCLEARDEPTHFPROC) (GLfloat d); +typedef void (GL_APIENTRYP PFNGLCLEARSTENCILPROC) (GLint s); +typedef void (GL_APIENTRYP PFNGLCOLORMASKPROC) (GLboolean red, GLboolean green, GLboolean blue, GLboolean alpha); +typedef void (GL_APIENTRYP PFNGLCOMPILESHADERPROC) (GLuint shader); +typedef void (GL_APIENTRYP PFNGLCOMPRESSEDTEXIMAGE2DPROC) (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLsizei imageSize, const void *data); +typedef void (GL_APIENTRYP PFNGLCOMPRESSEDTEXSUBIMAGE2DPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, const void *data); +typedef void (GL_APIENTRYP PFNGLCOPYTEXIMAGE2DPROC) (GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLsizei height, GLint border); +typedef void 
(GL_APIENTRYP PFNGLCOPYTEXSUBIMAGE2DPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height); +typedef GLuint (GL_APIENTRYP PFNGLCREATEPROGRAMPROC) (void); +typedef GLuint (GL_APIENTRYP PFNGLCREATESHADERPROC) (GLenum type); +typedef void (GL_APIENTRYP PFNGLCULLFACEPROC) (GLenum mode); +typedef void (GL_APIENTRYP PFNGLDELETEBUFFERSPROC) (GLsizei n, const GLuint *buffers); +typedef void (GL_APIENTRYP PFNGLDELETEFRAMEBUFFERSPROC) (GLsizei n, const GLuint *framebuffers); +typedef void (GL_APIENTRYP PFNGLDELETEPROGRAMPROC) (GLuint program); +typedef void (GL_APIENTRYP PFNGLDELETERENDERBUFFERSPROC) (GLsizei n, const GLuint *renderbuffers); +typedef void (GL_APIENTRYP PFNGLDELETESHADERPROC) (GLuint shader); +typedef void (GL_APIENTRYP PFNGLDELETETEXTURESPROC) (GLsizei n, const GLuint *textures); +typedef void (GL_APIENTRYP PFNGLDEPTHFUNCPROC) (GLenum func); +typedef void (GL_APIENTRYP PFNGLDEPTHMASKPROC) (GLboolean flag); +typedef void (GL_APIENTRYP PFNGLDEPTHRANGEFPROC) (GLfloat n, GLfloat f); +typedef void (GL_APIENTRYP PFNGLDETACHSHADERPROC) (GLuint program, GLuint shader); +typedef void (GL_APIENTRYP PFNGLDISABLEPROC) (GLenum cap); +typedef void (GL_APIENTRYP PFNGLDISABLEVERTEXATTRIBARRAYPROC) (GLuint index); +typedef void (GL_APIENTRYP PFNGLDRAWARRAYSPROC) (GLenum mode, GLint first, GLsizei count); +typedef void (GL_APIENTRYP PFNGLDRAWELEMENTSPROC) (GLenum mode, GLsizei count, GLenum type, const void *indices); +typedef void (GL_APIENTRYP PFNGLENABLEPROC) (GLenum cap); +typedef void (GL_APIENTRYP PFNGLENABLEVERTEXATTRIBARRAYPROC) (GLuint index); +typedef void (GL_APIENTRYP PFNGLFINISHPROC) (void); +typedef void (GL_APIENTRYP PFNGLFLUSHPROC) (void); +typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERRENDERBUFFERPROC) (GLenum target, GLenum attachment, GLenum renderbuffertarget, GLuint renderbuffer); +typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERTEXTURE2DPROC) (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level); +typedef void (GL_APIENTRYP PFNGLFRONTFACEPROC) (GLenum mode); +typedef void (GL_APIENTRYP PFNGLGENBUFFERSPROC) (GLsizei n, GLuint *buffers); +typedef void (GL_APIENTRYP PFNGLGENERATEMIPMAPPROC) (GLenum target); +typedef void (GL_APIENTRYP PFNGLGENFRAMEBUFFERSPROC) (GLsizei n, GLuint *framebuffers); +typedef void (GL_APIENTRYP PFNGLGENRENDERBUFFERSPROC) (GLsizei n, GLuint *renderbuffers); +typedef void (GL_APIENTRYP PFNGLGENTEXTURESPROC) (GLsizei n, GLuint *textures); +typedef void (GL_APIENTRYP PFNGLGETACTIVEATTRIBPROC) (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLint *size, GLenum *type, GLchar *name); +typedef void (GL_APIENTRYP PFNGLGETACTIVEUNIFORMPROC) (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLint *size, GLenum *type, GLchar *name); +typedef void (GL_APIENTRYP PFNGLGETATTACHEDSHADERSPROC) (GLuint program, GLsizei maxCount, GLsizei *count, GLuint *shaders); +typedef GLint (GL_APIENTRYP PFNGLGETATTRIBLOCATIONPROC) (GLuint program, const GLchar *name); +typedef void (GL_APIENTRYP PFNGLGETBOOLEANVPROC) (GLenum pname, GLboolean *data); +typedef void (GL_APIENTRYP PFNGLGETBUFFERPARAMETERIVPROC) (GLenum target, GLenum pname, GLint *params); +typedef GLenum (GL_APIENTRYP PFNGLGETERRORPROC) (void); +typedef void (GL_APIENTRYP PFNGLGETFLOATVPROC) (GLenum pname, GLfloat *data); +typedef void (GL_APIENTRYP PFNGLGETFRAMEBUFFERATTACHMENTPARAMETERIVPROC) (GLenum target, GLenum attachment, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETINTEGERVPROC) 
(GLenum pname, GLint *data); +typedef void (GL_APIENTRYP PFNGLGETPROGRAMIVPROC) (GLuint program, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETPROGRAMINFOLOGPROC) (GLuint program, GLsizei bufSize, GLsizei *length, GLchar *infoLog); +typedef void (GL_APIENTRYP PFNGLGETRENDERBUFFERPARAMETERIVPROC) (GLenum target, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETSHADERIVPROC) (GLuint shader, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETSHADERINFOLOGPROC) (GLuint shader, GLsizei bufSize, GLsizei *length, GLchar *infoLog); +typedef void (GL_APIENTRYP PFNGLGETSHADERPRECISIONFORMATPROC) (GLenum shadertype, GLenum precisiontype, GLint *range, GLint *precision); +typedef void (GL_APIENTRYP PFNGLGETSHADERSOURCEPROC) (GLuint shader, GLsizei bufSize, GLsizei *length, GLchar *source); +typedef const GLubyte *(GL_APIENTRYP PFNGLGETSTRINGPROC) (GLenum name); +typedef void (GL_APIENTRYP PFNGLGETTEXPARAMETERFVPROC) (GLenum target, GLenum pname, GLfloat *params); +typedef void (GL_APIENTRYP PFNGLGETTEXPARAMETERIVPROC) (GLenum target, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETUNIFORMFVPROC) (GLuint program, GLint location, GLfloat *params); +typedef void (GL_APIENTRYP PFNGLGETUNIFORMIVPROC) (GLuint program, GLint location, GLint *params); +typedef GLint (GL_APIENTRYP PFNGLGETUNIFORMLOCATIONPROC) (GLuint program, const GLchar *name); +typedef void (GL_APIENTRYP PFNGLGETVERTEXATTRIBFVPROC) (GLuint index, GLenum pname, GLfloat *params); +typedef void (GL_APIENTRYP PFNGLGETVERTEXATTRIBIVPROC) (GLuint index, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETVERTEXATTRIBPOINTERVPROC) (GLuint index, GLenum pname, void **pointer); +typedef void (GL_APIENTRYP PFNGLHINTPROC) (GLenum target, GLenum mode); +typedef GLboolean (GL_APIENTRYP PFNGLISBUFFERPROC) (GLuint buffer); +typedef GLboolean (GL_APIENTRYP PFNGLISENABLEDPROC) (GLenum cap); +typedef GLboolean (GL_APIENTRYP PFNGLISFRAMEBUFFERPROC) (GLuint framebuffer); +typedef GLboolean (GL_APIENTRYP PFNGLISPROGRAMPROC) (GLuint program); +typedef GLboolean (GL_APIENTRYP PFNGLISRENDERBUFFERPROC) (GLuint renderbuffer); +typedef GLboolean (GL_APIENTRYP PFNGLISSHADERPROC) (GLuint shader); +typedef GLboolean (GL_APIENTRYP PFNGLISTEXTUREPROC) (GLuint texture); +typedef void (GL_APIENTRYP PFNGLLINEWIDTHPROC) (GLfloat width); +typedef void (GL_APIENTRYP PFNGLLINKPROGRAMPROC) (GLuint program); +typedef void (GL_APIENTRYP PFNGLPIXELSTOREIPROC) (GLenum pname, GLint param); +typedef void (GL_APIENTRYP PFNGLPOLYGONOFFSETPROC) (GLfloat factor, GLfloat units); +typedef void (GL_APIENTRYP PFNGLREADPIXELSPROC) (GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, void *pixels); +typedef void (GL_APIENTRYP PFNGLRELEASESHADERCOMPILERPROC) (void); +typedef void (GL_APIENTRYP PFNGLRENDERBUFFERSTORAGEPROC) (GLenum target, GLenum internalformat, GLsizei width, GLsizei height); +typedef void (GL_APIENTRYP PFNGLSAMPLECOVERAGEPROC) (GLfloat value, GLboolean invert); +typedef void (GL_APIENTRYP PFNGLSCISSORPROC) (GLint x, GLint y, GLsizei width, GLsizei height); +typedef void (GL_APIENTRYP PFNGLSHADERBINARYPROC) (GLsizei count, const GLuint *shaders, GLenum binaryformat, const void *binary, GLsizei length); +typedef void (GL_APIENTRYP PFNGLSHADERSOURCEPROC) (GLuint shader, GLsizei count, const GLchar *const*string, const GLint *length); +typedef void (GL_APIENTRYP PFNGLSTENCILFUNCPROC) (GLenum func, GLint ref, GLuint mask); +typedef void (GL_APIENTRYP 
PFNGLSTENCILFUNCSEPARATEPROC) (GLenum face, GLenum func, GLint ref, GLuint mask); +typedef void (GL_APIENTRYP PFNGLSTENCILMASKPROC) (GLuint mask); +typedef void (GL_APIENTRYP PFNGLSTENCILMASKSEPARATEPROC) (GLenum face, GLuint mask); +typedef void (GL_APIENTRYP PFNGLSTENCILOPPROC) (GLenum fail, GLenum zfail, GLenum zpass); +typedef void (GL_APIENTRYP PFNGLSTENCILOPSEPARATEPROC) (GLenum face, GLenum sfail, GLenum dpfail, GLenum dppass); +typedef void (GL_APIENTRYP PFNGLTEXIMAGE2DPROC) (GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, const void *pixels); +typedef void (GL_APIENTRYP PFNGLTEXPARAMETERFPROC) (GLenum target, GLenum pname, GLfloat param); +typedef void (GL_APIENTRYP PFNGLTEXPARAMETERFVPROC) (GLenum target, GLenum pname, const GLfloat *params); +typedef void (GL_APIENTRYP PFNGLTEXPARAMETERIPROC) (GLenum target, GLenum pname, GLint param); +typedef void (GL_APIENTRYP PFNGLTEXPARAMETERIVPROC) (GLenum target, GLenum pname, const GLint *params); +typedef void (GL_APIENTRYP PFNGLTEXSUBIMAGE2DPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, const void *pixels); +typedef void (GL_APIENTRYP PFNGLUNIFORM1FPROC) (GLint location, GLfloat v0); +typedef void (GL_APIENTRYP PFNGLUNIFORM1FVPROC) (GLint location, GLsizei count, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM1IPROC) (GLint location, GLint v0); +typedef void (GL_APIENTRYP PFNGLUNIFORM1IVPROC) (GLint location, GLsizei count, const GLint *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM2FPROC) (GLint location, GLfloat v0, GLfloat v1); +typedef void (GL_APIENTRYP PFNGLUNIFORM2FVPROC) (GLint location, GLsizei count, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM2IPROC) (GLint location, GLint v0, GLint v1); +typedef void (GL_APIENTRYP PFNGLUNIFORM2IVPROC) (GLint location, GLsizei count, const GLint *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM3FPROC) (GLint location, GLfloat v0, GLfloat v1, GLfloat v2); +typedef void (GL_APIENTRYP PFNGLUNIFORM3FVPROC) (GLint location, GLsizei count, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM3IPROC) (GLint location, GLint v0, GLint v1, GLint v2); +typedef void (GL_APIENTRYP PFNGLUNIFORM3IVPROC) (GLint location, GLsizei count, const GLint *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM4FPROC) (GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3); +typedef void (GL_APIENTRYP PFNGLUNIFORM4FVPROC) (GLint location, GLsizei count, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM4IPROC) (GLint location, GLint v0, GLint v1, GLint v2, GLint v3); +typedef void (GL_APIENTRYP PFNGLUNIFORM4IVPROC) (GLint location, GLsizei count, const GLint *value); +typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX2FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX3FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX4FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUSEPROGRAMPROC) (GLuint program); +typedef void (GL_APIENTRYP PFNGLVALIDATEPROGRAMPROC) (GLuint program); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB1FPROC) (GLuint index, GLfloat x); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB1FVPROC) (GLuint index, const GLfloat *v); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB2FPROC) 
(GLuint index, GLfloat x, GLfloat y); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB2FVPROC) (GLuint index, const GLfloat *v); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB3FPROC) (GLuint index, GLfloat x, GLfloat y, GLfloat z); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB3FVPROC) (GLuint index, const GLfloat *v); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB4FPROC) (GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB4FVPROC) (GLuint index, const GLfloat *v); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBPOINTERPROC) (GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, const void *pointer); +typedef void (GL_APIENTRYP PFNGLVIEWPORTPROC) (GLint x, GLint y, GLsizei width, GLsizei height); +#if GL_GLES_PROTOTYPES +GL_APICALL void GL_APIENTRY glActiveTexture (GLenum texture); +GL_APICALL void GL_APIENTRY glAttachShader (GLuint program, GLuint shader); +GL_APICALL void GL_APIENTRY glBindAttribLocation (GLuint program, GLuint index, const GLchar *name); +GL_APICALL void GL_APIENTRY glBindBuffer (GLenum target, GLuint buffer); +GL_APICALL void GL_APIENTRY glBindFramebuffer (GLenum target, GLuint framebuffer); +GL_APICALL void GL_APIENTRY glBindRenderbuffer (GLenum target, GLuint renderbuffer); +GL_APICALL void GL_APIENTRY glBindTexture (GLenum target, GLuint texture); +GL_APICALL void GL_APIENTRY glBlendColor (GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha); +GL_APICALL void GL_APIENTRY glBlendEquation (GLenum mode); +GL_APICALL void GL_APIENTRY glBlendEquationSeparate (GLenum modeRGB, GLenum modeAlpha); +GL_APICALL void GL_APIENTRY glBlendFunc (GLenum sfactor, GLenum dfactor); +GL_APICALL void GL_APIENTRY glBlendFuncSeparate (GLenum sfactorRGB, GLenum dfactorRGB, GLenum sfactorAlpha, GLenum dfactorAlpha); +GL_APICALL void GL_APIENTRY glBufferData (GLenum target, GLsizeiptr size, const void *data, GLenum usage); +GL_APICALL void GL_APIENTRY glBufferSubData (GLenum target, GLintptr offset, GLsizeiptr size, const void *data); +GL_APICALL GLenum GL_APIENTRY glCheckFramebufferStatus (GLenum target); +GL_APICALL void GL_APIENTRY glClear (GLbitfield mask); +GL_APICALL void GL_APIENTRY glClearColor (GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha); +GL_APICALL void GL_APIENTRY glClearDepthf (GLfloat d); +GL_APICALL void GL_APIENTRY glClearStencil (GLint s); +GL_APICALL void GL_APIENTRY glColorMask (GLboolean red, GLboolean green, GLboolean blue, GLboolean alpha); +GL_APICALL void GL_APIENTRY glCompileShader (GLuint shader); +GL_APICALL void GL_APIENTRY glCompressedTexImage2D (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLsizei imageSize, const void *data); +GL_APICALL void GL_APIENTRY glCompressedTexSubImage2D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, const void *data); +GL_APICALL void GL_APIENTRY glCopyTexImage2D (GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLsizei height, GLint border); +GL_APICALL void GL_APIENTRY glCopyTexSubImage2D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height); +GL_APICALL GLuint GL_APIENTRY glCreateProgram (void); +GL_APICALL GLuint GL_APIENTRY glCreateShader (GLenum type); +GL_APICALL void GL_APIENTRY glCullFace (GLenum mode); +GL_APICALL void GL_APIENTRY glDeleteBuffers (GLsizei n, const GLuint *buffers); +GL_APICALL void GL_APIENTRY glDeleteFramebuffers 
(GLsizei n, const GLuint *framebuffers); +GL_APICALL void GL_APIENTRY glDeleteProgram (GLuint program); +GL_APICALL void GL_APIENTRY glDeleteRenderbuffers (GLsizei n, const GLuint *renderbuffers); +GL_APICALL void GL_APIENTRY glDeleteShader (GLuint shader); +GL_APICALL void GL_APIENTRY glDeleteTextures (GLsizei n, const GLuint *textures); +GL_APICALL void GL_APIENTRY glDepthFunc (GLenum func); +GL_APICALL void GL_APIENTRY glDepthMask (GLboolean flag); +GL_APICALL void GL_APIENTRY glDepthRangef (GLfloat n, GLfloat f); +GL_APICALL void GL_APIENTRY glDetachShader (GLuint program, GLuint shader); +GL_APICALL void GL_APIENTRY glDisable (GLenum cap); +GL_APICALL void GL_APIENTRY glDisableVertexAttribArray (GLuint index); +GL_APICALL void GL_APIENTRY glDrawArrays (GLenum mode, GLint first, GLsizei count); +GL_APICALL void GL_APIENTRY glDrawElements (GLenum mode, GLsizei count, GLenum type, const void *indices); +GL_APICALL void GL_APIENTRY glEnable (GLenum cap); +GL_APICALL void GL_APIENTRY glEnableVertexAttribArray (GLuint index); +GL_APICALL void GL_APIENTRY glFinish (void); +GL_APICALL void GL_APIENTRY glFlush (void); +GL_APICALL void GL_APIENTRY glFramebufferRenderbuffer (GLenum target, GLenum attachment, GLenum renderbuffertarget, GLuint renderbuffer); +GL_APICALL void GL_APIENTRY glFramebufferTexture2D (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level); +GL_APICALL void GL_APIENTRY glFrontFace (GLenum mode); +GL_APICALL void GL_APIENTRY glGenBuffers (GLsizei n, GLuint *buffers); +GL_APICALL void GL_APIENTRY glGenerateMipmap (GLenum target); +GL_APICALL void GL_APIENTRY glGenFramebuffers (GLsizei n, GLuint *framebuffers); +GL_APICALL void GL_APIENTRY glGenRenderbuffers (GLsizei n, GLuint *renderbuffers); +GL_APICALL void GL_APIENTRY glGenTextures (GLsizei n, GLuint *textures); +GL_APICALL void GL_APIENTRY glGetActiveAttrib (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLint *size, GLenum *type, GLchar *name); +GL_APICALL void GL_APIENTRY glGetActiveUniform (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLint *size, GLenum *type, GLchar *name); +GL_APICALL void GL_APIENTRY glGetAttachedShaders (GLuint program, GLsizei maxCount, GLsizei *count, GLuint *shaders); +GL_APICALL GLint GL_APIENTRY glGetAttribLocation (GLuint program, const GLchar *name); +GL_APICALL void GL_APIENTRY glGetBooleanv (GLenum pname, GLboolean *data); +GL_APICALL void GL_APIENTRY glGetBufferParameteriv (GLenum target, GLenum pname, GLint *params); +GL_APICALL GLenum GL_APIENTRY glGetError (void); +GL_APICALL void GL_APIENTRY glGetFloatv (GLenum pname, GLfloat *data); +GL_APICALL void GL_APIENTRY glGetFramebufferAttachmentParameteriv (GLenum target, GLenum attachment, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetIntegerv (GLenum pname, GLint *data); +GL_APICALL void GL_APIENTRY glGetProgramiv (GLuint program, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetProgramInfoLog (GLuint program, GLsizei bufSize, GLsizei *length, GLchar *infoLog); +GL_APICALL void GL_APIENTRY glGetRenderbufferParameteriv (GLenum target, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetShaderiv (GLuint shader, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetShaderInfoLog (GLuint shader, GLsizei bufSize, GLsizei *length, GLchar *infoLog); +GL_APICALL void GL_APIENTRY glGetShaderPrecisionFormat (GLenum shadertype, GLenum precisiontype, GLint *range, GLint *precision); +GL_APICALL void GL_APIENTRY glGetShaderSource 
(GLuint shader, GLsizei bufSize, GLsizei *length, GLchar *source); +GL_APICALL const GLubyte *GL_APIENTRY glGetString (GLenum name); +GL_APICALL void GL_APIENTRY glGetTexParameterfv (GLenum target, GLenum pname, GLfloat *params); +GL_APICALL void GL_APIENTRY glGetTexParameteriv (GLenum target, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetUniformfv (GLuint program, GLint location, GLfloat *params); +GL_APICALL void GL_APIENTRY glGetUniformiv (GLuint program, GLint location, GLint *params); +GL_APICALL GLint GL_APIENTRY glGetUniformLocation (GLuint program, const GLchar *name); +GL_APICALL void GL_APIENTRY glGetVertexAttribfv (GLuint index, GLenum pname, GLfloat *params); +GL_APICALL void GL_APIENTRY glGetVertexAttribiv (GLuint index, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetVertexAttribPointerv (GLuint index, GLenum pname, void **pointer); +GL_APICALL void GL_APIENTRY glHint (GLenum target, GLenum mode); +GL_APICALL GLboolean GL_APIENTRY glIsBuffer (GLuint buffer); +GL_APICALL GLboolean GL_APIENTRY glIsEnabled (GLenum cap); +GL_APICALL GLboolean GL_APIENTRY glIsFramebuffer (GLuint framebuffer); +GL_APICALL GLboolean GL_APIENTRY glIsProgram (GLuint program); +GL_APICALL GLboolean GL_APIENTRY glIsRenderbuffer (GLuint renderbuffer); +GL_APICALL GLboolean GL_APIENTRY glIsShader (GLuint shader); +GL_APICALL GLboolean GL_APIENTRY glIsTexture (GLuint texture); +GL_APICALL void GL_APIENTRY glLineWidth (GLfloat width); +GL_APICALL void GL_APIENTRY glLinkProgram (GLuint program); +GL_APICALL void GL_APIENTRY glPixelStorei (GLenum pname, GLint param); +GL_APICALL void GL_APIENTRY glPolygonOffset (GLfloat factor, GLfloat units); +GL_APICALL void GL_APIENTRY glReadPixels (GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, void *pixels); +GL_APICALL void GL_APIENTRY glReleaseShaderCompiler (void); +GL_APICALL void GL_APIENTRY glRenderbufferStorage (GLenum target, GLenum internalformat, GLsizei width, GLsizei height); +GL_APICALL void GL_APIENTRY glSampleCoverage (GLfloat value, GLboolean invert); +GL_APICALL void GL_APIENTRY glScissor (GLint x, GLint y, GLsizei width, GLsizei height); +GL_APICALL void GL_APIENTRY glShaderBinary (GLsizei count, const GLuint *shaders, GLenum binaryformat, const void *binary, GLsizei length); +GL_APICALL void GL_APIENTRY glShaderSource (GLuint shader, GLsizei count, const GLchar *const*string, const GLint *length); +GL_APICALL void GL_APIENTRY glStencilFunc (GLenum func, GLint ref, GLuint mask); +GL_APICALL void GL_APIENTRY glStencilFuncSeparate (GLenum face, GLenum func, GLint ref, GLuint mask); +GL_APICALL void GL_APIENTRY glStencilMask (GLuint mask); +GL_APICALL void GL_APIENTRY glStencilMaskSeparate (GLenum face, GLuint mask); +GL_APICALL void GL_APIENTRY glStencilOp (GLenum fail, GLenum zfail, GLenum zpass); +GL_APICALL void GL_APIENTRY glStencilOpSeparate (GLenum face, GLenum sfail, GLenum dpfail, GLenum dppass); +GL_APICALL void GL_APIENTRY glTexImage2D (GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, const void *pixels); +GL_APICALL void GL_APIENTRY glTexParameterf (GLenum target, GLenum pname, GLfloat param); +GL_APICALL void GL_APIENTRY glTexParameterfv (GLenum target, GLenum pname, const GLfloat *params); +GL_APICALL void GL_APIENTRY glTexParameteri (GLenum target, GLenum pname, GLint param); +GL_APICALL void GL_APIENTRY glTexParameteriv (GLenum target, GLenum pname, const GLint *params); +GL_APICALL void GL_APIENTRY 
glTexSubImage2D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, const void *pixels); +GL_APICALL void GL_APIENTRY glUniform1f (GLint location, GLfloat v0); +GL_APICALL void GL_APIENTRY glUniform1fv (GLint location, GLsizei count, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniform1i (GLint location, GLint v0); +GL_APICALL void GL_APIENTRY glUniform1iv (GLint location, GLsizei count, const GLint *value); +GL_APICALL void GL_APIENTRY glUniform2f (GLint location, GLfloat v0, GLfloat v1); +GL_APICALL void GL_APIENTRY glUniform2fv (GLint location, GLsizei count, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniform2i (GLint location, GLint v0, GLint v1); +GL_APICALL void GL_APIENTRY glUniform2iv (GLint location, GLsizei count, const GLint *value); +GL_APICALL void GL_APIENTRY glUniform3f (GLint location, GLfloat v0, GLfloat v1, GLfloat v2); +GL_APICALL void GL_APIENTRY glUniform3fv (GLint location, GLsizei count, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniform3i (GLint location, GLint v0, GLint v1, GLint v2); +GL_APICALL void GL_APIENTRY glUniform3iv (GLint location, GLsizei count, const GLint *value); +GL_APICALL void GL_APIENTRY glUniform4f (GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3); +GL_APICALL void GL_APIENTRY glUniform4fv (GLint location, GLsizei count, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniform4i (GLint location, GLint v0, GLint v1, GLint v2, GLint v3); +GL_APICALL void GL_APIENTRY glUniform4iv (GLint location, GLsizei count, const GLint *value); +GL_APICALL void GL_APIENTRY glUniformMatrix2fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniformMatrix3fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniformMatrix4fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUseProgram (GLuint program); +GL_APICALL void GL_APIENTRY glValidateProgram (GLuint program); +GL_APICALL void GL_APIENTRY glVertexAttrib1f (GLuint index, GLfloat x); +GL_APICALL void GL_APIENTRY glVertexAttrib1fv (GLuint index, const GLfloat *v); +GL_APICALL void GL_APIENTRY glVertexAttrib2f (GLuint index, GLfloat x, GLfloat y); +GL_APICALL void GL_APIENTRY glVertexAttrib2fv (GLuint index, const GLfloat *v); +GL_APICALL void GL_APIENTRY glVertexAttrib3f (GLuint index, GLfloat x, GLfloat y, GLfloat z); +GL_APICALL void GL_APIENTRY glVertexAttrib3fv (GLuint index, const GLfloat *v); +GL_APICALL void GL_APIENTRY glVertexAttrib4f (GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w); +GL_APICALL void GL_APIENTRY glVertexAttrib4fv (GLuint index, const GLfloat *v); +GL_APICALL void GL_APIENTRY glVertexAttribPointer (GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, const void *pointer); +GL_APICALL void GL_APIENTRY glViewport (GLint x, GLint y, GLsizei width, GLsizei height); +#endif +#endif /* GL_ES_VERSION_2_0 */ + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/sources/pvrsdk/Include/GLES2/gl2ext.h b/sources/pvrsdk/Include/GLES2/gl2ext.h new file mode 100755 index 00000000..2826a495 --- /dev/null +++ b/sources/pvrsdk/Include/GLES2/gl2ext.h @@ -0,0 +1,3445 @@ +#ifndef __gl2ext_h_ +#define __gl2ext_h_ 1 + +#ifdef __cplusplus +extern "C" { +#endif + +/* +** Copyright (c) 2013-2017 The Khronos Group Inc. 
+** +** Permission is hereby granted, free of charge, to any person obtaining a +** copy of this software and/or associated documentation files (the +** "Materials"), to deal in the Materials without restriction, including +** without limitation the rights to use, copy, modify, merge, publish, +** distribute, sublicense, and/or sell copies of the Materials, and to +** permit persons to whom the Materials are furnished to do so, subject to +** the following conditions: +** +** The above copyright notice and this permission notice shall be included +** in all copies or substantial portions of the Materials. +** +** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +** MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS. +*/ +/* +** This header is generated from the Khronos OpenGL / OpenGL ES XML +** API Registry. The current version of the Registry, generator scripts +** used to make the header, and the header can be found at +** https://github.com/KhronosGroup/OpenGL-Registry +*/ + +#ifndef GL_APIENTRYP +#define GL_APIENTRYP GL_APIENTRY* +#endif + +/* Generated on date 20170628 */ + +/* Generated C header for: + * API: gles2 + * Profile: common + * Versions considered: 2\.[0-9] + * Versions emitted: _nomatch_^ + * Default extensions included: gles2 + * Additional extensions included: _nomatch_^ + * Extensions removed: _nomatch_^ + */ + +#ifndef GL_KHR_blend_equation_advanced +#define GL_KHR_blend_equation_advanced 1 +#define GL_MULTIPLY_KHR 0x9294 +#define GL_SCREEN_KHR 0x9295 +#define GL_OVERLAY_KHR 0x9296 +#define GL_DARKEN_KHR 0x9297 +#define GL_LIGHTEN_KHR 0x9298 +#define GL_COLORDODGE_KHR 0x9299 +#define GL_COLORBURN_KHR 0x929A +#define GL_HARDLIGHT_KHR 0x929B +#define GL_SOFTLIGHT_KHR 0x929C +#define GL_DIFFERENCE_KHR 0x929E +#define GL_EXCLUSION_KHR 0x92A0 +#define GL_HSL_HUE_KHR 0x92AD +#define GL_HSL_SATURATION_KHR 0x92AE +#define GL_HSL_COLOR_KHR 0x92AF +#define GL_HSL_LUMINOSITY_KHR 0x92B0 +typedef void (GL_APIENTRYP PFNGLBLENDBARRIERKHRPROC) (void); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glBlendBarrierKHR (void); +#endif +#endif /* GL_KHR_blend_equation_advanced */ + +#ifndef GL_KHR_blend_equation_advanced_coherent +#define GL_KHR_blend_equation_advanced_coherent 1 +#define GL_BLEND_ADVANCED_COHERENT_KHR 0x9285 +#endif /* GL_KHR_blend_equation_advanced_coherent */ + +#ifndef GL_KHR_context_flush_control +#define GL_KHR_context_flush_control 1 +#define GL_CONTEXT_RELEASE_BEHAVIOR_KHR 0x82FB +#define GL_CONTEXT_RELEASE_BEHAVIOR_FLUSH_KHR 0x82FC +#endif /* GL_KHR_context_flush_control */ + +#ifndef GL_KHR_debug +#define GL_KHR_debug 1 +typedef void (GL_APIENTRY *GLDEBUGPROCKHR)(GLenum source,GLenum type,GLuint id,GLenum severity,GLsizei length,const GLchar *message,const void *userParam); +#define GL_SAMPLER 0x82E6 +#define GL_DEBUG_OUTPUT_SYNCHRONOUS_KHR 0x8242 +#define GL_DEBUG_NEXT_LOGGED_MESSAGE_LENGTH_KHR 0x8243 +#define GL_DEBUG_CALLBACK_FUNCTION_KHR 0x8244 +#define GL_DEBUG_CALLBACK_USER_PARAM_KHR 0x8245 +#define GL_DEBUG_SOURCE_API_KHR 0x8246 +#define GL_DEBUG_SOURCE_WINDOW_SYSTEM_KHR 0x8247 +#define GL_DEBUG_SOURCE_SHADER_COMPILER_KHR 0x8248 +#define 
GL_DEBUG_SOURCE_THIRD_PARTY_KHR 0x8249 +#define GL_DEBUG_SOURCE_APPLICATION_KHR 0x824A +#define GL_DEBUG_SOURCE_OTHER_KHR 0x824B +#define GL_DEBUG_TYPE_ERROR_KHR 0x824C +#define GL_DEBUG_TYPE_DEPRECATED_BEHAVIOR_KHR 0x824D +#define GL_DEBUG_TYPE_UNDEFINED_BEHAVIOR_KHR 0x824E +#define GL_DEBUG_TYPE_PORTABILITY_KHR 0x824F +#define GL_DEBUG_TYPE_PERFORMANCE_KHR 0x8250 +#define GL_DEBUG_TYPE_OTHER_KHR 0x8251 +#define GL_DEBUG_TYPE_MARKER_KHR 0x8268 +#define GL_DEBUG_TYPE_PUSH_GROUP_KHR 0x8269 +#define GL_DEBUG_TYPE_POP_GROUP_KHR 0x826A +#define GL_DEBUG_SEVERITY_NOTIFICATION_KHR 0x826B +#define GL_MAX_DEBUG_GROUP_STACK_DEPTH_KHR 0x826C +#define GL_DEBUG_GROUP_STACK_DEPTH_KHR 0x826D +#define GL_BUFFER_KHR 0x82E0 +#define GL_SHADER_KHR 0x82E1 +#define GL_PROGRAM_KHR 0x82E2 +#define GL_VERTEX_ARRAY_KHR 0x8074 +#define GL_QUERY_KHR 0x82E3 +#define GL_PROGRAM_PIPELINE_KHR 0x82E4 +#define GL_SAMPLER_KHR 0x82E6 +#define GL_MAX_LABEL_LENGTH_KHR 0x82E8 +#define GL_MAX_DEBUG_MESSAGE_LENGTH_KHR 0x9143 +#define GL_MAX_DEBUG_LOGGED_MESSAGES_KHR 0x9144 +#define GL_DEBUG_LOGGED_MESSAGES_KHR 0x9145 +#define GL_DEBUG_SEVERITY_HIGH_KHR 0x9146 +#define GL_DEBUG_SEVERITY_MEDIUM_KHR 0x9147 +#define GL_DEBUG_SEVERITY_LOW_KHR 0x9148 +#define GL_DEBUG_OUTPUT_KHR 0x92E0 +#define GL_CONTEXT_FLAG_DEBUG_BIT_KHR 0x00000002 +#define GL_STACK_OVERFLOW_KHR 0x0503 +#define GL_STACK_UNDERFLOW_KHR 0x0504 +typedef void (GL_APIENTRYP PFNGLDEBUGMESSAGECONTROLKHRPROC) (GLenum source, GLenum type, GLenum severity, GLsizei count, const GLuint *ids, GLboolean enabled); +typedef void (GL_APIENTRYP PFNGLDEBUGMESSAGEINSERTKHRPROC) (GLenum source, GLenum type, GLuint id, GLenum severity, GLsizei length, const GLchar *buf); +typedef void (GL_APIENTRYP PFNGLDEBUGMESSAGECALLBACKKHRPROC) (GLDEBUGPROCKHR callback, const void *userParam); +typedef GLuint (GL_APIENTRYP PFNGLGETDEBUGMESSAGELOGKHRPROC) (GLuint count, GLsizei bufSize, GLenum *sources, GLenum *types, GLuint *ids, GLenum *severities, GLsizei *lengths, GLchar *messageLog); +typedef void (GL_APIENTRYP PFNGLPUSHDEBUGGROUPKHRPROC) (GLenum source, GLuint id, GLsizei length, const GLchar *message); +typedef void (GL_APIENTRYP PFNGLPOPDEBUGGROUPKHRPROC) (void); +typedef void (GL_APIENTRYP PFNGLOBJECTLABELKHRPROC) (GLenum identifier, GLuint name, GLsizei length, const GLchar *label); +typedef void (GL_APIENTRYP PFNGLGETOBJECTLABELKHRPROC) (GLenum identifier, GLuint name, GLsizei bufSize, GLsizei *length, GLchar *label); +typedef void (GL_APIENTRYP PFNGLOBJECTPTRLABELKHRPROC) (const void *ptr, GLsizei length, const GLchar *label); +typedef void (GL_APIENTRYP PFNGLGETOBJECTPTRLABELKHRPROC) (const void *ptr, GLsizei bufSize, GLsizei *length, GLchar *label); +typedef void (GL_APIENTRYP PFNGLGETPOINTERVKHRPROC) (GLenum pname, void **params); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glDebugMessageControlKHR (GLenum source, GLenum type, GLenum severity, GLsizei count, const GLuint *ids, GLboolean enabled); +GL_APICALL void GL_APIENTRY glDebugMessageInsertKHR (GLenum source, GLenum type, GLuint id, GLenum severity, GLsizei length, const GLchar *buf); +GL_APICALL void GL_APIENTRY glDebugMessageCallbackKHR (GLDEBUGPROCKHR callback, const void *userParam); +GL_APICALL GLuint GL_APIENTRY glGetDebugMessageLogKHR (GLuint count, GLsizei bufSize, GLenum *sources, GLenum *types, GLuint *ids, GLenum *severities, GLsizei *lengths, GLchar *messageLog); +GL_APICALL void GL_APIENTRY glPushDebugGroupKHR (GLenum source, GLuint id, GLsizei length, const GLchar *message); +GL_APICALL void GL_APIENTRY 
glPopDebugGroupKHR (void); +GL_APICALL void GL_APIENTRY glObjectLabelKHR (GLenum identifier, GLuint name, GLsizei length, const GLchar *label); +GL_APICALL void GL_APIENTRY glGetObjectLabelKHR (GLenum identifier, GLuint name, GLsizei bufSize, GLsizei *length, GLchar *label); +GL_APICALL void GL_APIENTRY glObjectPtrLabelKHR (const void *ptr, GLsizei length, const GLchar *label); +GL_APICALL void GL_APIENTRY glGetObjectPtrLabelKHR (const void *ptr, GLsizei bufSize, GLsizei *length, GLchar *label); +GL_APICALL void GL_APIENTRY glGetPointervKHR (GLenum pname, void **params); +#endif +#endif /* GL_KHR_debug */ + +#ifndef GL_KHR_no_error +#define GL_KHR_no_error 1 +#define GL_CONTEXT_FLAG_NO_ERROR_BIT_KHR 0x00000008 +#endif /* GL_KHR_no_error */ + +#ifndef GL_KHR_robust_buffer_access_behavior +#define GL_KHR_robust_buffer_access_behavior 1 +#endif /* GL_KHR_robust_buffer_access_behavior */ + +#ifndef GL_KHR_robustness +#define GL_KHR_robustness 1 +#define GL_CONTEXT_ROBUST_ACCESS_KHR 0x90F3 +#define GL_LOSE_CONTEXT_ON_RESET_KHR 0x8252 +#define GL_GUILTY_CONTEXT_RESET_KHR 0x8253 +#define GL_INNOCENT_CONTEXT_RESET_KHR 0x8254 +#define GL_UNKNOWN_CONTEXT_RESET_KHR 0x8255 +#define GL_RESET_NOTIFICATION_STRATEGY_KHR 0x8256 +#define GL_NO_RESET_NOTIFICATION_KHR 0x8261 +#define GL_CONTEXT_LOST_KHR 0x0507 +typedef GLenum (GL_APIENTRYP PFNGLGETGRAPHICSRESETSTATUSKHRPROC) (void); +typedef void (GL_APIENTRYP PFNGLREADNPIXELSKHRPROC) (GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, GLsizei bufSize, void *data); +typedef void (GL_APIENTRYP PFNGLGETNUNIFORMFVKHRPROC) (GLuint program, GLint location, GLsizei bufSize, GLfloat *params); +typedef void (GL_APIENTRYP PFNGLGETNUNIFORMIVKHRPROC) (GLuint program, GLint location, GLsizei bufSize, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETNUNIFORMUIVKHRPROC) (GLuint program, GLint location, GLsizei bufSize, GLuint *params); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL GLenum GL_APIENTRY glGetGraphicsResetStatusKHR (void); +GL_APICALL void GL_APIENTRY glReadnPixelsKHR (GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, GLsizei bufSize, void *data); +GL_APICALL void GL_APIENTRY glGetnUniformfvKHR (GLuint program, GLint location, GLsizei bufSize, GLfloat *params); +GL_APICALL void GL_APIENTRY glGetnUniformivKHR (GLuint program, GLint location, GLsizei bufSize, GLint *params); +GL_APICALL void GL_APIENTRY glGetnUniformuivKHR (GLuint program, GLint location, GLsizei bufSize, GLuint *params); +#endif +#endif /* GL_KHR_robustness */ + +#ifndef GL_KHR_texture_compression_astc_hdr +#define GL_KHR_texture_compression_astc_hdr 1 +#define GL_COMPRESSED_RGBA_ASTC_4x4_KHR 0x93B0 +#define GL_COMPRESSED_RGBA_ASTC_5x4_KHR 0x93B1 +#define GL_COMPRESSED_RGBA_ASTC_5x5_KHR 0x93B2 +#define GL_COMPRESSED_RGBA_ASTC_6x5_KHR 0x93B3 +#define GL_COMPRESSED_RGBA_ASTC_6x6_KHR 0x93B4 +#define GL_COMPRESSED_RGBA_ASTC_8x5_KHR 0x93B5 +#define GL_COMPRESSED_RGBA_ASTC_8x6_KHR 0x93B6 +#define GL_COMPRESSED_RGBA_ASTC_8x8_KHR 0x93B7 +#define GL_COMPRESSED_RGBA_ASTC_10x5_KHR 0x93B8 +#define GL_COMPRESSED_RGBA_ASTC_10x6_KHR 0x93B9 +#define GL_COMPRESSED_RGBA_ASTC_10x8_KHR 0x93BA +#define GL_COMPRESSED_RGBA_ASTC_10x10_KHR 0x93BB +#define GL_COMPRESSED_RGBA_ASTC_12x10_KHR 0x93BC +#define GL_COMPRESSED_RGBA_ASTC_12x12_KHR 0x93BD +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4_KHR 0x93D0 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x4_KHR 0x93D1 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5_KHR 0x93D2 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x5_KHR 0x93D3 
+#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6_KHR 0x93D4 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x5_KHR 0x93D5 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x6_KHR 0x93D6 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x8_KHR 0x93D7 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x5_KHR 0x93D8 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x6_KHR 0x93D9 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x8_KHR 0x93DA +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x10_KHR 0x93DB +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x10_KHR 0x93DC +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x12_KHR 0x93DD +#endif /* GL_KHR_texture_compression_astc_hdr */ + +#ifndef GL_KHR_texture_compression_astc_ldr +#define GL_KHR_texture_compression_astc_ldr 1 +#endif /* GL_KHR_texture_compression_astc_ldr */ + +#ifndef GL_KHR_texture_compression_astc_sliced_3d +#define GL_KHR_texture_compression_astc_sliced_3d 1 +#endif /* GL_KHR_texture_compression_astc_sliced_3d */ + +#ifndef GL_OES_EGL_image +#define GL_OES_EGL_image 1 +typedef void *GLeglImageOES; +typedef void (GL_APIENTRYP PFNGLEGLIMAGETARGETTEXTURE2DOESPROC) (GLenum target, GLeglImageOES image); +typedef void (GL_APIENTRYP PFNGLEGLIMAGETARGETRENDERBUFFERSTORAGEOESPROC) (GLenum target, GLeglImageOES image); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glEGLImageTargetTexture2DOES (GLenum target, GLeglImageOES image); +GL_APICALL void GL_APIENTRY glEGLImageTargetRenderbufferStorageOES (GLenum target, GLeglImageOES image); +#endif +#endif /* GL_OES_EGL_image */ + +#ifndef GL_OES_EGL_image_external +#define GL_OES_EGL_image_external 1 +#define GL_TEXTURE_EXTERNAL_OES 0x8D65 +#define GL_TEXTURE_BINDING_EXTERNAL_OES 0x8D67 +#define GL_REQUIRED_TEXTURE_IMAGE_UNITS_OES 0x8D68 +#define GL_SAMPLER_EXTERNAL_OES 0x8D66 +#endif /* GL_OES_EGL_image_external */ + +#ifndef GL_OES_EGL_image_external_essl3 +#define GL_OES_EGL_image_external_essl3 1 +#endif /* GL_OES_EGL_image_external_essl3 */ + +#ifndef GL_OES_compressed_ETC1_RGB8_sub_texture +#define GL_OES_compressed_ETC1_RGB8_sub_texture 1 +#endif /* GL_OES_compressed_ETC1_RGB8_sub_texture */ + +#ifndef GL_OES_compressed_ETC1_RGB8_texture +#define GL_OES_compressed_ETC1_RGB8_texture 1 +#define GL_ETC1_RGB8_OES 0x8D64 +#endif /* GL_OES_compressed_ETC1_RGB8_texture */ + +#ifndef GL_OES_compressed_paletted_texture +#define GL_OES_compressed_paletted_texture 1 +#define GL_PALETTE4_RGB8_OES 0x8B90 +#define GL_PALETTE4_RGBA8_OES 0x8B91 +#define GL_PALETTE4_R5_G6_B5_OES 0x8B92 +#define GL_PALETTE4_RGBA4_OES 0x8B93 +#define GL_PALETTE4_RGB5_A1_OES 0x8B94 +#define GL_PALETTE8_RGB8_OES 0x8B95 +#define GL_PALETTE8_RGBA8_OES 0x8B96 +#define GL_PALETTE8_R5_G6_B5_OES 0x8B97 +#define GL_PALETTE8_RGBA4_OES 0x8B98 +#define GL_PALETTE8_RGB5_A1_OES 0x8B99 +#endif /* GL_OES_compressed_paletted_texture */ + +#ifndef GL_OES_copy_image +#define GL_OES_copy_image 1 +typedef void (GL_APIENTRYP PFNGLCOPYIMAGESUBDATAOESPROC) (GLuint srcName, GLenum srcTarget, GLint srcLevel, GLint srcX, GLint srcY, GLint srcZ, GLuint dstName, GLenum dstTarget, GLint dstLevel, GLint dstX, GLint dstY, GLint dstZ, GLsizei srcWidth, GLsizei srcHeight, GLsizei srcDepth); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glCopyImageSubDataOES (GLuint srcName, GLenum srcTarget, GLint srcLevel, GLint srcX, GLint srcY, GLint srcZ, GLuint dstName, GLenum dstTarget, GLint dstLevel, GLint dstX, GLint dstY, GLint dstZ, GLsizei srcWidth, GLsizei srcHeight, GLsizei srcDepth); +#endif +#endif /* GL_OES_copy_image */ + +#ifndef GL_OES_depth24 +#define GL_OES_depth24 1 +#define 
GL_DEPTH_COMPONENT24_OES 0x81A6 +#endif /* GL_OES_depth24 */ + +#ifndef GL_OES_depth32 +#define GL_OES_depth32 1 +#define GL_DEPTH_COMPONENT32_OES 0x81A7 +#endif /* GL_OES_depth32 */ + +#ifndef GL_OES_depth_texture +#define GL_OES_depth_texture 1 +#endif /* GL_OES_depth_texture */ + +#ifndef GL_OES_draw_buffers_indexed +#define GL_OES_draw_buffers_indexed 1 +#define GL_MIN 0x8007 +#define GL_MAX 0x8008 +typedef void (GL_APIENTRYP PFNGLENABLEIOESPROC) (GLenum target, GLuint index); +typedef void (GL_APIENTRYP PFNGLDISABLEIOESPROC) (GLenum target, GLuint index); +typedef void (GL_APIENTRYP PFNGLBLENDEQUATIONIOESPROC) (GLuint buf, GLenum mode); +typedef void (GL_APIENTRYP PFNGLBLENDEQUATIONSEPARATEIOESPROC) (GLuint buf, GLenum modeRGB, GLenum modeAlpha); +typedef void (GL_APIENTRYP PFNGLBLENDFUNCIOESPROC) (GLuint buf, GLenum src, GLenum dst); +typedef void (GL_APIENTRYP PFNGLBLENDFUNCSEPARATEIOESPROC) (GLuint buf, GLenum srcRGB, GLenum dstRGB, GLenum srcAlpha, GLenum dstAlpha); +typedef void (GL_APIENTRYP PFNGLCOLORMASKIOESPROC) (GLuint index, GLboolean r, GLboolean g, GLboolean b, GLboolean a); +typedef GLboolean (GL_APIENTRYP PFNGLISENABLEDIOESPROC) (GLenum target, GLuint index); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glEnableiOES (GLenum target, GLuint index); +GL_APICALL void GL_APIENTRY glDisableiOES (GLenum target, GLuint index); +GL_APICALL void GL_APIENTRY glBlendEquationiOES (GLuint buf, GLenum mode); +GL_APICALL void GL_APIENTRY glBlendEquationSeparateiOES (GLuint buf, GLenum modeRGB, GLenum modeAlpha); +GL_APICALL void GL_APIENTRY glBlendFunciOES (GLuint buf, GLenum src, GLenum dst); +GL_APICALL void GL_APIENTRY glBlendFuncSeparateiOES (GLuint buf, GLenum srcRGB, GLenum dstRGB, GLenum srcAlpha, GLenum dstAlpha); +GL_APICALL void GL_APIENTRY glColorMaskiOES (GLuint index, GLboolean r, GLboolean g, GLboolean b, GLboolean a); +GL_APICALL GLboolean GL_APIENTRY glIsEnablediOES (GLenum target, GLuint index); +#endif +#endif /* GL_OES_draw_buffers_indexed */ + +#ifndef GL_OES_draw_elements_base_vertex +#define GL_OES_draw_elements_base_vertex 1 +typedef void (GL_APIENTRYP PFNGLDRAWELEMENTSBASEVERTEXOESPROC) (GLenum mode, GLsizei count, GLenum type, const void *indices, GLint basevertex); +typedef void (GL_APIENTRYP PFNGLDRAWRANGEELEMENTSBASEVERTEXOESPROC) (GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum type, const void *indices, GLint basevertex); +typedef void (GL_APIENTRYP PFNGLDRAWELEMENTSINSTANCEDBASEVERTEXOESPROC) (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount, GLint basevertex); +typedef void (GL_APIENTRYP PFNGLMULTIDRAWELEMENTSBASEVERTEXOESPROC) (GLenum mode, const GLsizei *count, GLenum type, const void *const*indices, GLsizei primcount, const GLint *basevertex); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glDrawElementsBaseVertexOES (GLenum mode, GLsizei count, GLenum type, const void *indices, GLint basevertex); +GL_APICALL void GL_APIENTRY glDrawRangeElementsBaseVertexOES (GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum type, const void *indices, GLint basevertex); +GL_APICALL void GL_APIENTRY glDrawElementsInstancedBaseVertexOES (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount, GLint basevertex); +GL_APICALL void GL_APIENTRY glMultiDrawElementsBaseVertexOES (GLenum mode, const GLsizei *count, GLenum type, const void *const*indices, GLsizei primcount, const GLint *basevertex); +#endif +#endif /* GL_OES_draw_elements_base_vertex */ + +#ifndef 
GL_OES_element_index_uint +#define GL_OES_element_index_uint 1 +#endif /* GL_OES_element_index_uint */ + +#ifndef GL_OES_fbo_render_mipmap +#define GL_OES_fbo_render_mipmap 1 +#endif /* GL_OES_fbo_render_mipmap */ + +#ifndef GL_OES_fragment_precision_high +#define GL_OES_fragment_precision_high 1 +#endif /* GL_OES_fragment_precision_high */ + +#ifndef GL_OES_geometry_point_size +#define GL_OES_geometry_point_size 1 +#endif /* GL_OES_geometry_point_size */ + +#ifndef GL_OES_geometry_shader +#define GL_OES_geometry_shader 1 +#define GL_GEOMETRY_SHADER_OES 0x8DD9 +#define GL_GEOMETRY_SHADER_BIT_OES 0x00000004 +#define GL_GEOMETRY_LINKED_VERTICES_OUT_OES 0x8916 +#define GL_GEOMETRY_LINKED_INPUT_TYPE_OES 0x8917 +#define GL_GEOMETRY_LINKED_OUTPUT_TYPE_OES 0x8918 +#define GL_GEOMETRY_SHADER_INVOCATIONS_OES 0x887F +#define GL_LAYER_PROVOKING_VERTEX_OES 0x825E +#define GL_LINES_ADJACENCY_OES 0x000A +#define GL_LINE_STRIP_ADJACENCY_OES 0x000B +#define GL_TRIANGLES_ADJACENCY_OES 0x000C +#define GL_TRIANGLE_STRIP_ADJACENCY_OES 0x000D +#define GL_MAX_GEOMETRY_UNIFORM_COMPONENTS_OES 0x8DDF +#define GL_MAX_GEOMETRY_UNIFORM_BLOCKS_OES 0x8A2C +#define GL_MAX_COMBINED_GEOMETRY_UNIFORM_COMPONENTS_OES 0x8A32 +#define GL_MAX_GEOMETRY_INPUT_COMPONENTS_OES 0x9123 +#define GL_MAX_GEOMETRY_OUTPUT_COMPONENTS_OES 0x9124 +#define GL_MAX_GEOMETRY_OUTPUT_VERTICES_OES 0x8DE0 +#define GL_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS_OES 0x8DE1 +#define GL_MAX_GEOMETRY_SHADER_INVOCATIONS_OES 0x8E5A +#define GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS_OES 0x8C29 +#define GL_MAX_GEOMETRY_ATOMIC_COUNTER_BUFFERS_OES 0x92CF +#define GL_MAX_GEOMETRY_ATOMIC_COUNTERS_OES 0x92D5 +#define GL_MAX_GEOMETRY_IMAGE_UNIFORMS_OES 0x90CD +#define GL_MAX_GEOMETRY_SHADER_STORAGE_BLOCKS_OES 0x90D7 +#define GL_FIRST_VERTEX_CONVENTION_OES 0x8E4D +#define GL_LAST_VERTEX_CONVENTION_OES 0x8E4E +#define GL_UNDEFINED_VERTEX_OES 0x8260 +#define GL_PRIMITIVES_GENERATED_OES 0x8C87 +#define GL_FRAMEBUFFER_DEFAULT_LAYERS_OES 0x9312 +#define GL_MAX_FRAMEBUFFER_LAYERS_OES 0x9317 +#define GL_FRAMEBUFFER_INCOMPLETE_LAYER_TARGETS_OES 0x8DA8 +#define GL_FRAMEBUFFER_ATTACHMENT_LAYERED_OES 0x8DA7 +#define GL_REFERENCED_BY_GEOMETRY_SHADER_OES 0x9309 +typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERTEXTUREOESPROC) (GLenum target, GLenum attachment, GLuint texture, GLint level); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glFramebufferTextureOES (GLenum target, GLenum attachment, GLuint texture, GLint level); +#endif +#endif /* GL_OES_geometry_shader */ + +#ifndef GL_OES_get_program_binary +#define GL_OES_get_program_binary 1 +#define GL_PROGRAM_BINARY_LENGTH_OES 0x8741 +#define GL_NUM_PROGRAM_BINARY_FORMATS_OES 0x87FE +#define GL_PROGRAM_BINARY_FORMATS_OES 0x87FF +typedef void (GL_APIENTRYP PFNGLGETPROGRAMBINARYOESPROC) (GLuint program, GLsizei bufSize, GLsizei *length, GLenum *binaryFormat, void *binary); +typedef void (GL_APIENTRYP PFNGLPROGRAMBINARYOESPROC) (GLuint program, GLenum binaryFormat, const void *binary, GLint length); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glGetProgramBinaryOES (GLuint program, GLsizei bufSize, GLsizei *length, GLenum *binaryFormat, void *binary); +GL_APICALL void GL_APIENTRY glProgramBinaryOES (GLuint program, GLenum binaryFormat, const void *binary, GLint length); +#endif +#endif /* GL_OES_get_program_binary */ + +#ifndef GL_OES_gpu_shader5 +#define GL_OES_gpu_shader5 1 +#endif /* GL_OES_gpu_shader5 */ + +#ifndef GL_OES_mapbuffer +#define GL_OES_mapbuffer 1 +#define GL_WRITE_ONLY_OES 0x88B9 +#define GL_BUFFER_ACCESS_OES 
0x88BB +#define GL_BUFFER_MAPPED_OES 0x88BC +#define GL_BUFFER_MAP_POINTER_OES 0x88BD +typedef void *(GL_APIENTRYP PFNGLMAPBUFFEROESPROC) (GLenum target, GLenum access); +typedef GLboolean (GL_APIENTRYP PFNGLUNMAPBUFFEROESPROC) (GLenum target); +typedef void (GL_APIENTRYP PFNGLGETBUFFERPOINTERVOESPROC) (GLenum target, GLenum pname, void **params); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void *GL_APIENTRY glMapBufferOES (GLenum target, GLenum access); +GL_APICALL GLboolean GL_APIENTRY glUnmapBufferOES (GLenum target); +GL_APICALL void GL_APIENTRY glGetBufferPointervOES (GLenum target, GLenum pname, void **params); +#endif +#endif /* GL_OES_mapbuffer */ + +#ifndef GL_OES_packed_depth_stencil +#define GL_OES_packed_depth_stencil 1 +#define GL_DEPTH_STENCIL_OES 0x84F9 +#define GL_UNSIGNED_INT_24_8_OES 0x84FA +#define GL_DEPTH24_STENCIL8_OES 0x88F0 +#endif /* GL_OES_packed_depth_stencil */ + +#ifndef GL_OES_primitive_bounding_box +#define GL_OES_primitive_bounding_box 1 +#define GL_PRIMITIVE_BOUNDING_BOX_OES 0x92BE +typedef void (GL_APIENTRYP PFNGLPRIMITIVEBOUNDINGBOXOESPROC) (GLfloat minX, GLfloat minY, GLfloat minZ, GLfloat minW, GLfloat maxX, GLfloat maxY, GLfloat maxZ, GLfloat maxW); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glPrimitiveBoundingBoxOES (GLfloat minX, GLfloat minY, GLfloat minZ, GLfloat minW, GLfloat maxX, GLfloat maxY, GLfloat maxZ, GLfloat maxW); +#endif +#endif /* GL_OES_primitive_bounding_box */ + +#ifndef GL_OES_required_internalformat +#define GL_OES_required_internalformat 1 +#define GL_ALPHA8_OES 0x803C +#define GL_DEPTH_COMPONENT16_OES 0x81A5 +#define GL_LUMINANCE4_ALPHA4_OES 0x8043 +#define GL_LUMINANCE8_ALPHA8_OES 0x8045 +#define GL_LUMINANCE8_OES 0x8040 +#define GL_RGBA4_OES 0x8056 +#define GL_RGB5_A1_OES 0x8057 +#define GL_RGB565_OES 0x8D62 +#define GL_RGB8_OES 0x8051 +#define GL_RGBA8_OES 0x8058 +#define GL_RGB10_EXT 0x8052 +#define GL_RGB10_A2_EXT 0x8059 +#endif /* GL_OES_required_internalformat */ + +#ifndef GL_OES_rgb8_rgba8 +#define GL_OES_rgb8_rgba8 1 +#endif /* GL_OES_rgb8_rgba8 */ + +#ifndef GL_OES_sample_shading +#define GL_OES_sample_shading 1 +#define GL_SAMPLE_SHADING_OES 0x8C36 +#define GL_MIN_SAMPLE_SHADING_VALUE_OES 0x8C37 +typedef void (GL_APIENTRYP PFNGLMINSAMPLESHADINGOESPROC) (GLfloat value); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glMinSampleShadingOES (GLfloat value); +#endif +#endif /* GL_OES_sample_shading */ + +#ifndef GL_OES_sample_variables +#define GL_OES_sample_variables 1 +#endif /* GL_OES_sample_variables */ + +#ifndef GL_OES_shader_image_atomic +#define GL_OES_shader_image_atomic 1 +#endif /* GL_OES_shader_image_atomic */ + +#ifndef GL_OES_shader_io_blocks +#define GL_OES_shader_io_blocks 1 +#endif /* GL_OES_shader_io_blocks */ + +#ifndef GL_OES_shader_multisample_interpolation +#define GL_OES_shader_multisample_interpolation 1 +#define GL_MIN_FRAGMENT_INTERPOLATION_OFFSET_OES 0x8E5B +#define GL_MAX_FRAGMENT_INTERPOLATION_OFFSET_OES 0x8E5C +#define GL_FRAGMENT_INTERPOLATION_OFFSET_BITS_OES 0x8E5D +#endif /* GL_OES_shader_multisample_interpolation */ + +#ifndef GL_OES_standard_derivatives +#define GL_OES_standard_derivatives 1 +#define GL_FRAGMENT_SHADER_DERIVATIVE_HINT_OES 0x8B8B +#endif /* GL_OES_standard_derivatives */ + +#ifndef GL_OES_stencil1 +#define GL_OES_stencil1 1 +#define GL_STENCIL_INDEX1_OES 0x8D46 +#endif /* GL_OES_stencil1 */ + +#ifndef GL_OES_stencil4 +#define GL_OES_stencil4 1 +#define GL_STENCIL_INDEX4_OES 0x8D47 +#endif /* GL_OES_stencil4 */ + +#ifndef GL_OES_surfaceless_context 
+#define GL_OES_surfaceless_context 1 +#define GL_FRAMEBUFFER_UNDEFINED_OES 0x8219 +#endif /* GL_OES_surfaceless_context */ + +#ifndef GL_OES_tessellation_point_size +#define GL_OES_tessellation_point_size 1 +#endif /* GL_OES_tessellation_point_size */ + +#ifndef GL_OES_tessellation_shader +#define GL_OES_tessellation_shader 1 +#define GL_PATCHES_OES 0x000E +#define GL_PATCH_VERTICES_OES 0x8E72 +#define GL_TESS_CONTROL_OUTPUT_VERTICES_OES 0x8E75 +#define GL_TESS_GEN_MODE_OES 0x8E76 +#define GL_TESS_GEN_SPACING_OES 0x8E77 +#define GL_TESS_GEN_VERTEX_ORDER_OES 0x8E78 +#define GL_TESS_GEN_POINT_MODE_OES 0x8E79 +#define GL_ISOLINES_OES 0x8E7A +#define GL_QUADS_OES 0x0007 +#define GL_FRACTIONAL_ODD_OES 0x8E7B +#define GL_FRACTIONAL_EVEN_OES 0x8E7C +#define GL_MAX_PATCH_VERTICES_OES 0x8E7D +#define GL_MAX_TESS_GEN_LEVEL_OES 0x8E7E +#define GL_MAX_TESS_CONTROL_UNIFORM_COMPONENTS_OES 0x8E7F +#define GL_MAX_TESS_EVALUATION_UNIFORM_COMPONENTS_OES 0x8E80 +#define GL_MAX_TESS_CONTROL_TEXTURE_IMAGE_UNITS_OES 0x8E81 +#define GL_MAX_TESS_EVALUATION_TEXTURE_IMAGE_UNITS_OES 0x8E82 +#define GL_MAX_TESS_CONTROL_OUTPUT_COMPONENTS_OES 0x8E83 +#define GL_MAX_TESS_PATCH_COMPONENTS_OES 0x8E84 +#define GL_MAX_TESS_CONTROL_TOTAL_OUTPUT_COMPONENTS_OES 0x8E85 +#define GL_MAX_TESS_EVALUATION_OUTPUT_COMPONENTS_OES 0x8E86 +#define GL_MAX_TESS_CONTROL_UNIFORM_BLOCKS_OES 0x8E89 +#define GL_MAX_TESS_EVALUATION_UNIFORM_BLOCKS_OES 0x8E8A +#define GL_MAX_TESS_CONTROL_INPUT_COMPONENTS_OES 0x886C +#define GL_MAX_TESS_EVALUATION_INPUT_COMPONENTS_OES 0x886D +#define GL_MAX_COMBINED_TESS_CONTROL_UNIFORM_COMPONENTS_OES 0x8E1E +#define GL_MAX_COMBINED_TESS_EVALUATION_UNIFORM_COMPONENTS_OES 0x8E1F +#define GL_MAX_TESS_CONTROL_ATOMIC_COUNTER_BUFFERS_OES 0x92CD +#define GL_MAX_TESS_EVALUATION_ATOMIC_COUNTER_BUFFERS_OES 0x92CE +#define GL_MAX_TESS_CONTROL_ATOMIC_COUNTERS_OES 0x92D3 +#define GL_MAX_TESS_EVALUATION_ATOMIC_COUNTERS_OES 0x92D4 +#define GL_MAX_TESS_CONTROL_IMAGE_UNIFORMS_OES 0x90CB +#define GL_MAX_TESS_EVALUATION_IMAGE_UNIFORMS_OES 0x90CC +#define GL_MAX_TESS_CONTROL_SHADER_STORAGE_BLOCKS_OES 0x90D8 +#define GL_MAX_TESS_EVALUATION_SHADER_STORAGE_BLOCKS_OES 0x90D9 +#define GL_PRIMITIVE_RESTART_FOR_PATCHES_SUPPORTED_OES 0x8221 +#define GL_IS_PER_PATCH_OES 0x92E7 +#define GL_REFERENCED_BY_TESS_CONTROL_SHADER_OES 0x9307 +#define GL_REFERENCED_BY_TESS_EVALUATION_SHADER_OES 0x9308 +#define GL_TESS_CONTROL_SHADER_OES 0x8E88 +#define GL_TESS_EVALUATION_SHADER_OES 0x8E87 +#define GL_TESS_CONTROL_SHADER_BIT_OES 0x00000008 +#define GL_TESS_EVALUATION_SHADER_BIT_OES 0x00000010 +typedef void (GL_APIENTRYP PFNGLPATCHPARAMETERIOESPROC) (GLenum pname, GLint value); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glPatchParameteriOES (GLenum pname, GLint value); +#endif +#endif /* GL_OES_tessellation_shader */ + +#ifndef GL_OES_texture_3D +#define GL_OES_texture_3D 1 +#define GL_TEXTURE_WRAP_R_OES 0x8072 +#define GL_TEXTURE_3D_OES 0x806F +#define GL_TEXTURE_BINDING_3D_OES 0x806A +#define GL_MAX_3D_TEXTURE_SIZE_OES 0x8073 +#define GL_SAMPLER_3D_OES 0x8B5F +#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_3D_ZOFFSET_OES 0x8CD4 +typedef void (GL_APIENTRYP PFNGLTEXIMAGE3DOESPROC) (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const void *pixels); +typedef void (GL_APIENTRYP PFNGLTEXSUBIMAGE3DOESPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const 
void *pixels); +typedef void (GL_APIENTRYP PFNGLCOPYTEXSUBIMAGE3DOESPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLint x, GLint y, GLsizei width, GLsizei height); +typedef void (GL_APIENTRYP PFNGLCOMPRESSEDTEXIMAGE3DOESPROC) (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLsizei imageSize, const void *data); +typedef void (GL_APIENTRYP PFNGLCOMPRESSEDTEXSUBIMAGE3DOESPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLsizei imageSize, const void *data); +typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERTEXTURE3DOESPROC) (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLint zoffset); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glTexImage3DOES (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const void *pixels); +GL_APICALL void GL_APIENTRY glTexSubImage3DOES (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const void *pixels); +GL_APICALL void GL_APIENTRY glCopyTexSubImage3DOES (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLint x, GLint y, GLsizei width, GLsizei height); +GL_APICALL void GL_APIENTRY glCompressedTexImage3DOES (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLsizei imageSize, const void *data); +GL_APICALL void GL_APIENTRY glCompressedTexSubImage3DOES (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLsizei imageSize, const void *data); +GL_APICALL void GL_APIENTRY glFramebufferTexture3DOES (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLint zoffset); +#endif +#endif /* GL_OES_texture_3D */ + +#ifndef GL_OES_texture_border_clamp +#define GL_OES_texture_border_clamp 1 +#define GL_TEXTURE_BORDER_COLOR_OES 0x1004 +#define GL_CLAMP_TO_BORDER_OES 0x812D +typedef void (GL_APIENTRYP PFNGLTEXPARAMETERIIVOESPROC) (GLenum target, GLenum pname, const GLint *params); +typedef void (GL_APIENTRYP PFNGLTEXPARAMETERIUIVOESPROC) (GLenum target, GLenum pname, const GLuint *params); +typedef void (GL_APIENTRYP PFNGLGETTEXPARAMETERIIVOESPROC) (GLenum target, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETTEXPARAMETERIUIVOESPROC) (GLenum target, GLenum pname, GLuint *params); +typedef void (GL_APIENTRYP PFNGLSAMPLERPARAMETERIIVOESPROC) (GLuint sampler, GLenum pname, const GLint *param); +typedef void (GL_APIENTRYP PFNGLSAMPLERPARAMETERIUIVOESPROC) (GLuint sampler, GLenum pname, const GLuint *param); +typedef void (GL_APIENTRYP PFNGLGETSAMPLERPARAMETERIIVOESPROC) (GLuint sampler, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETSAMPLERPARAMETERIUIVOESPROC) (GLuint sampler, GLenum pname, GLuint *params); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glTexParameterIivOES (GLenum target, GLenum pname, const GLint *params); +GL_APICALL void GL_APIENTRY glTexParameterIuivOES (GLenum target, GLenum pname, const GLuint *params); +GL_APICALL void GL_APIENTRY glGetTexParameterIivOES (GLenum target, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetTexParameterIuivOES (GLenum target, GLenum pname, GLuint *params); 
+GL_APICALL void GL_APIENTRY glSamplerParameterIivOES (GLuint sampler, GLenum pname, const GLint *param); +GL_APICALL void GL_APIENTRY glSamplerParameterIuivOES (GLuint sampler, GLenum pname, const GLuint *param); +GL_APICALL void GL_APIENTRY glGetSamplerParameterIivOES (GLuint sampler, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetSamplerParameterIuivOES (GLuint sampler, GLenum pname, GLuint *params); +#endif +#endif /* GL_OES_texture_border_clamp */ + +#ifndef GL_OES_texture_buffer +#define GL_OES_texture_buffer 1 +#define GL_TEXTURE_BUFFER_OES 0x8C2A +#define GL_TEXTURE_BUFFER_BINDING_OES 0x8C2A +#define GL_MAX_TEXTURE_BUFFER_SIZE_OES 0x8C2B +#define GL_TEXTURE_BINDING_BUFFER_OES 0x8C2C +#define GL_TEXTURE_BUFFER_DATA_STORE_BINDING_OES 0x8C2D +#define GL_TEXTURE_BUFFER_OFFSET_ALIGNMENT_OES 0x919F +#define GL_SAMPLER_BUFFER_OES 0x8DC2 +#define GL_INT_SAMPLER_BUFFER_OES 0x8DD0 +#define GL_UNSIGNED_INT_SAMPLER_BUFFER_OES 0x8DD8 +#define GL_IMAGE_BUFFER_OES 0x9051 +#define GL_INT_IMAGE_BUFFER_OES 0x905C +#define GL_UNSIGNED_INT_IMAGE_BUFFER_OES 0x9067 +#define GL_TEXTURE_BUFFER_OFFSET_OES 0x919D +#define GL_TEXTURE_BUFFER_SIZE_OES 0x919E +typedef void (GL_APIENTRYP PFNGLTEXBUFFEROESPROC) (GLenum target, GLenum internalformat, GLuint buffer); +typedef void (GL_APIENTRYP PFNGLTEXBUFFERRANGEOESPROC) (GLenum target, GLenum internalformat, GLuint buffer, GLintptr offset, GLsizeiptr size); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glTexBufferOES (GLenum target, GLenum internalformat, GLuint buffer); +GL_APICALL void GL_APIENTRY glTexBufferRangeOES (GLenum target, GLenum internalformat, GLuint buffer, GLintptr offset, GLsizeiptr size); +#endif +#endif /* GL_OES_texture_buffer */ + +#ifndef GL_OES_texture_compression_astc +#define GL_OES_texture_compression_astc 1 +#define GL_COMPRESSED_RGBA_ASTC_3x3x3_OES 0x93C0 +#define GL_COMPRESSED_RGBA_ASTC_4x3x3_OES 0x93C1 +#define GL_COMPRESSED_RGBA_ASTC_4x4x3_OES 0x93C2 +#define GL_COMPRESSED_RGBA_ASTC_4x4x4_OES 0x93C3 +#define GL_COMPRESSED_RGBA_ASTC_5x4x4_OES 0x93C4 +#define GL_COMPRESSED_RGBA_ASTC_5x5x4_OES 0x93C5 +#define GL_COMPRESSED_RGBA_ASTC_5x5x5_OES 0x93C6 +#define GL_COMPRESSED_RGBA_ASTC_6x5x5_OES 0x93C7 +#define GL_COMPRESSED_RGBA_ASTC_6x6x5_OES 0x93C8 +#define GL_COMPRESSED_RGBA_ASTC_6x6x6_OES 0x93C9 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_3x3x3_OES 0x93E0 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x3x3_OES 0x93E1 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4x3_OES 0x93E2 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4x4_OES 0x93E3 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x4x4_OES 0x93E4 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5x4_OES 0x93E5 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5x5_OES 0x93E6 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x5x5_OES 0x93E7 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6x5_OES 0x93E8 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6x6_OES 0x93E9 +#endif /* GL_OES_texture_compression_astc */ + +#ifndef GL_OES_texture_cube_map_array +#define GL_OES_texture_cube_map_array 1 +#define GL_TEXTURE_CUBE_MAP_ARRAY_OES 0x9009 +#define GL_TEXTURE_BINDING_CUBE_MAP_ARRAY_OES 0x900A +#define GL_SAMPLER_CUBE_MAP_ARRAY_OES 0x900C +#define GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW_OES 0x900D +#define GL_INT_SAMPLER_CUBE_MAP_ARRAY_OES 0x900E +#define GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY_OES 0x900F +#define GL_IMAGE_CUBE_MAP_ARRAY_OES 0x9054 +#define GL_INT_IMAGE_CUBE_MAP_ARRAY_OES 0x905F +#define GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY_OES 0x906A +#endif /* GL_OES_texture_cube_map_array */ + +#ifndef 
GL_OES_texture_float +#define GL_OES_texture_float 1 +#endif /* GL_OES_texture_float */ + +#ifndef GL_OES_texture_float_linear +#define GL_OES_texture_float_linear 1 +#endif /* GL_OES_texture_float_linear */ + +#ifndef GL_OES_texture_half_float +#define GL_OES_texture_half_float 1 +#define GL_HALF_FLOAT_OES 0x8D61 +#endif /* GL_OES_texture_half_float */ + +#ifndef GL_OES_texture_half_float_linear +#define GL_OES_texture_half_float_linear 1 +#endif /* GL_OES_texture_half_float_linear */ + +#ifndef GL_OES_texture_npot +#define GL_OES_texture_npot 1 +#endif /* GL_OES_texture_npot */ + +#ifndef GL_OES_texture_stencil8 +#define GL_OES_texture_stencil8 1 +#define GL_STENCIL_INDEX_OES 0x1901 +#define GL_STENCIL_INDEX8_OES 0x8D48 +#endif /* GL_OES_texture_stencil8 */ + +#ifndef GL_OES_texture_storage_multisample_2d_array +#define GL_OES_texture_storage_multisample_2d_array 1 +#define GL_TEXTURE_2D_MULTISAMPLE_ARRAY_OES 0x9102 +#define GL_TEXTURE_BINDING_2D_MULTISAMPLE_ARRAY_OES 0x9105 +#define GL_SAMPLER_2D_MULTISAMPLE_ARRAY_OES 0x910B +#define GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY_OES 0x910C +#define GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY_OES 0x910D +typedef void (GL_APIENTRYP PFNGLTEXSTORAGE3DMULTISAMPLEOESPROC) (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLboolean fixedsamplelocations); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glTexStorage3DMultisampleOES (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLboolean fixedsamplelocations); +#endif +#endif /* GL_OES_texture_storage_multisample_2d_array */ + +#ifndef GL_OES_texture_view +#define GL_OES_texture_view 1 +#define GL_TEXTURE_VIEW_MIN_LEVEL_OES 0x82DB +#define GL_TEXTURE_VIEW_NUM_LEVELS_OES 0x82DC +#define GL_TEXTURE_VIEW_MIN_LAYER_OES 0x82DD +#define GL_TEXTURE_VIEW_NUM_LAYERS_OES 0x82DE +#define GL_TEXTURE_IMMUTABLE_LEVELS 0x82DF +typedef void (GL_APIENTRYP PFNGLTEXTUREVIEWOESPROC) (GLuint texture, GLenum target, GLuint origtexture, GLenum internalformat, GLuint minlevel, GLuint numlevels, GLuint minlayer, GLuint numlayers); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glTextureViewOES (GLuint texture, GLenum target, GLuint origtexture, GLenum internalformat, GLuint minlevel, GLuint numlevels, GLuint minlayer, GLuint numlayers); +#endif +#endif /* GL_OES_texture_view */ + +#ifndef GL_OES_vertex_array_object +#define GL_OES_vertex_array_object 1 +#define GL_VERTEX_ARRAY_BINDING_OES 0x85B5 +typedef void (GL_APIENTRYP PFNGLBINDVERTEXARRAYOESPROC) (GLuint array); +typedef void (GL_APIENTRYP PFNGLDELETEVERTEXARRAYSOESPROC) (GLsizei n, const GLuint *arrays); +typedef void (GL_APIENTRYP PFNGLGENVERTEXARRAYSOESPROC) (GLsizei n, GLuint *arrays); +typedef GLboolean (GL_APIENTRYP PFNGLISVERTEXARRAYOESPROC) (GLuint array); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glBindVertexArrayOES (GLuint array); +GL_APICALL void GL_APIENTRY glDeleteVertexArraysOES (GLsizei n, const GLuint *arrays); +GL_APICALL void GL_APIENTRY glGenVertexArraysOES (GLsizei n, GLuint *arrays); +GL_APICALL GLboolean GL_APIENTRY glIsVertexArrayOES (GLuint array); +#endif +#endif /* GL_OES_vertex_array_object */ + +#ifndef GL_OES_vertex_half_float +#define GL_OES_vertex_half_float 1 +#endif /* GL_OES_vertex_half_float */ + +#ifndef GL_OES_vertex_type_10_10_10_2 +#define GL_OES_vertex_type_10_10_10_2 1 +#define GL_UNSIGNED_INT_10_10_10_2_OES 0x8DF6 +#define GL_INT_10_10_10_2_OES 0x8DF7 +#endif /* GL_OES_vertex_type_10_10_10_2 
*/ + +#ifndef GL_OES_viewport_array +#define GL_OES_viewport_array 1 +#define GL_MAX_VIEWPORTS_OES 0x825B +#define GL_VIEWPORT_SUBPIXEL_BITS_OES 0x825C +#define GL_VIEWPORT_BOUNDS_RANGE_OES 0x825D +#define GL_VIEWPORT_INDEX_PROVOKING_VERTEX_OES 0x825F +typedef void (GL_APIENTRYP PFNGLVIEWPORTARRAYVOESPROC) (GLuint first, GLsizei count, const GLfloat *v); +typedef void (GL_APIENTRYP PFNGLVIEWPORTINDEXEDFOESPROC) (GLuint index, GLfloat x, GLfloat y, GLfloat w, GLfloat h); +typedef void (GL_APIENTRYP PFNGLVIEWPORTINDEXEDFVOESPROC) (GLuint index, const GLfloat *v); +typedef void (GL_APIENTRYP PFNGLSCISSORARRAYVOESPROC) (GLuint first, GLsizei count, const GLint *v); +typedef void (GL_APIENTRYP PFNGLSCISSORINDEXEDOESPROC) (GLuint index, GLint left, GLint bottom, GLsizei width, GLsizei height); +typedef void (GL_APIENTRYP PFNGLSCISSORINDEXEDVOESPROC) (GLuint index, const GLint *v); +typedef void (GL_APIENTRYP PFNGLDEPTHRANGEARRAYFVOESPROC) (GLuint first, GLsizei count, const GLfloat *v); +typedef void (GL_APIENTRYP PFNGLDEPTHRANGEINDEXEDFOESPROC) (GLuint index, GLfloat n, GLfloat f); +typedef void (GL_APIENTRYP PFNGLGETFLOATI_VOESPROC) (GLenum target, GLuint index, GLfloat *data); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glViewportArrayvOES (GLuint first, GLsizei count, const GLfloat *v); +GL_APICALL void GL_APIENTRY glViewportIndexedfOES (GLuint index, GLfloat x, GLfloat y, GLfloat w, GLfloat h); +GL_APICALL void GL_APIENTRY glViewportIndexedfvOES (GLuint index, const GLfloat *v); +GL_APICALL void GL_APIENTRY glScissorArrayvOES (GLuint first, GLsizei count, const GLint *v); +GL_APICALL void GL_APIENTRY glScissorIndexedOES (GLuint index, GLint left, GLint bottom, GLsizei width, GLsizei height); +GL_APICALL void GL_APIENTRY glScissorIndexedvOES (GLuint index, const GLint *v); +GL_APICALL void GL_APIENTRY glDepthRangeArrayfvOES (GLuint first, GLsizei count, const GLfloat *v); +GL_APICALL void GL_APIENTRY glDepthRangeIndexedfOES (GLuint index, GLfloat n, GLfloat f); +GL_APICALL void GL_APIENTRY glGetFloati_vOES (GLenum target, GLuint index, GLfloat *data); +#endif +#endif /* GL_OES_viewport_array */ + +#ifndef GL_AMD_compressed_3DC_texture +#define GL_AMD_compressed_3DC_texture 1 +#define GL_3DC_X_AMD 0x87F9 +#define GL_3DC_XY_AMD 0x87FA +#endif /* GL_AMD_compressed_3DC_texture */ + +#ifndef GL_AMD_compressed_ATC_texture +#define GL_AMD_compressed_ATC_texture 1 +#define GL_ATC_RGB_AMD 0x8C92 +#define GL_ATC_RGBA_EXPLICIT_ALPHA_AMD 0x8C93 +#define GL_ATC_RGBA_INTERPOLATED_ALPHA_AMD 0x87EE +#endif /* GL_AMD_compressed_ATC_texture */ + +#ifndef GL_AMD_performance_monitor +#define GL_AMD_performance_monitor 1 +#define GL_COUNTER_TYPE_AMD 0x8BC0 +#define GL_COUNTER_RANGE_AMD 0x8BC1 +#define GL_UNSIGNED_INT64_AMD 0x8BC2 +#define GL_PERCENTAGE_AMD 0x8BC3 +#define GL_PERFMON_RESULT_AVAILABLE_AMD 0x8BC4 +#define GL_PERFMON_RESULT_SIZE_AMD 0x8BC5 +#define GL_PERFMON_RESULT_AMD 0x8BC6 +typedef void (GL_APIENTRYP PFNGLGETPERFMONITORGROUPSAMDPROC) (GLint *numGroups, GLsizei groupsSize, GLuint *groups); +typedef void (GL_APIENTRYP PFNGLGETPERFMONITORCOUNTERSAMDPROC) (GLuint group, GLint *numCounters, GLint *maxActiveCounters, GLsizei counterSize, GLuint *counters); +typedef void (GL_APIENTRYP PFNGLGETPERFMONITORGROUPSTRINGAMDPROC) (GLuint group, GLsizei bufSize, GLsizei *length, GLchar *groupString); +typedef void (GL_APIENTRYP PFNGLGETPERFMONITORCOUNTERSTRINGAMDPROC) (GLuint group, GLuint counter, GLsizei bufSize, GLsizei *length, GLchar *counterString); +typedef void (GL_APIENTRYP 
PFNGLGETPERFMONITORCOUNTERINFOAMDPROC) (GLuint group, GLuint counter, GLenum pname, void *data); +typedef void (GL_APIENTRYP PFNGLGENPERFMONITORSAMDPROC) (GLsizei n, GLuint *monitors); +typedef void (GL_APIENTRYP PFNGLDELETEPERFMONITORSAMDPROC) (GLsizei n, GLuint *monitors); +typedef void (GL_APIENTRYP PFNGLSELECTPERFMONITORCOUNTERSAMDPROC) (GLuint monitor, GLboolean enable, GLuint group, GLint numCounters, GLuint *counterList); +typedef void (GL_APIENTRYP PFNGLBEGINPERFMONITORAMDPROC) (GLuint monitor); +typedef void (GL_APIENTRYP PFNGLENDPERFMONITORAMDPROC) (GLuint monitor); +typedef void (GL_APIENTRYP PFNGLGETPERFMONITORCOUNTERDATAAMDPROC) (GLuint monitor, GLenum pname, GLsizei dataSize, GLuint *data, GLint *bytesWritten); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glGetPerfMonitorGroupsAMD (GLint *numGroups, GLsizei groupsSize, GLuint *groups); +GL_APICALL void GL_APIENTRY glGetPerfMonitorCountersAMD (GLuint group, GLint *numCounters, GLint *maxActiveCounters, GLsizei counterSize, GLuint *counters); +GL_APICALL void GL_APIENTRY glGetPerfMonitorGroupStringAMD (GLuint group, GLsizei bufSize, GLsizei *length, GLchar *groupString); +GL_APICALL void GL_APIENTRY glGetPerfMonitorCounterStringAMD (GLuint group, GLuint counter, GLsizei bufSize, GLsizei *length, GLchar *counterString); +GL_APICALL void GL_APIENTRY glGetPerfMonitorCounterInfoAMD (GLuint group, GLuint counter, GLenum pname, void *data); +GL_APICALL void GL_APIENTRY glGenPerfMonitorsAMD (GLsizei n, GLuint *monitors); +GL_APICALL void GL_APIENTRY glDeletePerfMonitorsAMD (GLsizei n, GLuint *monitors); +GL_APICALL void GL_APIENTRY glSelectPerfMonitorCountersAMD (GLuint monitor, GLboolean enable, GLuint group, GLint numCounters, GLuint *counterList); +GL_APICALL void GL_APIENTRY glBeginPerfMonitorAMD (GLuint monitor); +GL_APICALL void GL_APIENTRY glEndPerfMonitorAMD (GLuint monitor); +GL_APICALL void GL_APIENTRY glGetPerfMonitorCounterDataAMD (GLuint monitor, GLenum pname, GLsizei dataSize, GLuint *data, GLint *bytesWritten); +#endif +#endif /* GL_AMD_performance_monitor */ + +#ifndef GL_AMD_program_binary_Z400 +#define GL_AMD_program_binary_Z400 1 +#define GL_Z400_BINARY_AMD 0x8740 +#endif /* GL_AMD_program_binary_Z400 */ + +#ifndef GL_ANDROID_extension_pack_es31a +#define GL_ANDROID_extension_pack_es31a 1 +#endif /* GL_ANDROID_extension_pack_es31a */ + +#ifndef GL_ANGLE_depth_texture +#define GL_ANGLE_depth_texture 1 +#endif /* GL_ANGLE_depth_texture */ + +#ifndef GL_ANGLE_framebuffer_blit +#define GL_ANGLE_framebuffer_blit 1 +#define GL_READ_FRAMEBUFFER_ANGLE 0x8CA8 +#define GL_DRAW_FRAMEBUFFER_ANGLE 0x8CA9 +#define GL_DRAW_FRAMEBUFFER_BINDING_ANGLE 0x8CA6 +#define GL_READ_FRAMEBUFFER_BINDING_ANGLE 0x8CAA +typedef void (GL_APIENTRYP PFNGLBLITFRAMEBUFFERANGLEPROC) (GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glBlitFramebufferANGLE (GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter); +#endif +#endif /* GL_ANGLE_framebuffer_blit */ + +#ifndef GL_ANGLE_framebuffer_multisample +#define GL_ANGLE_framebuffer_multisample 1 +#define GL_RENDERBUFFER_SAMPLES_ANGLE 0x8CAB +#define GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE_ANGLE 0x8D56 +#define GL_MAX_SAMPLES_ANGLE 0x8D57 +typedef void (GL_APIENTRYP PFNGLRENDERBUFFERSTORAGEMULTISAMPLEANGLEPROC) (GLenum target, GLsizei samples, GLenum internalformat, 
GLsizei width, GLsizei height); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glRenderbufferStorageMultisampleANGLE (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height); +#endif +#endif /* GL_ANGLE_framebuffer_multisample */ + +#ifndef GL_ANGLE_instanced_arrays +#define GL_ANGLE_instanced_arrays 1 +#define GL_VERTEX_ATTRIB_ARRAY_DIVISOR_ANGLE 0x88FE +typedef void (GL_APIENTRYP PFNGLDRAWARRAYSINSTANCEDANGLEPROC) (GLenum mode, GLint first, GLsizei count, GLsizei primcount); +typedef void (GL_APIENTRYP PFNGLDRAWELEMENTSINSTANCEDANGLEPROC) (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei primcount); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBDIVISORANGLEPROC) (GLuint index, GLuint divisor); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glDrawArraysInstancedANGLE (GLenum mode, GLint first, GLsizei count, GLsizei primcount); +GL_APICALL void GL_APIENTRY glDrawElementsInstancedANGLE (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei primcount); +GL_APICALL void GL_APIENTRY glVertexAttribDivisorANGLE (GLuint index, GLuint divisor); +#endif +#endif /* GL_ANGLE_instanced_arrays */ + +#ifndef GL_ANGLE_pack_reverse_row_order +#define GL_ANGLE_pack_reverse_row_order 1 +#define GL_PACK_REVERSE_ROW_ORDER_ANGLE 0x93A4 +#endif /* GL_ANGLE_pack_reverse_row_order */ + +#ifndef GL_ANGLE_program_binary +#define GL_ANGLE_program_binary 1 +#define GL_PROGRAM_BINARY_ANGLE 0x93A6 +#endif /* GL_ANGLE_program_binary */ + +#ifndef GL_ANGLE_texture_compression_dxt3 +#define GL_ANGLE_texture_compression_dxt3 1 +#define GL_COMPRESSED_RGBA_S3TC_DXT3_ANGLE 0x83F2 +#endif /* GL_ANGLE_texture_compression_dxt3 */ + +#ifndef GL_ANGLE_texture_compression_dxt5 +#define GL_ANGLE_texture_compression_dxt5 1 +#define GL_COMPRESSED_RGBA_S3TC_DXT5_ANGLE 0x83F3 +#endif /* GL_ANGLE_texture_compression_dxt5 */ + +#ifndef GL_ANGLE_texture_usage +#define GL_ANGLE_texture_usage 1 +#define GL_TEXTURE_USAGE_ANGLE 0x93A2 +#define GL_FRAMEBUFFER_ATTACHMENT_ANGLE 0x93A3 +#endif /* GL_ANGLE_texture_usage */ + +#ifndef GL_ANGLE_translated_shader_source +#define GL_ANGLE_translated_shader_source 1 +#define GL_TRANSLATED_SHADER_SOURCE_LENGTH_ANGLE 0x93A0 +typedef void (GL_APIENTRYP PFNGLGETTRANSLATEDSHADERSOURCEANGLEPROC) (GLuint shader, GLsizei bufsize, GLsizei *length, GLchar *source); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glGetTranslatedShaderSourceANGLE (GLuint shader, GLsizei bufsize, GLsizei *length, GLchar *source); +#endif +#endif /* GL_ANGLE_translated_shader_source */ + +#ifndef GL_APPLE_clip_distance +#define GL_APPLE_clip_distance 1 +#define GL_MAX_CLIP_DISTANCES_APPLE 0x0D32 +#define GL_CLIP_DISTANCE0_APPLE 0x3000 +#define GL_CLIP_DISTANCE1_APPLE 0x3001 +#define GL_CLIP_DISTANCE2_APPLE 0x3002 +#define GL_CLIP_DISTANCE3_APPLE 0x3003 +#define GL_CLIP_DISTANCE4_APPLE 0x3004 +#define GL_CLIP_DISTANCE5_APPLE 0x3005 +#define GL_CLIP_DISTANCE6_APPLE 0x3006 +#define GL_CLIP_DISTANCE7_APPLE 0x3007 +#endif /* GL_APPLE_clip_distance */ + +#ifndef GL_APPLE_color_buffer_packed_float +#define GL_APPLE_color_buffer_packed_float 1 +#endif /* GL_APPLE_color_buffer_packed_float */ + +#ifndef GL_APPLE_copy_texture_levels +#define GL_APPLE_copy_texture_levels 1 +typedef void (GL_APIENTRYP PFNGLCOPYTEXTURELEVELSAPPLEPROC) (GLuint destinationTexture, GLuint sourceTexture, GLint sourceBaseLevel, GLsizei sourceLevelCount); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glCopyTextureLevelsAPPLE (GLuint destinationTexture, GLuint 
sourceTexture, GLint sourceBaseLevel, GLsizei sourceLevelCount); +#endif +#endif /* GL_APPLE_copy_texture_levels */ + +#ifndef GL_APPLE_framebuffer_multisample +#define GL_APPLE_framebuffer_multisample 1 +#define GL_RENDERBUFFER_SAMPLES_APPLE 0x8CAB +#define GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE_APPLE 0x8D56 +#define GL_MAX_SAMPLES_APPLE 0x8D57 +#define GL_READ_FRAMEBUFFER_APPLE 0x8CA8 +#define GL_DRAW_FRAMEBUFFER_APPLE 0x8CA9 +#define GL_DRAW_FRAMEBUFFER_BINDING_APPLE 0x8CA6 +#define GL_READ_FRAMEBUFFER_BINDING_APPLE 0x8CAA +typedef void (GL_APIENTRYP PFNGLRENDERBUFFERSTORAGEMULTISAMPLEAPPLEPROC) (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height); +typedef void (GL_APIENTRYP PFNGLRESOLVEMULTISAMPLEFRAMEBUFFERAPPLEPROC) (void); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glRenderbufferStorageMultisampleAPPLE (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height); +GL_APICALL void GL_APIENTRY glResolveMultisampleFramebufferAPPLE (void); +#endif +#endif /* GL_APPLE_framebuffer_multisample */ + +#ifndef GL_APPLE_rgb_422 +#define GL_APPLE_rgb_422 1 +#define GL_RGB_422_APPLE 0x8A1F +#define GL_UNSIGNED_SHORT_8_8_APPLE 0x85BA +#define GL_UNSIGNED_SHORT_8_8_REV_APPLE 0x85BB +#define GL_RGB_RAW_422_APPLE 0x8A51 +#endif /* GL_APPLE_rgb_422 */ + +#ifndef GL_APPLE_sync +#define GL_APPLE_sync 1 +#define GL_SYNC_OBJECT_APPLE 0x8A53 +#define GL_MAX_SERVER_WAIT_TIMEOUT_APPLE 0x9111 +#define GL_OBJECT_TYPE_APPLE 0x9112 +#define GL_SYNC_CONDITION_APPLE 0x9113 +#define GL_SYNC_STATUS_APPLE 0x9114 +#define GL_SYNC_FLAGS_APPLE 0x9115 +#define GL_SYNC_FENCE_APPLE 0x9116 +#define GL_SYNC_GPU_COMMANDS_COMPLETE_APPLE 0x9117 +#define GL_UNSIGNALED_APPLE 0x9118 +#define GL_SIGNALED_APPLE 0x9119 +#define GL_ALREADY_SIGNALED_APPLE 0x911A +#define GL_TIMEOUT_EXPIRED_APPLE 0x911B +#define GL_CONDITION_SATISFIED_APPLE 0x911C +#define GL_WAIT_FAILED_APPLE 0x911D +#define GL_SYNC_FLUSH_COMMANDS_BIT_APPLE 0x00000001 +#define GL_TIMEOUT_IGNORED_APPLE 0xFFFFFFFFFFFFFFFFull +typedef GLsync (GL_APIENTRYP PFNGLFENCESYNCAPPLEPROC) (GLenum condition, GLbitfield flags); +typedef GLboolean (GL_APIENTRYP PFNGLISSYNCAPPLEPROC) (GLsync sync); +typedef void (GL_APIENTRYP PFNGLDELETESYNCAPPLEPROC) (GLsync sync); +typedef GLenum (GL_APIENTRYP PFNGLCLIENTWAITSYNCAPPLEPROC) (GLsync sync, GLbitfield flags, GLuint64 timeout); +typedef void (GL_APIENTRYP PFNGLWAITSYNCAPPLEPROC) (GLsync sync, GLbitfield flags, GLuint64 timeout); +typedef void (GL_APIENTRYP PFNGLGETINTEGER64VAPPLEPROC) (GLenum pname, GLint64 *params); +typedef void (GL_APIENTRYP PFNGLGETSYNCIVAPPLEPROC) (GLsync sync, GLenum pname, GLsizei bufSize, GLsizei *length, GLint *values); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL GLsync GL_APIENTRY glFenceSyncAPPLE (GLenum condition, GLbitfield flags); +GL_APICALL GLboolean GL_APIENTRY glIsSyncAPPLE (GLsync sync); +GL_APICALL void GL_APIENTRY glDeleteSyncAPPLE (GLsync sync); +GL_APICALL GLenum GL_APIENTRY glClientWaitSyncAPPLE (GLsync sync, GLbitfield flags, GLuint64 timeout); +GL_APICALL void GL_APIENTRY glWaitSyncAPPLE (GLsync sync, GLbitfield flags, GLuint64 timeout); +GL_APICALL void GL_APIENTRY glGetInteger64vAPPLE (GLenum pname, GLint64 *params); +GL_APICALL void GL_APIENTRY glGetSyncivAPPLE (GLsync sync, GLenum pname, GLsizei bufSize, GLsizei *length, GLint *values); +#endif +#endif /* GL_APPLE_sync */ + +#ifndef GL_APPLE_texture_format_BGRA8888 +#define GL_APPLE_texture_format_BGRA8888 1 +#define GL_BGRA_EXT 0x80E1 +#define GL_BGRA8_EXT 0x93A1 +#endif 
/* GL_APPLE_texture_format_BGRA8888 */ + +#ifndef GL_APPLE_texture_max_level +#define GL_APPLE_texture_max_level 1 +#define GL_TEXTURE_MAX_LEVEL_APPLE 0x813D +#endif /* GL_APPLE_texture_max_level */ + +#ifndef GL_APPLE_texture_packed_float +#define GL_APPLE_texture_packed_float 1 +#define GL_UNSIGNED_INT_10F_11F_11F_REV_APPLE 0x8C3B +#define GL_UNSIGNED_INT_5_9_9_9_REV_APPLE 0x8C3E +#define GL_R11F_G11F_B10F_APPLE 0x8C3A +#define GL_RGB9_E5_APPLE 0x8C3D +#endif /* GL_APPLE_texture_packed_float */ + +#ifndef GL_ARM_mali_program_binary +#define GL_ARM_mali_program_binary 1 +#define GL_MALI_PROGRAM_BINARY_ARM 0x8F61 +#endif /* GL_ARM_mali_program_binary */ + +#ifndef GL_ARM_mali_shader_binary +#define GL_ARM_mali_shader_binary 1 +#define GL_MALI_SHADER_BINARY_ARM 0x8F60 +#endif /* GL_ARM_mali_shader_binary */ + +#ifndef GL_ARM_rgba8 +#define GL_ARM_rgba8 1 +#endif /* GL_ARM_rgba8 */ + +#ifndef GL_ARM_shader_framebuffer_fetch +#define GL_ARM_shader_framebuffer_fetch 1 +#define GL_FETCH_PER_SAMPLE_ARM 0x8F65 +#define GL_FRAGMENT_SHADER_FRAMEBUFFER_FETCH_MRT_ARM 0x8F66 +#endif /* GL_ARM_shader_framebuffer_fetch */ + +#ifndef GL_ARM_shader_framebuffer_fetch_depth_stencil +#define GL_ARM_shader_framebuffer_fetch_depth_stencil 1 +#endif /* GL_ARM_shader_framebuffer_fetch_depth_stencil */ + +#ifndef GL_DMP_program_binary +#define GL_DMP_program_binary 1 +#define GL_SMAPHS30_PROGRAM_BINARY_DMP 0x9251 +#define GL_SMAPHS_PROGRAM_BINARY_DMP 0x9252 +#define GL_DMP_PROGRAM_BINARY_DMP 0x9253 +#endif /* GL_DMP_program_binary */ + +#ifndef GL_DMP_shader_binary +#define GL_DMP_shader_binary 1 +#define GL_SHADER_BINARY_DMP 0x9250 +#endif /* GL_DMP_shader_binary */ + +#ifndef GL_EXT_EGL_image_array +#define GL_EXT_EGL_image_array 1 +#endif /* GL_EXT_EGL_image_array */ + +#ifndef GL_EXT_YUV_target +#define GL_EXT_YUV_target 1 +#define GL_SAMPLER_EXTERNAL_2D_Y2Y_EXT 0x8BE7 +#endif /* GL_EXT_YUV_target */ + +#ifndef GL_EXT_base_instance +#define GL_EXT_base_instance 1 +typedef void (GL_APIENTRYP PFNGLDRAWARRAYSINSTANCEDBASEINSTANCEEXTPROC) (GLenum mode, GLint first, GLsizei count, GLsizei instancecount, GLuint baseinstance); +typedef void (GL_APIENTRYP PFNGLDRAWELEMENTSINSTANCEDBASEINSTANCEEXTPROC) (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount, GLuint baseinstance); +typedef void (GL_APIENTRYP PFNGLDRAWELEMENTSINSTANCEDBASEVERTEXBASEINSTANCEEXTPROC) (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount, GLint basevertex, GLuint baseinstance); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glDrawArraysInstancedBaseInstanceEXT (GLenum mode, GLint first, GLsizei count, GLsizei instancecount, GLuint baseinstance); +GL_APICALL void GL_APIENTRY glDrawElementsInstancedBaseInstanceEXT (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount, GLuint baseinstance); +GL_APICALL void GL_APIENTRY glDrawElementsInstancedBaseVertexBaseInstanceEXT (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount, GLint basevertex, GLuint baseinstance); +#endif +#endif /* GL_EXT_base_instance */ + +#ifndef GL_EXT_blend_func_extended +#define GL_EXT_blend_func_extended 1 +#define GL_SRC1_COLOR_EXT 0x88F9 +#define GL_SRC1_ALPHA_EXT 0x8589 +#define GL_ONE_MINUS_SRC1_COLOR_EXT 0x88FA +#define GL_ONE_MINUS_SRC1_ALPHA_EXT 0x88FB +#define GL_SRC_ALPHA_SATURATE_EXT 0x0308 +#define GL_LOCATION_INDEX_EXT 0x930F +#define GL_MAX_DUAL_SOURCE_DRAW_BUFFERS_EXT 0x88FC +typedef void (GL_APIENTRYP 
PFNGLBINDFRAGDATALOCATIONINDEXEDEXTPROC) (GLuint program, GLuint colorNumber, GLuint index, const GLchar *name); +typedef void (GL_APIENTRYP PFNGLBINDFRAGDATALOCATIONEXTPROC) (GLuint program, GLuint color, const GLchar *name); +typedef GLint (GL_APIENTRYP PFNGLGETPROGRAMRESOURCELOCATIONINDEXEXTPROC) (GLuint program, GLenum programInterface, const GLchar *name); +typedef GLint (GL_APIENTRYP PFNGLGETFRAGDATAINDEXEXTPROC) (GLuint program, const GLchar *name); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glBindFragDataLocationIndexedEXT (GLuint program, GLuint colorNumber, GLuint index, const GLchar *name); +GL_APICALL void GL_APIENTRY glBindFragDataLocationEXT (GLuint program, GLuint color, const GLchar *name); +GL_APICALL GLint GL_APIENTRY glGetProgramResourceLocationIndexEXT (GLuint program, GLenum programInterface, const GLchar *name); +GL_APICALL GLint GL_APIENTRY glGetFragDataIndexEXT (GLuint program, const GLchar *name); +#endif +#endif /* GL_EXT_blend_func_extended */ + +#ifndef GL_EXT_blend_minmax +#define GL_EXT_blend_minmax 1 +#define GL_MIN_EXT 0x8007 +#define GL_MAX_EXT 0x8008 +#endif /* GL_EXT_blend_minmax */ + +#ifndef GL_EXT_buffer_storage +#define GL_EXT_buffer_storage 1 +#define GL_MAP_READ_BIT 0x0001 +#define GL_MAP_WRITE_BIT 0x0002 +#define GL_MAP_PERSISTENT_BIT_EXT 0x0040 +#define GL_MAP_COHERENT_BIT_EXT 0x0080 +#define GL_DYNAMIC_STORAGE_BIT_EXT 0x0100 +#define GL_CLIENT_STORAGE_BIT_EXT 0x0200 +#define GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT_EXT 0x00004000 +#define GL_BUFFER_IMMUTABLE_STORAGE_EXT 0x821F +#define GL_BUFFER_STORAGE_FLAGS_EXT 0x8220 +typedef void (GL_APIENTRYP PFNGLBUFFERSTORAGEEXTPROC) (GLenum target, GLsizeiptr size, const void *data, GLbitfield flags); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glBufferStorageEXT (GLenum target, GLsizeiptr size, const void *data, GLbitfield flags); +#endif +#endif /* GL_EXT_buffer_storage */ + +#ifndef GL_EXT_clear_texture +#define GL_EXT_clear_texture 1 +typedef void (GL_APIENTRYP PFNGLCLEARTEXIMAGEEXTPROC) (GLuint texture, GLint level, GLenum format, GLenum type, const void *data); +typedef void (GL_APIENTRYP PFNGLCLEARTEXSUBIMAGEEXTPROC) (GLuint texture, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const void *data); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glClearTexImageEXT (GLuint texture, GLint level, GLenum format, GLenum type, const void *data); +GL_APICALL void GL_APIENTRY glClearTexSubImageEXT (GLuint texture, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const void *data); +#endif +#endif /* GL_EXT_clear_texture */ + +#ifndef GL_EXT_clip_cull_distance +#define GL_EXT_clip_cull_distance 1 +#define GL_MAX_CLIP_DISTANCES_EXT 0x0D32 +#define GL_MAX_CULL_DISTANCES_EXT 0x82F9 +#define GL_MAX_COMBINED_CLIP_AND_CULL_DISTANCES_EXT 0x82FA +#define GL_CLIP_DISTANCE0_EXT 0x3000 +#define GL_CLIP_DISTANCE1_EXT 0x3001 +#define GL_CLIP_DISTANCE2_EXT 0x3002 +#define GL_CLIP_DISTANCE3_EXT 0x3003 +#define GL_CLIP_DISTANCE4_EXT 0x3004 +#define GL_CLIP_DISTANCE5_EXT 0x3005 +#define GL_CLIP_DISTANCE6_EXT 0x3006 +#define GL_CLIP_DISTANCE7_EXT 0x3007 +#endif /* GL_EXT_clip_cull_distance */ + +#ifndef GL_EXT_color_buffer_float +#define GL_EXT_color_buffer_float 1 +#endif /* GL_EXT_color_buffer_float */ + +#ifndef GL_EXT_color_buffer_half_float +#define GL_EXT_color_buffer_half_float 1 +#define GL_RGBA16F_EXT 0x881A +#define 
GL_RGB16F_EXT 0x881B +#define GL_RG16F_EXT 0x822F +#define GL_R16F_EXT 0x822D +#define GL_FRAMEBUFFER_ATTACHMENT_COMPONENT_TYPE_EXT 0x8211 +#define GL_UNSIGNED_NORMALIZED_EXT 0x8C17 +#endif /* GL_EXT_color_buffer_half_float */ + +#ifndef GL_EXT_conservative_depth +#define GL_EXT_conservative_depth 1 +#endif /* GL_EXT_conservative_depth */ + +#ifndef GL_EXT_copy_image +#define GL_EXT_copy_image 1 +typedef void (GL_APIENTRYP PFNGLCOPYIMAGESUBDATAEXTPROC) (GLuint srcName, GLenum srcTarget, GLint srcLevel, GLint srcX, GLint srcY, GLint srcZ, GLuint dstName, GLenum dstTarget, GLint dstLevel, GLint dstX, GLint dstY, GLint dstZ, GLsizei srcWidth, GLsizei srcHeight, GLsizei srcDepth); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glCopyImageSubDataEXT (GLuint srcName, GLenum srcTarget, GLint srcLevel, GLint srcX, GLint srcY, GLint srcZ, GLuint dstName, GLenum dstTarget, GLint dstLevel, GLint dstX, GLint dstY, GLint dstZ, GLsizei srcWidth, GLsizei srcHeight, GLsizei srcDepth); +#endif +#endif /* GL_EXT_copy_image */ + +#ifndef GL_EXT_debug_label +#define GL_EXT_debug_label 1 +#define GL_PROGRAM_PIPELINE_OBJECT_EXT 0x8A4F +#define GL_PROGRAM_OBJECT_EXT 0x8B40 +#define GL_SHADER_OBJECT_EXT 0x8B48 +#define GL_BUFFER_OBJECT_EXT 0x9151 +#define GL_QUERY_OBJECT_EXT 0x9153 +#define GL_VERTEX_ARRAY_OBJECT_EXT 0x9154 +#define GL_TRANSFORM_FEEDBACK 0x8E22 +typedef void (GL_APIENTRYP PFNGLLABELOBJECTEXTPROC) (GLenum type, GLuint object, GLsizei length, const GLchar *label); +typedef void (GL_APIENTRYP PFNGLGETOBJECTLABELEXTPROC) (GLenum type, GLuint object, GLsizei bufSize, GLsizei *length, GLchar *label); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glLabelObjectEXT (GLenum type, GLuint object, GLsizei length, const GLchar *label); +GL_APICALL void GL_APIENTRY glGetObjectLabelEXT (GLenum type, GLuint object, GLsizei bufSize, GLsizei *length, GLchar *label); +#endif +#endif /* GL_EXT_debug_label */ + +#ifndef GL_EXT_debug_marker +#define GL_EXT_debug_marker 1 +typedef void (GL_APIENTRYP PFNGLINSERTEVENTMARKEREXTPROC) (GLsizei length, const GLchar *marker); +typedef void (GL_APIENTRYP PFNGLPUSHGROUPMARKEREXTPROC) (GLsizei length, const GLchar *marker); +typedef void (GL_APIENTRYP PFNGLPOPGROUPMARKEREXTPROC) (void); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glInsertEventMarkerEXT (GLsizei length, const GLchar *marker); +GL_APICALL void GL_APIENTRY glPushGroupMarkerEXT (GLsizei length, const GLchar *marker); +GL_APICALL void GL_APIENTRY glPopGroupMarkerEXT (void); +#endif +#endif /* GL_EXT_debug_marker */ + +#ifndef GL_EXT_discard_framebuffer +#define GL_EXT_discard_framebuffer 1 +#define GL_COLOR_EXT 0x1800 +#define GL_DEPTH_EXT 0x1801 +#define GL_STENCIL_EXT 0x1802 +typedef void (GL_APIENTRYP PFNGLDISCARDFRAMEBUFFEREXTPROC) (GLenum target, GLsizei numAttachments, const GLenum *attachments); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glDiscardFramebufferEXT (GLenum target, GLsizei numAttachments, const GLenum *attachments); +#endif +#endif /* GL_EXT_discard_framebuffer */ + +#ifndef GL_EXT_disjoint_timer_query +#define GL_EXT_disjoint_timer_query 1 +#define GL_QUERY_COUNTER_BITS_EXT 0x8864 +#define GL_CURRENT_QUERY_EXT 0x8865 +#define GL_QUERY_RESULT_EXT 0x8866 +#define GL_QUERY_RESULT_AVAILABLE_EXT 0x8867 +#define GL_TIME_ELAPSED_EXT 0x88BF +#define GL_TIMESTAMP_EXT 0x8E28 +#define GL_GPU_DISJOINT_EXT 0x8FBB +typedef void (GL_APIENTRYP PFNGLGENQUERIESEXTPROC) (GLsizei n, GLuint *ids); +typedef void (GL_APIENTRYP PFNGLDELETEQUERIESEXTPROC) (GLsizei n, const 
GLuint *ids); +typedef GLboolean (GL_APIENTRYP PFNGLISQUERYEXTPROC) (GLuint id); +typedef void (GL_APIENTRYP PFNGLBEGINQUERYEXTPROC) (GLenum target, GLuint id); +typedef void (GL_APIENTRYP PFNGLENDQUERYEXTPROC) (GLenum target); +typedef void (GL_APIENTRYP PFNGLQUERYCOUNTEREXTPROC) (GLuint id, GLenum target); +typedef void (GL_APIENTRYP PFNGLGETQUERYIVEXTPROC) (GLenum target, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETQUERYOBJECTIVEXTPROC) (GLuint id, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETQUERYOBJECTUIVEXTPROC) (GLuint id, GLenum pname, GLuint *params); +typedef void (GL_APIENTRYP PFNGLGETQUERYOBJECTI64VEXTPROC) (GLuint id, GLenum pname, GLint64 *params); +typedef void (GL_APIENTRYP PFNGLGETQUERYOBJECTUI64VEXTPROC) (GLuint id, GLenum pname, GLuint64 *params); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glGenQueriesEXT (GLsizei n, GLuint *ids); +GL_APICALL void GL_APIENTRY glDeleteQueriesEXT (GLsizei n, const GLuint *ids); +GL_APICALL GLboolean GL_APIENTRY glIsQueryEXT (GLuint id); +GL_APICALL void GL_APIENTRY glBeginQueryEXT (GLenum target, GLuint id); +GL_APICALL void GL_APIENTRY glEndQueryEXT (GLenum target); +GL_APICALL void GL_APIENTRY glQueryCounterEXT (GLuint id, GLenum target); +GL_APICALL void GL_APIENTRY glGetQueryivEXT (GLenum target, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetQueryObjectivEXT (GLuint id, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetQueryObjectuivEXT (GLuint id, GLenum pname, GLuint *params); +GL_APICALL void GL_APIENTRY glGetQueryObjecti64vEXT (GLuint id, GLenum pname, GLint64 *params); +GL_APICALL void GL_APIENTRY glGetQueryObjectui64vEXT (GLuint id, GLenum pname, GLuint64 *params); +#endif +#endif /* GL_EXT_disjoint_timer_query */ + +#ifndef GL_EXT_draw_buffers +#define GL_EXT_draw_buffers 1 +#define GL_MAX_COLOR_ATTACHMENTS_EXT 0x8CDF +#define GL_MAX_DRAW_BUFFERS_EXT 0x8824 +#define GL_DRAW_BUFFER0_EXT 0x8825 +#define GL_DRAW_BUFFER1_EXT 0x8826 +#define GL_DRAW_BUFFER2_EXT 0x8827 +#define GL_DRAW_BUFFER3_EXT 0x8828 +#define GL_DRAW_BUFFER4_EXT 0x8829 +#define GL_DRAW_BUFFER5_EXT 0x882A +#define GL_DRAW_BUFFER6_EXT 0x882B +#define GL_DRAW_BUFFER7_EXT 0x882C +#define GL_DRAW_BUFFER8_EXT 0x882D +#define GL_DRAW_BUFFER9_EXT 0x882E +#define GL_DRAW_BUFFER10_EXT 0x882F +#define GL_DRAW_BUFFER11_EXT 0x8830 +#define GL_DRAW_BUFFER12_EXT 0x8831 +#define GL_DRAW_BUFFER13_EXT 0x8832 +#define GL_DRAW_BUFFER14_EXT 0x8833 +#define GL_DRAW_BUFFER15_EXT 0x8834 +#define GL_COLOR_ATTACHMENT0_EXT 0x8CE0 +#define GL_COLOR_ATTACHMENT1_EXT 0x8CE1 +#define GL_COLOR_ATTACHMENT2_EXT 0x8CE2 +#define GL_COLOR_ATTACHMENT3_EXT 0x8CE3 +#define GL_COLOR_ATTACHMENT4_EXT 0x8CE4 +#define GL_COLOR_ATTACHMENT5_EXT 0x8CE5 +#define GL_COLOR_ATTACHMENT6_EXT 0x8CE6 +#define GL_COLOR_ATTACHMENT7_EXT 0x8CE7 +#define GL_COLOR_ATTACHMENT8_EXT 0x8CE8 +#define GL_COLOR_ATTACHMENT9_EXT 0x8CE9 +#define GL_COLOR_ATTACHMENT10_EXT 0x8CEA +#define GL_COLOR_ATTACHMENT11_EXT 0x8CEB +#define GL_COLOR_ATTACHMENT12_EXT 0x8CEC +#define GL_COLOR_ATTACHMENT13_EXT 0x8CED +#define GL_COLOR_ATTACHMENT14_EXT 0x8CEE +#define GL_COLOR_ATTACHMENT15_EXT 0x8CEF +typedef void (GL_APIENTRYP PFNGLDRAWBUFFERSEXTPROC) (GLsizei n, const GLenum *bufs); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glDrawBuffersEXT (GLsizei n, const GLenum *bufs); +#endif +#endif /* GL_EXT_draw_buffers */ + +#ifndef GL_EXT_draw_buffers_indexed +#define GL_EXT_draw_buffers_indexed 1 +typedef void (GL_APIENTRYP PFNGLENABLEIEXTPROC) (GLenum 
target, GLuint index); +typedef void (GL_APIENTRYP PFNGLDISABLEIEXTPROC) (GLenum target, GLuint index); +typedef void (GL_APIENTRYP PFNGLBLENDEQUATIONIEXTPROC) (GLuint buf, GLenum mode); +typedef void (GL_APIENTRYP PFNGLBLENDEQUATIONSEPARATEIEXTPROC) (GLuint buf, GLenum modeRGB, GLenum modeAlpha); +typedef void (GL_APIENTRYP PFNGLBLENDFUNCIEXTPROC) (GLuint buf, GLenum src, GLenum dst); +typedef void (GL_APIENTRYP PFNGLBLENDFUNCSEPARATEIEXTPROC) (GLuint buf, GLenum srcRGB, GLenum dstRGB, GLenum srcAlpha, GLenum dstAlpha); +typedef void (GL_APIENTRYP PFNGLCOLORMASKIEXTPROC) (GLuint index, GLboolean r, GLboolean g, GLboolean b, GLboolean a); +typedef GLboolean (GL_APIENTRYP PFNGLISENABLEDIEXTPROC) (GLenum target, GLuint index); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glEnableiEXT (GLenum target, GLuint index); +GL_APICALL void GL_APIENTRY glDisableiEXT (GLenum target, GLuint index); +GL_APICALL void GL_APIENTRY glBlendEquationiEXT (GLuint buf, GLenum mode); +GL_APICALL void GL_APIENTRY glBlendEquationSeparateiEXT (GLuint buf, GLenum modeRGB, GLenum modeAlpha); +GL_APICALL void GL_APIENTRY glBlendFunciEXT (GLuint buf, GLenum src, GLenum dst); +GL_APICALL void GL_APIENTRY glBlendFuncSeparateiEXT (GLuint buf, GLenum srcRGB, GLenum dstRGB, GLenum srcAlpha, GLenum dstAlpha); +GL_APICALL void GL_APIENTRY glColorMaskiEXT (GLuint index, GLboolean r, GLboolean g, GLboolean b, GLboolean a); +GL_APICALL GLboolean GL_APIENTRY glIsEnablediEXT (GLenum target, GLuint index); +#endif +#endif /* GL_EXT_draw_buffers_indexed */ + +#ifndef GL_EXT_draw_elements_base_vertex +#define GL_EXT_draw_elements_base_vertex 1 +typedef void (GL_APIENTRYP PFNGLDRAWELEMENTSBASEVERTEXEXTPROC) (GLenum mode, GLsizei count, GLenum type, const void *indices, GLint basevertex); +typedef void (GL_APIENTRYP PFNGLDRAWRANGEELEMENTSBASEVERTEXEXTPROC) (GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum type, const void *indices, GLint basevertex); +typedef void (GL_APIENTRYP PFNGLDRAWELEMENTSINSTANCEDBASEVERTEXEXTPROC) (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount, GLint basevertex); +typedef void (GL_APIENTRYP PFNGLMULTIDRAWELEMENTSBASEVERTEXEXTPROC) (GLenum mode, const GLsizei *count, GLenum type, const void *const*indices, GLsizei primcount, const GLint *basevertex); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glDrawElementsBaseVertexEXT (GLenum mode, GLsizei count, GLenum type, const void *indices, GLint basevertex); +GL_APICALL void GL_APIENTRY glDrawRangeElementsBaseVertexEXT (GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum type, const void *indices, GLint basevertex); +GL_APICALL void GL_APIENTRY glDrawElementsInstancedBaseVertexEXT (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount, GLint basevertex); +GL_APICALL void GL_APIENTRY glMultiDrawElementsBaseVertexEXT (GLenum mode, const GLsizei *count, GLenum type, const void *const*indices, GLsizei primcount, const GLint *basevertex); +#endif +#endif /* GL_EXT_draw_elements_base_vertex */ + +#ifndef GL_EXT_draw_instanced +#define GL_EXT_draw_instanced 1 +typedef void (GL_APIENTRYP PFNGLDRAWARRAYSINSTANCEDEXTPROC) (GLenum mode, GLint start, GLsizei count, GLsizei primcount); +typedef void (GL_APIENTRYP PFNGLDRAWELEMENTSINSTANCEDEXTPROC) (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei primcount); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glDrawArraysInstancedEXT (GLenum mode, GLint start, GLsizei count, GLsizei primcount); 
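+/* Illustrative sketch, not part of the Khronos header: on GLES2 these EXT
+ * entry points are normally resolved at runtime after checking the extension
+ * string. The names vertexCount/instanceCount below are hypothetical:
+ *
+ *   if (strstr((const char *) glGetString(GL_EXTENSIONS), "GL_EXT_draw_instanced")) {
+ *       PFNGLDRAWARRAYSINSTANCEDEXTPROC drawArraysInstancedEXT =
+ *           (PFNGLDRAWARRAYSINSTANCEDEXTPROC) eglGetProcAddress("glDrawArraysInstancedEXT");
+ *       if (drawArraysInstancedEXT)
+ *           drawArraysInstancedEXT(GL_TRIANGLES, 0, vertexCount, instanceCount);
+ *   }
+ */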
+GL_APICALL void GL_APIENTRY glDrawElementsInstancedEXT (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei primcount); +#endif +#endif /* GL_EXT_draw_instanced */ + +#ifndef GL_EXT_draw_transform_feedback +#define GL_EXT_draw_transform_feedback 1 +typedef void (GL_APIENTRYP PFNGLDRAWTRANSFORMFEEDBACKEXTPROC) (GLenum mode, GLuint id); +typedef void (GL_APIENTRYP PFNGLDRAWTRANSFORMFEEDBACKINSTANCEDEXTPROC) (GLenum mode, GLuint id, GLsizei instancecount); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glDrawTransformFeedbackEXT (GLenum mode, GLuint id); +GL_APICALL void GL_APIENTRY glDrawTransformFeedbackInstancedEXT (GLenum mode, GLuint id, GLsizei instancecount); +#endif +#endif /* GL_EXT_draw_transform_feedback */ + +#ifndef GL_EXT_external_buffer +#define GL_EXT_external_buffer 1 +typedef void *GLeglClientBufferEXT; +typedef void (GL_APIENTRYP PFNGLBUFFERSTORAGEEXTERNALEXTPROC) (GLenum target, GLintptr offset, GLsizeiptr size, GLeglClientBufferEXT clientBuffer, GLbitfield flags); +typedef void (GL_APIENTRYP PFNGLNAMEDBUFFERSTORAGEEXTERNALEXTPROC) (GLuint buffer, GLintptr offset, GLsizeiptr size, GLeglClientBufferEXT clientBuffer, GLbitfield flags); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glBufferStorageExternalEXT (GLenum target, GLintptr offset, GLsizeiptr size, GLeglClientBufferEXT clientBuffer, GLbitfield flags); +GL_APICALL void GL_APIENTRY glNamedBufferStorageExternalEXT (GLuint buffer, GLintptr offset, GLsizeiptr size, GLeglClientBufferEXT clientBuffer, GLbitfield flags); +#endif +#endif /* GL_EXT_external_buffer */ + +#ifndef GL_EXT_float_blend +#define GL_EXT_float_blend 1 +#endif /* GL_EXT_float_blend */ + +#ifndef GL_EXT_geometry_point_size +#define GL_EXT_geometry_point_size 1 +#endif /* GL_EXT_geometry_point_size */ + +#ifndef GL_EXT_geometry_shader +#define GL_EXT_geometry_shader 1 +#define GL_GEOMETRY_SHADER_EXT 0x8DD9 +#define GL_GEOMETRY_SHADER_BIT_EXT 0x00000004 +#define GL_GEOMETRY_LINKED_VERTICES_OUT_EXT 0x8916 +#define GL_GEOMETRY_LINKED_INPUT_TYPE_EXT 0x8917 +#define GL_GEOMETRY_LINKED_OUTPUT_TYPE_EXT 0x8918 +#define GL_GEOMETRY_SHADER_INVOCATIONS_EXT 0x887F +#define GL_LAYER_PROVOKING_VERTEX_EXT 0x825E +#define GL_LINES_ADJACENCY_EXT 0x000A +#define GL_LINE_STRIP_ADJACENCY_EXT 0x000B +#define GL_TRIANGLES_ADJACENCY_EXT 0x000C +#define GL_TRIANGLE_STRIP_ADJACENCY_EXT 0x000D +#define GL_MAX_GEOMETRY_UNIFORM_COMPONENTS_EXT 0x8DDF +#define GL_MAX_GEOMETRY_UNIFORM_BLOCKS_EXT 0x8A2C +#define GL_MAX_COMBINED_GEOMETRY_UNIFORM_COMPONENTS_EXT 0x8A32 +#define GL_MAX_GEOMETRY_INPUT_COMPONENTS_EXT 0x9123 +#define GL_MAX_GEOMETRY_OUTPUT_COMPONENTS_EXT 0x9124 +#define GL_MAX_GEOMETRY_OUTPUT_VERTICES_EXT 0x8DE0 +#define GL_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS_EXT 0x8DE1 +#define GL_MAX_GEOMETRY_SHADER_INVOCATIONS_EXT 0x8E5A +#define GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS_EXT 0x8C29 +#define GL_MAX_GEOMETRY_ATOMIC_COUNTER_BUFFERS_EXT 0x92CF +#define GL_MAX_GEOMETRY_ATOMIC_COUNTERS_EXT 0x92D5 +#define GL_MAX_GEOMETRY_IMAGE_UNIFORMS_EXT 0x90CD +#define GL_MAX_GEOMETRY_SHADER_STORAGE_BLOCKS_EXT 0x90D7 +#define GL_FIRST_VERTEX_CONVENTION_EXT 0x8E4D +#define GL_LAST_VERTEX_CONVENTION_EXT 0x8E4E +#define GL_UNDEFINED_VERTEX_EXT 0x8260 +#define GL_PRIMITIVES_GENERATED_EXT 0x8C87 +#define GL_FRAMEBUFFER_DEFAULT_LAYERS_EXT 0x9312 +#define GL_MAX_FRAMEBUFFER_LAYERS_EXT 0x9317 +#define GL_FRAMEBUFFER_INCOMPLETE_LAYER_TARGETS_EXT 0x8DA8 +#define GL_FRAMEBUFFER_ATTACHMENT_LAYERED_EXT 0x8DA7 +#define GL_REFERENCED_BY_GEOMETRY_SHADER_EXT 0x9309 +typedef 
void (GL_APIENTRYP PFNGLFRAMEBUFFERTEXTUREEXTPROC) (GLenum target, GLenum attachment, GLuint texture, GLint level); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glFramebufferTextureEXT (GLenum target, GLenum attachment, GLuint texture, GLint level); +#endif +#endif /* GL_EXT_geometry_shader */ + +#ifndef GL_EXT_gpu_shader5 +#define GL_EXT_gpu_shader5 1 +#endif /* GL_EXT_gpu_shader5 */ + +#ifndef GL_EXT_instanced_arrays +#define GL_EXT_instanced_arrays 1 +#define GL_VERTEX_ATTRIB_ARRAY_DIVISOR_EXT 0x88FE +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBDIVISOREXTPROC) (GLuint index, GLuint divisor); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glVertexAttribDivisorEXT (GLuint index, GLuint divisor); +#endif +#endif /* GL_EXT_instanced_arrays */ + +#ifndef GL_EXT_map_buffer_range +#define GL_EXT_map_buffer_range 1 +#define GL_MAP_READ_BIT_EXT 0x0001 +#define GL_MAP_WRITE_BIT_EXT 0x0002 +#define GL_MAP_INVALIDATE_RANGE_BIT_EXT 0x0004 +#define GL_MAP_INVALIDATE_BUFFER_BIT_EXT 0x0008 +#define GL_MAP_FLUSH_EXPLICIT_BIT_EXT 0x0010 +#define GL_MAP_UNSYNCHRONIZED_BIT_EXT 0x0020 +typedef void *(GL_APIENTRYP PFNGLMAPBUFFERRANGEEXTPROC) (GLenum target, GLintptr offset, GLsizeiptr length, GLbitfield access); +typedef void (GL_APIENTRYP PFNGLFLUSHMAPPEDBUFFERRANGEEXTPROC) (GLenum target, GLintptr offset, GLsizeiptr length); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void *GL_APIENTRY glMapBufferRangeEXT (GLenum target, GLintptr offset, GLsizeiptr length, GLbitfield access); +GL_APICALL void GL_APIENTRY glFlushMappedBufferRangeEXT (GLenum target, GLintptr offset, GLsizeiptr length); +#endif +#endif /* GL_EXT_map_buffer_range */ + +#ifndef GL_EXT_memory_object +#define GL_EXT_memory_object 1 +#define GL_TEXTURE_TILING_EXT 0x9580 +#define GL_DEDICATED_MEMORY_OBJECT_EXT 0x9581 +#define GL_PROTECTED_MEMORY_OBJECT_EXT 0x959B +#define GL_NUM_TILING_TYPES_EXT 0x9582 +#define GL_TILING_TYPES_EXT 0x9583 +#define GL_OPTIMAL_TILING_EXT 0x9584 +#define GL_LINEAR_TILING_EXT 0x9585 +#define GL_NUM_DEVICE_UUIDS_EXT 0x9596 +#define GL_DEVICE_UUID_EXT 0x9597 +#define GL_DRIVER_UUID_EXT 0x9598 +#define GL_UUID_SIZE_EXT 16 +typedef void (GL_APIENTRYP PFNGLGETUNSIGNEDBYTEVEXTPROC) (GLenum pname, GLubyte *data); +typedef void (GL_APIENTRYP PFNGLGETUNSIGNEDBYTEI_VEXTPROC) (GLenum target, GLuint index, GLubyte *data); +typedef void (GL_APIENTRYP PFNGLDELETEMEMORYOBJECTSEXTPROC) (GLsizei n, const GLuint *memoryObjects); +typedef GLboolean (GL_APIENTRYP PFNGLISMEMORYOBJECTEXTPROC) (GLuint memoryObject); +typedef void (GL_APIENTRYP PFNGLCREATEMEMORYOBJECTSEXTPROC) (GLsizei n, GLuint *memoryObjects); +typedef void (GL_APIENTRYP PFNGLMEMORYOBJECTPARAMETERIVEXTPROC) (GLuint memoryObject, GLenum pname, const GLint *params); +typedef void (GL_APIENTRYP PFNGLGETMEMORYOBJECTPARAMETERIVEXTPROC) (GLuint memoryObject, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLTEXSTORAGEMEM2DEXTPROC) (GLenum target, GLsizei levels, GLenum internalFormat, GLsizei width, GLsizei height, GLuint memory, GLuint64 offset); +typedef void (GL_APIENTRYP PFNGLTEXSTORAGEMEM2DMULTISAMPLEEXTPROC) (GLenum target, GLsizei samples, GLenum internalFormat, GLsizei width, GLsizei height, GLboolean fixedSampleLocations, GLuint memory, GLuint64 offset); +typedef void (GL_APIENTRYP PFNGLTEXSTORAGEMEM3DEXTPROC) (GLenum target, GLsizei levels, GLenum internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLuint memory, GLuint64 offset); +typedef void (GL_APIENTRYP PFNGLTEXSTORAGEMEM3DMULTISAMPLEEXTPROC) (GLenum target, GLsizei samples, 
GLenum internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLboolean fixedSampleLocations, GLuint memory, GLuint64 offset); +typedef void (GL_APIENTRYP PFNGLBUFFERSTORAGEMEMEXTPROC) (GLenum target, GLsizeiptr size, GLuint memory, GLuint64 offset); +typedef void (GL_APIENTRYP PFNGLTEXTURESTORAGEMEM2DEXTPROC) (GLuint texture, GLsizei levels, GLenum internalFormat, GLsizei width, GLsizei height, GLuint memory, GLuint64 offset); +typedef void (GL_APIENTRYP PFNGLTEXTURESTORAGEMEM2DMULTISAMPLEEXTPROC) (GLuint texture, GLsizei samples, GLenum internalFormat, GLsizei width, GLsizei height, GLboolean fixedSampleLocations, GLuint memory, GLuint64 offset); +typedef void (GL_APIENTRYP PFNGLTEXTURESTORAGEMEM3DEXTPROC) (GLuint texture, GLsizei levels, GLenum internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLuint memory, GLuint64 offset); +typedef void (GL_APIENTRYP PFNGLTEXTURESTORAGEMEM3DMULTISAMPLEEXTPROC) (GLuint texture, GLsizei samples, GLenum internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLboolean fixedSampleLocations, GLuint memory, GLuint64 offset); +typedef void (GL_APIENTRYP PFNGLNAMEDBUFFERSTORAGEMEMEXTPROC) (GLuint buffer, GLsizeiptr size, GLuint memory, GLuint64 offset); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glGetUnsignedBytevEXT (GLenum pname, GLubyte *data); +GL_APICALL void GL_APIENTRY glGetUnsignedBytei_vEXT (GLenum target, GLuint index, GLubyte *data); +GL_APICALL void GL_APIENTRY glDeleteMemoryObjectsEXT (GLsizei n, const GLuint *memoryObjects); +GL_APICALL GLboolean GL_APIENTRY glIsMemoryObjectEXT (GLuint memoryObject); +GL_APICALL void GL_APIENTRY glCreateMemoryObjectsEXT (GLsizei n, GLuint *memoryObjects); +GL_APICALL void GL_APIENTRY glMemoryObjectParameterivEXT (GLuint memoryObject, GLenum pname, const GLint *params); +GL_APICALL void GL_APIENTRY glGetMemoryObjectParameterivEXT (GLuint memoryObject, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glTexStorageMem2DEXT (GLenum target, GLsizei levels, GLenum internalFormat, GLsizei width, GLsizei height, GLuint memory, GLuint64 offset); +GL_APICALL void GL_APIENTRY glTexStorageMem2DMultisampleEXT (GLenum target, GLsizei samples, GLenum internalFormat, GLsizei width, GLsizei height, GLboolean fixedSampleLocations, GLuint memory, GLuint64 offset); +GL_APICALL void GL_APIENTRY glTexStorageMem3DEXT (GLenum target, GLsizei levels, GLenum internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLuint memory, GLuint64 offset); +GL_APICALL void GL_APIENTRY glTexStorageMem3DMultisampleEXT (GLenum target, GLsizei samples, GLenum internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLboolean fixedSampleLocations, GLuint memory, GLuint64 offset); +GL_APICALL void GL_APIENTRY glBufferStorageMemEXT (GLenum target, GLsizeiptr size, GLuint memory, GLuint64 offset); +GL_APICALL void GL_APIENTRY glTextureStorageMem2DEXT (GLuint texture, GLsizei levels, GLenum internalFormat, GLsizei width, GLsizei height, GLuint memory, GLuint64 offset); +GL_APICALL void GL_APIENTRY glTextureStorageMem2DMultisampleEXT (GLuint texture, GLsizei samples, GLenum internalFormat, GLsizei width, GLsizei height, GLboolean fixedSampleLocations, GLuint memory, GLuint64 offset); +GL_APICALL void GL_APIENTRY glTextureStorageMem3DEXT (GLuint texture, GLsizei levels, GLenum internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLuint memory, GLuint64 offset); +GL_APICALL void GL_APIENTRY glTextureStorageMem3DMultisampleEXT (GLuint texture, GLsizei samples, GLenum internalFormat, GLsizei 
width, GLsizei height, GLsizei depth, GLboolean fixedSampleLocations, GLuint memory, GLuint64 offset); +GL_APICALL void GL_APIENTRY glNamedBufferStorageMemEXT (GLuint buffer, GLsizeiptr size, GLuint memory, GLuint64 offset); +#endif +#endif /* GL_EXT_memory_object */ + +#ifndef GL_EXT_memory_object_fd +#define GL_EXT_memory_object_fd 1 +#define GL_HANDLE_TYPE_OPAQUE_FD_EXT 0x9586 +typedef void (GL_APIENTRYP PFNGLIMPORTMEMORYFDEXTPROC) (GLuint memory, GLuint64 size, GLenum handleType, GLint fd); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glImportMemoryFdEXT (GLuint memory, GLuint64 size, GLenum handleType, GLint fd); +#endif +#endif /* GL_EXT_memory_object_fd */ + +#ifndef GL_EXT_memory_object_win32 +#define GL_EXT_memory_object_win32 1 +#define GL_HANDLE_TYPE_OPAQUE_WIN32_EXT 0x9587 +#define GL_HANDLE_TYPE_OPAQUE_WIN32_KMT_EXT 0x9588 +#define GL_DEVICE_LUID_EXT 0x9599 +#define GL_DEVICE_NODE_MASK_EXT 0x959A +#define GL_LUID_SIZE_EXT 8 +#define GL_HANDLE_TYPE_D3D12_TILEPOOL_EXT 0x9589 +#define GL_HANDLE_TYPE_D3D12_RESOURCE_EXT 0x958A +#define GL_HANDLE_TYPE_D3D11_IMAGE_EXT 0x958B +#define GL_HANDLE_TYPE_D3D11_IMAGE_KMT_EXT 0x958C +typedef void (GL_APIENTRYP PFNGLIMPORTMEMORYWIN32HANDLEEXTPROC) (GLuint memory, GLuint64 size, GLenum handleType, void *handle); +typedef void (GL_APIENTRYP PFNGLIMPORTMEMORYWIN32NAMEEXTPROC) (GLuint memory, GLuint64 size, GLenum handleType, const void *name); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glImportMemoryWin32HandleEXT (GLuint memory, GLuint64 size, GLenum handleType, void *handle); +GL_APICALL void GL_APIENTRY glImportMemoryWin32NameEXT (GLuint memory, GLuint64 size, GLenum handleType, const void *name); +#endif +#endif /* GL_EXT_memory_object_win32 */ + +#ifndef GL_EXT_multi_draw_arrays +#define GL_EXT_multi_draw_arrays 1 +typedef void (GL_APIENTRYP PFNGLMULTIDRAWARRAYSEXTPROC) (GLenum mode, const GLint *first, const GLsizei *count, GLsizei primcount); +typedef void (GL_APIENTRYP PFNGLMULTIDRAWELEMENTSEXTPROC) (GLenum mode, const GLsizei *count, GLenum type, const void *const*indices, GLsizei primcount); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glMultiDrawArraysEXT (GLenum mode, const GLint *first, const GLsizei *count, GLsizei primcount); +GL_APICALL void GL_APIENTRY glMultiDrawElementsEXT (GLenum mode, const GLsizei *count, GLenum type, const void *const*indices, GLsizei primcount); +#endif +#endif /* GL_EXT_multi_draw_arrays */ + +#ifndef GL_EXT_multi_draw_indirect +#define GL_EXT_multi_draw_indirect 1 +typedef void (GL_APIENTRYP PFNGLMULTIDRAWARRAYSINDIRECTEXTPROC) (GLenum mode, const void *indirect, GLsizei drawcount, GLsizei stride); +typedef void (GL_APIENTRYP PFNGLMULTIDRAWELEMENTSINDIRECTEXTPROC) (GLenum mode, GLenum type, const void *indirect, GLsizei drawcount, GLsizei stride); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glMultiDrawArraysIndirectEXT (GLenum mode, const void *indirect, GLsizei drawcount, GLsizei stride); +GL_APICALL void GL_APIENTRY glMultiDrawElementsIndirectEXT (GLenum mode, GLenum type, const void *indirect, GLsizei drawcount, GLsizei stride); +#endif +#endif /* GL_EXT_multi_draw_indirect */ + +#ifndef GL_EXT_multisampled_compatibility +#define GL_EXT_multisampled_compatibility 1 +#define GL_MULTISAMPLE_EXT 0x809D +#define GL_SAMPLE_ALPHA_TO_ONE_EXT 0x809F +#endif /* GL_EXT_multisampled_compatibility */ + +#ifndef GL_EXT_multisampled_render_to_texture +#define GL_EXT_multisampled_render_to_texture 1 +#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_SAMPLES_EXT 0x8D6C 
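+/* Illustrative sketch, not part of the Khronos header: this extension lets a
+ * multisampled framebuffer resolve directly into an ordinary texture, e.g.
+ * (fbo/tex are hypothetical names created with glGenFramebuffers/glGenTextures):
+ *
+ *   glBindFramebuffer(GL_FRAMEBUFFER, fbo);
+ *   glFramebufferTexture2DMultisampleEXT(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
+ *                                        GL_TEXTURE_2D, tex, 0, 4);
+ *
+ * with the sample count clamped to the value queried via GL_MAX_SAMPLES_EXT. */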
+#define GL_RENDERBUFFER_SAMPLES_EXT 0x8CAB +#define GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE_EXT 0x8D56 +#define GL_MAX_SAMPLES_EXT 0x8D57 +typedef void (GL_APIENTRYP PFNGLRENDERBUFFERSTORAGEMULTISAMPLEEXTPROC) (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height); +typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERTEXTURE2DMULTISAMPLEEXTPROC) (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLsizei samples); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glRenderbufferStorageMultisampleEXT (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height); +GL_APICALL void GL_APIENTRY glFramebufferTexture2DMultisampleEXT (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLsizei samples); +#endif +#endif /* GL_EXT_multisampled_render_to_texture */ + +#ifndef GL_EXT_multiview_draw_buffers +#define GL_EXT_multiview_draw_buffers 1 +#define GL_COLOR_ATTACHMENT_EXT 0x90F0 +#define GL_MULTIVIEW_EXT 0x90F1 +#define GL_DRAW_BUFFER_EXT 0x0C01 +#define GL_READ_BUFFER_EXT 0x0C02 +#define GL_MAX_MULTIVIEW_BUFFERS_EXT 0x90F2 +typedef void (GL_APIENTRYP PFNGLREADBUFFERINDEXEDEXTPROC) (GLenum src, GLint index); +typedef void (GL_APIENTRYP PFNGLDRAWBUFFERSINDEXEDEXTPROC) (GLint n, const GLenum *location, const GLint *indices); +typedef void (GL_APIENTRYP PFNGLGETINTEGERI_VEXTPROC) (GLenum target, GLuint index, GLint *data); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glReadBufferIndexedEXT (GLenum src, GLint index); +GL_APICALL void GL_APIENTRY glDrawBuffersIndexedEXT (GLint n, const GLenum *location, const GLint *indices); +GL_APICALL void GL_APIENTRY glGetIntegeri_vEXT (GLenum target, GLuint index, GLint *data); +#endif +#endif /* GL_EXT_multiview_draw_buffers */ + +#ifndef GL_EXT_occlusion_query_boolean +#define GL_EXT_occlusion_query_boolean 1 +#define GL_ANY_SAMPLES_PASSED_EXT 0x8C2F +#define GL_ANY_SAMPLES_PASSED_CONSERVATIVE_EXT 0x8D6A +#endif /* GL_EXT_occlusion_query_boolean */ + +#ifndef GL_EXT_polygon_offset_clamp +#define GL_EXT_polygon_offset_clamp 1 +#define GL_POLYGON_OFFSET_CLAMP_EXT 0x8E1B +typedef void (GL_APIENTRYP PFNGLPOLYGONOFFSETCLAMPEXTPROC) (GLfloat factor, GLfloat units, GLfloat clamp); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glPolygonOffsetClampEXT (GLfloat factor, GLfloat units, GLfloat clamp); +#endif +#endif /* GL_EXT_polygon_offset_clamp */ + +#ifndef GL_EXT_post_depth_coverage +#define GL_EXT_post_depth_coverage 1 +#endif /* GL_EXT_post_depth_coverage */ + +#ifndef GL_EXT_primitive_bounding_box +#define GL_EXT_primitive_bounding_box 1 +#define GL_PRIMITIVE_BOUNDING_BOX_EXT 0x92BE +typedef void (GL_APIENTRYP PFNGLPRIMITIVEBOUNDINGBOXEXTPROC) (GLfloat minX, GLfloat minY, GLfloat minZ, GLfloat minW, GLfloat maxX, GLfloat maxY, GLfloat maxZ, GLfloat maxW); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glPrimitiveBoundingBoxEXT (GLfloat minX, GLfloat minY, GLfloat minZ, GLfloat minW, GLfloat maxX, GLfloat maxY, GLfloat maxZ, GLfloat maxW); +#endif +#endif /* GL_EXT_primitive_bounding_box */ + +#ifndef GL_EXT_protected_textures +#define GL_EXT_protected_textures 1 +#define GL_CONTEXT_FLAG_PROTECTED_CONTENT_BIT_EXT 0x00000010 +#define GL_TEXTURE_PROTECTED_EXT 0x8BFA +#endif /* GL_EXT_protected_textures */ + +#ifndef GL_EXT_pvrtc_sRGB +#define GL_EXT_pvrtc_sRGB 1 +#define GL_COMPRESSED_SRGB_PVRTC_2BPPV1_EXT 0x8A54 +#define GL_COMPRESSED_SRGB_PVRTC_4BPPV1_EXT 0x8A55 +#define GL_COMPRESSED_SRGB_ALPHA_PVRTC_2BPPV1_EXT 0x8A56 
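+/* Illustrative sketch, not part of the Khronos header: these sRGB PVRTC
+ * formats go through the ordinary compressed-upload path, e.g. (level, w, h,
+ * size, and data are hypothetical, taken from a pre-compressed PVRTC payload):
+ *
+ *   glCompressedTexImage2D(GL_TEXTURE_2D, level,
+ *                          GL_COMPRESSED_SRGB_ALPHA_PVRTC_2BPPV1_EXT,
+ *                          w, h, 0, size, data);
+ */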
+#define GL_COMPRESSED_SRGB_ALPHA_PVRTC_4BPPV1_EXT 0x8A57 +#define GL_COMPRESSED_SRGB_ALPHA_PVRTC_2BPPV2_IMG 0x93F0 +#define GL_COMPRESSED_SRGB_ALPHA_PVRTC_4BPPV2_IMG 0x93F1 +#endif /* GL_EXT_pvrtc_sRGB */ + +#ifndef GL_EXT_raster_multisample +#define GL_EXT_raster_multisample 1 +#define GL_RASTER_MULTISAMPLE_EXT 0x9327 +#define GL_RASTER_SAMPLES_EXT 0x9328 +#define GL_MAX_RASTER_SAMPLES_EXT 0x9329 +#define GL_RASTER_FIXED_SAMPLE_LOCATIONS_EXT 0x932A +#define GL_MULTISAMPLE_RASTERIZATION_ALLOWED_EXT 0x932B +#define GL_EFFECTIVE_RASTER_SAMPLES_EXT 0x932C +typedef void (GL_APIENTRYP PFNGLRASTERSAMPLESEXTPROC) (GLuint samples, GLboolean fixedsamplelocations); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glRasterSamplesEXT (GLuint samples, GLboolean fixedsamplelocations); +#endif +#endif /* GL_EXT_raster_multisample */ + +#ifndef GL_EXT_read_format_bgra +#define GL_EXT_read_format_bgra 1 +#define GL_UNSIGNED_SHORT_4_4_4_4_REV_EXT 0x8365 +#define GL_UNSIGNED_SHORT_1_5_5_5_REV_EXT 0x8366 +#endif /* GL_EXT_read_format_bgra */ + +#ifndef GL_EXT_render_snorm +#define GL_EXT_render_snorm 1 +#define GL_R8_SNORM 0x8F94 +#define GL_RG8_SNORM 0x8F95 +#define GL_RGBA8_SNORM 0x8F97 +#define GL_R16_SNORM_EXT 0x8F98 +#define GL_RG16_SNORM_EXT 0x8F99 +#define GL_RGBA16_SNORM_EXT 0x8F9B +#endif /* GL_EXT_render_snorm */ + +#ifndef GL_EXT_robustness +#define GL_EXT_robustness 1 +#define GL_GUILTY_CONTEXT_RESET_EXT 0x8253 +#define GL_INNOCENT_CONTEXT_RESET_EXT 0x8254 +#define GL_UNKNOWN_CONTEXT_RESET_EXT 0x8255 +#define GL_CONTEXT_ROBUST_ACCESS_EXT 0x90F3 +#define GL_RESET_NOTIFICATION_STRATEGY_EXT 0x8256 +#define GL_LOSE_CONTEXT_ON_RESET_EXT 0x8252 +#define GL_NO_RESET_NOTIFICATION_EXT 0x8261 +typedef GLenum (GL_APIENTRYP PFNGLGETGRAPHICSRESETSTATUSEXTPROC) (void); +typedef void (GL_APIENTRYP PFNGLREADNPIXELSEXTPROC) (GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, GLsizei bufSize, void *data); +typedef void (GL_APIENTRYP PFNGLGETNUNIFORMFVEXTPROC) (GLuint program, GLint location, GLsizei bufSize, GLfloat *params); +typedef void (GL_APIENTRYP PFNGLGETNUNIFORMIVEXTPROC) (GLuint program, GLint location, GLsizei bufSize, GLint *params); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL GLenum GL_APIENTRY glGetGraphicsResetStatusEXT (void); +GL_APICALL void GL_APIENTRY glReadnPixelsEXT (GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, GLsizei bufSize, void *data); +GL_APICALL void GL_APIENTRY glGetnUniformfvEXT (GLuint program, GLint location, GLsizei bufSize, GLfloat *params); +GL_APICALL void GL_APIENTRY glGetnUniformivEXT (GLuint program, GLint location, GLsizei bufSize, GLint *params); +#endif +#endif /* GL_EXT_robustness */ + +#ifndef GL_EXT_sRGB +#define GL_EXT_sRGB 1 +#define GL_SRGB_EXT 0x8C40 +#define GL_SRGB_ALPHA_EXT 0x8C42 +#define GL_SRGB8_ALPHA8_EXT 0x8C43 +#define GL_FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING_EXT 0x8210 +#endif /* GL_EXT_sRGB */ + +#ifndef GL_EXT_sRGB_write_control +#define GL_EXT_sRGB_write_control 1 +#define GL_FRAMEBUFFER_SRGB_EXT 0x8DB9 +#endif /* GL_EXT_sRGB_write_control */ + +#ifndef GL_EXT_semaphore +#define GL_EXT_semaphore 1 +#define GL_LAYOUT_GENERAL_EXT 0x958D +#define GL_LAYOUT_COLOR_ATTACHMENT_EXT 0x958E +#define GL_LAYOUT_DEPTH_STENCIL_ATTACHMENT_EXT 0x958F +#define GL_LAYOUT_DEPTH_STENCIL_READ_ONLY_EXT 0x9590 +#define GL_LAYOUT_SHADER_READ_ONLY_EXT 0x9591 +#define GL_LAYOUT_TRANSFER_SRC_EXT 0x9592 +#define GL_LAYOUT_TRANSFER_DST_EXT 0x9593 +typedef void (GL_APIENTRYP PFNGLGENSEMAPHORESEXTPROC) (GLsizei n, GLuint 
*semaphores); +typedef void (GL_APIENTRYP PFNGLDELETESEMAPHORESEXTPROC) (GLsizei n, const GLuint *semaphores); +typedef GLboolean (GL_APIENTRYP PFNGLISSEMAPHOREEXTPROC) (GLuint semaphore); +typedef void (GL_APIENTRYP PFNGLSEMAPHOREPARAMETERUI64VEXTPROC) (GLuint semaphore, GLenum pname, const GLuint64 *params); +typedef void (GL_APIENTRYP PFNGLGETSEMAPHOREPARAMETERUI64VEXTPROC) (GLuint semaphore, GLenum pname, GLuint64 *params); +typedef void (GL_APIENTRYP PFNGLWAITSEMAPHOREEXTPROC) (GLuint semaphore, GLuint numBufferBarriers, const GLuint *buffers, GLuint numTextureBarriers, const GLuint *textures, const GLenum *srcLayouts); +typedef void (GL_APIENTRYP PFNGLSIGNALSEMAPHOREEXTPROC) (GLuint semaphore, GLuint numBufferBarriers, const GLuint *buffers, GLuint numTextureBarriers, const GLuint *textures, const GLenum *dstLayouts); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glGenSemaphoresEXT (GLsizei n, GLuint *semaphores); +GL_APICALL void GL_APIENTRY glDeleteSemaphoresEXT (GLsizei n, const GLuint *semaphores); +GL_APICALL GLboolean GL_APIENTRY glIsSemaphoreEXT (GLuint semaphore); +GL_APICALL void GL_APIENTRY glSemaphoreParameterui64vEXT (GLuint semaphore, GLenum pname, const GLuint64 *params); +GL_APICALL void GL_APIENTRY glGetSemaphoreParameterui64vEXT (GLuint semaphore, GLenum pname, GLuint64 *params); +GL_APICALL void GL_APIENTRY glWaitSemaphoreEXT (GLuint semaphore, GLuint numBufferBarriers, const GLuint *buffers, GLuint numTextureBarriers, const GLuint *textures, const GLenum *srcLayouts); +GL_APICALL void GL_APIENTRY glSignalSemaphoreEXT (GLuint semaphore, GLuint numBufferBarriers, const GLuint *buffers, GLuint numTextureBarriers, const GLuint *textures, const GLenum *dstLayouts); +#endif +#endif /* GL_EXT_semaphore */ + +#ifndef GL_EXT_semaphore_fd +#define GL_EXT_semaphore_fd 1 +typedef void (GL_APIENTRYP PFNGLIMPORTSEMAPHOREFDEXTPROC) (GLuint semaphore, GLenum handleType, GLint fd); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glImportSemaphoreFdEXT (GLuint semaphore, GLenum handleType, GLint fd); +#endif +#endif /* GL_EXT_semaphore_fd */ + +#ifndef GL_EXT_semaphore_win32 +#define GL_EXT_semaphore_win32 1 +#define GL_HANDLE_TYPE_D3D12_FENCE_EXT 0x9594 +#define GL_D3D12_FENCE_VALUE_EXT 0x9595 +typedef void (GL_APIENTRYP PFNGLIMPORTSEMAPHOREWIN32HANDLEEXTPROC) (GLuint semaphore, GLenum handleType, void *handle); +typedef void (GL_APIENTRYP PFNGLIMPORTSEMAPHOREWIN32NAMEEXTPROC) (GLuint semaphore, GLenum handleType, const void *name); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glImportSemaphoreWin32HandleEXT (GLuint semaphore, GLenum handleType, void *handle); +GL_APICALL void GL_APIENTRY glImportSemaphoreWin32NameEXT (GLuint semaphore, GLenum handleType, const void *name); +#endif +#endif /* GL_EXT_semaphore_win32 */ + +#ifndef GL_EXT_separate_shader_objects +#define GL_EXT_separate_shader_objects 1 +#define GL_ACTIVE_PROGRAM_EXT 0x8259 +#define GL_VERTEX_SHADER_BIT_EXT 0x00000001 +#define GL_FRAGMENT_SHADER_BIT_EXT 0x00000002 +#define GL_ALL_SHADER_BITS_EXT 0xFFFFFFFF +#define GL_PROGRAM_SEPARABLE_EXT 0x8258 +#define GL_PROGRAM_PIPELINE_BINDING_EXT 0x825A +typedef void (GL_APIENTRYP PFNGLACTIVESHADERPROGRAMEXTPROC) (GLuint pipeline, GLuint program); +typedef void (GL_APIENTRYP PFNGLBINDPROGRAMPIPELINEEXTPROC) (GLuint pipeline); +typedef GLuint (GL_APIENTRYP PFNGLCREATESHADERPROGRAMVEXTPROC) (GLenum type, GLsizei count, const GLchar **strings); +typedef void (GL_APIENTRYP PFNGLDELETEPROGRAMPIPELINESEXTPROC) (GLsizei n, const GLuint *pipelines); 
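+/* Illustrative sketch, not part of the Khronos header: the usual flow with
+ * this extension is to build single-stage programs and mix them on a pipeline
+ * object, e.g. (vsSrc is a hypothetical const GLchar* shader source, pipeline
+ * a hypothetical GLuint):
+ *
+ *   GLuint vs = glCreateShaderProgramvEXT(GL_VERTEX_SHADER, 1, &vsSrc);
+ *   glGenProgramPipelinesEXT(1, &pipeline);
+ *   glUseProgramStagesEXT(pipeline, GL_VERTEX_SHADER_BIT_EXT, vs);
+ *   glBindProgramPipelineEXT(pipeline);
+ */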
+typedef void (GL_APIENTRYP PFNGLGENPROGRAMPIPELINESEXTPROC) (GLsizei n, GLuint *pipelines); +typedef void (GL_APIENTRYP PFNGLGETPROGRAMPIPELINEINFOLOGEXTPROC) (GLuint pipeline, GLsizei bufSize, GLsizei *length, GLchar *infoLog); +typedef void (GL_APIENTRYP PFNGLGETPROGRAMPIPELINEIVEXTPROC) (GLuint pipeline, GLenum pname, GLint *params); +typedef GLboolean (GL_APIENTRYP PFNGLISPROGRAMPIPELINEEXTPROC) (GLuint pipeline); +typedef void (GL_APIENTRYP PFNGLPROGRAMPARAMETERIEXTPROC) (GLuint program, GLenum pname, GLint value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM1FEXTPROC) (GLuint program, GLint location, GLfloat v0); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM1FVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM1IEXTPROC) (GLuint program, GLint location, GLint v0); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM1IVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLint *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM2FEXTPROC) (GLuint program, GLint location, GLfloat v0, GLfloat v1); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM2FVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM2IEXTPROC) (GLuint program, GLint location, GLint v0, GLint v1); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM2IVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLint *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM3FEXTPROC) (GLuint program, GLint location, GLfloat v0, GLfloat v1, GLfloat v2); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM3FVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM3IEXTPROC) (GLuint program, GLint location, GLint v0, GLint v1, GLint v2); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM3IVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLint *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM4FEXTPROC) (GLuint program, GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM4FVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM4IEXTPROC) (GLuint program, GLint location, GLint v0, GLint v1, GLint v2, GLint v3); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM4IVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLint *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX2FVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX3FVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX4FVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUSEPROGRAMSTAGESEXTPROC) (GLuint pipeline, GLbitfield stages, GLuint program); +typedef void (GL_APIENTRYP PFNGLVALIDATEPROGRAMPIPELINEEXTPROC) (GLuint pipeline); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM1UIEXTPROC) (GLuint program, GLint location, GLuint v0); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM2UIEXTPROC) (GLuint program, GLint location, GLuint v0, GLuint v1); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM3UIEXTPROC) (GLuint program, GLint location, GLuint v0, GLuint v1, GLuint v2); +typedef void (GL_APIENTRYP 
PFNGLPROGRAMUNIFORM4UIEXTPROC) (GLuint program, GLint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3);
+typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM1UIVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLuint *value);
+typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM2UIVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLuint *value);
+typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM3UIVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLuint *value);
+typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM4UIVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLuint *value);
+typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX2X3FVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX3X2FVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX2X4FVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX4X2FVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX3X4FVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX4X3FVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glActiveShaderProgramEXT (GLuint pipeline, GLuint program);
+GL_APICALL void GL_APIENTRY glBindProgramPipelineEXT (GLuint pipeline);
+GL_APICALL GLuint GL_APIENTRY glCreateShaderProgramvEXT (GLenum type, GLsizei count, const GLchar **strings);
+GL_APICALL void GL_APIENTRY glDeleteProgramPipelinesEXT (GLsizei n, const GLuint *pipelines);
+GL_APICALL void GL_APIENTRY glGenProgramPipelinesEXT (GLsizei n, GLuint *pipelines);
+GL_APICALL void GL_APIENTRY glGetProgramPipelineInfoLogEXT (GLuint pipeline, GLsizei bufSize, GLsizei *length, GLchar *infoLog);
+GL_APICALL void GL_APIENTRY glGetProgramPipelineivEXT (GLuint pipeline, GLenum pname, GLint *params);
+GL_APICALL GLboolean GL_APIENTRY glIsProgramPipelineEXT (GLuint pipeline);
+GL_APICALL void GL_APIENTRY glProgramParameteriEXT (GLuint program, GLenum pname, GLint value);
+GL_APICALL void GL_APIENTRY glProgramUniform1fEXT (GLuint program, GLint location, GLfloat v0);
+GL_APICALL void GL_APIENTRY glProgramUniform1fvEXT (GLuint program, GLint location, GLsizei count, const GLfloat *value);
+GL_APICALL void GL_APIENTRY glProgramUniform1iEXT (GLuint program, GLint location, GLint v0);
+GL_APICALL void GL_APIENTRY glProgramUniform1ivEXT (GLuint program, GLint location, GLsizei count, const GLint *value);
+GL_APICALL void GL_APIENTRY glProgramUniform2fEXT (GLuint program, GLint location, GLfloat v0, GLfloat v1);
+GL_APICALL void GL_APIENTRY glProgramUniform2fvEXT (GLuint program, GLint location, GLsizei count, const GLfloat *value);
+GL_APICALL void GL_APIENTRY glProgramUniform2iEXT (GLuint program, GLint location, GLint v0, GLint v1);
+GL_APICALL void GL_APIENTRY glProgramUniform2ivEXT (GLuint program, GLint location, GLsizei count, const GLint *value);
+GL_APICALL void GL_APIENTRY glProgramUniform3fEXT (GLuint program, GLint location, GLfloat v0, GLfloat v1, GLfloat v2);
+GL_APICALL void GL_APIENTRY glProgramUniform3fvEXT (GLuint program, GLint location, GLsizei count, const GLfloat *value);
+GL_APICALL void GL_APIENTRY glProgramUniform3iEXT (GLuint program, GLint location, GLint v0, GLint v1, GLint v2);
+GL_APICALL void GL_APIENTRY glProgramUniform3ivEXT (GLuint program, GLint location, GLsizei count, const GLint *value);
+GL_APICALL void GL_APIENTRY glProgramUniform4fEXT (GLuint program, GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3);
+GL_APICALL void GL_APIENTRY glProgramUniform4fvEXT (GLuint program, GLint location, GLsizei count, const GLfloat *value);
+GL_APICALL void GL_APIENTRY glProgramUniform4iEXT (GLuint program, GLint location, GLint v0, GLint v1, GLint v2, GLint v3);
+GL_APICALL void GL_APIENTRY glProgramUniform4ivEXT (GLuint program, GLint location, GLsizei count, const GLint *value);
+GL_APICALL void GL_APIENTRY glProgramUniformMatrix2fvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GL_APICALL void GL_APIENTRY glProgramUniformMatrix3fvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GL_APICALL void GL_APIENTRY glProgramUniformMatrix4fvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GL_APICALL void GL_APIENTRY glUseProgramStagesEXT (GLuint pipeline, GLbitfield stages, GLuint program);
+GL_APICALL void GL_APIENTRY glValidateProgramPipelineEXT (GLuint pipeline);
+GL_APICALL void GL_APIENTRY glProgramUniform1uiEXT (GLuint program, GLint location, GLuint v0);
+GL_APICALL void GL_APIENTRY glProgramUniform2uiEXT (GLuint program, GLint location, GLuint v0, GLuint v1);
+GL_APICALL void GL_APIENTRY glProgramUniform3uiEXT (GLuint program, GLint location, GLuint v0, GLuint v1, GLuint v2);
+GL_APICALL void GL_APIENTRY glProgramUniform4uiEXT (GLuint program, GLint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3);
+GL_APICALL void GL_APIENTRY glProgramUniform1uivEXT (GLuint program, GLint location, GLsizei count, const GLuint *value);
+GL_APICALL void GL_APIENTRY glProgramUniform2uivEXT (GLuint program, GLint location, GLsizei count, const GLuint *value);
+GL_APICALL void GL_APIENTRY glProgramUniform3uivEXT (GLuint program, GLint location, GLsizei count, const GLuint *value);
+GL_APICALL void GL_APIENTRY glProgramUniform4uivEXT (GLuint program, GLint location, GLsizei count, const GLuint *value);
+GL_APICALL void GL_APIENTRY glProgramUniformMatrix2x3fvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GL_APICALL void GL_APIENTRY glProgramUniformMatrix3x2fvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GL_APICALL void GL_APIENTRY glProgramUniformMatrix2x4fvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GL_APICALL void GL_APIENTRY glProgramUniformMatrix4x2fvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GL_APICALL void GL_APIENTRY glProgramUniformMatrix3x4fvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GL_APICALL void GL_APIENTRY glProgramUniformMatrix4x3fvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+#endif
+#endif /* GL_EXT_separate_shader_objects */
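+
+/*
+ * Illustrative sketch (not part of the Khronos registry text): binding two
+ * separable programs to one pipeline with this extension. The shader source
+ * strings and the stage bit names (declared earlier in this extension block)
+ * are assumed; error checking is omitted.
+ *
+ *   GLuint vs = glCreateShaderProgramvEXT(GL_VERTEX_SHADER, 1, &vsSrc);
+ *   GLuint fs = glCreateShaderProgramvEXT(GL_FRAGMENT_SHADER, 1, &fsSrc);
+ *   GLuint pipe;
+ *   glGenProgramPipelinesEXT(1, &pipe);
+ *   glUseProgramStagesEXT(pipe, GL_VERTEX_SHADER_BIT_EXT, vs);
+ *   glUseProgramStagesEXT(pipe, GL_FRAGMENT_SHADER_BIT_EXT, fs);
+ *   glBindProgramPipelineEXT(pipe);
+ */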
+
+#ifndef GL_EXT_shader_framebuffer_fetch
+#define GL_EXT_shader_framebuffer_fetch 1
+#define GL_FRAGMENT_SHADER_DISCARDS_SAMPLES_EXT 0x8A52
+#endif /* GL_EXT_shader_framebuffer_fetch */
+
+#ifndef GL_EXT_shader_group_vote
+#define GL_EXT_shader_group_vote 1
+#endif /* GL_EXT_shader_group_vote */
+
+#ifndef GL_EXT_shader_implicit_conversions
+#define GL_EXT_shader_implicit_conversions 1
+#endif /* GL_EXT_shader_implicit_conversions */
+
+#ifndef GL_EXT_shader_integer_mix
+#define GL_EXT_shader_integer_mix 1
+#endif /* GL_EXT_shader_integer_mix */
+
+#ifndef GL_EXT_shader_io_blocks
+#define GL_EXT_shader_io_blocks 1
+#endif /* GL_EXT_shader_io_blocks */
+
+#ifndef GL_EXT_shader_non_constant_global_initializers
+#define GL_EXT_shader_non_constant_global_initializers 1
+#endif /* GL_EXT_shader_non_constant_global_initializers */
+
+#ifndef GL_EXT_shader_pixel_local_storage
+#define GL_EXT_shader_pixel_local_storage 1
+#define GL_MAX_SHADER_PIXEL_LOCAL_STORAGE_FAST_SIZE_EXT 0x8F63
+#define GL_MAX_SHADER_PIXEL_LOCAL_STORAGE_SIZE_EXT 0x8F67
+#define GL_SHADER_PIXEL_LOCAL_STORAGE_EXT 0x8F64
+#endif /* GL_EXT_shader_pixel_local_storage */
+
+#ifndef GL_EXT_shader_pixel_local_storage2
+#define GL_EXT_shader_pixel_local_storage2 1
+#define GL_MAX_SHADER_COMBINED_LOCAL_STORAGE_FAST_SIZE_EXT 0x9650
+#define GL_MAX_SHADER_COMBINED_LOCAL_STORAGE_SIZE_EXT 0x9651
+#define GL_FRAMEBUFFER_INCOMPLETE_INSUFFICIENT_SHADER_COMBINED_LOCAL_STORAGE_EXT 0x9652
+typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERPIXELLOCALSTORAGESIZEEXTPROC) (GLuint target, GLsizei size);
+typedef GLsizei (GL_APIENTRYP PFNGLGETFRAMEBUFFERPIXELLOCALSTORAGESIZEEXTPROC) (GLuint target);
+typedef void (GL_APIENTRYP PFNGLCLEARPIXELLOCALSTORAGEUIEXTPROC) (GLsizei offset, GLsizei n, const GLuint *values);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glFramebufferPixelLocalStorageSizeEXT (GLuint target, GLsizei size);
+GL_APICALL GLsizei GL_APIENTRY glGetFramebufferPixelLocalStorageSizeEXT (GLuint target);
+GL_APICALL void GL_APIENTRY glClearPixelLocalStorageuiEXT (GLsizei offset, GLsizei n, const GLuint *values);
+#endif
+#endif /* GL_EXT_shader_pixel_local_storage2 */
+
+#ifndef GL_EXT_shader_texture_lod
+#define GL_EXT_shader_texture_lod 1
+#endif /* GL_EXT_shader_texture_lod */
+
+#ifndef GL_EXT_shadow_samplers
+#define GL_EXT_shadow_samplers 1
+#define GL_TEXTURE_COMPARE_MODE_EXT 0x884C
+#define GL_TEXTURE_COMPARE_FUNC_EXT 0x884D
+#define GL_COMPARE_REF_TO_TEXTURE_EXT 0x884E
+#define GL_SAMPLER_2D_SHADOW_EXT 0x8B62
+#endif /* GL_EXT_shadow_samplers */
+
+#ifndef GL_EXT_sparse_texture
+#define GL_EXT_sparse_texture 1
+#define GL_TEXTURE_SPARSE_EXT 0x91A6
+#define GL_VIRTUAL_PAGE_SIZE_INDEX_EXT 0x91A7
+#define GL_NUM_SPARSE_LEVELS_EXT 0x91AA
+#define GL_NUM_VIRTUAL_PAGE_SIZES_EXT 0x91A8
+#define GL_VIRTUAL_PAGE_SIZE_X_EXT 0x9195
+#define GL_VIRTUAL_PAGE_SIZE_Y_EXT 0x9196
+#define GL_VIRTUAL_PAGE_SIZE_Z_EXT 0x9197
+#define GL_TEXTURE_2D_ARRAY 0x8C1A
+#define GL_TEXTURE_3D 0x806F
+#define GL_MAX_SPARSE_TEXTURE_SIZE_EXT 0x9198
+#define GL_MAX_SPARSE_3D_TEXTURE_SIZE_EXT 0x9199
+#define GL_MAX_SPARSE_ARRAY_TEXTURE_LAYERS_EXT 0x919A
+#define GL_SPARSE_TEXTURE_FULL_ARRAY_CUBE_MIPMAPS_EXT 0x91A9
+typedef void (GL_APIENTRYP PFNGLTEXPAGECOMMITMENTEXTPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLboolean commit);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glTexPageCommitmentEXT (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLboolean commit);
+#endif
+#endif /* GL_EXT_sparse_texture */
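+
+/*
+ * Illustrative sketch: committing one page-aligned region of a sparse
+ * texture. Assumes a context exposing GL_EXT_sparse_texture; `tex`, the
+ * 4096x4096 size and the 256x256 region are placeholders.
+ *
+ *   glBindTexture(GL_TEXTURE_2D, tex);
+ *   glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_SPARSE_EXT, GL_TRUE);
+ *   glTexStorage2D(GL_TEXTURE_2D, 1, GL_RGBA8, 4096, 4096);
+ *   glTexPageCommitmentEXT(GL_TEXTURE_2D, 0, 0, 0, 0, 256, 256, 1, GL_TRUE);
+ */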
+
+#ifndef GL_EXT_sparse_texture2
+#define GL_EXT_sparse_texture2 1
+#endif /* GL_EXT_sparse_texture2 */
+
+#ifndef GL_EXT_tessellation_point_size
+#define GL_EXT_tessellation_point_size 1
+#endif /* GL_EXT_tessellation_point_size */
+
+#ifndef GL_EXT_tessellation_shader
+#define GL_EXT_tessellation_shader 1
+#define GL_PATCHES_EXT 0x000E
+#define GL_PATCH_VERTICES_EXT 0x8E72
+#define GL_TESS_CONTROL_OUTPUT_VERTICES_EXT 0x8E75
+#define GL_TESS_GEN_MODE_EXT 0x8E76
+#define GL_TESS_GEN_SPACING_EXT 0x8E77
+#define GL_TESS_GEN_VERTEX_ORDER_EXT 0x8E78
+#define GL_TESS_GEN_POINT_MODE_EXT 0x8E79
+#define GL_ISOLINES_EXT 0x8E7A
+#define GL_QUADS_EXT 0x0007
+#define GL_FRACTIONAL_ODD_EXT 0x8E7B
+#define GL_FRACTIONAL_EVEN_EXT 0x8E7C
+#define GL_MAX_PATCH_VERTICES_EXT 0x8E7D
+#define GL_MAX_TESS_GEN_LEVEL_EXT 0x8E7E
+#define GL_MAX_TESS_CONTROL_UNIFORM_COMPONENTS_EXT 0x8E7F
+#define GL_MAX_TESS_EVALUATION_UNIFORM_COMPONENTS_EXT 0x8E80
+#define GL_MAX_TESS_CONTROL_TEXTURE_IMAGE_UNITS_EXT 0x8E81
+#define GL_MAX_TESS_EVALUATION_TEXTURE_IMAGE_UNITS_EXT 0x8E82
+#define GL_MAX_TESS_CONTROL_OUTPUT_COMPONENTS_EXT 0x8E83
+#define GL_MAX_TESS_PATCH_COMPONENTS_EXT 0x8E84
+#define GL_MAX_TESS_CONTROL_TOTAL_OUTPUT_COMPONENTS_EXT 0x8E85
+#define GL_MAX_TESS_EVALUATION_OUTPUT_COMPONENTS_EXT 0x8E86
+#define GL_MAX_TESS_CONTROL_UNIFORM_BLOCKS_EXT 0x8E89
+#define GL_MAX_TESS_EVALUATION_UNIFORM_BLOCKS_EXT 0x8E8A
+#define GL_MAX_TESS_CONTROL_INPUT_COMPONENTS_EXT 0x886C
+#define GL_MAX_TESS_EVALUATION_INPUT_COMPONENTS_EXT 0x886D
+#define GL_MAX_COMBINED_TESS_CONTROL_UNIFORM_COMPONENTS_EXT 0x8E1E
+#define GL_MAX_COMBINED_TESS_EVALUATION_UNIFORM_COMPONENTS_EXT 0x8E1F
+#define GL_MAX_TESS_CONTROL_ATOMIC_COUNTER_BUFFERS_EXT 0x92CD
+#define GL_MAX_TESS_EVALUATION_ATOMIC_COUNTER_BUFFERS_EXT 0x92CE
+#define GL_MAX_TESS_CONTROL_ATOMIC_COUNTERS_EXT 0x92D3
+#define GL_MAX_TESS_EVALUATION_ATOMIC_COUNTERS_EXT 0x92D4
+#define GL_MAX_TESS_CONTROL_IMAGE_UNIFORMS_EXT 0x90CB
+#define GL_MAX_TESS_EVALUATION_IMAGE_UNIFORMS_EXT 0x90CC
+#define GL_MAX_TESS_CONTROL_SHADER_STORAGE_BLOCKS_EXT 0x90D8
+#define GL_MAX_TESS_EVALUATION_SHADER_STORAGE_BLOCKS_EXT 0x90D9
+#define GL_PRIMITIVE_RESTART_FOR_PATCHES_SUPPORTED 0x8221
+#define GL_IS_PER_PATCH_EXT 0x92E7
+#define GL_REFERENCED_BY_TESS_CONTROL_SHADER_EXT 0x9307
+#define GL_REFERENCED_BY_TESS_EVALUATION_SHADER_EXT 0x9308
+#define GL_TESS_CONTROL_SHADER_EXT 0x8E88
+#define GL_TESS_EVALUATION_SHADER_EXT 0x8E87
+#define GL_TESS_CONTROL_SHADER_BIT_EXT 0x00000008
+#define GL_TESS_EVALUATION_SHADER_BIT_EXT 0x00000010
+typedef void (GL_APIENTRYP PFNGLPATCHPARAMETERIEXTPROC) (GLenum pname, GLint value);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glPatchParameteriEXT (GLenum pname, GLint value);
+#endif
+#endif /* GL_EXT_tessellation_shader */
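+
+/*
+ * Illustrative sketch: issuing patch primitives once a program containing
+ * tessellation stages is bound (program setup omitted; `numPatches` is a
+ * placeholder).
+ *
+ *   glPatchParameteriEXT(GL_PATCH_VERTICES_EXT, 3);
+ *   glDrawArrays(GL_PATCHES_EXT, 0, 3 * numPatches);
+ */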
+
+#ifndef GL_EXT_texture_border_clamp
+#define GL_EXT_texture_border_clamp 1
+#define GL_TEXTURE_BORDER_COLOR_EXT 0x1004
+#define GL_CLAMP_TO_BORDER_EXT 0x812D
+typedef void (GL_APIENTRYP PFNGLTEXPARAMETERIIVEXTPROC) (GLenum target, GLenum pname, const GLint *params);
+typedef void (GL_APIENTRYP PFNGLTEXPARAMETERIUIVEXTPROC) (GLenum target, GLenum pname, const GLuint *params);
+typedef void (GL_APIENTRYP PFNGLGETTEXPARAMETERIIVEXTPROC) (GLenum target, GLenum pname, GLint *params);
+typedef void (GL_APIENTRYP PFNGLGETTEXPARAMETERIUIVEXTPROC) (GLenum target, GLenum pname, GLuint *params);
+typedef void (GL_APIENTRYP PFNGLSAMPLERPARAMETERIIVEXTPROC) (GLuint sampler, GLenum pname, const GLint *param);
+typedef void (GL_APIENTRYP PFNGLSAMPLERPARAMETERIUIVEXTPROC) (GLuint sampler, GLenum pname, const GLuint *param);
+typedef void (GL_APIENTRYP PFNGLGETSAMPLERPARAMETERIIVEXTPROC) (GLuint sampler, GLenum pname, GLint *params);
+typedef void (GL_APIENTRYP PFNGLGETSAMPLERPARAMETERIUIVEXTPROC) (GLuint sampler, GLenum pname, GLuint *params);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glTexParameterIivEXT (GLenum target, GLenum pname, const GLint *params);
+GL_APICALL void GL_APIENTRY glTexParameterIuivEXT (GLenum target, GLenum pname, const GLuint *params);
+GL_APICALL void GL_APIENTRY glGetTexParameterIivEXT (GLenum target, GLenum pname, GLint *params);
+GL_APICALL void GL_APIENTRY glGetTexParameterIuivEXT (GLenum target, GLenum pname, GLuint *params);
+GL_APICALL void GL_APIENTRY glSamplerParameterIivEXT (GLuint sampler, GLenum pname, const GLint *param);
+GL_APICALL void GL_APIENTRY glSamplerParameterIuivEXT (GLuint sampler, GLenum pname, const GLuint *param);
+GL_APICALL void GL_APIENTRY glGetSamplerParameterIivEXT (GLuint sampler, GLenum pname, GLint *params);
+GL_APICALL void GL_APIENTRY glGetSamplerParameterIuivEXT (GLuint sampler, GLenum pname, GLuint *params);
+#endif
+#endif /* GL_EXT_texture_border_clamp */
+
+#ifndef GL_EXT_texture_buffer
+#define GL_EXT_texture_buffer 1
+#define GL_TEXTURE_BUFFER_EXT 0x8C2A
+#define GL_TEXTURE_BUFFER_BINDING_EXT 0x8C2A
+#define GL_MAX_TEXTURE_BUFFER_SIZE_EXT 0x8C2B
+#define GL_TEXTURE_BINDING_BUFFER_EXT 0x8C2C
+#define GL_TEXTURE_BUFFER_DATA_STORE_BINDING_EXT 0x8C2D
+#define GL_TEXTURE_BUFFER_OFFSET_ALIGNMENT_EXT 0x919F
+#define GL_SAMPLER_BUFFER_EXT 0x8DC2
+#define GL_INT_SAMPLER_BUFFER_EXT 0x8DD0
+#define GL_UNSIGNED_INT_SAMPLER_BUFFER_EXT 0x8DD8
+#define GL_IMAGE_BUFFER_EXT 0x9051
+#define GL_INT_IMAGE_BUFFER_EXT 0x905C
+#define GL_UNSIGNED_INT_IMAGE_BUFFER_EXT 0x9067
+#define GL_TEXTURE_BUFFER_OFFSET_EXT 0x919D
+#define GL_TEXTURE_BUFFER_SIZE_EXT 0x919E
+typedef void (GL_APIENTRYP PFNGLTEXBUFFEREXTPROC) (GLenum target, GLenum internalformat, GLuint buffer);
+typedef void (GL_APIENTRYP PFNGLTEXBUFFERRANGEEXTPROC) (GLenum target, GLenum internalformat, GLuint buffer, GLintptr offset, GLsizeiptr size);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glTexBufferEXT (GLenum target, GLenum internalformat, GLuint buffer);
+GL_APICALL void GL_APIENTRY glTexBufferRangeEXT (GLenum target, GLenum internalformat, GLuint buffer, GLintptr offset, GLsizeiptr size);
+#endif
+#endif /* GL_EXT_texture_buffer */
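+
+/*
+ * Illustrative sketch: exposing a buffer object's data store as a
+ * one-dimensional texel array. `tbo` and `buf` are placeholders, and
+ * GL_R32F is the core ES 3.0 spelling of the GL_R32F_EXT value defined
+ * further below.
+ *
+ *   glBindTexture(GL_TEXTURE_BUFFER_EXT, tbo);
+ *   glTexBufferEXT(GL_TEXTURE_BUFFER_EXT, GL_R32F, buf);
+ */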
+
+#ifndef GL_EXT_texture_compression_astc_decode_mode
+#define GL_EXT_texture_compression_astc_decode_mode 1
+#define GL_TEXTURE_ASTC_DECODE_PRECISION_EXT 0x8F69
+#endif /* GL_EXT_texture_compression_astc_decode_mode */
+
+#ifndef GL_EXT_texture_compression_dxt1
+#define GL_EXT_texture_compression_dxt1 1
+#define GL_COMPRESSED_RGB_S3TC_DXT1_EXT 0x83F0
+#define GL_COMPRESSED_RGBA_S3TC_DXT1_EXT 0x83F1
+#endif /* GL_EXT_texture_compression_dxt1 */
+
+#ifndef GL_EXT_texture_compression_s3tc
+#define GL_EXT_texture_compression_s3tc 1
+#define GL_COMPRESSED_RGBA_S3TC_DXT3_EXT 0x83F2
+#define GL_COMPRESSED_RGBA_S3TC_DXT5_EXT 0x83F3
+#endif /* GL_EXT_texture_compression_s3tc */
+
+#ifndef GL_EXT_texture_cube_map_array
+#define GL_EXT_texture_cube_map_array 1
+#define GL_TEXTURE_CUBE_MAP_ARRAY_EXT 0x9009
+#define GL_TEXTURE_BINDING_CUBE_MAP_ARRAY_EXT 0x900A
+#define GL_SAMPLER_CUBE_MAP_ARRAY_EXT 0x900C
+#define GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW_EXT 0x900D
+#define GL_INT_SAMPLER_CUBE_MAP_ARRAY_EXT 0x900E
+#define GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY_EXT 0x900F
+#define GL_IMAGE_CUBE_MAP_ARRAY_EXT 0x9054
+#define GL_INT_IMAGE_CUBE_MAP_ARRAY_EXT 0x905F
+#define GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY_EXT 0x906A
+#endif /* GL_EXT_texture_cube_map_array */
+
+#ifndef GL_EXT_texture_filter_anisotropic
+#define GL_EXT_texture_filter_anisotropic 1
+#define GL_TEXTURE_MAX_ANISOTROPY_EXT 0x84FE
+#define GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT 0x84FF
+#endif /* GL_EXT_texture_filter_anisotropic */
+
+#ifndef GL_EXT_texture_filter_minmax
+#define GL_EXT_texture_filter_minmax 1
+#endif /* GL_EXT_texture_filter_minmax */
+
+#ifndef GL_EXT_texture_format_BGRA8888
+#define GL_EXT_texture_format_BGRA8888 1
+#endif /* GL_EXT_texture_format_BGRA8888 */
+
+#ifndef GL_EXT_texture_norm16
+#define GL_EXT_texture_norm16 1
+#define GL_R16_EXT 0x822A
+#define GL_RG16_EXT 0x822C
+#define GL_RGBA16_EXT 0x805B
+#define GL_RGB16_EXT 0x8054
+#define GL_RGB16_SNORM_EXT 0x8F9A
+#endif /* GL_EXT_texture_norm16 */
+
+#ifndef GL_EXT_texture_rg
+#define GL_EXT_texture_rg 1
+#define GL_RED_EXT 0x1903
+#define GL_RG_EXT 0x8227
+#define GL_R8_EXT 0x8229
+#define GL_RG8_EXT 0x822B
+#endif /* GL_EXT_texture_rg */
+
+#ifndef GL_EXT_texture_sRGB_R8
+#define GL_EXT_texture_sRGB_R8 1
+#define GL_SR8_EXT 0x8FBD
+#endif /* GL_EXT_texture_sRGB_R8 */
+
+#ifndef GL_EXT_texture_sRGB_RG8
+#define GL_EXT_texture_sRGB_RG8 1
+#define GL_SRG8_EXT 0x8FBE
+#endif /* GL_EXT_texture_sRGB_RG8 */
+
+#ifndef GL_EXT_texture_sRGB_decode
+#define GL_EXT_texture_sRGB_decode 1
+#define GL_TEXTURE_SRGB_DECODE_EXT 0x8A48
+#define GL_DECODE_EXT 0x8A49
+#define GL_SKIP_DECODE_EXT 0x8A4A
+#endif /* GL_EXT_texture_sRGB_decode */
+
+#ifndef GL_EXT_texture_storage
+#define GL_EXT_texture_storage 1
+#define GL_TEXTURE_IMMUTABLE_FORMAT_EXT 0x912F
+#define GL_ALPHA8_EXT 0x803C
+#define GL_LUMINANCE8_EXT 0x8040
+#define GL_LUMINANCE8_ALPHA8_EXT 0x8045
+#define GL_RGBA32F_EXT 0x8814
+#define GL_RGB32F_EXT 0x8815
+#define GL_ALPHA32F_EXT 0x8816
+#define GL_LUMINANCE32F_EXT 0x8818
+#define GL_LUMINANCE_ALPHA32F_EXT 0x8819
+#define GL_ALPHA16F_EXT 0x881C
+#define GL_LUMINANCE16F_EXT 0x881E
+#define GL_LUMINANCE_ALPHA16F_EXT 0x881F
+#define GL_R32F_EXT 0x822E
+#define GL_RG32F_EXT 0x8230
+typedef void (GL_APIENTRYP PFNGLTEXSTORAGE1DEXTPROC) (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width);
+typedef void (GL_APIENTRYP PFNGLTEXSTORAGE2DEXTPROC) (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height);
+typedef void (GL_APIENTRYP PFNGLTEXSTORAGE3DEXTPROC) (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth);
+typedef void (GL_APIENTRYP PFNGLTEXTURESTORAGE1DEXTPROC) (GLuint texture, GLenum target, GLsizei levels, GLenum internalformat, GLsizei width);
+typedef void (GL_APIENTRYP PFNGLTEXTURESTORAGE2DEXTPROC) (GLuint texture, GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height);
+typedef void (GL_APIENTRYP PFNGLTEXTURESTORAGE3DEXTPROC) (GLuint texture, GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glTexStorage1DEXT (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width);
+GL_APICALL void GL_APIENTRY glTexStorage2DEXT (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height);
+GL_APICALL void GL_APIENTRY glTexStorage3DEXT (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth);
+GL_APICALL void GL_APIENTRY glTextureStorage1DEXT (GLuint texture, GLenum target, GLsizei levels, GLenum internalformat, GLsizei width);
+GL_APICALL void GL_APIENTRY glTextureStorage2DEXT (GLuint texture, GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height);
+GL_APICALL void GL_APIENTRY glTextureStorage3DEXT (GLuint texture, GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth);
+#endif
+#endif /* GL_EXT_texture_storage */
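+
+/*
+ * Illustrative sketch: allocating an immutable, fully mip-mapped 2D
+ * texture. `tex` is a placeholder; GL_RGBA8 (GL_RGBA8_OES on plain ES 2.0)
+ * is assumed to be an accepted sized internal format.
+ *
+ *   glBindTexture(GL_TEXTURE_2D, tex);
+ *   glTexStorage2DEXT(GL_TEXTURE_2D, 10, GL_RGBA8, 512, 512);
+ *   // 10 levels cover 512x512 down to 1x1; the format is now immutable,
+ *   // so only glTexSubImage2D-style updates are valid afterwards.
+ */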
+
+#ifndef GL_EXT_texture_type_2_10_10_10_REV
+#define GL_EXT_texture_type_2_10_10_10_REV 1
+#define GL_UNSIGNED_INT_2_10_10_10_REV_EXT 0x8368
+#endif /* GL_EXT_texture_type_2_10_10_10_REV */
+
+#ifndef GL_EXT_texture_view
+#define GL_EXT_texture_view 1
+#define GL_TEXTURE_VIEW_MIN_LEVEL_EXT 0x82DB
+#define GL_TEXTURE_VIEW_NUM_LEVELS_EXT 0x82DC
+#define GL_TEXTURE_VIEW_MIN_LAYER_EXT 0x82DD
+#define GL_TEXTURE_VIEW_NUM_LAYERS_EXT 0x82DE
+typedef void (GL_APIENTRYP PFNGLTEXTUREVIEWEXTPROC) (GLuint texture, GLenum target, GLuint origtexture, GLenum internalformat, GLuint minlevel, GLuint numlevels, GLuint minlayer, GLuint numlayers);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glTextureViewEXT (GLuint texture, GLenum target, GLuint origtexture, GLenum internalformat, GLuint minlevel, GLuint numlevels, GLuint minlayer, GLuint numlayers);
+#endif
+#endif /* GL_EXT_texture_view */
+
+#ifndef GL_EXT_unpack_subimage
+#define GL_EXT_unpack_subimage 1
+#define GL_UNPACK_ROW_LENGTH_EXT 0x0CF2
+#define GL_UNPACK_SKIP_ROWS_EXT 0x0CF3
+#define GL_UNPACK_SKIP_PIXELS_EXT 0x0CF4
+#endif /* GL_EXT_unpack_subimage */
+
+#ifndef GL_EXT_win32_keyed_mutex
+#define GL_EXT_win32_keyed_mutex 1
+typedef GLboolean (GL_APIENTRYP PFNGLACQUIREKEYEDMUTEXWIN32EXTPROC) (GLuint memory, GLuint64 key, GLuint timeout);
+typedef GLboolean (GL_APIENTRYP PFNGLRELEASEKEYEDMUTEXWIN32EXTPROC) (GLuint memory, GLuint64 key);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL GLboolean GL_APIENTRY glAcquireKeyedMutexWin32EXT (GLuint memory, GLuint64 key, GLuint timeout);
+GL_APICALL GLboolean GL_APIENTRY glReleaseKeyedMutexWin32EXT (GLuint memory, GLuint64 key);
+#endif
+#endif /* GL_EXT_win32_keyed_mutex */
+
+#ifndef GL_EXT_window_rectangles
+#define GL_EXT_window_rectangles 1
+#define GL_INCLUSIVE_EXT 0x8F10
+#define GL_EXCLUSIVE_EXT 0x8F11
+#define GL_WINDOW_RECTANGLE_EXT 0x8F12
+#define GL_WINDOW_RECTANGLE_MODE_EXT 0x8F13
+#define GL_MAX_WINDOW_RECTANGLES_EXT 0x8F14
+#define GL_NUM_WINDOW_RECTANGLES_EXT 0x8F15
+typedef void (GL_APIENTRYP PFNGLWINDOWRECTANGLESEXTPROC) (GLenum mode, GLsizei count, const GLint *box);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glWindowRectanglesEXT (GLenum mode, GLsizei count, const GLint *box);
+#endif
+#endif /* GL_EXT_window_rectangles */
+
+#ifndef GL_FJ_shader_binary_GCCSO
+#define GL_FJ_shader_binary_GCCSO 1
+#define GL_GCCSO_SHADER_BINARY_FJ 0x9260
+#endif /* GL_FJ_shader_binary_GCCSO */
+
+#ifndef GL_IMG_bindless_texture
+#define GL_IMG_bindless_texture 1
+typedef GLuint64 (GL_APIENTRYP PFNGLGETTEXTUREHANDLEIMGPROC) (GLuint texture);
+typedef GLuint64 (GL_APIENTRYP PFNGLGETTEXTURESAMPLERHANDLEIMGPROC) (GLuint texture, GLuint sampler);
+typedef void (GL_APIENTRYP PFNGLUNIFORMHANDLEUI64IMGPROC) (GLint location, GLuint64 value);
+typedef void (GL_APIENTRYP PFNGLUNIFORMHANDLEUI64VIMGPROC) (GLint location, GLsizei count, const GLuint64 *value);
+typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMHANDLEUI64IMGPROC) (GLuint program, GLint location, GLuint64 value);
+typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMHANDLEUI64VIMGPROC) (GLuint program, GLint location, GLsizei count, const GLuint64 *values);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL GLuint64 GL_APIENTRY glGetTextureHandleIMG (GLuint texture);
+GL_APICALL GLuint64 GL_APIENTRY glGetTextureSamplerHandleIMG (GLuint texture, GLuint sampler);
+GL_APICALL void GL_APIENTRY glUniformHandleui64IMG (GLint location, GLuint64 value);
+GL_APICALL void GL_APIENTRY glUniformHandleui64vIMG (GLint location, GLsizei count, const GLuint64 *value);
+GL_APICALL void GL_APIENTRY glProgramUniformHandleui64IMG (GLuint program, GLint location, GLuint64 value);
+GL_APICALL void GL_APIENTRY glProgramUniformHandleui64vIMG (GLuint program, GLint location, GLsizei count, const GLuint64 *values);
+#endif
+#endif /* GL_IMG_bindless_texture */
+
+#ifndef GL_IMG_framebuffer_downsample
+#define GL_IMG_framebuffer_downsample 1
+#define GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE_AND_DOWNSAMPLE_IMG 0x913C
+#define GL_NUM_DOWNSAMPLE_SCALES_IMG 0x913D
+#define GL_DOWNSAMPLE_SCALES_IMG 0x913E
+#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_SCALE_IMG 0x913F
+typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERTEXTURE2DDOWNSAMPLEIMGPROC) (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLint xscale, GLint yscale);
+typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERTEXTURELAYERDOWNSAMPLEIMGPROC) (GLenum target, GLenum attachment, GLuint texture, GLint level, GLint layer, GLint xscale, GLint yscale);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glFramebufferTexture2DDownsampleIMG (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLint xscale, GLint yscale);
+GL_APICALL void GL_APIENTRY glFramebufferTextureLayerDownsampleIMG (GLenum target, GLenum attachment, GLuint texture, GLint level, GLint layer, GLint xscale, GLint yscale);
+#endif
+#endif /* GL_IMG_framebuffer_downsample */
+
+#ifndef GL_IMG_multisampled_render_to_texture
+#define GL_IMG_multisampled_render_to_texture 1
+#define GL_RENDERBUFFER_SAMPLES_IMG 0x9133
+#define GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE_IMG 0x9134
+#define GL_MAX_SAMPLES_IMG 0x9135
+#define GL_TEXTURE_SAMPLES_IMG 0x9136
+typedef void (GL_APIENTRYP PFNGLRENDERBUFFERSTORAGEMULTISAMPLEIMGPROC) (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height);
+typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERTEXTURE2DMULTISAMPLEIMGPROC) (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLsizei samples);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glRenderbufferStorageMultisampleIMG (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height);
+GL_APICALL void GL_APIENTRY glFramebufferTexture2DMultisampleIMG (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLsizei samples);
+#endif
+#endif /* GL_IMG_multisampled_render_to_texture */
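+
+/*
+ * Illustrative sketch: multisampled rendering resolved directly into a
+ * texture, as this IMG extension permits. `fbo`, `tex` and the 4x sample
+ * count are placeholders.
+ *
+ *   glBindFramebuffer(GL_FRAMEBUFFER, fbo);
+ *   glFramebufferTexture2DMultisampleIMG(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
+ *                                        GL_TEXTURE_2D, tex, 0, 4);
+ */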
+
+#ifndef GL_IMG_program_binary
+#define GL_IMG_program_binary 1
+#define GL_SGX_PROGRAM_BINARY_IMG 0x9130
+#endif /* GL_IMG_program_binary */
+
+#ifndef GL_IMG_read_format
+#define GL_IMG_read_format 1
+#define GL_BGRA_IMG 0x80E1
+#define GL_UNSIGNED_SHORT_4_4_4_4_REV_IMG 0x8365
+#endif /* GL_IMG_read_format */
+
+#ifndef GL_IMG_shader_binary
+#define GL_IMG_shader_binary 1
+#define GL_SGX_BINARY_IMG 0x8C0A
+#endif /* GL_IMG_shader_binary */
+
+#ifndef GL_IMG_texture_compression_pvrtc
+#define GL_IMG_texture_compression_pvrtc 1
+#define GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG 0x8C00
+#define GL_COMPRESSED_RGB_PVRTC_2BPPV1_IMG 0x8C01
+#define GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG 0x8C02
+#define GL_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG 0x8C03
+#endif /* GL_IMG_texture_compression_pvrtc */
+
+#ifndef GL_IMG_texture_compression_pvrtc2
+#define GL_IMG_texture_compression_pvrtc2 1
+#define GL_COMPRESSED_RGBA_PVRTC_2BPPV2_IMG 0x9137
+#define GL_COMPRESSED_RGBA_PVRTC_4BPPV2_IMG 0x9138
+#endif /* GL_IMG_texture_compression_pvrtc2 */
+
+#ifndef GL_IMG_texture_filter_cubic
+#define GL_IMG_texture_filter_cubic 1
+#define GL_CUBIC_IMG 0x9139
+#define GL_CUBIC_MIPMAP_NEAREST_IMG 0x913A
+#define GL_CUBIC_MIPMAP_LINEAR_IMG 0x913B
+#endif /* GL_IMG_texture_filter_cubic */
+
+#ifndef GL_INTEL_conservative_rasterization
+#define GL_INTEL_conservative_rasterization 1
+#define GL_CONSERVATIVE_RASTERIZATION_INTEL 0x83FE
+#endif /* GL_INTEL_conservative_rasterization */
+
+#ifndef GL_INTEL_framebuffer_CMAA
+#define GL_INTEL_framebuffer_CMAA 1
+typedef void (GL_APIENTRYP PFNGLAPPLYFRAMEBUFFERATTACHMENTCMAAINTELPROC) (void);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glApplyFramebufferAttachmentCMAAINTEL (void);
+#endif
+#endif /* GL_INTEL_framebuffer_CMAA */
+
+#ifndef GL_INTEL_performance_query
+#define GL_INTEL_performance_query 1
+#define GL_PERFQUERY_SINGLE_CONTEXT_INTEL 0x00000000
+#define GL_PERFQUERY_GLOBAL_CONTEXT_INTEL 0x00000001
+#define GL_PERFQUERY_WAIT_INTEL 0x83FB
+#define GL_PERFQUERY_FLUSH_INTEL 0x83FA
+#define GL_PERFQUERY_DONOT_FLUSH_INTEL 0x83F9
+#define GL_PERFQUERY_COUNTER_EVENT_INTEL 0x94F0
+#define GL_PERFQUERY_COUNTER_DURATION_NORM_INTEL 0x94F1
+#define GL_PERFQUERY_COUNTER_DURATION_RAW_INTEL 0x94F2
+#define GL_PERFQUERY_COUNTER_THROUGHPUT_INTEL 0x94F3
+#define GL_PERFQUERY_COUNTER_RAW_INTEL 0x94F4
+#define GL_PERFQUERY_COUNTER_TIMESTAMP_INTEL 0x94F5
+#define GL_PERFQUERY_COUNTER_DATA_UINT32_INTEL 0x94F8
+#define GL_PERFQUERY_COUNTER_DATA_UINT64_INTEL 0x94F9
+#define GL_PERFQUERY_COUNTER_DATA_FLOAT_INTEL 0x94FA
+#define GL_PERFQUERY_COUNTER_DATA_DOUBLE_INTEL 0x94FB
+#define GL_PERFQUERY_COUNTER_DATA_BOOL32_INTEL 0x94FC
+#define GL_PERFQUERY_QUERY_NAME_LENGTH_MAX_INTEL 0x94FD
+#define GL_PERFQUERY_COUNTER_NAME_LENGTH_MAX_INTEL 0x94FE
+#define GL_PERFQUERY_COUNTER_DESC_LENGTH_MAX_INTEL 0x94FF
+#define GL_PERFQUERY_GPA_EXTENDED_COUNTERS_INTEL 0x9500
+typedef void (GL_APIENTRYP PFNGLBEGINPERFQUERYINTELPROC) (GLuint queryHandle);
+typedef void (GL_APIENTRYP PFNGLCREATEPERFQUERYINTELPROC) (GLuint queryId, GLuint *queryHandle);
+typedef void (GL_APIENTRYP PFNGLDELETEPERFQUERYINTELPROC) (GLuint queryHandle);
+typedef void (GL_APIENTRYP PFNGLENDPERFQUERYINTELPROC) (GLuint queryHandle);
+typedef void (GL_APIENTRYP PFNGLGETFIRSTPERFQUERYIDINTELPROC) (GLuint *queryId);
+typedef void (GL_APIENTRYP PFNGLGETNEXTPERFQUERYIDINTELPROC) (GLuint queryId, GLuint *nextQueryId);
+typedef void (GL_APIENTRYP PFNGLGETPERFCOUNTERINFOINTELPROC) (GLuint queryId, GLuint counterId, GLuint counterNameLength, GLchar *counterName, GLuint counterDescLength, GLchar *counterDesc, GLuint *counterOffset, GLuint *counterDataSize, GLuint *counterTypeEnum, GLuint *counterDataTypeEnum, GLuint64 *rawCounterMaxValue);
+typedef void (GL_APIENTRYP PFNGLGETPERFQUERYDATAINTELPROC) (GLuint queryHandle, GLuint flags, GLsizei dataSize, GLvoid *data, GLuint *bytesWritten);
+typedef void (GL_APIENTRYP PFNGLGETPERFQUERYIDBYNAMEINTELPROC) (GLchar *queryName, GLuint *queryId);
+typedef void (GL_APIENTRYP PFNGLGETPERFQUERYINFOINTELPROC) (GLuint queryId, GLuint queryNameLength, GLchar *queryName, GLuint *dataSize, GLuint *noCounters, GLuint *noInstances, GLuint *capsMask);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glBeginPerfQueryINTEL (GLuint queryHandle);
+GL_APICALL void GL_APIENTRY glCreatePerfQueryINTEL (GLuint queryId, GLuint *queryHandle);
+GL_APICALL void GL_APIENTRY glDeletePerfQueryINTEL (GLuint queryHandle);
+GL_APICALL void GL_APIENTRY glEndPerfQueryINTEL (GLuint queryHandle);
+GL_APICALL void GL_APIENTRY glGetFirstPerfQueryIdINTEL (GLuint *queryId);
+GL_APICALL void GL_APIENTRY glGetNextPerfQueryIdINTEL (GLuint queryId, GLuint *nextQueryId);
+GL_APICALL void GL_APIENTRY glGetPerfCounterInfoINTEL (GLuint queryId, GLuint counterId, GLuint counterNameLength, GLchar *counterName, GLuint counterDescLength, GLchar *counterDesc, GLuint *counterOffset, GLuint *counterDataSize, GLuint *counterTypeEnum, GLuint *counterDataTypeEnum, GLuint64 *rawCounterMaxValue);
+GL_APICALL void GL_APIENTRY glGetPerfQueryDataINTEL (GLuint queryHandle, GLuint flags, GLsizei dataSize, GLvoid *data, GLuint *bytesWritten);
+GL_APICALL void GL_APIENTRY glGetPerfQueryIdByNameINTEL (GLchar *queryName, GLuint *queryId);
+GL_APICALL void GL_APIENTRY glGetPerfQueryInfoINTEL (GLuint queryId, GLuint queryNameLength, GLchar *queryName, GLuint *dataSize, GLuint *noCounters, GLuint *noInstances, GLuint *capsMask);
+#endif
+#endif /* GL_INTEL_performance_query */
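+
+/*
+ * Illustrative sketch: walking the set of performance queries the driver
+ * exposes. Error handling is omitted, and a returned id of 0 is assumed
+ * to terminate the chain.
+ *
+ *   GLuint id = 0;
+ *   glGetFirstPerfQueryIdINTEL(&id);
+ *   while (id != 0) {
+ *       GLuint next = 0;
+ *       // inspect the query here via glGetPerfQueryInfoINTEL(id, ...)
+ *       glGetNextPerfQueryIdINTEL(id, &next);
+ *       id = next;
+ *   }
+ */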
+
+#ifndef GL_MESA_shader_integer_functions
+#define GL_MESA_shader_integer_functions 1
+#endif /* GL_MESA_shader_integer_functions */
+
+#ifndef GL_NVX_blend_equation_advanced_multi_draw_buffers
+#define GL_NVX_blend_equation_advanced_multi_draw_buffers 1
+#endif /* GL_NVX_blend_equation_advanced_multi_draw_buffers */
+
+#ifndef GL_NV_bindless_texture
+#define GL_NV_bindless_texture 1
+typedef GLuint64 (GL_APIENTRYP PFNGLGETTEXTUREHANDLENVPROC) (GLuint texture);
+typedef GLuint64 (GL_APIENTRYP PFNGLGETTEXTURESAMPLERHANDLENVPROC) (GLuint texture, GLuint sampler);
+typedef void (GL_APIENTRYP PFNGLMAKETEXTUREHANDLERESIDENTNVPROC) (GLuint64 handle);
+typedef void (GL_APIENTRYP PFNGLMAKETEXTUREHANDLENONRESIDENTNVPROC) (GLuint64 handle);
+typedef GLuint64 (GL_APIENTRYP PFNGLGETIMAGEHANDLENVPROC) (GLuint texture, GLint level, GLboolean layered, GLint layer, GLenum format);
+typedef void (GL_APIENTRYP PFNGLMAKEIMAGEHANDLERESIDENTNVPROC) (GLuint64 handle, GLenum access);
+typedef void (GL_APIENTRYP PFNGLMAKEIMAGEHANDLENONRESIDENTNVPROC) (GLuint64 handle);
+typedef void (GL_APIENTRYP PFNGLUNIFORMHANDLEUI64NVPROC) (GLint location, GLuint64 value);
+typedef void (GL_APIENTRYP PFNGLUNIFORMHANDLEUI64VNVPROC) (GLint location, GLsizei count, const GLuint64 *value);
+typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMHANDLEUI64NVPROC) (GLuint program, GLint location, GLuint64 value);
+typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMHANDLEUI64VNVPROC) (GLuint program, GLint location, GLsizei count, const GLuint64 *values);
+typedef GLboolean (GL_APIENTRYP PFNGLISTEXTUREHANDLERESIDENTNVPROC) (GLuint64 handle);
+typedef GLboolean (GL_APIENTRYP PFNGLISIMAGEHANDLERESIDENTNVPROC) (GLuint64 handle);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL GLuint64 GL_APIENTRY glGetTextureHandleNV (GLuint texture);
+GL_APICALL GLuint64 GL_APIENTRY glGetTextureSamplerHandleNV (GLuint texture, GLuint sampler);
+GL_APICALL void GL_APIENTRY glMakeTextureHandleResidentNV (GLuint64 handle);
+GL_APICALL void GL_APIENTRY glMakeTextureHandleNonResidentNV (GLuint64 handle);
+GL_APICALL GLuint64 GL_APIENTRY glGetImageHandleNV (GLuint texture, GLint level, GLboolean layered, GLint layer, GLenum format);
+GL_APICALL void GL_APIENTRY glMakeImageHandleResidentNV (GLuint64 handle, GLenum access);
+GL_APICALL void GL_APIENTRY glMakeImageHandleNonResidentNV (GLuint64 handle);
+GL_APICALL void GL_APIENTRY glUniformHandleui64NV (GLint location, GLuint64 value);
+GL_APICALL void GL_APIENTRY glUniformHandleui64vNV (GLint location, GLsizei count, const GLuint64 *value);
+GL_APICALL void GL_APIENTRY glProgramUniformHandleui64NV (GLuint program, GLint location, GLuint64 value);
+GL_APICALL void GL_APIENTRY glProgramUniformHandleui64vNV (GLuint program, GLint location, GLsizei count, const GLuint64 *values);
+GL_APICALL GLboolean GL_APIENTRY glIsTextureHandleResidentNV (GLuint64 handle);
+GL_APICALL GLboolean GL_APIENTRY glIsImageHandleResidentNV (GLuint64 handle);
+#endif
+#endif /* GL_NV_bindless_texture */
+
+#ifndef GL_NV_blend_equation_advanced
+#define GL_NV_blend_equation_advanced 1
+#define GL_BLEND_OVERLAP_NV 0x9281
+#define GL_BLEND_PREMULTIPLIED_SRC_NV 0x9280
+#define GL_BLUE_NV 0x1905
+#define GL_COLORBURN_NV 0x929A
+#define GL_COLORDODGE_NV 0x9299
+#define GL_CONJOINT_NV 0x9284
+#define GL_CONTRAST_NV 0x92A1
+#define GL_DARKEN_NV 0x9297
+#define GL_DIFFERENCE_NV 0x929E
+#define GL_DISJOINT_NV 0x9283
+#define GL_DST_ATOP_NV 0x928F
+#define GL_DST_IN_NV 0x928B
+#define GL_DST_NV 0x9287
+#define GL_DST_OUT_NV 0x928D
+#define GL_DST_OVER_NV 0x9289
+#define GL_EXCLUSION_NV 0x92A0
+#define GL_GREEN_NV 0x1904
+#define GL_HARDLIGHT_NV 0x929B
+#define GL_HARDMIX_NV 0x92A9
+#define GL_HSL_COLOR_NV 0x92AF
+#define GL_HSL_HUE_NV 0x92AD
+#define GL_HSL_LUMINOSITY_NV 0x92B0
+#define GL_HSL_SATURATION_NV 0x92AE
+#define GL_INVERT_OVG_NV 0x92B4
+#define GL_INVERT_RGB_NV 0x92A3
+#define GL_LIGHTEN_NV 0x9298
+#define GL_LINEARBURN_NV 0x92A5
+#define GL_LINEARDODGE_NV 0x92A4
+#define GL_LINEARLIGHT_NV 0x92A7
+#define GL_MINUS_CLAMPED_NV 0x92B3
+#define GL_MINUS_NV 0x929F
+#define GL_MULTIPLY_NV 0x9294
+#define GL_OVERLAY_NV 0x9296
+#define GL_PINLIGHT_NV 0x92A8
+#define GL_PLUS_CLAMPED_ALPHA_NV 0x92B2
+#define GL_PLUS_CLAMPED_NV 0x92B1
+#define GL_PLUS_DARKER_NV 0x9292
+#define GL_PLUS_NV 0x9291
+#define GL_RED_NV 0x1903
+#define GL_SCREEN_NV 0x9295
+#define GL_SOFTLIGHT_NV 0x929C
+#define GL_SRC_ATOP_NV 0x928E
+#define GL_SRC_IN_NV 0x928A
+#define GL_SRC_NV 0x9286
+#define GL_SRC_OUT_NV 0x928C
+#define GL_SRC_OVER_NV 0x9288
+#define GL_UNCORRELATED_NV 0x9282
+#define GL_VIVIDLIGHT_NV 0x92A6
+#define GL_XOR_NV 0x1506
+typedef void (GL_APIENTRYP PFNGLBLENDPARAMETERINVPROC) (GLenum pname, GLint value);
+typedef void (GL_APIENTRYP PFNGLBLENDBARRIERNVPROC) (void);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glBlendParameteriNV (GLenum pname, GLint value);
+GL_APICALL void GL_APIENTRY glBlendBarrierNV (void);
+#endif
+#endif /* GL_NV_blend_equation_advanced */
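+
+/*
+ * Illustrative sketch: selecting an advanced blend mode. Without the
+ * _coherent variant below, a glBlendBarrierNV call is assumed to be
+ * required between overlapping draws that read the framebuffer.
+ *
+ *   glBlendParameteriNV(GL_BLEND_OVERLAP_NV, GL_UNCORRELATED_NV);
+ *   glBlendEquation(GL_MULTIPLY_NV);
+ *   // ... draw first batch ...
+ *   glBlendBarrierNV();
+ *   // ... draw overlapping batch ...
+ */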
+
+#ifndef GL_NV_blend_equation_advanced_coherent
+#define GL_NV_blend_equation_advanced_coherent 1
+#define GL_BLEND_ADVANCED_COHERENT_NV 0x9285
+#endif /* GL_NV_blend_equation_advanced_coherent */
+
+#ifndef GL_NV_conditional_render
+#define GL_NV_conditional_render 1
+#define GL_QUERY_WAIT_NV 0x8E13
+#define GL_QUERY_NO_WAIT_NV 0x8E14
+#define GL_QUERY_BY_REGION_WAIT_NV 0x8E15
+#define GL_QUERY_BY_REGION_NO_WAIT_NV 0x8E16
+typedef void (GL_APIENTRYP PFNGLBEGINCONDITIONALRENDERNVPROC) (GLuint id, GLenum mode);
+typedef void (GL_APIENTRYP PFNGLENDCONDITIONALRENDERNVPROC) (void);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glBeginConditionalRenderNV (GLuint id, GLenum mode);
+GL_APICALL void GL_APIENTRY glEndConditionalRenderNV (void);
+#endif
+#endif /* GL_NV_conditional_render */
+
+#ifndef GL_NV_conservative_raster
+#define GL_NV_conservative_raster 1
+#define GL_CONSERVATIVE_RASTERIZATION_NV 0x9346
+#define GL_SUBPIXEL_PRECISION_BIAS_X_BITS_NV 0x9347
+#define GL_SUBPIXEL_PRECISION_BIAS_Y_BITS_NV 0x9348
+#define GL_MAX_SUBPIXEL_PRECISION_BIAS_BITS_NV 0x9349
+typedef void (GL_APIENTRYP PFNGLSUBPIXELPRECISIONBIASNVPROC) (GLuint xbits, GLuint ybits);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glSubpixelPrecisionBiasNV (GLuint xbits, GLuint ybits);
+#endif
+#endif /* GL_NV_conservative_raster */
+
+#ifndef GL_NV_conservative_raster_pre_snap_triangles
+#define GL_NV_conservative_raster_pre_snap_triangles 1
+#define GL_CONSERVATIVE_RASTER_MODE_NV 0x954D
+#define GL_CONSERVATIVE_RASTER_MODE_POST_SNAP_NV 0x954E
+#define GL_CONSERVATIVE_RASTER_MODE_PRE_SNAP_TRIANGLES_NV 0x954F
+typedef void (GL_APIENTRYP PFNGLCONSERVATIVERASTERPARAMETERINVPROC) (GLenum pname, GLint param);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glConservativeRasterParameteriNV (GLenum pname, GLint param);
+#endif
+#endif /* GL_NV_conservative_raster_pre_snap_triangles */
+
+#ifndef GL_NV_copy_buffer
+#define GL_NV_copy_buffer 1
+#define GL_COPY_READ_BUFFER_NV 0x8F36
+#define GL_COPY_WRITE_BUFFER_NV 0x8F37
+typedef void (GL_APIENTRYP PFNGLCOPYBUFFERSUBDATANVPROC) (GLenum readTarget, GLenum writeTarget, GLintptr readOffset, GLintptr writeOffset, GLsizeiptr size);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glCopyBufferSubDataNV (GLenum readTarget, GLenum writeTarget, GLintptr readOffset, GLintptr writeOffset, GLsizeiptr size);
+#endif
+#endif /* GL_NV_copy_buffer */
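+
+/*
+ * Illustrative sketch: a GPU-side copy between two buffer objects via the
+ * scratch targets above. `src`, `dst` and the 1024-byte size are
+ * placeholders.
+ *
+ *   glBindBuffer(GL_COPY_READ_BUFFER_NV, src);
+ *   glBindBuffer(GL_COPY_WRITE_BUFFER_NV, dst);
+ *   glCopyBufferSubDataNV(GL_COPY_READ_BUFFER_NV, GL_COPY_WRITE_BUFFER_NV,
+ *                         0, 0, 1024);
+ */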
+
+#ifndef GL_NV_coverage_sample
+#define GL_NV_coverage_sample 1
+#define GL_COVERAGE_COMPONENT_NV 0x8ED0
+#define GL_COVERAGE_COMPONENT4_NV 0x8ED1
+#define GL_COVERAGE_ATTACHMENT_NV 0x8ED2
+#define GL_COVERAGE_BUFFERS_NV 0x8ED3
+#define GL_COVERAGE_SAMPLES_NV 0x8ED4
+#define GL_COVERAGE_ALL_FRAGMENTS_NV 0x8ED5
+#define GL_COVERAGE_EDGE_FRAGMENTS_NV 0x8ED6
+#define GL_COVERAGE_AUTOMATIC_NV 0x8ED7
+#define GL_COVERAGE_BUFFER_BIT_NV 0x00008000
+typedef void (GL_APIENTRYP PFNGLCOVERAGEMASKNVPROC) (GLboolean mask);
+typedef void (GL_APIENTRYP PFNGLCOVERAGEOPERATIONNVPROC) (GLenum operation);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glCoverageMaskNV (GLboolean mask);
+GL_APICALL void GL_APIENTRY glCoverageOperationNV (GLenum operation);
+#endif
+#endif /* GL_NV_coverage_sample */
+
+#ifndef GL_NV_depth_nonlinear
+#define GL_NV_depth_nonlinear 1
+#define GL_DEPTH_COMPONENT16_NONLINEAR_NV 0x8E2C
+#endif /* GL_NV_depth_nonlinear */
+
+#ifndef GL_NV_draw_buffers
+#define GL_NV_draw_buffers 1
+#define GL_MAX_DRAW_BUFFERS_NV 0x8824
+#define GL_DRAW_BUFFER0_NV 0x8825
+#define GL_DRAW_BUFFER1_NV 0x8826
+#define GL_DRAW_BUFFER2_NV 0x8827
+#define GL_DRAW_BUFFER3_NV 0x8828
+#define GL_DRAW_BUFFER4_NV 0x8829
+#define GL_DRAW_BUFFER5_NV 0x882A
+#define GL_DRAW_BUFFER6_NV 0x882B
+#define GL_DRAW_BUFFER7_NV 0x882C
+#define GL_DRAW_BUFFER8_NV 0x882D
+#define GL_DRAW_BUFFER9_NV 0x882E
+#define GL_DRAW_BUFFER10_NV 0x882F
+#define GL_DRAW_BUFFER11_NV 0x8830
+#define GL_DRAW_BUFFER12_NV 0x8831
+#define GL_DRAW_BUFFER13_NV 0x8832
+#define GL_DRAW_BUFFER14_NV 0x8833
+#define GL_DRAW_BUFFER15_NV 0x8834
+#define GL_COLOR_ATTACHMENT0_NV 0x8CE0
+#define GL_COLOR_ATTACHMENT1_NV 0x8CE1
+#define GL_COLOR_ATTACHMENT2_NV 0x8CE2
+#define GL_COLOR_ATTACHMENT3_NV 0x8CE3
+#define GL_COLOR_ATTACHMENT4_NV 0x8CE4
+#define GL_COLOR_ATTACHMENT5_NV 0x8CE5
+#define GL_COLOR_ATTACHMENT6_NV 0x8CE6
+#define GL_COLOR_ATTACHMENT7_NV 0x8CE7
+#define GL_COLOR_ATTACHMENT8_NV 0x8CE8
+#define GL_COLOR_ATTACHMENT9_NV 0x8CE9
+#define GL_COLOR_ATTACHMENT10_NV 0x8CEA
+#define GL_COLOR_ATTACHMENT11_NV 0x8CEB
+#define GL_COLOR_ATTACHMENT12_NV 0x8CEC
+#define GL_COLOR_ATTACHMENT13_NV 0x8CED
+#define GL_COLOR_ATTACHMENT14_NV 0x8CEE
+#define GL_COLOR_ATTACHMENT15_NV 0x8CEF
+typedef void (GL_APIENTRYP PFNGLDRAWBUFFERSNVPROC) (GLsizei n, const GLenum *bufs);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glDrawBuffersNV (GLsizei n, const GLenum *bufs);
+#endif
+#endif /* GL_NV_draw_buffers */
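+
+/*
+ * Illustrative sketch: directing fragment outputs to two color attachments
+ * of the currently bound framebuffer (attachment setup omitted).
+ *
+ *   const GLenum bufs[2] = { GL_COLOR_ATTACHMENT0_NV, GL_COLOR_ATTACHMENT1_NV };
+ *   glDrawBuffersNV(2, bufs);
+ */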
+
+#ifndef GL_NV_draw_instanced
+#define GL_NV_draw_instanced 1
+typedef void (GL_APIENTRYP PFNGLDRAWARRAYSINSTANCEDNVPROC) (GLenum mode, GLint first, GLsizei count, GLsizei primcount);
+typedef void (GL_APIENTRYP PFNGLDRAWELEMENTSINSTANCEDNVPROC) (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei primcount);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glDrawArraysInstancedNV (GLenum mode, GLint first, GLsizei count, GLsizei primcount);
+GL_APICALL void GL_APIENTRY glDrawElementsInstancedNV (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei primcount);
+#endif
+#endif /* GL_NV_draw_instanced */
+
+#ifndef GL_NV_draw_vulkan_image
+#define GL_NV_draw_vulkan_image 1
+typedef void (GL_APIENTRY *GLVULKANPROCNV)(void);
+typedef void (GL_APIENTRYP PFNGLDRAWVKIMAGENVPROC) (GLuint64 vkImage, GLuint sampler, GLfloat x0, GLfloat y0, GLfloat x1, GLfloat y1, GLfloat z, GLfloat s0, GLfloat t0, GLfloat s1, GLfloat t1);
+typedef GLVULKANPROCNV (GL_APIENTRYP PFNGLGETVKPROCADDRNVPROC) (const GLchar *name);
+typedef void (GL_APIENTRYP PFNGLWAITVKSEMAPHORENVPROC) (GLuint64 vkSemaphore);
+typedef void (GL_APIENTRYP PFNGLSIGNALVKSEMAPHORENVPROC) (GLuint64 vkSemaphore);
+typedef void (GL_APIENTRYP PFNGLSIGNALVKFENCENVPROC) (GLuint64 vkFence);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glDrawVkImageNV (GLuint64 vkImage, GLuint sampler, GLfloat x0, GLfloat y0, GLfloat x1, GLfloat y1, GLfloat z, GLfloat s0, GLfloat t0, GLfloat s1, GLfloat t1);
+GL_APICALL GLVULKANPROCNV GL_APIENTRY glGetVkProcAddrNV (const GLchar *name);
+GL_APICALL void GL_APIENTRY glWaitVkSemaphoreNV (GLuint64 vkSemaphore);
+GL_APICALL void GL_APIENTRY glSignalVkSemaphoreNV (GLuint64 vkSemaphore);
+GL_APICALL void GL_APIENTRY glSignalVkFenceNV (GLuint64 vkFence);
+#endif
+#endif /* GL_NV_draw_vulkan_image */
+
+#ifndef GL_NV_explicit_attrib_location
+#define GL_NV_explicit_attrib_location 1
+#endif /* GL_NV_explicit_attrib_location */
+
+#ifndef GL_NV_fbo_color_attachments
+#define GL_NV_fbo_color_attachments 1
+#define GL_MAX_COLOR_ATTACHMENTS_NV 0x8CDF
+#endif /* GL_NV_fbo_color_attachments */
+
+#ifndef GL_NV_fence
+#define GL_NV_fence 1
+#define GL_ALL_COMPLETED_NV 0x84F2
+#define GL_FENCE_STATUS_NV 0x84F3
+#define GL_FENCE_CONDITION_NV 0x84F4
+typedef void (GL_APIENTRYP PFNGLDELETEFENCESNVPROC) (GLsizei n, const GLuint *fences);
+typedef void (GL_APIENTRYP PFNGLGENFENCESNVPROC) (GLsizei n, GLuint *fences);
+typedef GLboolean (GL_APIENTRYP PFNGLISFENCENVPROC) (GLuint fence);
+typedef GLboolean (GL_APIENTRYP PFNGLTESTFENCENVPROC) (GLuint fence);
+typedef void (GL_APIENTRYP PFNGLGETFENCEIVNVPROC) (GLuint fence, GLenum pname, GLint *params);
+typedef void (GL_APIENTRYP PFNGLFINISHFENCENVPROC) (GLuint fence);
+typedef void (GL_APIENTRYP PFNGLSETFENCENVPROC) (GLuint fence, GLenum condition);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glDeleteFencesNV (GLsizei n, const GLuint *fences);
+GL_APICALL void GL_APIENTRY glGenFencesNV (GLsizei n, GLuint *fences);
+GL_APICALL GLboolean GL_APIENTRY glIsFenceNV (GLuint fence);
+GL_APICALL GLboolean GL_APIENTRY glTestFenceNV (GLuint fence);
+GL_APICALL void GL_APIENTRY glGetFenceivNV (GLuint fence, GLenum pname, GLint *params);
+GL_APICALL void GL_APIENTRY glFinishFenceNV (GLuint fence);
+GL_APICALL void GL_APIENTRY glSetFenceNV (GLuint fence, GLenum condition);
+#endif
+#endif /* GL_NV_fence */
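+
+/*
+ * Illustrative sketch: inserting a fence after a batch of commands and
+ * polling it rather than calling glFinish.
+ *
+ *   GLuint fence;
+ *   glGenFencesNV(1, &fence);
+ *   // ... issue rendering commands ...
+ *   glSetFenceNV(fence, GL_ALL_COMPLETED_NV);
+ *   if (!glTestFenceNV(fence))   // GL_TRUE once the commands complete
+ *       glFinishFenceNV(fence);  // or block here until they do
+ *   glDeleteFencesNV(1, &fence);
+ */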
+
+#ifndef GL_NV_fill_rectangle
+#define GL_NV_fill_rectangle 1
+#define GL_FILL_RECTANGLE_NV 0x933C
+#endif /* GL_NV_fill_rectangle */
+
+#ifndef GL_NV_fragment_coverage_to_color
+#define GL_NV_fragment_coverage_to_color 1
+#define GL_FRAGMENT_COVERAGE_TO_COLOR_NV 0x92DD
+#define GL_FRAGMENT_COVERAGE_COLOR_NV 0x92DE
+typedef void (GL_APIENTRYP PFNGLFRAGMENTCOVERAGECOLORNVPROC) (GLuint color);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glFragmentCoverageColorNV (GLuint color);
+#endif
+#endif /* GL_NV_fragment_coverage_to_color */
+
+#ifndef GL_NV_fragment_shader_interlock
+#define GL_NV_fragment_shader_interlock 1
+#endif /* GL_NV_fragment_shader_interlock */
+
+#ifndef GL_NV_framebuffer_blit
+#define GL_NV_framebuffer_blit 1
+#define GL_READ_FRAMEBUFFER_NV 0x8CA8
+#define GL_DRAW_FRAMEBUFFER_NV 0x8CA9
+#define GL_DRAW_FRAMEBUFFER_BINDING_NV 0x8CA6
+#define GL_READ_FRAMEBUFFER_BINDING_NV 0x8CAA
+typedef void (GL_APIENTRYP PFNGLBLITFRAMEBUFFERNVPROC) (GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glBlitFramebufferNV (GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter);
+#endif
+#endif /* GL_NV_framebuffer_blit */
+
+#ifndef GL_NV_framebuffer_mixed_samples
+#define GL_NV_framebuffer_mixed_samples 1
+#define GL_COVERAGE_MODULATION_TABLE_NV 0x9331
+#define GL_COLOR_SAMPLES_NV 0x8E20
+#define GL_DEPTH_SAMPLES_NV 0x932D
+#define GL_STENCIL_SAMPLES_NV 0x932E
+#define GL_MIXED_DEPTH_SAMPLES_SUPPORTED_NV 0x932F
+#define GL_MIXED_STENCIL_SAMPLES_SUPPORTED_NV 0x9330
+#define GL_COVERAGE_MODULATION_NV 0x9332
+#define GL_COVERAGE_MODULATION_TABLE_SIZE_NV 0x9333
+typedef void (GL_APIENTRYP PFNGLCOVERAGEMODULATIONTABLENVPROC) (GLsizei n, const GLfloat *v);
+typedef void (GL_APIENTRYP PFNGLGETCOVERAGEMODULATIONTABLENVPROC) (GLsizei bufsize, GLfloat *v);
+typedef void (GL_APIENTRYP PFNGLCOVERAGEMODULATIONNVPROC) (GLenum components);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glCoverageModulationTableNV (GLsizei n, const GLfloat *v);
+GL_APICALL void GL_APIENTRY glGetCoverageModulationTableNV (GLsizei bufsize, GLfloat *v);
+GL_APICALL void GL_APIENTRY glCoverageModulationNV (GLenum components);
+#endif
+#endif /* GL_NV_framebuffer_mixed_samples */
+
+#ifndef GL_NV_framebuffer_multisample
+#define GL_NV_framebuffer_multisample 1
+#define GL_RENDERBUFFER_SAMPLES_NV 0x8CAB
+#define GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE_NV 0x8D56
+#define GL_MAX_SAMPLES_NV 0x8D57
+typedef void (GL_APIENTRYP PFNGLRENDERBUFFERSTORAGEMULTISAMPLENVPROC) (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glRenderbufferStorageMultisampleNV (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height);
+#endif
+#endif /* GL_NV_framebuffer_multisample */
+
+#ifndef GL_NV_generate_mipmap_sRGB
+#define GL_NV_generate_mipmap_sRGB 1
+#endif /* GL_NV_generate_mipmap_sRGB */
+
+#ifndef GL_NV_geometry_shader_passthrough
+#define GL_NV_geometry_shader_passthrough 1
+#endif /* GL_NV_geometry_shader_passthrough */
+
+#ifndef GL_NV_gpu_shader5
+#define GL_NV_gpu_shader5 1
+typedef khronos_int64_t GLint64EXT;
+typedef khronos_uint64_t GLuint64EXT;
+#define GL_INT64_NV 0x140E
+#define GL_UNSIGNED_INT64_NV 0x140F
+#define GL_INT8_NV 0x8FE0
+#define GL_INT8_VEC2_NV 0x8FE1
+#define GL_INT8_VEC3_NV 0x8FE2
+#define GL_INT8_VEC4_NV 0x8FE3
+#define GL_INT16_NV 0x8FE4
+#define GL_INT16_VEC2_NV 0x8FE5
+#define GL_INT16_VEC3_NV 0x8FE6
+#define GL_INT16_VEC4_NV 0x8FE7
+#define GL_INT64_VEC2_NV 0x8FE9
+#define GL_INT64_VEC3_NV 0x8FEA
+#define GL_INT64_VEC4_NV 0x8FEB
+#define GL_UNSIGNED_INT8_NV 0x8FEC
+#define GL_UNSIGNED_INT8_VEC2_NV 0x8FED
+#define GL_UNSIGNED_INT8_VEC3_NV 0x8FEE
+#define GL_UNSIGNED_INT8_VEC4_NV 0x8FEF
+#define GL_UNSIGNED_INT16_NV 0x8FF0
+#define GL_UNSIGNED_INT16_VEC2_NV 0x8FF1
+#define GL_UNSIGNED_INT16_VEC3_NV 0x8FF2
+#define GL_UNSIGNED_INT16_VEC4_NV 0x8FF3
+#define GL_UNSIGNED_INT64_VEC2_NV 0x8FF5
+#define GL_UNSIGNED_INT64_VEC3_NV 0x8FF6
+#define GL_UNSIGNED_INT64_VEC4_NV 0x8FF7
+#define GL_FLOAT16_NV 0x8FF8
+#define GL_FLOAT16_VEC2_NV 0x8FF9
+#define GL_FLOAT16_VEC3_NV 0x8FFA
+#define GL_FLOAT16_VEC4_NV 0x8FFB
+#define GL_PATCHES 0x000E
+typedef void (GL_APIENTRYP PFNGLUNIFORM1I64NVPROC) (GLint location, GLint64EXT x);
+typedef void (GL_APIENTRYP PFNGLUNIFORM2I64NVPROC) (GLint location, GLint64EXT x, GLint64EXT y);
+typedef void (GL_APIENTRYP PFNGLUNIFORM3I64NVPROC) (GLint location, GLint64EXT x, GLint64EXT y, GLint64EXT z);
+typedef void (GL_APIENTRYP PFNGLUNIFORM4I64NVPROC) (GLint location, GLint64EXT x, GLint64EXT y, GLint64EXT z, GLint64EXT w);
+typedef void (GL_APIENTRYP PFNGLUNIFORM1I64VNVPROC) (GLint location, GLsizei count, const GLint64EXT *value);
+typedef void (GL_APIENTRYP PFNGLUNIFORM2I64VNVPROC) (GLint location, GLsizei count, const GLint64EXT *value);
+typedef void (GL_APIENTRYP PFNGLUNIFORM3I64VNVPROC) (GLint location, GLsizei count, const GLint64EXT *value);
+typedef void (GL_APIENTRYP PFNGLUNIFORM4I64VNVPROC) (GLint location, GLsizei count, const GLint64EXT *value);
+typedef void (GL_APIENTRYP PFNGLUNIFORM1UI64NVPROC) (GLint location, GLuint64EXT x);
+typedef void (GL_APIENTRYP PFNGLUNIFORM2UI64NVPROC) (GLint location, GLuint64EXT x, GLuint64EXT y);
+typedef void (GL_APIENTRYP PFNGLUNIFORM3UI64NVPROC) (GLint location, GLuint64EXT x, GLuint64EXT y, GLuint64EXT z);
+typedef void (GL_APIENTRYP PFNGLUNIFORM4UI64NVPROC) (GLint location, GLuint64EXT x, GLuint64EXT y, GLuint64EXT z, GLuint64EXT w);
+typedef void (GL_APIENTRYP PFNGLUNIFORM1UI64VNVPROC) (GLint location, GLsizei count, const GLuint64EXT *value);
+typedef void (GL_APIENTRYP PFNGLUNIFORM2UI64VNVPROC) (GLint location, GLsizei count, const GLuint64EXT *value);
+typedef void (GL_APIENTRYP PFNGLUNIFORM3UI64VNVPROC) (GLint location, GLsizei count, const GLuint64EXT *value);
+typedef void (GL_APIENTRYP PFNGLUNIFORM4UI64VNVPROC) (GLint location, GLsizei count, const GLuint64EXT *value);
+typedef void (GL_APIENTRYP PFNGLGETUNIFORMI64VNVPROC) (GLuint program, GLint location, GLint64EXT *params);
+typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM1I64NVPROC) (GLuint program, GLint location, GLint64EXT x);
+typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM2I64NVPROC) (GLuint program, GLint location, GLint64EXT x, GLint64EXT y);
+typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM3I64NVPROC) (GLuint program, GLint location, GLint64EXT x, GLint64EXT y, GLint64EXT z);
+typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM4I64NVPROC) (GLuint program, GLint location, GLint64EXT x, GLint64EXT y, GLint64EXT z, GLint64EXT w);
+typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM1I64VNVPROC) (GLuint program, GLint location, GLsizei count, const GLint64EXT *value);
+typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM2I64VNVPROC) (GLuint program, GLint location, GLsizei count, const GLint64EXT *value);
+typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM3I64VNVPROC) (GLuint program, GLint location, GLsizei count, const GLint64EXT *value);
+typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM4I64VNVPROC) (GLuint program, GLint location, GLsizei count, const GLint64EXT *value);
+typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM1UI64NVPROC) (GLuint program, GLint location, GLuint64EXT x);
+typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM2UI64NVPROC) (GLuint program, GLint location, GLuint64EXT x, GLuint64EXT y);
+typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM3UI64NVPROC) (GLuint program, GLint location, GLuint64EXT x, GLuint64EXT y, GLuint64EXT z);
+typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM4UI64NVPROC) (GLuint program, GLint location, GLuint64EXT x, GLuint64EXT y, GLuint64EXT z, GLuint64EXT w);
+typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM1UI64VNVPROC) (GLuint program, GLint location, GLsizei count, const GLuint64EXT *value);
+typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM2UI64VNVPROC) (GLuint program, GLint location, GLsizei count, const GLuint64EXT *value);
+typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM3UI64VNVPROC) (GLuint program, GLint location, GLsizei count, const GLuint64EXT *value);
+typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM4UI64VNVPROC) (GLuint program, GLint location, GLsizei count, const GLuint64EXT *value);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glUniform1i64NV (GLint location, GLint64EXT x);
+GL_APICALL void GL_APIENTRY glUniform2i64NV (GLint location, GLint64EXT x, GLint64EXT y);
+GL_APICALL void GL_APIENTRY glUniform3i64NV (GLint location, GLint64EXT x, GLint64EXT y, GLint64EXT z);
+GL_APICALL void GL_APIENTRY glUniform4i64NV (GLint location, GLint64EXT x, GLint64EXT y, GLint64EXT z, GLint64EXT w);
+GL_APICALL void GL_APIENTRY glUniform1i64vNV (GLint location, GLsizei count, const GLint64EXT *value);
+GL_APICALL void GL_APIENTRY glUniform2i64vNV (GLint location, GLsizei count, const GLint64EXT *value);
+GL_APICALL void GL_APIENTRY glUniform3i64vNV (GLint location, GLsizei count, const GLint64EXT *value);
+GL_APICALL void GL_APIENTRY glUniform4i64vNV (GLint location, GLsizei count, const GLint64EXT *value);
+GL_APICALL void GL_APIENTRY glUniform1ui64NV (GLint location, GLuint64EXT x);
+GL_APICALL void GL_APIENTRY glUniform2ui64NV (GLint location, GLuint64EXT x, GLuint64EXT y);
+GL_APICALL void GL_APIENTRY glUniform3ui64NV (GLint location, GLuint64EXT x, GLuint64EXT y, GLuint64EXT z);
+GL_APICALL void GL_APIENTRY glUniform4ui64NV (GLint location, GLuint64EXT x, GLuint64EXT y, GLuint64EXT z, GLuint64EXT w);
+GL_APICALL void GL_APIENTRY glUniform1ui64vNV (GLint location, GLsizei count, const GLuint64EXT *value);
+GL_APICALL void GL_APIENTRY glUniform2ui64vNV (GLint location, GLsizei count, const GLuint64EXT *value);
+GL_APICALL void GL_APIENTRY glUniform3ui64vNV (GLint location, GLsizei count, const GLuint64EXT *value);
+GL_APICALL void GL_APIENTRY glUniform4ui64vNV (GLint location, GLsizei count, const GLuint64EXT *value);
+GL_APICALL void GL_APIENTRY glGetUniformi64vNV (GLuint program, GLint location, GLint64EXT *params);
+GL_APICALL void GL_APIENTRY glProgramUniform1i64NV (GLuint program, GLint location, GLint64EXT x);
+GL_APICALL void GL_APIENTRY glProgramUniform2i64NV (GLuint program, GLint location, GLint64EXT x, GLint64EXT y);
+GL_APICALL void GL_APIENTRY glProgramUniform3i64NV (GLuint program, GLint location, GLint64EXT x, GLint64EXT y, GLint64EXT z);
+GL_APICALL void GL_APIENTRY glProgramUniform4i64NV (GLuint program, GLint location, GLint64EXT x, GLint64EXT y, GLint64EXT z, GLint64EXT w);
+GL_APICALL void GL_APIENTRY glProgramUniform1i64vNV (GLuint program, GLint location, GLsizei count, const GLint64EXT *value);
+GL_APICALL void GL_APIENTRY glProgramUniform2i64vNV (GLuint program, GLint location, GLsizei count, const GLint64EXT *value);
+GL_APICALL void GL_APIENTRY glProgramUniform3i64vNV (GLuint program, GLint location, GLsizei count, const GLint64EXT *value);
+GL_APICALL void GL_APIENTRY glProgramUniform4i64vNV (GLuint program, GLint location, GLsizei count, const GLint64EXT *value);
+GL_APICALL void GL_APIENTRY glProgramUniform1ui64NV (GLuint program, GLint location, GLuint64EXT x);
+GL_APICALL void GL_APIENTRY glProgramUniform2ui64NV (GLuint program, GLint location, GLuint64EXT x, GLuint64EXT y);
+GL_APICALL void GL_APIENTRY glProgramUniform3ui64NV (GLuint program, GLint location, GLuint64EXT x, GLuint64EXT y, GLuint64EXT z);
+GL_APICALL void GL_APIENTRY glProgramUniform4ui64NV (GLuint program, GLint location, GLuint64EXT x, GLuint64EXT y, GLuint64EXT z, GLuint64EXT w);
+GL_APICALL void GL_APIENTRY glProgramUniform1ui64vNV (GLuint program, GLint location, GLsizei count, const GLuint64EXT *value);
+GL_APICALL void GL_APIENTRY glProgramUniform2ui64vNV (GLuint program, GLint location, GLsizei count, const GLuint64EXT *value);
+GL_APICALL void GL_APIENTRY glProgramUniform3ui64vNV (GLuint program, GLint location, GLsizei count, const GLuint64EXT *value);
+GL_APICALL void GL_APIENTRY glProgramUniform4ui64vNV (GLuint program, GLint location, GLsizei count, const GLuint64EXT *value);
+#endif
+#endif /* GL_NV_gpu_shader5 */
+
+#ifndef GL_NV_image_formats
+#define GL_NV_image_formats 1
+#endif /* GL_NV_image_formats */
+
+#ifndef GL_NV_instanced_arrays
+#define GL_NV_instanced_arrays 1
+#define GL_VERTEX_ATTRIB_ARRAY_DIVISOR_NV 0x88FE
+typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBDIVISORNVPROC) (GLuint index, GLuint divisor);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glVertexAttribDivisorNV (GLuint index, GLuint divisor);
+#endif
+#endif /* GL_NV_instanced_arrays */
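+
+/*
+ * Illustrative sketch: per-instance vertex data via attribute divisors,
+ * combined with GL_NV_draw_instanced declared earlier. `instanceAttrib`,
+ * `vertexCount` and `instanceCount` are placeholders.
+ *
+ *   glVertexAttribDivisorNV(instanceAttrib, 1);  // advance once per instance
+ *   glDrawArraysInstancedNV(GL_TRIANGLES, 0, vertexCount, instanceCount);
+ */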
+
+#ifndef GL_NV_internalformat_sample_query
+#define GL_NV_internalformat_sample_query 1
+#define GL_TEXTURE_2D_MULTISAMPLE 0x9100
+#define GL_TEXTURE_2D_MULTISAMPLE_ARRAY 0x9102
+#define GL_MULTISAMPLES_NV 0x9371
+#define GL_SUPERSAMPLE_SCALE_X_NV 0x9372
+#define GL_SUPERSAMPLE_SCALE_Y_NV 0x9373
+#define GL_CONFORMANT_NV 0x9374
+typedef void (GL_APIENTRYP PFNGLGETINTERNALFORMATSAMPLEIVNVPROC) (GLenum target, GLenum internalformat, GLsizei samples, GLenum pname, GLsizei bufSize, GLint *params);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glGetInternalformatSampleivNV (GLenum target, GLenum internalformat, GLsizei samples, GLenum pname, GLsizei bufSize, GLint *params);
+#endif
+#endif /* GL_NV_internalformat_sample_query */
+
+#ifndef GL_NV_non_square_matrices
+#define GL_NV_non_square_matrices 1
+#define GL_FLOAT_MAT2x3_NV 0x8B65
+#define GL_FLOAT_MAT2x4_NV 0x8B66
+#define GL_FLOAT_MAT3x2_NV 0x8B67
+#define GL_FLOAT_MAT3x4_NV 0x8B68
+#define GL_FLOAT_MAT4x2_NV 0x8B69
+#define GL_FLOAT_MAT4x3_NV 0x8B6A
+typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX2X3FVNVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX3X2FVNVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX2X4FVNVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX4X2FVNVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX3X4FVNVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX4X3FVNVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+#ifdef GL_GLEXT_PROTOTYPES
+GL_APICALL void GL_APIENTRY glUniformMatrix2x3fvNV (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GL_APICALL void GL_APIENTRY glUniformMatrix3x2fvNV (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GL_APICALL void GL_APIENTRY glUniformMatrix2x4fvNV (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GL_APICALL void GL_APIENTRY glUniformMatrix4x2fvNV (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GL_APICALL void GL_APIENTRY glUniformMatrix3x4fvNV (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GL_APICALL void GL_APIENTRY glUniformMatrix4x3fvNV (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+#endif
+#endif /* GL_NV_non_square_matrices */
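+
+/*
+ * Illustrative sketch: uploading one 2x3 matrix uniform. The data is
+ * column-major (two columns of three rows), and `loc` is a placeholder
+ * uniform location.
+ *
+ *   const GLfloat m[6] = { 1.0f, 0.0f, 0.0f,    // column 0
+ *                          0.0f, 1.0f, 0.0f };  // column 1
+ *   glUniformMatrix2x3fvNV(loc, 1, GL_FALSE, m);
+ */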
GL_PATH_ERROR_POSITION_NV 0x90AB +#define GL_ACCUM_ADJACENT_PAIRS_NV 0x90AD +#define GL_ADJACENT_PAIRS_NV 0x90AE +#define GL_FIRST_TO_REST_NV 0x90AF +#define GL_PATH_GEN_MODE_NV 0x90B0 +#define GL_PATH_GEN_COEFF_NV 0x90B1 +#define GL_PATH_GEN_COMPONENTS_NV 0x90B3 +#define GL_PATH_STENCIL_FUNC_NV 0x90B7 +#define GL_PATH_STENCIL_REF_NV 0x90B8 +#define GL_PATH_STENCIL_VALUE_MASK_NV 0x90B9 +#define GL_PATH_STENCIL_DEPTH_OFFSET_FACTOR_NV 0x90BD +#define GL_PATH_STENCIL_DEPTH_OFFSET_UNITS_NV 0x90BE +#define GL_PATH_COVER_DEPTH_FUNC_NV 0x90BF +#define GL_PATH_DASH_OFFSET_RESET_NV 0x90B4 +#define GL_MOVE_TO_RESETS_NV 0x90B5 +#define GL_MOVE_TO_CONTINUES_NV 0x90B6 +#define GL_CLOSE_PATH_NV 0x00 +#define GL_MOVE_TO_NV 0x02 +#define GL_RELATIVE_MOVE_TO_NV 0x03 +#define GL_LINE_TO_NV 0x04 +#define GL_RELATIVE_LINE_TO_NV 0x05 +#define GL_HORIZONTAL_LINE_TO_NV 0x06 +#define GL_RELATIVE_HORIZONTAL_LINE_TO_NV 0x07 +#define GL_VERTICAL_LINE_TO_NV 0x08 +#define GL_RELATIVE_VERTICAL_LINE_TO_NV 0x09 +#define GL_QUADRATIC_CURVE_TO_NV 0x0A +#define GL_RELATIVE_QUADRATIC_CURVE_TO_NV 0x0B +#define GL_CUBIC_CURVE_TO_NV 0x0C +#define GL_RELATIVE_CUBIC_CURVE_TO_NV 0x0D +#define GL_SMOOTH_QUADRATIC_CURVE_TO_NV 0x0E +#define GL_RELATIVE_SMOOTH_QUADRATIC_CURVE_TO_NV 0x0F +#define GL_SMOOTH_CUBIC_CURVE_TO_NV 0x10 +#define GL_RELATIVE_SMOOTH_CUBIC_CURVE_TO_NV 0x11 +#define GL_SMALL_CCW_ARC_TO_NV 0x12 +#define GL_RELATIVE_SMALL_CCW_ARC_TO_NV 0x13 +#define GL_SMALL_CW_ARC_TO_NV 0x14 +#define GL_RELATIVE_SMALL_CW_ARC_TO_NV 0x15 +#define GL_LARGE_CCW_ARC_TO_NV 0x16 +#define GL_RELATIVE_LARGE_CCW_ARC_TO_NV 0x17 +#define GL_LARGE_CW_ARC_TO_NV 0x18 +#define GL_RELATIVE_LARGE_CW_ARC_TO_NV 0x19 +#define GL_RESTART_PATH_NV 0xF0 +#define GL_DUP_FIRST_CUBIC_CURVE_TO_NV 0xF2 +#define GL_DUP_LAST_CUBIC_CURVE_TO_NV 0xF4 +#define GL_RECT_NV 0xF6 +#define GL_CIRCULAR_CCW_ARC_TO_NV 0xF8 +#define GL_CIRCULAR_CW_ARC_TO_NV 0xFA +#define GL_CIRCULAR_TANGENT_ARC_TO_NV 0xFC +#define GL_ARC_TO_NV 0xFE +#define GL_RELATIVE_ARC_TO_NV 0xFF +#define GL_BOLD_BIT_NV 0x01 +#define GL_ITALIC_BIT_NV 0x02 +#define GL_GLYPH_WIDTH_BIT_NV 0x01 +#define GL_GLYPH_HEIGHT_BIT_NV 0x02 +#define GL_GLYPH_HORIZONTAL_BEARING_X_BIT_NV 0x04 +#define GL_GLYPH_HORIZONTAL_BEARING_Y_BIT_NV 0x08 +#define GL_GLYPH_HORIZONTAL_BEARING_ADVANCE_BIT_NV 0x10 +#define GL_GLYPH_VERTICAL_BEARING_X_BIT_NV 0x20 +#define GL_GLYPH_VERTICAL_BEARING_Y_BIT_NV 0x40 +#define GL_GLYPH_VERTICAL_BEARING_ADVANCE_BIT_NV 0x80 +#define GL_GLYPH_HAS_KERNING_BIT_NV 0x100 +#define GL_FONT_X_MIN_BOUNDS_BIT_NV 0x00010000 +#define GL_FONT_Y_MIN_BOUNDS_BIT_NV 0x00020000 +#define GL_FONT_X_MAX_BOUNDS_BIT_NV 0x00040000 +#define GL_FONT_Y_MAX_BOUNDS_BIT_NV 0x00080000 +#define GL_FONT_UNITS_PER_EM_BIT_NV 0x00100000 +#define GL_FONT_ASCENDER_BIT_NV 0x00200000 +#define GL_FONT_DESCENDER_BIT_NV 0x00400000 +#define GL_FONT_HEIGHT_BIT_NV 0x00800000 +#define GL_FONT_MAX_ADVANCE_WIDTH_BIT_NV 0x01000000 +#define GL_FONT_MAX_ADVANCE_HEIGHT_BIT_NV 0x02000000 +#define GL_FONT_UNDERLINE_POSITION_BIT_NV 0x04000000 +#define GL_FONT_UNDERLINE_THICKNESS_BIT_NV 0x08000000 +#define GL_FONT_HAS_KERNING_BIT_NV 0x10000000 +#define GL_ROUNDED_RECT_NV 0xE8 +#define GL_RELATIVE_ROUNDED_RECT_NV 0xE9 +#define GL_ROUNDED_RECT2_NV 0xEA +#define GL_RELATIVE_ROUNDED_RECT2_NV 0xEB +#define GL_ROUNDED_RECT4_NV 0xEC +#define GL_RELATIVE_ROUNDED_RECT4_NV 0xED +#define GL_ROUNDED_RECT8_NV 0xEE +#define GL_RELATIVE_ROUNDED_RECT8_NV 0xEF +#define GL_RELATIVE_RECT_NV 0xF7 +#define GL_FONT_GLYPHS_AVAILABLE_NV 0x9368 +#define 
GL_FONT_TARGET_UNAVAILABLE_NV 0x9369 +#define GL_FONT_UNAVAILABLE_NV 0x936A +#define GL_FONT_UNINTELLIGIBLE_NV 0x936B +#define GL_CONIC_CURVE_TO_NV 0x1A +#define GL_RELATIVE_CONIC_CURVE_TO_NV 0x1B +#define GL_FONT_NUM_GLYPH_INDICES_BIT_NV 0x20000000 +#define GL_STANDARD_FONT_FORMAT_NV 0x936C +#define GL_PATH_PROJECTION_NV 0x1701 +#define GL_PATH_MODELVIEW_NV 0x1700 +#define GL_PATH_MODELVIEW_STACK_DEPTH_NV 0x0BA3 +#define GL_PATH_MODELVIEW_MATRIX_NV 0x0BA6 +#define GL_PATH_MAX_MODELVIEW_STACK_DEPTH_NV 0x0D36 +#define GL_PATH_TRANSPOSE_MODELVIEW_MATRIX_NV 0x84E3 +#define GL_PATH_PROJECTION_STACK_DEPTH_NV 0x0BA4 +#define GL_PATH_PROJECTION_MATRIX_NV 0x0BA7 +#define GL_PATH_MAX_PROJECTION_STACK_DEPTH_NV 0x0D38 +#define GL_PATH_TRANSPOSE_PROJECTION_MATRIX_NV 0x84E4 +#define GL_FRAGMENT_INPUT_NV 0x936D +typedef GLuint (GL_APIENTRYP PFNGLGENPATHSNVPROC) (GLsizei range); +typedef void (GL_APIENTRYP PFNGLDELETEPATHSNVPROC) (GLuint path, GLsizei range); +typedef GLboolean (GL_APIENTRYP PFNGLISPATHNVPROC) (GLuint path); +typedef void (GL_APIENTRYP PFNGLPATHCOMMANDSNVPROC) (GLuint path, GLsizei numCommands, const GLubyte *commands, GLsizei numCoords, GLenum coordType, const void *coords); +typedef void (GL_APIENTRYP PFNGLPATHCOORDSNVPROC) (GLuint path, GLsizei numCoords, GLenum coordType, const void *coords); +typedef void (GL_APIENTRYP PFNGLPATHSUBCOMMANDSNVPROC) (GLuint path, GLsizei commandStart, GLsizei commandsToDelete, GLsizei numCommands, const GLubyte *commands, GLsizei numCoords, GLenum coordType, const void *coords); +typedef void (GL_APIENTRYP PFNGLPATHSUBCOORDSNVPROC) (GLuint path, GLsizei coordStart, GLsizei numCoords, GLenum coordType, const void *coords); +typedef void (GL_APIENTRYP PFNGLPATHSTRINGNVPROC) (GLuint path, GLenum format, GLsizei length, const void *pathString); +typedef void (GL_APIENTRYP PFNGLPATHGLYPHSNVPROC) (GLuint firstPathName, GLenum fontTarget, const void *fontName, GLbitfield fontStyle, GLsizei numGlyphs, GLenum type, const void *charcodes, GLenum handleMissingGlyphs, GLuint pathParameterTemplate, GLfloat emScale); +typedef void (GL_APIENTRYP PFNGLPATHGLYPHRANGENVPROC) (GLuint firstPathName, GLenum fontTarget, const void *fontName, GLbitfield fontStyle, GLuint firstGlyph, GLsizei numGlyphs, GLenum handleMissingGlyphs, GLuint pathParameterTemplate, GLfloat emScale); +typedef void (GL_APIENTRYP PFNGLWEIGHTPATHSNVPROC) (GLuint resultPath, GLsizei numPaths, const GLuint *paths, const GLfloat *weights); +typedef void (GL_APIENTRYP PFNGLCOPYPATHNVPROC) (GLuint resultPath, GLuint srcPath); +typedef void (GL_APIENTRYP PFNGLINTERPOLATEPATHSNVPROC) (GLuint resultPath, GLuint pathA, GLuint pathB, GLfloat weight); +typedef void (GL_APIENTRYP PFNGLTRANSFORMPATHNVPROC) (GLuint resultPath, GLuint srcPath, GLenum transformType, const GLfloat *transformValues); +typedef void (GL_APIENTRYP PFNGLPATHPARAMETERIVNVPROC) (GLuint path, GLenum pname, const GLint *value); +typedef void (GL_APIENTRYP PFNGLPATHPARAMETERINVPROC) (GLuint path, GLenum pname, GLint value); +typedef void (GL_APIENTRYP PFNGLPATHPARAMETERFVNVPROC) (GLuint path, GLenum pname, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPATHPARAMETERFNVPROC) (GLuint path, GLenum pname, GLfloat value); +typedef void (GL_APIENTRYP PFNGLPATHDASHARRAYNVPROC) (GLuint path, GLsizei dashCount, const GLfloat *dashArray); +typedef void (GL_APIENTRYP PFNGLPATHSTENCILFUNCNVPROC) (GLenum func, GLint ref, GLuint mask); +typedef void (GL_APIENTRYP PFNGLPATHSTENCILDEPTHOFFSETNVPROC) (GLfloat factor, GLfloat units); +typedef void 
(GL_APIENTRYP PFNGLSTENCILFILLPATHNVPROC) (GLuint path, GLenum fillMode, GLuint mask); +typedef void (GL_APIENTRYP PFNGLSTENCILSTROKEPATHNVPROC) (GLuint path, GLint reference, GLuint mask); +typedef void (GL_APIENTRYP PFNGLSTENCILFILLPATHINSTANCEDNVPROC) (GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLenum fillMode, GLuint mask, GLenum transformType, const GLfloat *transformValues); +typedef void (GL_APIENTRYP PFNGLSTENCILSTROKEPATHINSTANCEDNVPROC) (GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLint reference, GLuint mask, GLenum transformType, const GLfloat *transformValues); +typedef void (GL_APIENTRYP PFNGLPATHCOVERDEPTHFUNCNVPROC) (GLenum func); +typedef void (GL_APIENTRYP PFNGLCOVERFILLPATHNVPROC) (GLuint path, GLenum coverMode); +typedef void (GL_APIENTRYP PFNGLCOVERSTROKEPATHNVPROC) (GLuint path, GLenum coverMode); +typedef void (GL_APIENTRYP PFNGLCOVERFILLPATHINSTANCEDNVPROC) (GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLenum coverMode, GLenum transformType, const GLfloat *transformValues); +typedef void (GL_APIENTRYP PFNGLCOVERSTROKEPATHINSTANCEDNVPROC) (GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLenum coverMode, GLenum transformType, const GLfloat *transformValues); +typedef void (GL_APIENTRYP PFNGLGETPATHPARAMETERIVNVPROC) (GLuint path, GLenum pname, GLint *value); +typedef void (GL_APIENTRYP PFNGLGETPATHPARAMETERFVNVPROC) (GLuint path, GLenum pname, GLfloat *value); +typedef void (GL_APIENTRYP PFNGLGETPATHCOMMANDSNVPROC) (GLuint path, GLubyte *commands); +typedef void (GL_APIENTRYP PFNGLGETPATHCOORDSNVPROC) (GLuint path, GLfloat *coords); +typedef void (GL_APIENTRYP PFNGLGETPATHDASHARRAYNVPROC) (GLuint path, GLfloat *dashArray); +typedef void (GL_APIENTRYP PFNGLGETPATHMETRICSNVPROC) (GLbitfield metricQueryMask, GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLsizei stride, GLfloat *metrics); +typedef void (GL_APIENTRYP PFNGLGETPATHMETRICRANGENVPROC) (GLbitfield metricQueryMask, GLuint firstPathName, GLsizei numPaths, GLsizei stride, GLfloat *metrics); +typedef void (GL_APIENTRYP PFNGLGETPATHSPACINGNVPROC) (GLenum pathListMode, GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLfloat advanceScale, GLfloat kerningScale, GLenum transformType, GLfloat *returnedSpacing); +typedef GLboolean (GL_APIENTRYP PFNGLISPOINTINFILLPATHNVPROC) (GLuint path, GLuint mask, GLfloat x, GLfloat y); +typedef GLboolean (GL_APIENTRYP PFNGLISPOINTINSTROKEPATHNVPROC) (GLuint path, GLfloat x, GLfloat y); +typedef GLfloat (GL_APIENTRYP PFNGLGETPATHLENGTHNVPROC) (GLuint path, GLsizei startSegment, GLsizei numSegments); +typedef GLboolean (GL_APIENTRYP PFNGLPOINTALONGPATHNVPROC) (GLuint path, GLsizei startSegment, GLsizei numSegments, GLfloat distance, GLfloat *x, GLfloat *y, GLfloat *tangentX, GLfloat *tangentY); +typedef void (GL_APIENTRYP PFNGLMATRIXLOAD3X2FNVPROC) (GLenum matrixMode, const GLfloat *m); +typedef void (GL_APIENTRYP PFNGLMATRIXLOAD3X3FNVPROC) (GLenum matrixMode, const GLfloat *m); +typedef void (GL_APIENTRYP PFNGLMATRIXLOADTRANSPOSE3X3FNVPROC) (GLenum matrixMode, const GLfloat *m); +typedef void (GL_APIENTRYP PFNGLMATRIXMULT3X2FNVPROC) (GLenum matrixMode, const GLfloat *m); +typedef void (GL_APIENTRYP PFNGLMATRIXMULT3X3FNVPROC) (GLenum matrixMode, const GLfloat *m); +typedef void (GL_APIENTRYP PFNGLMATRIXMULTTRANSPOSE3X3FNVPROC) (GLenum matrixMode, const GLfloat *m); +typedef void (GL_APIENTRYP 
PFNGLSTENCILTHENCOVERFILLPATHNVPROC) (GLuint path, GLenum fillMode, GLuint mask, GLenum coverMode); +typedef void (GL_APIENTRYP PFNGLSTENCILTHENCOVERSTROKEPATHNVPROC) (GLuint path, GLint reference, GLuint mask, GLenum coverMode); +typedef void (GL_APIENTRYP PFNGLSTENCILTHENCOVERFILLPATHINSTANCEDNVPROC) (GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLenum fillMode, GLuint mask, GLenum coverMode, GLenum transformType, const GLfloat *transformValues); +typedef void (GL_APIENTRYP PFNGLSTENCILTHENCOVERSTROKEPATHINSTANCEDNVPROC) (GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLint reference, GLuint mask, GLenum coverMode, GLenum transformType, const GLfloat *transformValues); +typedef GLenum (GL_APIENTRYP PFNGLPATHGLYPHINDEXRANGENVPROC) (GLenum fontTarget, const void *fontName, GLbitfield fontStyle, GLuint pathParameterTemplate, GLfloat emScale, GLuint baseAndCount[2]); +typedef GLenum (GL_APIENTRYP PFNGLPATHGLYPHINDEXARRAYNVPROC) (GLuint firstPathName, GLenum fontTarget, const void *fontName, GLbitfield fontStyle, GLuint firstGlyphIndex, GLsizei numGlyphs, GLuint pathParameterTemplate, GLfloat emScale); +typedef GLenum (GL_APIENTRYP PFNGLPATHMEMORYGLYPHINDEXARRAYNVPROC) (GLuint firstPathName, GLenum fontTarget, GLsizeiptr fontSize, const void *fontData, GLsizei faceIndex, GLuint firstGlyphIndex, GLsizei numGlyphs, GLuint pathParameterTemplate, GLfloat emScale); +typedef void (GL_APIENTRYP PFNGLPROGRAMPATHFRAGMENTINPUTGENNVPROC) (GLuint program, GLint location, GLenum genMode, GLint components, const GLfloat *coeffs); +typedef void (GL_APIENTRYP PFNGLGETPROGRAMRESOURCEFVNVPROC) (GLuint program, GLenum programInterface, GLuint index, GLsizei propCount, const GLenum *props, GLsizei bufSize, GLsizei *length, GLfloat *params); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL GLuint GL_APIENTRY glGenPathsNV (GLsizei range); +GL_APICALL void GL_APIENTRY glDeletePathsNV (GLuint path, GLsizei range); +GL_APICALL GLboolean GL_APIENTRY glIsPathNV (GLuint path); +GL_APICALL void GL_APIENTRY glPathCommandsNV (GLuint path, GLsizei numCommands, const GLubyte *commands, GLsizei numCoords, GLenum coordType, const void *coords); +GL_APICALL void GL_APIENTRY glPathCoordsNV (GLuint path, GLsizei numCoords, GLenum coordType, const void *coords); +GL_APICALL void GL_APIENTRY glPathSubCommandsNV (GLuint path, GLsizei commandStart, GLsizei commandsToDelete, GLsizei numCommands, const GLubyte *commands, GLsizei numCoords, GLenum coordType, const void *coords); +GL_APICALL void GL_APIENTRY glPathSubCoordsNV (GLuint path, GLsizei coordStart, GLsizei numCoords, GLenum coordType, const void *coords); +GL_APICALL void GL_APIENTRY glPathStringNV (GLuint path, GLenum format, GLsizei length, const void *pathString); +GL_APICALL void GL_APIENTRY glPathGlyphsNV (GLuint firstPathName, GLenum fontTarget, const void *fontName, GLbitfield fontStyle, GLsizei numGlyphs, GLenum type, const void *charcodes, GLenum handleMissingGlyphs, GLuint pathParameterTemplate, GLfloat emScale); +GL_APICALL void GL_APIENTRY glPathGlyphRangeNV (GLuint firstPathName, GLenum fontTarget, const void *fontName, GLbitfield fontStyle, GLuint firstGlyph, GLsizei numGlyphs, GLenum handleMissingGlyphs, GLuint pathParameterTemplate, GLfloat emScale); +GL_APICALL void GL_APIENTRY glWeightPathsNV (GLuint resultPath, GLsizei numPaths, const GLuint *paths, const GLfloat *weights); +GL_APICALL void GL_APIENTRY glCopyPathNV (GLuint resultPath, GLuint srcPath); +GL_APICALL void GL_APIENTRY glInterpolatePathsNV (GLuint 
resultPath, GLuint pathA, GLuint pathB, GLfloat weight); +GL_APICALL void GL_APIENTRY glTransformPathNV (GLuint resultPath, GLuint srcPath, GLenum transformType, const GLfloat *transformValues); +GL_APICALL void GL_APIENTRY glPathParameterivNV (GLuint path, GLenum pname, const GLint *value); +GL_APICALL void GL_APIENTRY glPathParameteriNV (GLuint path, GLenum pname, GLint value); +GL_APICALL void GL_APIENTRY glPathParameterfvNV (GLuint path, GLenum pname, const GLfloat *value); +GL_APICALL void GL_APIENTRY glPathParameterfNV (GLuint path, GLenum pname, GLfloat value); +GL_APICALL void GL_APIENTRY glPathDashArrayNV (GLuint path, GLsizei dashCount, const GLfloat *dashArray); +GL_APICALL void GL_APIENTRY glPathStencilFuncNV (GLenum func, GLint ref, GLuint mask); +GL_APICALL void GL_APIENTRY glPathStencilDepthOffsetNV (GLfloat factor, GLfloat units); +GL_APICALL void GL_APIENTRY glStencilFillPathNV (GLuint path, GLenum fillMode, GLuint mask); +GL_APICALL void GL_APIENTRY glStencilStrokePathNV (GLuint path, GLint reference, GLuint mask); +GL_APICALL void GL_APIENTRY glStencilFillPathInstancedNV (GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLenum fillMode, GLuint mask, GLenum transformType, const GLfloat *transformValues); +GL_APICALL void GL_APIENTRY glStencilStrokePathInstancedNV (GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLint reference, GLuint mask, GLenum transformType, const GLfloat *transformValues); +GL_APICALL void GL_APIENTRY glPathCoverDepthFuncNV (GLenum func); +GL_APICALL void GL_APIENTRY glCoverFillPathNV (GLuint path, GLenum coverMode); +GL_APICALL void GL_APIENTRY glCoverStrokePathNV (GLuint path, GLenum coverMode); +GL_APICALL void GL_APIENTRY glCoverFillPathInstancedNV (GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLenum coverMode, GLenum transformType, const GLfloat *transformValues); +GL_APICALL void GL_APIENTRY glCoverStrokePathInstancedNV (GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLenum coverMode, GLenum transformType, const GLfloat *transformValues); +GL_APICALL void GL_APIENTRY glGetPathParameterivNV (GLuint path, GLenum pname, GLint *value); +GL_APICALL void GL_APIENTRY glGetPathParameterfvNV (GLuint path, GLenum pname, GLfloat *value); +GL_APICALL void GL_APIENTRY glGetPathCommandsNV (GLuint path, GLubyte *commands); +GL_APICALL void GL_APIENTRY glGetPathCoordsNV (GLuint path, GLfloat *coords); +GL_APICALL void GL_APIENTRY glGetPathDashArrayNV (GLuint path, GLfloat *dashArray); +GL_APICALL void GL_APIENTRY glGetPathMetricsNV (GLbitfield metricQueryMask, GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLsizei stride, GLfloat *metrics); +GL_APICALL void GL_APIENTRY glGetPathMetricRangeNV (GLbitfield metricQueryMask, GLuint firstPathName, GLsizei numPaths, GLsizei stride, GLfloat *metrics); +GL_APICALL void GL_APIENTRY glGetPathSpacingNV (GLenum pathListMode, GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLfloat advanceScale, GLfloat kerningScale, GLenum transformType, GLfloat *returnedSpacing); +GL_APICALL GLboolean GL_APIENTRY glIsPointInFillPathNV (GLuint path, GLuint mask, GLfloat x, GLfloat y); +GL_APICALL GLboolean GL_APIENTRY glIsPointInStrokePathNV (GLuint path, GLfloat x, GLfloat y); +GL_APICALL GLfloat GL_APIENTRY glGetPathLengthNV (GLuint path, GLsizei startSegment, GLsizei numSegments); +GL_APICALL GLboolean GL_APIENTRY glPointAlongPathNV (GLuint path, GLsizei startSegment, 
GLsizei numSegments, GLfloat distance, GLfloat *x, GLfloat *y, GLfloat *tangentX, GLfloat *tangentY); +GL_APICALL void GL_APIENTRY glMatrixLoad3x2fNV (GLenum matrixMode, const GLfloat *m); +GL_APICALL void GL_APIENTRY glMatrixLoad3x3fNV (GLenum matrixMode, const GLfloat *m); +GL_APICALL void GL_APIENTRY glMatrixLoadTranspose3x3fNV (GLenum matrixMode, const GLfloat *m); +GL_APICALL void GL_APIENTRY glMatrixMult3x2fNV (GLenum matrixMode, const GLfloat *m); +GL_APICALL void GL_APIENTRY glMatrixMult3x3fNV (GLenum matrixMode, const GLfloat *m); +GL_APICALL void GL_APIENTRY glMatrixMultTranspose3x3fNV (GLenum matrixMode, const GLfloat *m); +GL_APICALL void GL_APIENTRY glStencilThenCoverFillPathNV (GLuint path, GLenum fillMode, GLuint mask, GLenum coverMode); +GL_APICALL void GL_APIENTRY glStencilThenCoverStrokePathNV (GLuint path, GLint reference, GLuint mask, GLenum coverMode); +GL_APICALL void GL_APIENTRY glStencilThenCoverFillPathInstancedNV (GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLenum fillMode, GLuint mask, GLenum coverMode, GLenum transformType, const GLfloat *transformValues); +GL_APICALL void GL_APIENTRY glStencilThenCoverStrokePathInstancedNV (GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLint reference, GLuint mask, GLenum coverMode, GLenum transformType, const GLfloat *transformValues); +GL_APICALL GLenum GL_APIENTRY glPathGlyphIndexRangeNV (GLenum fontTarget, const void *fontName, GLbitfield fontStyle, GLuint pathParameterTemplate, GLfloat emScale, GLuint baseAndCount[2]); +GL_APICALL GLenum GL_APIENTRY glPathGlyphIndexArrayNV (GLuint firstPathName, GLenum fontTarget, const void *fontName, GLbitfield fontStyle, GLuint firstGlyphIndex, GLsizei numGlyphs, GLuint pathParameterTemplate, GLfloat emScale); +GL_APICALL GLenum GL_APIENTRY glPathMemoryGlyphIndexArrayNV (GLuint firstPathName, GLenum fontTarget, GLsizeiptr fontSize, const void *fontData, GLsizei faceIndex, GLuint firstGlyphIndex, GLsizei numGlyphs, GLuint pathParameterTemplate, GLfloat emScale); +GL_APICALL void GL_APIENTRY glProgramPathFragmentInputGenNV (GLuint program, GLint location, GLenum genMode, GLint components, const GLfloat *coeffs); +GL_APICALL void GL_APIENTRY glGetProgramResourcefvNV (GLuint program, GLenum programInterface, GLuint index, GLsizei propCount, const GLenum *props, GLsizei bufSize, GLsizei *length, GLfloat *params); +#endif +#endif /* GL_NV_path_rendering */
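The block above is the whole of NV_path_rendering, and the raw list is dense, so a minimal usage sketch of the "stencil, then cover" idiom the extension is built around may help. It assumes GL_GLEXT_PROTOTYPES is defined so the gl*NV prototypes above are visible (otherwise each entry point must first be resolved through eglGetProcAddress), that the drawable has a stencil buffer, and that on OpenGL ES a program whose fragment inputs were configured with glProgramPathFragmentInputGenNV is bound; the SVG string is an arbitrary example, not part of the extension.

```c
#include <string.h>
#include <GLES2/gl2.h>
#include <GLES2/gl2ext.h>

/* "Stencil, then cover": pass 1 rasterizes the path's fill into the
 * stencil buffer as winding counts; pass 2 shades exactly the samples
 * that pass 1 marked non-zero.  A suitable program must be bound. */
void draw_svg_path(void)
{
    static const char svg[] = "M100,180 C40,120 40,40 100,90 "
                              "C160,40 160,120 100,180 Z";

    GLuint path = glGenPathsNV(1);
    glPathStringNV(path, GL_PATH_FORMAT_SVG_NV, (GLsizei)strlen(svg), svg);

    glStencilFillPathNV(path, GL_COUNT_UP_NV, 0x1F);   /* pass 1 */

    glEnable(GL_STENCIL_TEST);
    glStencilFunc(GL_NOTEQUAL, 0, 0x1F);
    glStencilOp(GL_KEEP, GL_KEEP, GL_ZERO);            /* consume the counts */
    glCoverFillPathNV(path, GL_BOUNDING_BOX_NV);       /* pass 2 */

    glDeletePathsNV(path, 1);
}
```

The fused glStencilThenCoverFillPathNV declared just above collapses the two passes into one call when no state change is needed between them.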
+ +#ifndef GL_NV_path_rendering_shared_edge +#define GL_NV_path_rendering_shared_edge 1 +#define GL_SHARED_EDGE_NV 0xC0 +#endif /* GL_NV_path_rendering_shared_edge */ + +#ifndef GL_NV_polygon_mode +#define GL_NV_polygon_mode 1 +#define GL_POLYGON_MODE_NV 0x0B40 +#define GL_POLYGON_OFFSET_POINT_NV 0x2A01 +#define GL_POLYGON_OFFSET_LINE_NV 0x2A02 +#define GL_POINT_NV 0x1B00 +#define GL_LINE_NV 0x1B01 +#define GL_FILL_NV 0x1B02 +typedef void (GL_APIENTRYP PFNGLPOLYGONMODENVPROC) (GLenum face, GLenum mode); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glPolygonModeNV (GLenum face, GLenum mode); +#endif +#endif /* GL_NV_polygon_mode */ + +#ifndef GL_NV_read_buffer +#define GL_NV_read_buffer 1 +#define GL_READ_BUFFER_NV 0x0C02 +typedef void (GL_APIENTRYP PFNGLREADBUFFERNVPROC) (GLenum mode); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glReadBufferNV (GLenum mode); +#endif +#endif /* GL_NV_read_buffer */ + +#ifndef GL_NV_read_buffer_front +#define GL_NV_read_buffer_front 1 +#endif /* GL_NV_read_buffer_front */ + +#ifndef GL_NV_read_depth +#define GL_NV_read_depth 1 +#endif /* GL_NV_read_depth */ + +#ifndef GL_NV_read_depth_stencil +#define GL_NV_read_depth_stencil 1 +#endif /* GL_NV_read_depth_stencil */ + +#ifndef GL_NV_read_stencil +#define GL_NV_read_stencil 1 +#endif /* GL_NV_read_stencil */ + +#ifndef GL_NV_sRGB_formats +#define GL_NV_sRGB_formats 1 +#define GL_SLUMINANCE_NV 0x8C46 +#define GL_SLUMINANCE_ALPHA_NV 0x8C44 +#define GL_SRGB8_NV 0x8C41 +#define GL_SLUMINANCE8_NV 0x8C47 +#define GL_SLUMINANCE8_ALPHA8_NV 0x8C45 +#define GL_COMPRESSED_SRGB_S3TC_DXT1_NV 0x8C4C +#define GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_NV 0x8C4D +#define GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_NV 0x8C4E +#define GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_NV 0x8C4F +#define GL_ETC1_SRGB8_NV 0x88EE +#endif /* GL_NV_sRGB_formats */ + +#ifndef GL_NV_sample_locations +#define GL_NV_sample_locations 1 +#define GL_SAMPLE_LOCATION_SUBPIXEL_BITS_NV 0x933D +#define GL_SAMPLE_LOCATION_PIXEL_GRID_WIDTH_NV 0x933E +#define GL_SAMPLE_LOCATION_PIXEL_GRID_HEIGHT_NV 0x933F +#define GL_PROGRAMMABLE_SAMPLE_LOCATION_TABLE_SIZE_NV 0x9340 +#define GL_SAMPLE_LOCATION_NV 0x8E50 +#define GL_PROGRAMMABLE_SAMPLE_LOCATION_NV 0x9341 +#define GL_FRAMEBUFFER_PROGRAMMABLE_SAMPLE_LOCATIONS_NV 0x9342 +#define GL_FRAMEBUFFER_SAMPLE_LOCATION_PIXEL_GRID_NV 0x9343 +typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERSAMPLELOCATIONSFVNVPROC) (GLenum target, GLuint start, GLsizei count, const GLfloat *v); +typedef void (GL_APIENTRYP PFNGLNAMEDFRAMEBUFFERSAMPLELOCATIONSFVNVPROC) (GLuint framebuffer, GLuint start, GLsizei count, const GLfloat *v); +typedef void (GL_APIENTRYP PFNGLRESOLVEDEPTHVALUESNVPROC) (void); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glFramebufferSampleLocationsfvNV (GLenum target, GLuint start, GLsizei count, const GLfloat *v); +GL_APICALL void GL_APIENTRY glNamedFramebufferSampleLocationsfvNV (GLuint framebuffer, GLuint start, GLsizei count, const GLfloat *v); +GL_APICALL void GL_APIENTRY glResolveDepthValuesNV (void); +#endif +#endif /* GL_NV_sample_locations */ + +#ifndef GL_NV_sample_mask_override_coverage +#define GL_NV_sample_mask_override_coverage 1 +#endif /* GL_NV_sample_mask_override_coverage */ + +#ifndef GL_NV_shader_atomic_fp16_vector +#define GL_NV_shader_atomic_fp16_vector 1 +#endif /* GL_NV_shader_atomic_fp16_vector */ + +#ifndef GL_NV_shader_noperspective_interpolation +#define GL_NV_shader_noperspective_interpolation 1 +#endif /* GL_NV_shader_noperspective_interpolation */ + +#ifndef GL_NV_shadow_samplers_array +#define GL_NV_shadow_samplers_array 1 +#define GL_SAMPLER_2D_ARRAY_SHADOW_NV 0x8DC4 +#endif /* GL_NV_shadow_samplers_array */ + +#ifndef GL_NV_shadow_samplers_cube +#define GL_NV_shadow_samplers_cube 1 +#define GL_SAMPLER_CUBE_SHADOW_NV 0x8DC5 +#endif /* GL_NV_shadow_samplers_cube */ + +#ifndef GL_NV_texture_border_clamp +#define GL_NV_texture_border_clamp 1 +#define GL_TEXTURE_BORDER_COLOR_NV 0x1004 +#define GL_CLAMP_TO_BORDER_NV 0x812D +#endif /* GL_NV_texture_border_clamp */ + +#ifndef GL_NV_texture_compression_s3tc_update +#define GL_NV_texture_compression_s3tc_update 1 +#endif /* GL_NV_texture_compression_s3tc_update */ + +#ifndef GL_NV_texture_npot_2D_mipmap +#define GL_NV_texture_npot_2D_mipmap 1 +#endif /* GL_NV_texture_npot_2D_mipmap */ + +#ifndef GL_NV_viewport_array +#define GL_NV_viewport_array 1 +#define GL_MAX_VIEWPORTS_NV 0x825B +#define GL_VIEWPORT_SUBPIXEL_BITS_NV 0x825C +#define GL_VIEWPORT_BOUNDS_RANGE_NV 0x825D +#define GL_VIEWPORT_INDEX_PROVOKING_VERTEX_NV 0x825F
+typedef void (GL_APIENTRYP PFNGLVIEWPORTARRAYVNVPROC) (GLuint first, GLsizei count, const GLfloat *v); +typedef void (GL_APIENTRYP PFNGLVIEWPORTINDEXEDFNVPROC) (GLuint index, GLfloat x, GLfloat y, GLfloat w, GLfloat h); +typedef void (GL_APIENTRYP PFNGLVIEWPORTINDEXEDFVNVPROC) (GLuint index, const GLfloat *v); +typedef void (GL_APIENTRYP PFNGLSCISSORARRAYVNVPROC) (GLuint first, GLsizei count, const GLint *v); +typedef void (GL_APIENTRYP PFNGLSCISSORINDEXEDNVPROC) (GLuint index, GLint left, GLint bottom, GLsizei width, GLsizei height); +typedef void (GL_APIENTRYP PFNGLSCISSORINDEXEDVNVPROC) (GLuint index, const GLint *v); +typedef void (GL_APIENTRYP PFNGLDEPTHRANGEARRAYFVNVPROC) (GLuint first, GLsizei count, const GLfloat *v); +typedef void (GL_APIENTRYP PFNGLDEPTHRANGEINDEXEDFNVPROC) (GLuint index, GLfloat n, GLfloat f); +typedef void (GL_APIENTRYP PFNGLGETFLOATI_VNVPROC) (GLenum target, GLuint index, GLfloat *data); +typedef void (GL_APIENTRYP PFNGLENABLEINVPROC) (GLenum target, GLuint index); +typedef void (GL_APIENTRYP PFNGLDISABLEINVPROC) (GLenum target, GLuint index); +typedef GLboolean (GL_APIENTRYP PFNGLISENABLEDINVPROC) (GLenum target, GLuint index); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glViewportArrayvNV (GLuint first, GLsizei count, const GLfloat *v); +GL_APICALL void GL_APIENTRY glViewportIndexedfNV (GLuint index, GLfloat x, GLfloat y, GLfloat w, GLfloat h); +GL_APICALL void GL_APIENTRY glViewportIndexedfvNV (GLuint index, const GLfloat *v); +GL_APICALL void GL_APIENTRY glScissorArrayvNV (GLuint first, GLsizei count, const GLint *v); +GL_APICALL void GL_APIENTRY glScissorIndexedNV (GLuint index, GLint left, GLint bottom, GLsizei width, GLsizei height); +GL_APICALL void GL_APIENTRY glScissorIndexedvNV (GLuint index, const GLint *v); +GL_APICALL void GL_APIENTRY glDepthRangeArrayfvNV (GLuint first, GLsizei count, const GLfloat *v); +GL_APICALL void GL_APIENTRY glDepthRangeIndexedfNV (GLuint index, GLfloat n, GLfloat f); +GL_APICALL void GL_APIENTRY glGetFloati_vNV (GLenum target, GLuint index, GLfloat *data); +GL_APICALL void GL_APIENTRY glEnableiNV (GLenum target, GLuint index); +GL_APICALL void GL_APIENTRY glDisableiNV (GLenum target, GLuint index); +GL_APICALL GLboolean GL_APIENTRY glIsEnablediNV (GLenum target, GLuint index); +#endif +#endif /* GL_NV_viewport_array */ + +#ifndef GL_NV_viewport_array2 +#define GL_NV_viewport_array2 1 +#endif /* GL_NV_viewport_array2 */ + +#ifndef GL_NV_viewport_swizzle +#define GL_NV_viewport_swizzle 1 +#define GL_VIEWPORT_SWIZZLE_POSITIVE_X_NV 0x9350 +#define GL_VIEWPORT_SWIZZLE_NEGATIVE_X_NV 0x9351 +#define GL_VIEWPORT_SWIZZLE_POSITIVE_Y_NV 0x9352 +#define GL_VIEWPORT_SWIZZLE_NEGATIVE_Y_NV 0x9353 +#define GL_VIEWPORT_SWIZZLE_POSITIVE_Z_NV 0x9354 +#define GL_VIEWPORT_SWIZZLE_NEGATIVE_Z_NV 0x9355 +#define GL_VIEWPORT_SWIZZLE_POSITIVE_W_NV 0x9356 +#define GL_VIEWPORT_SWIZZLE_NEGATIVE_W_NV 0x9357 +#define GL_VIEWPORT_SWIZZLE_X_NV 0x9358 +#define GL_VIEWPORT_SWIZZLE_Y_NV 0x9359 +#define GL_VIEWPORT_SWIZZLE_Z_NV 0x935A +#define GL_VIEWPORT_SWIZZLE_W_NV 0x935B +typedef void (GL_APIENTRYP PFNGLVIEWPORTSWIZZLENVPROC) (GLuint index, GLenum swizzlex, GLenum swizzley, GLenum swizzlez, GLenum swizzlew); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glViewportSwizzleNV (GLuint index, GLenum swizzlex, GLenum swizzley, GLenum swizzlez, GLenum swizzlew); +#endif +#endif /* GL_NV_viewport_swizzle */
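Note that outside GL_GLEXT_PROTOTYPES builds only the PFNGL...PROC pointer types above are available, so extension entry points are resolved at run time. A minimal loader sketch for the viewport-array functions just declared, assuming an EGL platform and a context whose GL_EXTENSIONS string advertises GL_NV_viewport_array; the split-screen layout at the end is an arbitrary illustration:

```c
#include <stddef.h>
#include <EGL/egl.h>
#include <GLES2/gl2.h>
#include <GLES2/gl2ext.h>

static PFNGLVIEWPORTARRAYVNVPROC pglViewportArrayvNV;
static PFNGLSCISSORARRAYVNVPROC  pglScissorArrayvNV;

/* Fetch the NV entry points through EGL; returns 0 if the driver does
 * not export them. */
int load_nv_viewport_array(void)
{
    pglViewportArrayvNV = (PFNGLVIEWPORTARRAYVNVPROC)
        eglGetProcAddress("glViewportArrayvNV");
    pglScissorArrayvNV = (PFNGLSCISSORARRAYVNVPROC)
        eglGetProcAddress("glScissorArrayvNV");
    return pglViewportArrayvNV != NULL && pglScissorArrayvNV != NULL;
}

/* Two side-by-side viewports, packed as {x, y, w, h} per viewport. */
void set_split_screen(GLfloat w, GLfloat h)
{
    const GLfloat vp[8] = { 0.0f,     0.0f, w / 2.0f, h,
                            w / 2.0f, 0.0f, w / 2.0f, h };
    pglViewportArrayvNV(0, 2, vp);
}
```

The same pattern applies to every PFNGL...PROC typedef in this header; since eglGetProcAddress may return a non-NULL stub for unrecognized names on some drivers, checking the extension string first remains necessary.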
+ +#ifndef GL_OVR_multiview +#define GL_OVR_multiview 1 +#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_NUM_VIEWS_OVR 0x9630 +#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_BASE_VIEW_INDEX_OVR 0x9632 +#define GL_MAX_VIEWS_OVR 0x9631 +#define GL_FRAMEBUFFER_INCOMPLETE_VIEW_TARGETS_OVR 0x9633 +typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERTEXTUREMULTIVIEWOVRPROC) (GLenum target, GLenum attachment, GLuint texture, GLint level, GLint baseViewIndex, GLsizei numViews); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glFramebufferTextureMultiviewOVR (GLenum target, GLenum attachment, GLuint texture, GLint level, GLint baseViewIndex, GLsizei numViews); +#endif +#endif /* GL_OVR_multiview */ + +#ifndef GL_OVR_multiview2 +#define GL_OVR_multiview2 1 +#endif /* GL_OVR_multiview2 */ + +#ifndef GL_OVR_multiview_multisampled_render_to_texture +#define GL_OVR_multiview_multisampled_render_to_texture 1 +typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERTEXTUREMULTISAMPLEMULTIVIEWOVRPROC) (GLenum target, GLenum attachment, GLuint texture, GLint level, GLsizei samples, GLint baseViewIndex, GLsizei numViews); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glFramebufferTextureMultisampleMultiviewOVR (GLenum target, GLenum attachment, GLuint texture, GLint level, GLsizei samples, GLint baseViewIndex, GLsizei numViews); +#endif +#endif /* GL_OVR_multiview_multisampled_render_to_texture */ + +#ifndef GL_QCOM_alpha_test +#define GL_QCOM_alpha_test 1 +#define GL_ALPHA_TEST_QCOM 0x0BC0 +#define GL_ALPHA_TEST_FUNC_QCOM 0x0BC1 +#define GL_ALPHA_TEST_REF_QCOM 0x0BC2 +typedef void (GL_APIENTRYP PFNGLALPHAFUNCQCOMPROC) (GLenum func, GLclampf ref); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glAlphaFuncQCOM (GLenum func, GLclampf ref); +#endif +#endif /* GL_QCOM_alpha_test */ + +#ifndef GL_QCOM_binning_control +#define GL_QCOM_binning_control 1 +#define GL_BINNING_CONTROL_HINT_QCOM 0x8FB0 +#define GL_CPU_OPTIMIZED_QCOM 0x8FB1 +#define GL_GPU_OPTIMIZED_QCOM 0x8FB2 +#define GL_RENDER_DIRECT_TO_FRAMEBUFFER_QCOM 0x8FB3 +#endif /* GL_QCOM_binning_control */ + +#ifndef GL_QCOM_driver_control +#define GL_QCOM_driver_control 1 +typedef void (GL_APIENTRYP PFNGLGETDRIVERCONTROLSQCOMPROC) (GLint *num, GLsizei size, GLuint *driverControls); +typedef void (GL_APIENTRYP PFNGLGETDRIVERCONTROLSTRINGQCOMPROC) (GLuint driverControl, GLsizei bufSize, GLsizei *length, GLchar *driverControlString); +typedef void (GL_APIENTRYP PFNGLENABLEDRIVERCONTROLQCOMPROC) (GLuint driverControl); +typedef void (GL_APIENTRYP PFNGLDISABLEDRIVERCONTROLQCOMPROC) (GLuint driverControl); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glGetDriverControlsQCOM (GLint *num, GLsizei size, GLuint *driverControls); +GL_APICALL void GL_APIENTRY glGetDriverControlStringQCOM (GLuint driverControl, GLsizei bufSize, GLsizei *length, GLchar *driverControlString); +GL_APICALL void GL_APIENTRY glEnableDriverControlQCOM (GLuint driverControl); +GL_APICALL void GL_APIENTRY glDisableDriverControlQCOM (GLuint driverControl); +#endif +#endif /* GL_QCOM_driver_control */ + +#ifndef GL_QCOM_extended_get +#define GL_QCOM_extended_get 1 +#define GL_TEXTURE_WIDTH_QCOM 0x8BD2 +#define GL_TEXTURE_HEIGHT_QCOM 0x8BD3 +#define GL_TEXTURE_DEPTH_QCOM 0x8BD4 +#define GL_TEXTURE_INTERNAL_FORMAT_QCOM 0x8BD5 +#define GL_TEXTURE_FORMAT_QCOM 0x8BD6 +#define GL_TEXTURE_TYPE_QCOM 0x8BD7 +#define GL_TEXTURE_IMAGE_VALID_QCOM 0x8BD8 +#define GL_TEXTURE_NUM_LEVELS_QCOM 0x8BD9 +#define GL_TEXTURE_TARGET_QCOM 0x8BDA +#define GL_TEXTURE_OBJECT_VALID_QCOM 0x8BDB +#define GL_STATE_RESTORE 0x8BDC +typedef void (GL_APIENTRYP PFNGLEXTGETTEXTURESQCOMPROC) (GLuint *textures, GLint maxTextures, GLint *numTextures); +typedef void (GL_APIENTRYP PFNGLEXTGETBUFFERSQCOMPROC) (GLuint *buffers, GLint maxBuffers, 
GLint *numBuffers); +typedef void (GL_APIENTRYP PFNGLEXTGETRENDERBUFFERSQCOMPROC) (GLuint *renderbuffers, GLint maxRenderbuffers, GLint *numRenderbuffers); +typedef void (GL_APIENTRYP PFNGLEXTGETFRAMEBUFFERSQCOMPROC) (GLuint *framebuffers, GLint maxFramebuffers, GLint *numFramebuffers); +typedef void (GL_APIENTRYP PFNGLEXTGETTEXLEVELPARAMETERIVQCOMPROC) (GLuint texture, GLenum face, GLint level, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLEXTTEXOBJECTSTATEOVERRIDEIQCOMPROC) (GLenum target, GLenum pname, GLint param); +typedef void (GL_APIENTRYP PFNGLEXTGETTEXSUBIMAGEQCOMPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, void *texels); +typedef void (GL_APIENTRYP PFNGLEXTGETBUFFERPOINTERVQCOMPROC) (GLenum target, void **params); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glExtGetTexturesQCOM (GLuint *textures, GLint maxTextures, GLint *numTextures); +GL_APICALL void GL_APIENTRY glExtGetBuffersQCOM (GLuint *buffers, GLint maxBuffers, GLint *numBuffers); +GL_APICALL void GL_APIENTRY glExtGetRenderbuffersQCOM (GLuint *renderbuffers, GLint maxRenderbuffers, GLint *numRenderbuffers); +GL_APICALL void GL_APIENTRY glExtGetFramebuffersQCOM (GLuint *framebuffers, GLint maxFramebuffers, GLint *numFramebuffers); +GL_APICALL void GL_APIENTRY glExtGetTexLevelParameterivQCOM (GLuint texture, GLenum face, GLint level, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glExtTexObjectStateOverrideiQCOM (GLenum target, GLenum pname, GLint param); +GL_APICALL void GL_APIENTRY glExtGetTexSubImageQCOM (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, void *texels); +GL_APICALL void GL_APIENTRY glExtGetBufferPointervQCOM (GLenum target, void **params); +#endif +#endif /* GL_QCOM_extended_get */ + +#ifndef GL_QCOM_extended_get2 +#define GL_QCOM_extended_get2 1 +typedef void (GL_APIENTRYP PFNGLEXTGETSHADERSQCOMPROC) (GLuint *shaders, GLint maxShaders, GLint *numShaders); +typedef void (GL_APIENTRYP PFNGLEXTGETPROGRAMSQCOMPROC) (GLuint *programs, GLint maxPrograms, GLint *numPrograms); +typedef GLboolean (GL_APIENTRYP PFNGLEXTISPROGRAMBINARYQCOMPROC) (GLuint program); +typedef void (GL_APIENTRYP PFNGLEXTGETPROGRAMBINARYSOURCEQCOMPROC) (GLuint program, GLenum shadertype, GLchar *source, GLint *length); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glExtGetShadersQCOM (GLuint *shaders, GLint maxShaders, GLint *numShaders); +GL_APICALL void GL_APIENTRY glExtGetProgramsQCOM (GLuint *programs, GLint maxPrograms, GLint *numPrograms); +GL_APICALL GLboolean GL_APIENTRY glExtIsProgramBinaryQCOM (GLuint program); +GL_APICALL void GL_APIENTRY glExtGetProgramBinarySourceQCOM (GLuint program, GLenum shadertype, GLchar *source, GLint *length); +#endif +#endif /* GL_QCOM_extended_get2 */ + +#ifndef GL_QCOM_framebuffer_foveated +#define GL_QCOM_framebuffer_foveated 1 +#define GL_FOVEATION_ENABLE_BIT_QCOM 0x00000001 +#define GL_FOVEATION_SCALED_BIN_METHOD_BIT_QCOM 0x00000002 +typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERFOVEATIONCONFIGQCOMPROC) (GLuint framebuffer, GLuint numLayers, GLuint focalPointsPerLayer, GLuint requestedFeatures, GLuint *providedFeatures); +typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERFOVEATIONPARAMETERSQCOMPROC) (GLuint framebuffer, GLuint layer, GLuint focalPoint, GLfloat focalX, GLfloat focalY, GLfloat gainX, GLfloat gainY, GLfloat foveaArea); +#ifdef 
GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glFramebufferFoveationConfigQCOM (GLuint framebuffer, GLuint numLayers, GLuint focalPointsPerLayer, GLuint requestedFeatures, GLuint *providedFeatures); +GL_APICALL void GL_APIENTRY glFramebufferFoveationParametersQCOM (GLuint framebuffer, GLuint layer, GLuint focalPoint, GLfloat focalX, GLfloat focalY, GLfloat gainX, GLfloat gainY, GLfloat foveaArea); +#endif +#endif /* GL_QCOM_framebuffer_foveated */ + +#ifndef GL_QCOM_perfmon_global_mode +#define GL_QCOM_perfmon_global_mode 1 +#define GL_PERFMON_GLOBAL_MODE_QCOM 0x8FA0 +#endif /* GL_QCOM_perfmon_global_mode */ + +#ifndef GL_QCOM_shader_framebuffer_fetch_noncoherent +#define GL_QCOM_shader_framebuffer_fetch_noncoherent 1 +#define GL_FRAMEBUFFER_FETCH_NONCOHERENT_QCOM 0x96A2 +typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERFETCHBARRIERQCOMPROC) (void); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glFramebufferFetchBarrierQCOM (void); +#endif +#endif /* GL_QCOM_shader_framebuffer_fetch_noncoherent */ + +#ifndef GL_QCOM_tiled_rendering +#define GL_QCOM_tiled_rendering 1 +#define GL_COLOR_BUFFER_BIT0_QCOM 0x00000001 +#define GL_COLOR_BUFFER_BIT1_QCOM 0x00000002 +#define GL_COLOR_BUFFER_BIT2_QCOM 0x00000004 +#define GL_COLOR_BUFFER_BIT3_QCOM 0x00000008 +#define GL_COLOR_BUFFER_BIT4_QCOM 0x00000010 +#define GL_COLOR_BUFFER_BIT5_QCOM 0x00000020 +#define GL_COLOR_BUFFER_BIT6_QCOM 0x00000040 +#define GL_COLOR_BUFFER_BIT7_QCOM 0x00000080 +#define GL_DEPTH_BUFFER_BIT0_QCOM 0x00000100 +#define GL_DEPTH_BUFFER_BIT1_QCOM 0x00000200 +#define GL_DEPTH_BUFFER_BIT2_QCOM 0x00000400 +#define GL_DEPTH_BUFFER_BIT3_QCOM 0x00000800 +#define GL_DEPTH_BUFFER_BIT4_QCOM 0x00001000 +#define GL_DEPTH_BUFFER_BIT5_QCOM 0x00002000 +#define GL_DEPTH_BUFFER_BIT6_QCOM 0x00004000 +#define GL_DEPTH_BUFFER_BIT7_QCOM 0x00008000 +#define GL_STENCIL_BUFFER_BIT0_QCOM 0x00010000 +#define GL_STENCIL_BUFFER_BIT1_QCOM 0x00020000 +#define GL_STENCIL_BUFFER_BIT2_QCOM 0x00040000 +#define GL_STENCIL_BUFFER_BIT3_QCOM 0x00080000 +#define GL_STENCIL_BUFFER_BIT4_QCOM 0x00100000 +#define GL_STENCIL_BUFFER_BIT5_QCOM 0x00200000 +#define GL_STENCIL_BUFFER_BIT6_QCOM 0x00400000 +#define GL_STENCIL_BUFFER_BIT7_QCOM 0x00800000 +#define GL_MULTISAMPLE_BUFFER_BIT0_QCOM 0x01000000 +#define GL_MULTISAMPLE_BUFFER_BIT1_QCOM 0x02000000 +#define GL_MULTISAMPLE_BUFFER_BIT2_QCOM 0x04000000 +#define GL_MULTISAMPLE_BUFFER_BIT3_QCOM 0x08000000 +#define GL_MULTISAMPLE_BUFFER_BIT4_QCOM 0x10000000 +#define GL_MULTISAMPLE_BUFFER_BIT5_QCOM 0x20000000 +#define GL_MULTISAMPLE_BUFFER_BIT6_QCOM 0x40000000 +#define GL_MULTISAMPLE_BUFFER_BIT7_QCOM 0x80000000 +typedef void (GL_APIENTRYP PFNGLSTARTTILINGQCOMPROC) (GLuint x, GLuint y, GLuint width, GLuint height, GLbitfield preserveMask); +typedef void (GL_APIENTRYP PFNGLENDTILINGQCOMPROC) (GLbitfield preserveMask); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glStartTilingQCOM (GLuint x, GLuint y, GLuint width, GLuint height, GLbitfield preserveMask); +GL_APICALL void GL_APIENTRY glEndTilingQCOM (GLbitfield preserveMask); +#endif +#endif /* GL_QCOM_tiled_rendering */ + +#ifndef GL_QCOM_writeonly_rendering +#define GL_QCOM_writeonly_rendering 1 +#define GL_WRITEONLY_RENDERING_QCOM 0x8823 +#endif /* GL_QCOM_writeonly_rendering */ + +#ifndef GL_VIV_shader_binary +#define GL_VIV_shader_binary 1 +#define GL_SHADER_BINARY_VIV 0x8FC4 +#endif /* GL_VIV_shader_binary */ + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/sources/pvrsdk/Include/GLES2/gl2platform.h b/sources/pvrsdk/Include/GLES2/gl2platform.h new 
file mode 100755 index 00000000..8b7e9d82 --- /dev/null +++ b/sources/pvrsdk/Include/GLES2/gl2platform.h @@ -0,0 +1,38 @@ +#ifndef __gl2platform_h_ +#define __gl2platform_h_ + +/* +** Copyright (c) 2017 The Khronos Group Inc. +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +/* Platform-specific types and definitions for OpenGL ES 2.X gl2.h + * + * Adopters may modify khrplatform.h and this file to suit their platform. + * Please contribute modifications back to Khronos as pull requests on the + * public github repository: + * https://github.com/KhronosGroup/OpenGL-Registry + */ + +#include "KHR/khrplatform.h" + +#ifndef GL_APICALL +#define GL_APICALL KHRONOS_APICALL +#endif + +#ifndef GL_APIENTRY +#define GL_APIENTRY KHRONOS_APIENTRY +#endif + +#endif /* __gl2platform_h_ */ diff --git a/sources/pvrsdk/Include/GLES3/gl3.h b/sources/pvrsdk/Include/GLES3/gl3.h new file mode 100755 index 00000000..d29f4307 --- /dev/null +++ b/sources/pvrsdk/Include/GLES3/gl3.h @@ -0,0 +1,1211 @@ +#ifndef __gl3_h_ +#define __gl3_h_ 1 + +#ifdef __cplusplus +extern "C" { +#endif + +/* +** Copyright (c) 2013-2017 The Khronos Group Inc. +** +** Permission is hereby granted, free of charge, to any person obtaining a +** copy of this software and/or associated documentation files (the +** "Materials"), to deal in the Materials without restriction, including +** without limitation the rights to use, copy, modify, merge, publish, +** distribute, sublicense, and/or sell copies of the Materials, and to +** permit persons to whom the Materials are furnished to do so, subject to +** the following conditions: +** +** The above copyright notice and this permission notice shall be included +** in all copies or substantial portions of the Materials. +** +** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +** MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS. +*/ +/* +** This header is generated from the Khronos OpenGL / OpenGL ES XML +** API Registry. 
The current version of the Registry, generator scripts +** used to make the header, and the header can be found at +** https://github.com/KhronosGroup/OpenGL-Registry +*/ + +#include <GLES3/gl3platform.h> + +#ifndef GL_APIENTRYP +#define GL_APIENTRYP GL_APIENTRY* +#endif + +#ifndef GL_GLES_PROTOTYPES +#define GL_GLES_PROTOTYPES 1 +#endif + +/* Generated on date 20170209 */ + +/* Generated C header for: + * API: gles2 + * Profile: common + * Versions considered: 2\.[0-9]|3\.0 + * Versions emitted: .* + * Default extensions included: None + * Additional extensions included: _nomatch_^ + * Extensions removed: _nomatch_^ + */ + +#ifndef GL_ES_VERSION_2_0 +#define GL_ES_VERSION_2_0 1 +#include <KHR/khrplatform.h> +typedef khronos_int8_t GLbyte; +typedef khronos_float_t GLclampf; +typedef khronos_int32_t GLfixed; +typedef short GLshort; +typedef unsigned short GLushort; +typedef void GLvoid; +typedef struct __GLsync *GLsync; +typedef khronos_int64_t GLint64; +typedef khronos_uint64_t GLuint64; +typedef unsigned int GLenum; +typedef unsigned int GLuint; +typedef char GLchar; +typedef khronos_float_t GLfloat; +typedef khronos_ssize_t GLsizeiptr; +typedef khronos_intptr_t GLintptr; +typedef unsigned int GLbitfield; +typedef int GLint; +typedef unsigned char GLboolean; +typedef int GLsizei; +typedef khronos_uint8_t GLubyte; +#define GL_DEPTH_BUFFER_BIT 0x00000100 +#define GL_STENCIL_BUFFER_BIT 0x00000400 +#define GL_COLOR_BUFFER_BIT 0x00004000 +#define GL_FALSE 0 +#define GL_TRUE 1 +#define GL_POINTS 0x0000 +#define GL_LINES 0x0001 +#define GL_LINE_LOOP 0x0002 +#define GL_LINE_STRIP 0x0003 +#define GL_TRIANGLES 0x0004 +#define GL_TRIANGLE_STRIP 0x0005 +#define GL_TRIANGLE_FAN 0x0006 +#define GL_ZERO 0 +#define GL_ONE 1 +#define GL_SRC_COLOR 0x0300 +#define GL_ONE_MINUS_SRC_COLOR 0x0301 +#define GL_SRC_ALPHA 0x0302 +#define GL_ONE_MINUS_SRC_ALPHA 0x0303 +#define GL_DST_ALPHA 0x0304 +#define GL_ONE_MINUS_DST_ALPHA 0x0305 +#define GL_DST_COLOR 0x0306 +#define GL_ONE_MINUS_DST_COLOR 0x0307 +#define GL_SRC_ALPHA_SATURATE 0x0308 +#define GL_FUNC_ADD 0x8006 +#define GL_BLEND_EQUATION 0x8009 +#define GL_BLEND_EQUATION_RGB 0x8009 +#define GL_BLEND_EQUATION_ALPHA 0x883D +#define GL_FUNC_SUBTRACT 0x800A +#define GL_FUNC_REVERSE_SUBTRACT 0x800B +#define GL_BLEND_DST_RGB 0x80C8 +#define GL_BLEND_SRC_RGB 0x80C9 +#define GL_BLEND_DST_ALPHA 0x80CA +#define GL_BLEND_SRC_ALPHA 0x80CB +#define GL_CONSTANT_COLOR 0x8001 +#define GL_ONE_MINUS_CONSTANT_COLOR 0x8002 +#define GL_CONSTANT_ALPHA 0x8003 +#define GL_ONE_MINUS_CONSTANT_ALPHA 0x8004 +#define GL_BLEND_COLOR 0x8005 +#define GL_ARRAY_BUFFER 0x8892 +#define GL_ELEMENT_ARRAY_BUFFER 0x8893 +#define GL_ARRAY_BUFFER_BINDING 0x8894 +#define GL_ELEMENT_ARRAY_BUFFER_BINDING 0x8895 +#define GL_STREAM_DRAW 0x88E0 +#define GL_STATIC_DRAW 0x88E4 +#define GL_DYNAMIC_DRAW 0x88E8 +#define GL_BUFFER_SIZE 0x8764 +#define GL_BUFFER_USAGE 0x8765 +#define GL_CURRENT_VERTEX_ATTRIB 0x8626 +#define GL_FRONT 0x0404 +#define GL_BACK 0x0405 +#define GL_FRONT_AND_BACK 0x0408 +#define GL_TEXTURE_2D 0x0DE1 +#define GL_CULL_FACE 0x0B44 +#define GL_BLEND 0x0BE2 +#define GL_DITHER 0x0BD0 +#define GL_STENCIL_TEST 0x0B90 +#define GL_DEPTH_TEST 0x0B71 +#define GL_SCISSOR_TEST 0x0C11 +#define GL_POLYGON_OFFSET_FILL 0x8037 +#define GL_SAMPLE_ALPHA_TO_COVERAGE 0x809E +#define GL_SAMPLE_COVERAGE 0x80A0 +#define GL_NO_ERROR 0 +#define GL_INVALID_ENUM 0x0500 +#define GL_INVALID_VALUE 0x0501 +#define GL_INVALID_OPERATION 0x0502 +#define GL_OUT_OF_MEMORY 0x0505 +#define GL_CW 0x0900 +#define GL_CCW 0x0901 +#define GL_LINE_WIDTH 0x0B21 +#define 
GL_ALIASED_POINT_SIZE_RANGE 0x846D +#define GL_ALIASED_LINE_WIDTH_RANGE 0x846E +#define GL_CULL_FACE_MODE 0x0B45 +#define GL_FRONT_FACE 0x0B46 +#define GL_DEPTH_RANGE 0x0B70 +#define GL_DEPTH_WRITEMASK 0x0B72 +#define GL_DEPTH_CLEAR_VALUE 0x0B73 +#define GL_DEPTH_FUNC 0x0B74 +#define GL_STENCIL_CLEAR_VALUE 0x0B91 +#define GL_STENCIL_FUNC 0x0B92 +#define GL_STENCIL_FAIL 0x0B94 +#define GL_STENCIL_PASS_DEPTH_FAIL 0x0B95 +#define GL_STENCIL_PASS_DEPTH_PASS 0x0B96 +#define GL_STENCIL_REF 0x0B97 +#define GL_STENCIL_VALUE_MASK 0x0B93 +#define GL_STENCIL_WRITEMASK 0x0B98 +#define GL_STENCIL_BACK_FUNC 0x8800 +#define GL_STENCIL_BACK_FAIL 0x8801 +#define GL_STENCIL_BACK_PASS_DEPTH_FAIL 0x8802 +#define GL_STENCIL_BACK_PASS_DEPTH_PASS 0x8803 +#define GL_STENCIL_BACK_REF 0x8CA3 +#define GL_STENCIL_BACK_VALUE_MASK 0x8CA4 +#define GL_STENCIL_BACK_WRITEMASK 0x8CA5 +#define GL_VIEWPORT 0x0BA2 +#define GL_SCISSOR_BOX 0x0C10 +#define GL_COLOR_CLEAR_VALUE 0x0C22 +#define GL_COLOR_WRITEMASK 0x0C23 +#define GL_UNPACK_ALIGNMENT 0x0CF5 +#define GL_PACK_ALIGNMENT 0x0D05 +#define GL_MAX_TEXTURE_SIZE 0x0D33 +#define GL_MAX_VIEWPORT_DIMS 0x0D3A +#define GL_SUBPIXEL_BITS 0x0D50 +#define GL_RED_BITS 0x0D52 +#define GL_GREEN_BITS 0x0D53 +#define GL_BLUE_BITS 0x0D54 +#define GL_ALPHA_BITS 0x0D55 +#define GL_DEPTH_BITS 0x0D56 +#define GL_STENCIL_BITS 0x0D57 +#define GL_POLYGON_OFFSET_UNITS 0x2A00 +#define GL_POLYGON_OFFSET_FACTOR 0x8038 +#define GL_TEXTURE_BINDING_2D 0x8069 +#define GL_SAMPLE_BUFFERS 0x80A8 +#define GL_SAMPLES 0x80A9 +#define GL_SAMPLE_COVERAGE_VALUE 0x80AA +#define GL_SAMPLE_COVERAGE_INVERT 0x80AB +#define GL_NUM_COMPRESSED_TEXTURE_FORMATS 0x86A2 +#define GL_COMPRESSED_TEXTURE_FORMATS 0x86A3 +#define GL_DONT_CARE 0x1100 +#define GL_FASTEST 0x1101 +#define GL_NICEST 0x1102 +#define GL_GENERATE_MIPMAP_HINT 0x8192 +#define GL_BYTE 0x1400 +#define GL_UNSIGNED_BYTE 0x1401 +#define GL_SHORT 0x1402 +#define GL_UNSIGNED_SHORT 0x1403 +#define GL_INT 0x1404 +#define GL_UNSIGNED_INT 0x1405 +#define GL_FLOAT 0x1406 +#define GL_FIXED 0x140C +#define GL_DEPTH_COMPONENT 0x1902 +#define GL_ALPHA 0x1906 +#define GL_RGB 0x1907 +#define GL_RGBA 0x1908 +#define GL_LUMINANCE 0x1909 +#define GL_LUMINANCE_ALPHA 0x190A +#define GL_UNSIGNED_SHORT_4_4_4_4 0x8033 +#define GL_UNSIGNED_SHORT_5_5_5_1 0x8034 +#define GL_UNSIGNED_SHORT_5_6_5 0x8363 +#define GL_FRAGMENT_SHADER 0x8B30 +#define GL_VERTEX_SHADER 0x8B31 +#define GL_MAX_VERTEX_ATTRIBS 0x8869 +#define GL_MAX_VERTEX_UNIFORM_VECTORS 0x8DFB +#define GL_MAX_VARYING_VECTORS 0x8DFC +#define GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS 0x8B4D +#define GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS 0x8B4C +#define GL_MAX_TEXTURE_IMAGE_UNITS 0x8872 +#define GL_MAX_FRAGMENT_UNIFORM_VECTORS 0x8DFD +#define GL_SHADER_TYPE 0x8B4F +#define GL_DELETE_STATUS 0x8B80 +#define GL_LINK_STATUS 0x8B82 +#define GL_VALIDATE_STATUS 0x8B83 +#define GL_ATTACHED_SHADERS 0x8B85 +#define GL_ACTIVE_UNIFORMS 0x8B86 +#define GL_ACTIVE_UNIFORM_MAX_LENGTH 0x8B87 +#define GL_ACTIVE_ATTRIBUTES 0x8B89 +#define GL_ACTIVE_ATTRIBUTE_MAX_LENGTH 0x8B8A +#define GL_SHADING_LANGUAGE_VERSION 0x8B8C +#define GL_CURRENT_PROGRAM 0x8B8D +#define GL_NEVER 0x0200 +#define GL_LESS 0x0201 +#define GL_EQUAL 0x0202 +#define GL_LEQUAL 0x0203 +#define GL_GREATER 0x0204 +#define GL_NOTEQUAL 0x0205 +#define GL_GEQUAL 0x0206 +#define GL_ALWAYS 0x0207 +#define GL_KEEP 0x1E00 +#define GL_REPLACE 0x1E01 +#define GL_INCR 0x1E02 +#define GL_DECR 0x1E03 +#define GL_INVERT 0x150A +#define GL_INCR_WRAP 0x8507 +#define GL_DECR_WRAP 0x8508 +#define GL_VENDOR 0x1F00 
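GL_VENDOR just above, together with the GL_RENDERER / GL_VERSION / GL_EXTENSIONS tokens that follow, are the arguments to glGetString, whose typedef and prototype appear further down in this header. A minimal start-up diagnostics sketch, assuming a context is already current; the strstr probe is deliberately naive and can false-match an extension whose name is a prefix of a longer one:

```c
#include <stdio.h>
#include <string.h>
#include <GLES3/gl3.h>

static int has_extension(const char *name)
{
    /* Naive substring probe of the space-separated extension list. */
    const char *all = (const char *)glGetString(GL_EXTENSIONS);
    return all != NULL && strstr(all, name) != NULL;
}

void log_context_info(void)
{
    printf("vendor  : %s\n", (const char *)glGetString(GL_VENDOR));
    printf("renderer: %s\n", (const char *)glGetString(GL_RENDERER));
    printf("version : %s\n", (const char *)glGetString(GL_VERSION));
    if (has_extension("GL_NV_path_rendering"))
        printf("NV path rendering is available\n");
}
```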
+#define GL_RENDERER 0x1F01 +#define GL_VERSION 0x1F02 +#define GL_EXTENSIONS 0x1F03 +#define GL_NEAREST 0x2600 +#define GL_LINEAR 0x2601 +#define GL_NEAREST_MIPMAP_NEAREST 0x2700 +#define GL_LINEAR_MIPMAP_NEAREST 0x2701 +#define GL_NEAREST_MIPMAP_LINEAR 0x2702 +#define GL_LINEAR_MIPMAP_LINEAR 0x2703 +#define GL_TEXTURE_MAG_FILTER 0x2800 +#define GL_TEXTURE_MIN_FILTER 0x2801 +#define GL_TEXTURE_WRAP_S 0x2802 +#define GL_TEXTURE_WRAP_T 0x2803 +#define GL_TEXTURE 0x1702 +#define GL_TEXTURE_CUBE_MAP 0x8513 +#define GL_TEXTURE_BINDING_CUBE_MAP 0x8514 +#define GL_TEXTURE_CUBE_MAP_POSITIVE_X 0x8515 +#define GL_TEXTURE_CUBE_MAP_NEGATIVE_X 0x8516 +#define GL_TEXTURE_CUBE_MAP_POSITIVE_Y 0x8517 +#define GL_TEXTURE_CUBE_MAP_NEGATIVE_Y 0x8518 +#define GL_TEXTURE_CUBE_MAP_POSITIVE_Z 0x8519 +#define GL_TEXTURE_CUBE_MAP_NEGATIVE_Z 0x851A +#define GL_MAX_CUBE_MAP_TEXTURE_SIZE 0x851C +#define GL_TEXTURE0 0x84C0 +#define GL_TEXTURE1 0x84C1 +#define GL_TEXTURE2 0x84C2 +#define GL_TEXTURE3 0x84C3 +#define GL_TEXTURE4 0x84C4 +#define GL_TEXTURE5 0x84C5 +#define GL_TEXTURE6 0x84C6 +#define GL_TEXTURE7 0x84C7 +#define GL_TEXTURE8 0x84C8 +#define GL_TEXTURE9 0x84C9 +#define GL_TEXTURE10 0x84CA +#define GL_TEXTURE11 0x84CB +#define GL_TEXTURE12 0x84CC +#define GL_TEXTURE13 0x84CD +#define GL_TEXTURE14 0x84CE +#define GL_TEXTURE15 0x84CF +#define GL_TEXTURE16 0x84D0 +#define GL_TEXTURE17 0x84D1 +#define GL_TEXTURE18 0x84D2 +#define GL_TEXTURE19 0x84D3 +#define GL_TEXTURE20 0x84D4 +#define GL_TEXTURE21 0x84D5 +#define GL_TEXTURE22 0x84D6 +#define GL_TEXTURE23 0x84D7 +#define GL_TEXTURE24 0x84D8 +#define GL_TEXTURE25 0x84D9 +#define GL_TEXTURE26 0x84DA +#define GL_TEXTURE27 0x84DB +#define GL_TEXTURE28 0x84DC +#define GL_TEXTURE29 0x84DD +#define GL_TEXTURE30 0x84DE +#define GL_TEXTURE31 0x84DF +#define GL_ACTIVE_TEXTURE 0x84E0 +#define GL_REPEAT 0x2901 +#define GL_CLAMP_TO_EDGE 0x812F +#define GL_MIRRORED_REPEAT 0x8370 +#define GL_FLOAT_VEC2 0x8B50 +#define GL_FLOAT_VEC3 0x8B51 +#define GL_FLOAT_VEC4 0x8B52 +#define GL_INT_VEC2 0x8B53 +#define GL_INT_VEC3 0x8B54 +#define GL_INT_VEC4 0x8B55 +#define GL_BOOL 0x8B56 +#define GL_BOOL_VEC2 0x8B57 +#define GL_BOOL_VEC3 0x8B58 +#define GL_BOOL_VEC4 0x8B59 +#define GL_FLOAT_MAT2 0x8B5A +#define GL_FLOAT_MAT3 0x8B5B +#define GL_FLOAT_MAT4 0x8B5C +#define GL_SAMPLER_2D 0x8B5E +#define GL_SAMPLER_CUBE 0x8B60 +#define GL_VERTEX_ATTRIB_ARRAY_ENABLED 0x8622 +#define GL_VERTEX_ATTRIB_ARRAY_SIZE 0x8623 +#define GL_VERTEX_ATTRIB_ARRAY_STRIDE 0x8624 +#define GL_VERTEX_ATTRIB_ARRAY_TYPE 0x8625 +#define GL_VERTEX_ATTRIB_ARRAY_NORMALIZED 0x886A +#define GL_VERTEX_ATTRIB_ARRAY_POINTER 0x8645 +#define GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING 0x889F +#define GL_IMPLEMENTATION_COLOR_READ_TYPE 0x8B9A +#define GL_IMPLEMENTATION_COLOR_READ_FORMAT 0x8B9B +#define GL_COMPILE_STATUS 0x8B81 +#define GL_INFO_LOG_LENGTH 0x8B84 +#define GL_SHADER_SOURCE_LENGTH 0x8B88 +#define GL_SHADER_COMPILER 0x8DFA +#define GL_SHADER_BINARY_FORMATS 0x8DF8 +#define GL_NUM_SHADER_BINARY_FORMATS 0x8DF9 +#define GL_LOW_FLOAT 0x8DF0 +#define GL_MEDIUM_FLOAT 0x8DF1 +#define GL_HIGH_FLOAT 0x8DF2 +#define GL_LOW_INT 0x8DF3 +#define GL_MEDIUM_INT 0x8DF4 +#define GL_HIGH_INT 0x8DF5 +#define GL_FRAMEBUFFER 0x8D40 +#define GL_RENDERBUFFER 0x8D41 +#define GL_RGBA4 0x8056 +#define GL_RGB5_A1 0x8057 +#define GL_RGB565 0x8D62 +#define GL_DEPTH_COMPONENT16 0x81A5 +#define GL_STENCIL_INDEX8 0x8D48 +#define GL_RENDERBUFFER_WIDTH 0x8D42 +#define GL_RENDERBUFFER_HEIGHT 0x8D43 +#define GL_RENDERBUFFER_INTERNAL_FORMAT 0x8D44 +#define 
GL_RENDERBUFFER_RED_SIZE 0x8D50 +#define GL_RENDERBUFFER_GREEN_SIZE 0x8D51 +#define GL_RENDERBUFFER_BLUE_SIZE 0x8D52 +#define GL_RENDERBUFFER_ALPHA_SIZE 0x8D53 +#define GL_RENDERBUFFER_DEPTH_SIZE 0x8D54 +#define GL_RENDERBUFFER_STENCIL_SIZE 0x8D55 +#define GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE 0x8CD0 +#define GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME 0x8CD1 +#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL 0x8CD2 +#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE 0x8CD3 +#define GL_COLOR_ATTACHMENT0 0x8CE0 +#define GL_DEPTH_ATTACHMENT 0x8D00 +#define GL_STENCIL_ATTACHMENT 0x8D20 +#define GL_NONE 0 +#define GL_FRAMEBUFFER_COMPLETE 0x8CD5 +#define GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT 0x8CD6 +#define GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT 0x8CD7 +#define GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS 0x8CD9 +#define GL_FRAMEBUFFER_UNSUPPORTED 0x8CDD +#define GL_FRAMEBUFFER_BINDING 0x8CA6 +#define GL_RENDERBUFFER_BINDING 0x8CA7 +#define GL_MAX_RENDERBUFFER_SIZE 0x84E8 +#define GL_INVALID_FRAMEBUFFER_OPERATION 0x0506 +typedef void (GL_APIENTRYP PFNGLACTIVETEXTUREPROC) (GLenum texture); +typedef void (GL_APIENTRYP PFNGLATTACHSHADERPROC) (GLuint program, GLuint shader); +typedef void (GL_APIENTRYP PFNGLBINDATTRIBLOCATIONPROC) (GLuint program, GLuint index, const GLchar *name); +typedef void (GL_APIENTRYP PFNGLBINDBUFFERPROC) (GLenum target, GLuint buffer); +typedef void (GL_APIENTRYP PFNGLBINDFRAMEBUFFERPROC) (GLenum target, GLuint framebuffer); +typedef void (GL_APIENTRYP PFNGLBINDRENDERBUFFERPROC) (GLenum target, GLuint renderbuffer); +typedef void (GL_APIENTRYP PFNGLBINDTEXTUREPROC) (GLenum target, GLuint texture); +typedef void (GL_APIENTRYP PFNGLBLENDCOLORPROC) (GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha); +typedef void (GL_APIENTRYP PFNGLBLENDEQUATIONPROC) (GLenum mode); +typedef void (GL_APIENTRYP PFNGLBLENDEQUATIONSEPARATEPROC) (GLenum modeRGB, GLenum modeAlpha); +typedef void (GL_APIENTRYP PFNGLBLENDFUNCPROC) (GLenum sfactor, GLenum dfactor); +typedef void (GL_APIENTRYP PFNGLBLENDFUNCSEPARATEPROC) (GLenum sfactorRGB, GLenum dfactorRGB, GLenum sfactorAlpha, GLenum dfactorAlpha); +typedef void (GL_APIENTRYP PFNGLBUFFERDATAPROC) (GLenum target, GLsizeiptr size, const void *data, GLenum usage); +typedef void (GL_APIENTRYP PFNGLBUFFERSUBDATAPROC) (GLenum target, GLintptr offset, GLsizeiptr size, const void *data); +typedef GLenum (GL_APIENTRYP PFNGLCHECKFRAMEBUFFERSTATUSPROC) (GLenum target); +typedef void (GL_APIENTRYP PFNGLCLEARPROC) (GLbitfield mask); +typedef void (GL_APIENTRYP PFNGLCLEARCOLORPROC) (GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha); +typedef void (GL_APIENTRYP PFNGLCLEARDEPTHFPROC) (GLfloat d); +typedef void (GL_APIENTRYP PFNGLCLEARSTENCILPROC) (GLint s); +typedef void (GL_APIENTRYP PFNGLCOLORMASKPROC) (GLboolean red, GLboolean green, GLboolean blue, GLboolean alpha); +typedef void (GL_APIENTRYP PFNGLCOMPILESHADERPROC) (GLuint shader); +typedef void (GL_APIENTRYP PFNGLCOMPRESSEDTEXIMAGE2DPROC) (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLsizei imageSize, const void *data); +typedef void (GL_APIENTRYP PFNGLCOMPRESSEDTEXSUBIMAGE2DPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, const void *data); +typedef void (GL_APIENTRYP PFNGLCOPYTEXIMAGE2DPROC) (GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLsizei height, GLint border); +typedef void (GL_APIENTRYP PFNGLCOPYTEXSUBIMAGE2DPROC) (GLenum 
target, GLint level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height); +typedef GLuint (GL_APIENTRYP PFNGLCREATEPROGRAMPROC) (void); +typedef GLuint (GL_APIENTRYP PFNGLCREATESHADERPROC) (GLenum type); +typedef void (GL_APIENTRYP PFNGLCULLFACEPROC) (GLenum mode); +typedef void (GL_APIENTRYP PFNGLDELETEBUFFERSPROC) (GLsizei n, const GLuint *buffers); +typedef void (GL_APIENTRYP PFNGLDELETEFRAMEBUFFERSPROC) (GLsizei n, const GLuint *framebuffers); +typedef void (GL_APIENTRYP PFNGLDELETEPROGRAMPROC) (GLuint program); +typedef void (GL_APIENTRYP PFNGLDELETERENDERBUFFERSPROC) (GLsizei n, const GLuint *renderbuffers); +typedef void (GL_APIENTRYP PFNGLDELETESHADERPROC) (GLuint shader); +typedef void (GL_APIENTRYP PFNGLDELETETEXTURESPROC) (GLsizei n, const GLuint *textures); +typedef void (GL_APIENTRYP PFNGLDEPTHFUNCPROC) (GLenum func); +typedef void (GL_APIENTRYP PFNGLDEPTHMASKPROC) (GLboolean flag); +typedef void (GL_APIENTRYP PFNGLDEPTHRANGEFPROC) (GLfloat n, GLfloat f); +typedef void (GL_APIENTRYP PFNGLDETACHSHADERPROC) (GLuint program, GLuint shader); +typedef void (GL_APIENTRYP PFNGLDISABLEPROC) (GLenum cap); +typedef void (GL_APIENTRYP PFNGLDISABLEVERTEXATTRIBARRAYPROC) (GLuint index); +typedef void (GL_APIENTRYP PFNGLDRAWARRAYSPROC) (GLenum mode, GLint first, GLsizei count); +typedef void (GL_APIENTRYP PFNGLDRAWELEMENTSPROC) (GLenum mode, GLsizei count, GLenum type, const void *indices); +typedef void (GL_APIENTRYP PFNGLENABLEPROC) (GLenum cap); +typedef void (GL_APIENTRYP PFNGLENABLEVERTEXATTRIBARRAYPROC) (GLuint index); +typedef void (GL_APIENTRYP PFNGLFINISHPROC) (void); +typedef void (GL_APIENTRYP PFNGLFLUSHPROC) (void); +typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERRENDERBUFFERPROC) (GLenum target, GLenum attachment, GLenum renderbuffertarget, GLuint renderbuffer); +typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERTEXTURE2DPROC) (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level); +typedef void (GL_APIENTRYP PFNGLFRONTFACEPROC) (GLenum mode); +typedef void (GL_APIENTRYP PFNGLGENBUFFERSPROC) (GLsizei n, GLuint *buffers); +typedef void (GL_APIENTRYP PFNGLGENERATEMIPMAPPROC) (GLenum target); +typedef void (GL_APIENTRYP PFNGLGENFRAMEBUFFERSPROC) (GLsizei n, GLuint *framebuffers); +typedef void (GL_APIENTRYP PFNGLGENRENDERBUFFERSPROC) (GLsizei n, GLuint *renderbuffers); +typedef void (GL_APIENTRYP PFNGLGENTEXTURESPROC) (GLsizei n, GLuint *textures); +typedef void (GL_APIENTRYP PFNGLGETACTIVEATTRIBPROC) (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLint *size, GLenum *type, GLchar *name); +typedef void (GL_APIENTRYP PFNGLGETACTIVEUNIFORMPROC) (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLint *size, GLenum *type, GLchar *name); +typedef void (GL_APIENTRYP PFNGLGETATTACHEDSHADERSPROC) (GLuint program, GLsizei maxCount, GLsizei *count, GLuint *shaders); +typedef GLint (GL_APIENTRYP PFNGLGETATTRIBLOCATIONPROC) (GLuint program, const GLchar *name); +typedef void (GL_APIENTRYP PFNGLGETBOOLEANVPROC) (GLenum pname, GLboolean *data); +typedef void (GL_APIENTRYP PFNGLGETBUFFERPARAMETERIVPROC) (GLenum target, GLenum pname, GLint *params); +typedef GLenum (GL_APIENTRYP PFNGLGETERRORPROC) (void); +typedef void (GL_APIENTRYP PFNGLGETFLOATVPROC) (GLenum pname, GLfloat *data); +typedef void (GL_APIENTRYP PFNGLGETFRAMEBUFFERATTACHMENTPARAMETERIVPROC) (GLenum target, GLenum attachment, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETINTEGERVPROC) (GLenum pname, GLint *data); +typedef void 
(GL_APIENTRYP PFNGLGETPROGRAMIVPROC) (GLuint program, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETPROGRAMINFOLOGPROC) (GLuint program, GLsizei bufSize, GLsizei *length, GLchar *infoLog); +typedef void (GL_APIENTRYP PFNGLGETRENDERBUFFERPARAMETERIVPROC) (GLenum target, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETSHADERIVPROC) (GLuint shader, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETSHADERINFOLOGPROC) (GLuint shader, GLsizei bufSize, GLsizei *length, GLchar *infoLog); +typedef void (GL_APIENTRYP PFNGLGETSHADERPRECISIONFORMATPROC) (GLenum shadertype, GLenum precisiontype, GLint *range, GLint *precision); +typedef void (GL_APIENTRYP PFNGLGETSHADERSOURCEPROC) (GLuint shader, GLsizei bufSize, GLsizei *length, GLchar *source); +typedef const GLubyte *(GL_APIENTRYP PFNGLGETSTRINGPROC) (GLenum name); +typedef void (GL_APIENTRYP PFNGLGETTEXPARAMETERFVPROC) (GLenum target, GLenum pname, GLfloat *params); +typedef void (GL_APIENTRYP PFNGLGETTEXPARAMETERIVPROC) (GLenum target, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETUNIFORMFVPROC) (GLuint program, GLint location, GLfloat *params); +typedef void (GL_APIENTRYP PFNGLGETUNIFORMIVPROC) (GLuint program, GLint location, GLint *params); +typedef GLint (GL_APIENTRYP PFNGLGETUNIFORMLOCATIONPROC) (GLuint program, const GLchar *name); +typedef void (GL_APIENTRYP PFNGLGETVERTEXATTRIBFVPROC) (GLuint index, GLenum pname, GLfloat *params); +typedef void (GL_APIENTRYP PFNGLGETVERTEXATTRIBIVPROC) (GLuint index, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETVERTEXATTRIBPOINTERVPROC) (GLuint index, GLenum pname, void **pointer); +typedef void (GL_APIENTRYP PFNGLHINTPROC) (GLenum target, GLenum mode); +typedef GLboolean (GL_APIENTRYP PFNGLISBUFFERPROC) (GLuint buffer); +typedef GLboolean (GL_APIENTRYP PFNGLISENABLEDPROC) (GLenum cap); +typedef GLboolean (GL_APIENTRYP PFNGLISFRAMEBUFFERPROC) (GLuint framebuffer); +typedef GLboolean (GL_APIENTRYP PFNGLISPROGRAMPROC) (GLuint program); +typedef GLboolean (GL_APIENTRYP PFNGLISRENDERBUFFERPROC) (GLuint renderbuffer); +typedef GLboolean (GL_APIENTRYP PFNGLISSHADERPROC) (GLuint shader); +typedef GLboolean (GL_APIENTRYP PFNGLISTEXTUREPROC) (GLuint texture); +typedef void (GL_APIENTRYP PFNGLLINEWIDTHPROC) (GLfloat width); +typedef void (GL_APIENTRYP PFNGLLINKPROGRAMPROC) (GLuint program); +typedef void (GL_APIENTRYP PFNGLPIXELSTOREIPROC) (GLenum pname, GLint param); +typedef void (GL_APIENTRYP PFNGLPOLYGONOFFSETPROC) (GLfloat factor, GLfloat units); +typedef void (GL_APIENTRYP PFNGLREADPIXELSPROC) (GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, void *pixels); +typedef void (GL_APIENTRYP PFNGLRELEASESHADERCOMPILERPROC) (void); +typedef void (GL_APIENTRYP PFNGLRENDERBUFFERSTORAGEPROC) (GLenum target, GLenum internalformat, GLsizei width, GLsizei height); +typedef void (GL_APIENTRYP PFNGLSAMPLECOVERAGEPROC) (GLfloat value, GLboolean invert); +typedef void (GL_APIENTRYP PFNGLSCISSORPROC) (GLint x, GLint y, GLsizei width, GLsizei height); +typedef void (GL_APIENTRYP PFNGLSHADERBINARYPROC) (GLsizei count, const GLuint *shaders, GLenum binaryformat, const void *binary, GLsizei length); +typedef void (GL_APIENTRYP PFNGLSHADERSOURCEPROC) (GLuint shader, GLsizei count, const GLchar *const*string, const GLint *length); +typedef void (GL_APIENTRYP PFNGLSTENCILFUNCPROC) (GLenum func, GLint ref, GLuint mask); +typedef void (GL_APIENTRYP PFNGLSTENCILFUNCSEPARATEPROC) (GLenum face, GLenum func, GLint ref, 
GLuint mask); +typedef void (GL_APIENTRYP PFNGLSTENCILMASKPROC) (GLuint mask); +typedef void (GL_APIENTRYP PFNGLSTENCILMASKSEPARATEPROC) (GLenum face, GLuint mask); +typedef void (GL_APIENTRYP PFNGLSTENCILOPPROC) (GLenum fail, GLenum zfail, GLenum zpass); +typedef void (GL_APIENTRYP PFNGLSTENCILOPSEPARATEPROC) (GLenum face, GLenum sfail, GLenum dpfail, GLenum dppass); +typedef void (GL_APIENTRYP PFNGLTEXIMAGE2DPROC) (GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, const void *pixels); +typedef void (GL_APIENTRYP PFNGLTEXPARAMETERFPROC) (GLenum target, GLenum pname, GLfloat param); +typedef void (GL_APIENTRYP PFNGLTEXPARAMETERFVPROC) (GLenum target, GLenum pname, const GLfloat *params); +typedef void (GL_APIENTRYP PFNGLTEXPARAMETERIPROC) (GLenum target, GLenum pname, GLint param); +typedef void (GL_APIENTRYP PFNGLTEXPARAMETERIVPROC) (GLenum target, GLenum pname, const GLint *params); +typedef void (GL_APIENTRYP PFNGLTEXSUBIMAGE2DPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, const void *pixels); +typedef void (GL_APIENTRYP PFNGLUNIFORM1FPROC) (GLint location, GLfloat v0); +typedef void (GL_APIENTRYP PFNGLUNIFORM1FVPROC) (GLint location, GLsizei count, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM1IPROC) (GLint location, GLint v0); +typedef void (GL_APIENTRYP PFNGLUNIFORM1IVPROC) (GLint location, GLsizei count, const GLint *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM2FPROC) (GLint location, GLfloat v0, GLfloat v1); +typedef void (GL_APIENTRYP PFNGLUNIFORM2FVPROC) (GLint location, GLsizei count, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM2IPROC) (GLint location, GLint v0, GLint v1); +typedef void (GL_APIENTRYP PFNGLUNIFORM2IVPROC) (GLint location, GLsizei count, const GLint *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM3FPROC) (GLint location, GLfloat v0, GLfloat v1, GLfloat v2); +typedef void (GL_APIENTRYP PFNGLUNIFORM3FVPROC) (GLint location, GLsizei count, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM3IPROC) (GLint location, GLint v0, GLint v1, GLint v2); +typedef void (GL_APIENTRYP PFNGLUNIFORM3IVPROC) (GLint location, GLsizei count, const GLint *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM4FPROC) (GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3); +typedef void (GL_APIENTRYP PFNGLUNIFORM4FVPROC) (GLint location, GLsizei count, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM4IPROC) (GLint location, GLint v0, GLint v1, GLint v2, GLint v3); +typedef void (GL_APIENTRYP PFNGLUNIFORM4IVPROC) (GLint location, GLsizei count, const GLint *value); +typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX2FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX3FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX4FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUSEPROGRAMPROC) (GLuint program); +typedef void (GL_APIENTRYP PFNGLVALIDATEPROGRAMPROC) (GLuint program); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB1FPROC) (GLuint index, GLfloat x); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB1FVPROC) (GLuint index, const GLfloat *v); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB2FPROC) (GLuint index, GLfloat x, GLfloat y); +typedef void (GL_APIENTRYP 
PFNGLVERTEXATTRIB2FVPROC) (GLuint index, const GLfloat *v); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB3FPROC) (GLuint index, GLfloat x, GLfloat y, GLfloat z); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB3FVPROC) (GLuint index, const GLfloat *v); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB4FPROC) (GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB4FVPROC) (GLuint index, const GLfloat *v); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBPOINTERPROC) (GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, const void *pointer); +typedef void (GL_APIENTRYP PFNGLVIEWPORTPROC) (GLint x, GLint y, GLsizei width, GLsizei height); +#if GL_GLES_PROTOTYPES +GL_APICALL void GL_APIENTRY glActiveTexture (GLenum texture); +GL_APICALL void GL_APIENTRY glAttachShader (GLuint program, GLuint shader); +GL_APICALL void GL_APIENTRY glBindAttribLocation (GLuint program, GLuint index, const GLchar *name); +GL_APICALL void GL_APIENTRY glBindBuffer (GLenum target, GLuint buffer); +GL_APICALL void GL_APIENTRY glBindFramebuffer (GLenum target, GLuint framebuffer); +GL_APICALL void GL_APIENTRY glBindRenderbuffer (GLenum target, GLuint renderbuffer); +GL_APICALL void GL_APIENTRY glBindTexture (GLenum target, GLuint texture); +GL_APICALL void GL_APIENTRY glBlendColor (GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha); +GL_APICALL void GL_APIENTRY glBlendEquation (GLenum mode); +GL_APICALL void GL_APIENTRY glBlendEquationSeparate (GLenum modeRGB, GLenum modeAlpha); +GL_APICALL void GL_APIENTRY glBlendFunc (GLenum sfactor, GLenum dfactor); +GL_APICALL void GL_APIENTRY glBlendFuncSeparate (GLenum sfactorRGB, GLenum dfactorRGB, GLenum sfactorAlpha, GLenum dfactorAlpha); +GL_APICALL void GL_APIENTRY glBufferData (GLenum target, GLsizeiptr size, const void *data, GLenum usage); +GL_APICALL void GL_APIENTRY glBufferSubData (GLenum target, GLintptr offset, GLsizeiptr size, const void *data); +GL_APICALL GLenum GL_APIENTRY glCheckFramebufferStatus (GLenum target); +GL_APICALL void GL_APIENTRY glClear (GLbitfield mask); +GL_APICALL void GL_APIENTRY glClearColor (GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha); +GL_APICALL void GL_APIENTRY glClearDepthf (GLfloat d); +GL_APICALL void GL_APIENTRY glClearStencil (GLint s); +GL_APICALL void GL_APIENTRY glColorMask (GLboolean red, GLboolean green, GLboolean blue, GLboolean alpha); +GL_APICALL void GL_APIENTRY glCompileShader (GLuint shader); +GL_APICALL void GL_APIENTRY glCompressedTexImage2D (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLsizei imageSize, const void *data); +GL_APICALL void GL_APIENTRY glCompressedTexSubImage2D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, const void *data); +GL_APICALL void GL_APIENTRY glCopyTexImage2D (GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLsizei height, GLint border); +GL_APICALL void GL_APIENTRY glCopyTexSubImage2D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height); +GL_APICALL GLuint GL_APIENTRY glCreateProgram (void); +GL_APICALL GLuint GL_APIENTRY glCreateShader (GLenum type); +GL_APICALL void GL_APIENTRY glCullFace (GLenum mode); +GL_APICALL void GL_APIENTRY glDeleteBuffers (GLsizei n, const GLuint *buffers); +GL_APICALL void GL_APIENTRY glDeleteFramebuffers (GLsizei n, const GLuint *framebuffers); +GL_APICALL void 
GL_APIENTRY glDeleteProgram (GLuint program); +GL_APICALL void GL_APIENTRY glDeleteRenderbuffers (GLsizei n, const GLuint *renderbuffers); +GL_APICALL void GL_APIENTRY glDeleteShader (GLuint shader); +GL_APICALL void GL_APIENTRY glDeleteTextures (GLsizei n, const GLuint *textures); +GL_APICALL void GL_APIENTRY glDepthFunc (GLenum func); +GL_APICALL void GL_APIENTRY glDepthMask (GLboolean flag); +GL_APICALL void GL_APIENTRY glDepthRangef (GLfloat n, GLfloat f); +GL_APICALL void GL_APIENTRY glDetachShader (GLuint program, GLuint shader); +GL_APICALL void GL_APIENTRY glDisable (GLenum cap); +GL_APICALL void GL_APIENTRY glDisableVertexAttribArray (GLuint index); +GL_APICALL void GL_APIENTRY glDrawArrays (GLenum mode, GLint first, GLsizei count); +GL_APICALL void GL_APIENTRY glDrawElements (GLenum mode, GLsizei count, GLenum type, const void *indices); +GL_APICALL void GL_APIENTRY glEnable (GLenum cap); +GL_APICALL void GL_APIENTRY glEnableVertexAttribArray (GLuint index); +GL_APICALL void GL_APIENTRY glFinish (void); +GL_APICALL void GL_APIENTRY glFlush (void); +GL_APICALL void GL_APIENTRY glFramebufferRenderbuffer (GLenum target, GLenum attachment, GLenum renderbuffertarget, GLuint renderbuffer); +GL_APICALL void GL_APIENTRY glFramebufferTexture2D (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level); +GL_APICALL void GL_APIENTRY glFrontFace (GLenum mode); +GL_APICALL void GL_APIENTRY glGenBuffers (GLsizei n, GLuint *buffers); +GL_APICALL void GL_APIENTRY glGenerateMipmap (GLenum target); +GL_APICALL void GL_APIENTRY glGenFramebuffers (GLsizei n, GLuint *framebuffers); +GL_APICALL void GL_APIENTRY glGenRenderbuffers (GLsizei n, GLuint *renderbuffers); +GL_APICALL void GL_APIENTRY glGenTextures (GLsizei n, GLuint *textures); +GL_APICALL void GL_APIENTRY glGetActiveAttrib (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLint *size, GLenum *type, GLchar *name); +GL_APICALL void GL_APIENTRY glGetActiveUniform (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLint *size, GLenum *type, GLchar *name); +GL_APICALL void GL_APIENTRY glGetAttachedShaders (GLuint program, GLsizei maxCount, GLsizei *count, GLuint *shaders); +GL_APICALL GLint GL_APIENTRY glGetAttribLocation (GLuint program, const GLchar *name); +GL_APICALL void GL_APIENTRY glGetBooleanv (GLenum pname, GLboolean *data); +GL_APICALL void GL_APIENTRY glGetBufferParameteriv (GLenum target, GLenum pname, GLint *params); +GL_APICALL GLenum GL_APIENTRY glGetError (void); +GL_APICALL void GL_APIENTRY glGetFloatv (GLenum pname, GLfloat *data); +GL_APICALL void GL_APIENTRY glGetFramebufferAttachmentParameteriv (GLenum target, GLenum attachment, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetIntegerv (GLenum pname, GLint *data); +GL_APICALL void GL_APIENTRY glGetProgramiv (GLuint program, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetProgramInfoLog (GLuint program, GLsizei bufSize, GLsizei *length, GLchar *infoLog); +GL_APICALL void GL_APIENTRY glGetRenderbufferParameteriv (GLenum target, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetShaderiv (GLuint shader, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetShaderInfoLog (GLuint shader, GLsizei bufSize, GLsizei *length, GLchar *infoLog); +GL_APICALL void GL_APIENTRY glGetShaderPrecisionFormat (GLenum shadertype, GLenum precisiontype, GLint *range, GLint *precision); +GL_APICALL void GL_APIENTRY glGetShaderSource (GLuint shader, GLsizei bufSize, GLsizei *length, GLchar 
*source); +GL_APICALL const GLubyte *GL_APIENTRY glGetString (GLenum name); +GL_APICALL void GL_APIENTRY glGetTexParameterfv (GLenum target, GLenum pname, GLfloat *params); +GL_APICALL void GL_APIENTRY glGetTexParameteriv (GLenum target, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetUniformfv (GLuint program, GLint location, GLfloat *params); +GL_APICALL void GL_APIENTRY glGetUniformiv (GLuint program, GLint location, GLint *params); +GL_APICALL GLint GL_APIENTRY glGetUniformLocation (GLuint program, const GLchar *name); +GL_APICALL void GL_APIENTRY glGetVertexAttribfv (GLuint index, GLenum pname, GLfloat *params); +GL_APICALL void GL_APIENTRY glGetVertexAttribiv (GLuint index, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetVertexAttribPointerv (GLuint index, GLenum pname, void **pointer); +GL_APICALL void GL_APIENTRY glHint (GLenum target, GLenum mode); +GL_APICALL GLboolean GL_APIENTRY glIsBuffer (GLuint buffer); +GL_APICALL GLboolean GL_APIENTRY glIsEnabled (GLenum cap); +GL_APICALL GLboolean GL_APIENTRY glIsFramebuffer (GLuint framebuffer); +GL_APICALL GLboolean GL_APIENTRY glIsProgram (GLuint program); +GL_APICALL GLboolean GL_APIENTRY glIsRenderbuffer (GLuint renderbuffer); +GL_APICALL GLboolean GL_APIENTRY glIsShader (GLuint shader); +GL_APICALL GLboolean GL_APIENTRY glIsTexture (GLuint texture); +GL_APICALL void GL_APIENTRY glLineWidth (GLfloat width); +GL_APICALL void GL_APIENTRY glLinkProgram (GLuint program); +GL_APICALL void GL_APIENTRY glPixelStorei (GLenum pname, GLint param); +GL_APICALL void GL_APIENTRY glPolygonOffset (GLfloat factor, GLfloat units); +GL_APICALL void GL_APIENTRY glReadPixels (GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, void *pixels); +GL_APICALL void GL_APIENTRY glReleaseShaderCompiler (void); +GL_APICALL void GL_APIENTRY glRenderbufferStorage (GLenum target, GLenum internalformat, GLsizei width, GLsizei height); +GL_APICALL void GL_APIENTRY glSampleCoverage (GLfloat value, GLboolean invert); +GL_APICALL void GL_APIENTRY glScissor (GLint x, GLint y, GLsizei width, GLsizei height); +GL_APICALL void GL_APIENTRY glShaderBinary (GLsizei count, const GLuint *shaders, GLenum binaryformat, const void *binary, GLsizei length); +GL_APICALL void GL_APIENTRY glShaderSource (GLuint shader, GLsizei count, const GLchar *const*string, const GLint *length); +GL_APICALL void GL_APIENTRY glStencilFunc (GLenum func, GLint ref, GLuint mask); +GL_APICALL void GL_APIENTRY glStencilFuncSeparate (GLenum face, GLenum func, GLint ref, GLuint mask); +GL_APICALL void GL_APIENTRY glStencilMask (GLuint mask); +GL_APICALL void GL_APIENTRY glStencilMaskSeparate (GLenum face, GLuint mask); +GL_APICALL void GL_APIENTRY glStencilOp (GLenum fail, GLenum zfail, GLenum zpass); +GL_APICALL void GL_APIENTRY glStencilOpSeparate (GLenum face, GLenum sfail, GLenum dpfail, GLenum dppass); +GL_APICALL void GL_APIENTRY glTexImage2D (GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, const void *pixels); +GL_APICALL void GL_APIENTRY glTexParameterf (GLenum target, GLenum pname, GLfloat param); +GL_APICALL void GL_APIENTRY glTexParameterfv (GLenum target, GLenum pname, const GLfloat *params); +GL_APICALL void GL_APIENTRY glTexParameteri (GLenum target, GLenum pname, GLint param); +GL_APICALL void GL_APIENTRY glTexParameteriv (GLenum target, GLenum pname, const GLint *params); +GL_APICALL void GL_APIENTRY glTexSubImage2D (GLenum target, GLint level, GLint xoffset, 
GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, const void *pixels); +GL_APICALL void GL_APIENTRY glUniform1f (GLint location, GLfloat v0); +GL_APICALL void GL_APIENTRY glUniform1fv (GLint location, GLsizei count, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniform1i (GLint location, GLint v0); +GL_APICALL void GL_APIENTRY glUniform1iv (GLint location, GLsizei count, const GLint *value); +GL_APICALL void GL_APIENTRY glUniform2f (GLint location, GLfloat v0, GLfloat v1); +GL_APICALL void GL_APIENTRY glUniform2fv (GLint location, GLsizei count, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniform2i (GLint location, GLint v0, GLint v1); +GL_APICALL void GL_APIENTRY glUniform2iv (GLint location, GLsizei count, const GLint *value); +GL_APICALL void GL_APIENTRY glUniform3f (GLint location, GLfloat v0, GLfloat v1, GLfloat v2); +GL_APICALL void GL_APIENTRY glUniform3fv (GLint location, GLsizei count, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniform3i (GLint location, GLint v0, GLint v1, GLint v2); +GL_APICALL void GL_APIENTRY glUniform3iv (GLint location, GLsizei count, const GLint *value); +GL_APICALL void GL_APIENTRY glUniform4f (GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3); +GL_APICALL void GL_APIENTRY glUniform4fv (GLint location, GLsizei count, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniform4i (GLint location, GLint v0, GLint v1, GLint v2, GLint v3); +GL_APICALL void GL_APIENTRY glUniform4iv (GLint location, GLsizei count, const GLint *value); +GL_APICALL void GL_APIENTRY glUniformMatrix2fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniformMatrix3fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniformMatrix4fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUseProgram (GLuint program); +GL_APICALL void GL_APIENTRY glValidateProgram (GLuint program); +GL_APICALL void GL_APIENTRY glVertexAttrib1f (GLuint index, GLfloat x); +GL_APICALL void GL_APIENTRY glVertexAttrib1fv (GLuint index, const GLfloat *v); +GL_APICALL void GL_APIENTRY glVertexAttrib2f (GLuint index, GLfloat x, GLfloat y); +GL_APICALL void GL_APIENTRY glVertexAttrib2fv (GLuint index, const GLfloat *v); +GL_APICALL void GL_APIENTRY glVertexAttrib3f (GLuint index, GLfloat x, GLfloat y, GLfloat z); +GL_APICALL void GL_APIENTRY glVertexAttrib3fv (GLuint index, const GLfloat *v); +GL_APICALL void GL_APIENTRY glVertexAttrib4f (GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w); +GL_APICALL void GL_APIENTRY glVertexAttrib4fv (GLuint index, const GLfloat *v); +GL_APICALL void GL_APIENTRY glVertexAttribPointer (GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, const void *pointer); +GL_APICALL void GL_APIENTRY glViewport (GLint x, GLint y, GLsizei width, GLsizei height); +#endif +#endif /* GL_ES_VERSION_2_0 */ + +#ifndef GL_ES_VERSION_3_0 +#define GL_ES_VERSION_3_0 1 +typedef unsigned short GLhalf; +#define GL_READ_BUFFER 0x0C02 +#define GL_UNPACK_ROW_LENGTH 0x0CF2 +#define GL_UNPACK_SKIP_ROWS 0x0CF3 +#define GL_UNPACK_SKIP_PIXELS 0x0CF4 +#define GL_PACK_ROW_LENGTH 0x0D02 +#define GL_PACK_SKIP_ROWS 0x0D03 +#define GL_PACK_SKIP_PIXELS 0x0D04 +#define GL_COLOR 0x1800 +#define GL_DEPTH 0x1801 +#define GL_STENCIL 0x1802 +#define GL_RED 0x1903 +#define GL_RGB8 0x8051 +#define GL_RGBA8 0x8058 +#define GL_RGB10_A2 0x8059 +#define 
GL_TEXTURE_BINDING_3D 0x806A +#define GL_UNPACK_SKIP_IMAGES 0x806D +#define GL_UNPACK_IMAGE_HEIGHT 0x806E +#define GL_TEXTURE_3D 0x806F +#define GL_TEXTURE_WRAP_R 0x8072 +#define GL_MAX_3D_TEXTURE_SIZE 0x8073 +#define GL_UNSIGNED_INT_2_10_10_10_REV 0x8368 +#define GL_MAX_ELEMENTS_VERTICES 0x80E8 +#define GL_MAX_ELEMENTS_INDICES 0x80E9 +#define GL_TEXTURE_MIN_LOD 0x813A +#define GL_TEXTURE_MAX_LOD 0x813B +#define GL_TEXTURE_BASE_LEVEL 0x813C +#define GL_TEXTURE_MAX_LEVEL 0x813D +#define GL_MIN 0x8007 +#define GL_MAX 0x8008 +#define GL_DEPTH_COMPONENT24 0x81A6 +#define GL_MAX_TEXTURE_LOD_BIAS 0x84FD +#define GL_TEXTURE_COMPARE_MODE 0x884C +#define GL_TEXTURE_COMPARE_FUNC 0x884D +#define GL_CURRENT_QUERY 0x8865 +#define GL_QUERY_RESULT 0x8866 +#define GL_QUERY_RESULT_AVAILABLE 0x8867 +#define GL_BUFFER_MAPPED 0x88BC +#define GL_BUFFER_MAP_POINTER 0x88BD +#define GL_STREAM_READ 0x88E1 +#define GL_STREAM_COPY 0x88E2 +#define GL_STATIC_READ 0x88E5 +#define GL_STATIC_COPY 0x88E6 +#define GL_DYNAMIC_READ 0x88E9 +#define GL_DYNAMIC_COPY 0x88EA +#define GL_MAX_DRAW_BUFFERS 0x8824 +#define GL_DRAW_BUFFER0 0x8825 +#define GL_DRAW_BUFFER1 0x8826 +#define GL_DRAW_BUFFER2 0x8827 +#define GL_DRAW_BUFFER3 0x8828 +#define GL_DRAW_BUFFER4 0x8829 +#define GL_DRAW_BUFFER5 0x882A +#define GL_DRAW_BUFFER6 0x882B +#define GL_DRAW_BUFFER7 0x882C +#define GL_DRAW_BUFFER8 0x882D +#define GL_DRAW_BUFFER9 0x882E +#define GL_DRAW_BUFFER10 0x882F +#define GL_DRAW_BUFFER11 0x8830 +#define GL_DRAW_BUFFER12 0x8831 +#define GL_DRAW_BUFFER13 0x8832 +#define GL_DRAW_BUFFER14 0x8833 +#define GL_DRAW_BUFFER15 0x8834 +#define GL_MAX_FRAGMENT_UNIFORM_COMPONENTS 0x8B49 +#define GL_MAX_VERTEX_UNIFORM_COMPONENTS 0x8B4A +#define GL_SAMPLER_3D 0x8B5F +#define GL_SAMPLER_2D_SHADOW 0x8B62 +#define GL_FRAGMENT_SHADER_DERIVATIVE_HINT 0x8B8B +#define GL_PIXEL_PACK_BUFFER 0x88EB +#define GL_PIXEL_UNPACK_BUFFER 0x88EC +#define GL_PIXEL_PACK_BUFFER_BINDING 0x88ED +#define GL_PIXEL_UNPACK_BUFFER_BINDING 0x88EF +#define GL_FLOAT_MAT2x3 0x8B65 +#define GL_FLOAT_MAT2x4 0x8B66 +#define GL_FLOAT_MAT3x2 0x8B67 +#define GL_FLOAT_MAT3x4 0x8B68 +#define GL_FLOAT_MAT4x2 0x8B69 +#define GL_FLOAT_MAT4x3 0x8B6A +#define GL_SRGB 0x8C40 +#define GL_SRGB8 0x8C41 +#define GL_SRGB8_ALPHA8 0x8C43 +#define GL_COMPARE_REF_TO_TEXTURE 0x884E +#define GL_MAJOR_VERSION 0x821B +#define GL_MINOR_VERSION 0x821C +#define GL_NUM_EXTENSIONS 0x821D +#define GL_RGBA32F 0x8814 +#define GL_RGB32F 0x8815 +#define GL_RGBA16F 0x881A +#define GL_RGB16F 0x881B +#define GL_VERTEX_ATTRIB_ARRAY_INTEGER 0x88FD +#define GL_MAX_ARRAY_TEXTURE_LAYERS 0x88FF +#define GL_MIN_PROGRAM_TEXEL_OFFSET 0x8904 +#define GL_MAX_PROGRAM_TEXEL_OFFSET 0x8905 +#define GL_MAX_VARYING_COMPONENTS 0x8B4B +#define GL_TEXTURE_2D_ARRAY 0x8C1A +#define GL_TEXTURE_BINDING_2D_ARRAY 0x8C1D +#define GL_R11F_G11F_B10F 0x8C3A +#define GL_UNSIGNED_INT_10F_11F_11F_REV 0x8C3B +#define GL_RGB9_E5 0x8C3D +#define GL_UNSIGNED_INT_5_9_9_9_REV 0x8C3E +#define GL_TRANSFORM_FEEDBACK_VARYING_MAX_LENGTH 0x8C76 +#define GL_TRANSFORM_FEEDBACK_BUFFER_MODE 0x8C7F +#define GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_COMPONENTS 0x8C80 +#define GL_TRANSFORM_FEEDBACK_VARYINGS 0x8C83 +#define GL_TRANSFORM_FEEDBACK_BUFFER_START 0x8C84 +#define GL_TRANSFORM_FEEDBACK_BUFFER_SIZE 0x8C85 +#define GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN 0x8C88 +#define GL_RASTERIZER_DISCARD 0x8C89 +#define GL_MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS 0x8C8A +#define GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_ATTRIBS 0x8C8B +#define GL_INTERLEAVED_ATTRIBS 0x8C8C +#define 
GL_SEPARATE_ATTRIBS 0x8C8D +#define GL_TRANSFORM_FEEDBACK_BUFFER 0x8C8E +#define GL_TRANSFORM_FEEDBACK_BUFFER_BINDING 0x8C8F +#define GL_RGBA32UI 0x8D70 +#define GL_RGB32UI 0x8D71 +#define GL_RGBA16UI 0x8D76 +#define GL_RGB16UI 0x8D77 +#define GL_RGBA8UI 0x8D7C +#define GL_RGB8UI 0x8D7D +#define GL_RGBA32I 0x8D82 +#define GL_RGB32I 0x8D83 +#define GL_RGBA16I 0x8D88 +#define GL_RGB16I 0x8D89 +#define GL_RGBA8I 0x8D8E +#define GL_RGB8I 0x8D8F +#define GL_RED_INTEGER 0x8D94 +#define GL_RGB_INTEGER 0x8D98 +#define GL_RGBA_INTEGER 0x8D99 +#define GL_SAMPLER_2D_ARRAY 0x8DC1 +#define GL_SAMPLER_2D_ARRAY_SHADOW 0x8DC4 +#define GL_SAMPLER_CUBE_SHADOW 0x8DC5 +#define GL_UNSIGNED_INT_VEC2 0x8DC6 +#define GL_UNSIGNED_INT_VEC3 0x8DC7 +#define GL_UNSIGNED_INT_VEC4 0x8DC8 +#define GL_INT_SAMPLER_2D 0x8DCA +#define GL_INT_SAMPLER_3D 0x8DCB +#define GL_INT_SAMPLER_CUBE 0x8DCC +#define GL_INT_SAMPLER_2D_ARRAY 0x8DCF +#define GL_UNSIGNED_INT_SAMPLER_2D 0x8DD2 +#define GL_UNSIGNED_INT_SAMPLER_3D 0x8DD3 +#define GL_UNSIGNED_INT_SAMPLER_CUBE 0x8DD4 +#define GL_UNSIGNED_INT_SAMPLER_2D_ARRAY 0x8DD7 +#define GL_BUFFER_ACCESS_FLAGS 0x911F +#define GL_BUFFER_MAP_LENGTH 0x9120 +#define GL_BUFFER_MAP_OFFSET 0x9121 +#define GL_DEPTH_COMPONENT32F 0x8CAC +#define GL_DEPTH32F_STENCIL8 0x8CAD +#define GL_FLOAT_32_UNSIGNED_INT_24_8_REV 0x8DAD +#define GL_FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING 0x8210 +#define GL_FRAMEBUFFER_ATTACHMENT_COMPONENT_TYPE 0x8211 +#define GL_FRAMEBUFFER_ATTACHMENT_RED_SIZE 0x8212 +#define GL_FRAMEBUFFER_ATTACHMENT_GREEN_SIZE 0x8213 +#define GL_FRAMEBUFFER_ATTACHMENT_BLUE_SIZE 0x8214 +#define GL_FRAMEBUFFER_ATTACHMENT_ALPHA_SIZE 0x8215 +#define GL_FRAMEBUFFER_ATTACHMENT_DEPTH_SIZE 0x8216 +#define GL_FRAMEBUFFER_ATTACHMENT_STENCIL_SIZE 0x8217 +#define GL_FRAMEBUFFER_DEFAULT 0x8218 +#define GL_FRAMEBUFFER_UNDEFINED 0x8219 +#define GL_DEPTH_STENCIL_ATTACHMENT 0x821A +#define GL_DEPTH_STENCIL 0x84F9 +#define GL_UNSIGNED_INT_24_8 0x84FA +#define GL_DEPTH24_STENCIL8 0x88F0 +#define GL_UNSIGNED_NORMALIZED 0x8C17 +#define GL_DRAW_FRAMEBUFFER_BINDING 0x8CA6 +#define GL_READ_FRAMEBUFFER 0x8CA8 +#define GL_DRAW_FRAMEBUFFER 0x8CA9 +#define GL_READ_FRAMEBUFFER_BINDING 0x8CAA +#define GL_RENDERBUFFER_SAMPLES 0x8CAB +#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LAYER 0x8CD4 +#define GL_MAX_COLOR_ATTACHMENTS 0x8CDF +#define GL_COLOR_ATTACHMENT1 0x8CE1 +#define GL_COLOR_ATTACHMENT2 0x8CE2 +#define GL_COLOR_ATTACHMENT3 0x8CE3 +#define GL_COLOR_ATTACHMENT4 0x8CE4 +#define GL_COLOR_ATTACHMENT5 0x8CE5 +#define GL_COLOR_ATTACHMENT6 0x8CE6 +#define GL_COLOR_ATTACHMENT7 0x8CE7 +#define GL_COLOR_ATTACHMENT8 0x8CE8 +#define GL_COLOR_ATTACHMENT9 0x8CE9 +#define GL_COLOR_ATTACHMENT10 0x8CEA +#define GL_COLOR_ATTACHMENT11 0x8CEB +#define GL_COLOR_ATTACHMENT12 0x8CEC +#define GL_COLOR_ATTACHMENT13 0x8CED +#define GL_COLOR_ATTACHMENT14 0x8CEE +#define GL_COLOR_ATTACHMENT15 0x8CEF +#define GL_COLOR_ATTACHMENT16 0x8CF0 +#define GL_COLOR_ATTACHMENT17 0x8CF1 +#define GL_COLOR_ATTACHMENT18 0x8CF2 +#define GL_COLOR_ATTACHMENT19 0x8CF3 +#define GL_COLOR_ATTACHMENT20 0x8CF4 +#define GL_COLOR_ATTACHMENT21 0x8CF5 +#define GL_COLOR_ATTACHMENT22 0x8CF6 +#define GL_COLOR_ATTACHMENT23 0x8CF7 +#define GL_COLOR_ATTACHMENT24 0x8CF8 +#define GL_COLOR_ATTACHMENT25 0x8CF9 +#define GL_COLOR_ATTACHMENT26 0x8CFA +#define GL_COLOR_ATTACHMENT27 0x8CFB +#define GL_COLOR_ATTACHMENT28 0x8CFC +#define GL_COLOR_ATTACHMENT29 0x8CFD +#define GL_COLOR_ATTACHMENT30 0x8CFE +#define GL_COLOR_ATTACHMENT31 0x8CFF +#define GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE 0x8D56 
+#define GL_MAX_SAMPLES 0x8D57 +#define GL_HALF_FLOAT 0x140B +#define GL_MAP_READ_BIT 0x0001 +#define GL_MAP_WRITE_BIT 0x0002 +#define GL_MAP_INVALIDATE_RANGE_BIT 0x0004 +#define GL_MAP_INVALIDATE_BUFFER_BIT 0x0008 +#define GL_MAP_FLUSH_EXPLICIT_BIT 0x0010 +#define GL_MAP_UNSYNCHRONIZED_BIT 0x0020 +#define GL_RG 0x8227 +#define GL_RG_INTEGER 0x8228 +#define GL_R8 0x8229 +#define GL_RG8 0x822B +#define GL_R16F 0x822D +#define GL_R32F 0x822E +#define GL_RG16F 0x822F +#define GL_RG32F 0x8230 +#define GL_R8I 0x8231 +#define GL_R8UI 0x8232 +#define GL_R16I 0x8233 +#define GL_R16UI 0x8234 +#define GL_R32I 0x8235 +#define GL_R32UI 0x8236 +#define GL_RG8I 0x8237 +#define GL_RG8UI 0x8238 +#define GL_RG16I 0x8239 +#define GL_RG16UI 0x823A +#define GL_RG32I 0x823B +#define GL_RG32UI 0x823C +#define GL_VERTEX_ARRAY_BINDING 0x85B5 +#define GL_R8_SNORM 0x8F94 +#define GL_RG8_SNORM 0x8F95 +#define GL_RGB8_SNORM 0x8F96 +#define GL_RGBA8_SNORM 0x8F97 +#define GL_SIGNED_NORMALIZED 0x8F9C +#define GL_PRIMITIVE_RESTART_FIXED_INDEX 0x8D69 +#define GL_COPY_READ_BUFFER 0x8F36 +#define GL_COPY_WRITE_BUFFER 0x8F37 +#define GL_COPY_READ_BUFFER_BINDING 0x8F36 +#define GL_COPY_WRITE_BUFFER_BINDING 0x8F37 +#define GL_UNIFORM_BUFFER 0x8A11 +#define GL_UNIFORM_BUFFER_BINDING 0x8A28 +#define GL_UNIFORM_BUFFER_START 0x8A29 +#define GL_UNIFORM_BUFFER_SIZE 0x8A2A +#define GL_MAX_VERTEX_UNIFORM_BLOCKS 0x8A2B +#define GL_MAX_FRAGMENT_UNIFORM_BLOCKS 0x8A2D +#define GL_MAX_COMBINED_UNIFORM_BLOCKS 0x8A2E +#define GL_MAX_UNIFORM_BUFFER_BINDINGS 0x8A2F +#define GL_MAX_UNIFORM_BLOCK_SIZE 0x8A30 +#define GL_MAX_COMBINED_VERTEX_UNIFORM_COMPONENTS 0x8A31 +#define GL_MAX_COMBINED_FRAGMENT_UNIFORM_COMPONENTS 0x8A33 +#define GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT 0x8A34 +#define GL_ACTIVE_UNIFORM_BLOCK_MAX_NAME_LENGTH 0x8A35 +#define GL_ACTIVE_UNIFORM_BLOCKS 0x8A36 +#define GL_UNIFORM_TYPE 0x8A37 +#define GL_UNIFORM_SIZE 0x8A38 +#define GL_UNIFORM_NAME_LENGTH 0x8A39 +#define GL_UNIFORM_BLOCK_INDEX 0x8A3A +#define GL_UNIFORM_OFFSET 0x8A3B +#define GL_UNIFORM_ARRAY_STRIDE 0x8A3C +#define GL_UNIFORM_MATRIX_STRIDE 0x8A3D +#define GL_UNIFORM_IS_ROW_MAJOR 0x8A3E +#define GL_UNIFORM_BLOCK_BINDING 0x8A3F +#define GL_UNIFORM_BLOCK_DATA_SIZE 0x8A40 +#define GL_UNIFORM_BLOCK_NAME_LENGTH 0x8A41 +#define GL_UNIFORM_BLOCK_ACTIVE_UNIFORMS 0x8A42 +#define GL_UNIFORM_BLOCK_ACTIVE_UNIFORM_INDICES 0x8A43 +#define GL_UNIFORM_BLOCK_REFERENCED_BY_VERTEX_SHADER 0x8A44 +#define GL_UNIFORM_BLOCK_REFERENCED_BY_FRAGMENT_SHADER 0x8A46 +#define GL_INVALID_INDEX 0xFFFFFFFFu +#define GL_MAX_VERTEX_OUTPUT_COMPONENTS 0x9122 +#define GL_MAX_FRAGMENT_INPUT_COMPONENTS 0x9125 +#define GL_MAX_SERVER_WAIT_TIMEOUT 0x9111 +#define GL_OBJECT_TYPE 0x9112 +#define GL_SYNC_CONDITION 0x9113 +#define GL_SYNC_STATUS 0x9114 +#define GL_SYNC_FLAGS 0x9115 +#define GL_SYNC_FENCE 0x9116 +#define GL_SYNC_GPU_COMMANDS_COMPLETE 0x9117 +#define GL_UNSIGNALED 0x9118 +#define GL_SIGNALED 0x9119 +#define GL_ALREADY_SIGNALED 0x911A +#define GL_TIMEOUT_EXPIRED 0x911B +#define GL_CONDITION_SATISFIED 0x911C +#define GL_WAIT_FAILED 0x911D +#define GL_SYNC_FLUSH_COMMANDS_BIT 0x00000001 +#define GL_TIMEOUT_IGNORED 0xFFFFFFFFFFFFFFFFull +#define GL_VERTEX_ATTRIB_ARRAY_DIVISOR 0x88FE +#define GL_ANY_SAMPLES_PASSED 0x8C2F +#define GL_ANY_SAMPLES_PASSED_CONSERVATIVE 0x8D6A +#define GL_SAMPLER_BINDING 0x8919 +#define GL_RGB10_A2UI 0x906F +#define GL_TEXTURE_SWIZZLE_R 0x8E42 +#define GL_TEXTURE_SWIZZLE_G 0x8E43 +#define GL_TEXTURE_SWIZZLE_B 0x8E44 +#define GL_TEXTURE_SWIZZLE_A 0x8E45 +#define GL_GREEN 0x1904 
+#define GL_BLUE 0x1905 +#define GL_INT_2_10_10_10_REV 0x8D9F +#define GL_TRANSFORM_FEEDBACK 0x8E22 +#define GL_TRANSFORM_FEEDBACK_PAUSED 0x8E23 +#define GL_TRANSFORM_FEEDBACK_ACTIVE 0x8E24 +#define GL_TRANSFORM_FEEDBACK_BINDING 0x8E25 +#define GL_PROGRAM_BINARY_RETRIEVABLE_HINT 0x8257 +#define GL_PROGRAM_BINARY_LENGTH 0x8741 +#define GL_NUM_PROGRAM_BINARY_FORMATS 0x87FE +#define GL_PROGRAM_BINARY_FORMATS 0x87FF +#define GL_COMPRESSED_R11_EAC 0x9270 +#define GL_COMPRESSED_SIGNED_R11_EAC 0x9271 +#define GL_COMPRESSED_RG11_EAC 0x9272 +#define GL_COMPRESSED_SIGNED_RG11_EAC 0x9273 +#define GL_COMPRESSED_RGB8_ETC2 0x9274 +#define GL_COMPRESSED_SRGB8_ETC2 0x9275 +#define GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2 0x9276 +#define GL_COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2 0x9277 +#define GL_COMPRESSED_RGBA8_ETC2_EAC 0x9278 +#define GL_COMPRESSED_SRGB8_ALPHA8_ETC2_EAC 0x9279 +#define GL_TEXTURE_IMMUTABLE_FORMAT 0x912F +#define GL_MAX_ELEMENT_INDEX 0x8D6B +#define GL_NUM_SAMPLE_COUNTS 0x9380 +#define GL_TEXTURE_IMMUTABLE_LEVELS 0x82DF +typedef void (GL_APIENTRYP PFNGLREADBUFFERPROC) (GLenum src); +typedef void (GL_APIENTRYP PFNGLDRAWRANGEELEMENTSPROC) (GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum type, const void *indices); +typedef void (GL_APIENTRYP PFNGLTEXIMAGE3DPROC) (GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const void *pixels); +typedef void (GL_APIENTRYP PFNGLTEXSUBIMAGE3DPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const void *pixels); +typedef void (GL_APIENTRYP PFNGLCOPYTEXSUBIMAGE3DPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLint x, GLint y, GLsizei width, GLsizei height); +typedef void (GL_APIENTRYP PFNGLCOMPRESSEDTEXIMAGE3DPROC) (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLsizei imageSize, const void *data); +typedef void (GL_APIENTRYP PFNGLCOMPRESSEDTEXSUBIMAGE3DPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLsizei imageSize, const void *data); +typedef void (GL_APIENTRYP PFNGLGENQUERIESPROC) (GLsizei n, GLuint *ids); +typedef void (GL_APIENTRYP PFNGLDELETEQUERIESPROC) (GLsizei n, const GLuint *ids); +typedef GLboolean (GL_APIENTRYP PFNGLISQUERYPROC) (GLuint id); +typedef void (GL_APIENTRYP PFNGLBEGINQUERYPROC) (GLenum target, GLuint id); +typedef void (GL_APIENTRYP PFNGLENDQUERYPROC) (GLenum target); +typedef void (GL_APIENTRYP PFNGLGETQUERYIVPROC) (GLenum target, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETQUERYOBJECTUIVPROC) (GLuint id, GLenum pname, GLuint *params); +typedef GLboolean (GL_APIENTRYP PFNGLUNMAPBUFFERPROC) (GLenum target); +typedef void (GL_APIENTRYP PFNGLGETBUFFERPOINTERVPROC) (GLenum target, GLenum pname, void **params); +typedef void (GL_APIENTRYP PFNGLDRAWBUFFERSPROC) (GLsizei n, const GLenum *bufs); +typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX2X3FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX3X2FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX2X4FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP 
PFNGLUNIFORMMATRIX4X2FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX3X4FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX4X3FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLBLITFRAMEBUFFERPROC) (GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter); +typedef void (GL_APIENTRYP PFNGLRENDERBUFFERSTORAGEMULTISAMPLEPROC) (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height); +typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERTEXTURELAYERPROC) (GLenum target, GLenum attachment, GLuint texture, GLint level, GLint layer); +typedef void *(GL_APIENTRYP PFNGLMAPBUFFERRANGEPROC) (GLenum target, GLintptr offset, GLsizeiptr length, GLbitfield access); +typedef void (GL_APIENTRYP PFNGLFLUSHMAPPEDBUFFERRANGEPROC) (GLenum target, GLintptr offset, GLsizeiptr length); +typedef void (GL_APIENTRYP PFNGLBINDVERTEXARRAYPROC) (GLuint array); +typedef void (GL_APIENTRYP PFNGLDELETEVERTEXARRAYSPROC) (GLsizei n, const GLuint *arrays); +typedef void (GL_APIENTRYP PFNGLGENVERTEXARRAYSPROC) (GLsizei n, GLuint *arrays); +typedef GLboolean (GL_APIENTRYP PFNGLISVERTEXARRAYPROC) (GLuint array); +typedef void (GL_APIENTRYP PFNGLGETINTEGERI_VPROC) (GLenum target, GLuint index, GLint *data); +typedef void (GL_APIENTRYP PFNGLBEGINTRANSFORMFEEDBACKPROC) (GLenum primitiveMode); +typedef void (GL_APIENTRYP PFNGLENDTRANSFORMFEEDBACKPROC) (void); +typedef void (GL_APIENTRYP PFNGLBINDBUFFERRANGEPROC) (GLenum target, GLuint index, GLuint buffer, GLintptr offset, GLsizeiptr size); +typedef void (GL_APIENTRYP PFNGLBINDBUFFERBASEPROC) (GLenum target, GLuint index, GLuint buffer); +typedef void (GL_APIENTRYP PFNGLTRANSFORMFEEDBACKVARYINGSPROC) (GLuint program, GLsizei count, const GLchar *const*varyings, GLenum bufferMode); +typedef void (GL_APIENTRYP PFNGLGETTRANSFORMFEEDBACKVARYINGPROC) (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLsizei *size, GLenum *type, GLchar *name); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBIPOINTERPROC) (GLuint index, GLint size, GLenum type, GLsizei stride, const void *pointer); +typedef void (GL_APIENTRYP PFNGLGETVERTEXATTRIBIIVPROC) (GLuint index, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETVERTEXATTRIBIUIVPROC) (GLuint index, GLenum pname, GLuint *params); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBI4IPROC) (GLuint index, GLint x, GLint y, GLint z, GLint w); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBI4UIPROC) (GLuint index, GLuint x, GLuint y, GLuint z, GLuint w); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBI4IVPROC) (GLuint index, const GLint *v); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBI4UIVPROC) (GLuint index, const GLuint *v); +typedef void (GL_APIENTRYP PFNGLGETUNIFORMUIVPROC) (GLuint program, GLint location, GLuint *params); +typedef GLint (GL_APIENTRYP PFNGLGETFRAGDATALOCATIONPROC) (GLuint program, const GLchar *name); +typedef void (GL_APIENTRYP PFNGLUNIFORM1UIPROC) (GLint location, GLuint v0); +typedef void (GL_APIENTRYP PFNGLUNIFORM2UIPROC) (GLint location, GLuint v0, GLuint v1); +typedef void (GL_APIENTRYP PFNGLUNIFORM3UIPROC) (GLint location, GLuint v0, GLuint v1, GLuint v2); +typedef void (GL_APIENTRYP PFNGLUNIFORM4UIPROC) (GLint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3); +typedef void 
(GL_APIENTRYP PFNGLUNIFORM1UIVPROC) (GLint location, GLsizei count, const GLuint *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM2UIVPROC) (GLint location, GLsizei count, const GLuint *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM3UIVPROC) (GLint location, GLsizei count, const GLuint *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM4UIVPROC) (GLint location, GLsizei count, const GLuint *value); +typedef void (GL_APIENTRYP PFNGLCLEARBUFFERIVPROC) (GLenum buffer, GLint drawbuffer, const GLint *value); +typedef void (GL_APIENTRYP PFNGLCLEARBUFFERUIVPROC) (GLenum buffer, GLint drawbuffer, const GLuint *value); +typedef void (GL_APIENTRYP PFNGLCLEARBUFFERFVPROC) (GLenum buffer, GLint drawbuffer, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLCLEARBUFFERFIPROC) (GLenum buffer, GLint drawbuffer, GLfloat depth, GLint stencil); +typedef const GLubyte *(GL_APIENTRYP PFNGLGETSTRINGIPROC) (GLenum name, GLuint index); +typedef void (GL_APIENTRYP PFNGLCOPYBUFFERSUBDATAPROC) (GLenum readTarget, GLenum writeTarget, GLintptr readOffset, GLintptr writeOffset, GLsizeiptr size); +typedef void (GL_APIENTRYP PFNGLGETUNIFORMINDICESPROC) (GLuint program, GLsizei uniformCount, const GLchar *const*uniformNames, GLuint *uniformIndices); +typedef void (GL_APIENTRYP PFNGLGETACTIVEUNIFORMSIVPROC) (GLuint program, GLsizei uniformCount, const GLuint *uniformIndices, GLenum pname, GLint *params); +typedef GLuint (GL_APIENTRYP PFNGLGETUNIFORMBLOCKINDEXPROC) (GLuint program, const GLchar *uniformBlockName); +typedef void (GL_APIENTRYP PFNGLGETACTIVEUNIFORMBLOCKIVPROC) (GLuint program, GLuint uniformBlockIndex, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETACTIVEUNIFORMBLOCKNAMEPROC) (GLuint program, GLuint uniformBlockIndex, GLsizei bufSize, GLsizei *length, GLchar *uniformBlockName); +typedef void (GL_APIENTRYP PFNGLUNIFORMBLOCKBINDINGPROC) (GLuint program, GLuint uniformBlockIndex, GLuint uniformBlockBinding); +typedef void (GL_APIENTRYP PFNGLDRAWARRAYSINSTANCEDPROC) (GLenum mode, GLint first, GLsizei count, GLsizei instancecount); +typedef void (GL_APIENTRYP PFNGLDRAWELEMENTSINSTANCEDPROC) (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount); +typedef GLsync (GL_APIENTRYP PFNGLFENCESYNCPROC) (GLenum condition, GLbitfield flags); +typedef GLboolean (GL_APIENTRYP PFNGLISSYNCPROC) (GLsync sync); +typedef void (GL_APIENTRYP PFNGLDELETESYNCPROC) (GLsync sync); +typedef GLenum (GL_APIENTRYP PFNGLCLIENTWAITSYNCPROC) (GLsync sync, GLbitfield flags, GLuint64 timeout); +typedef void (GL_APIENTRYP PFNGLWAITSYNCPROC) (GLsync sync, GLbitfield flags, GLuint64 timeout); +typedef void (GL_APIENTRYP PFNGLGETINTEGER64VPROC) (GLenum pname, GLint64 *data); +typedef void (GL_APIENTRYP PFNGLGETSYNCIVPROC) (GLsync sync, GLenum pname, GLsizei bufSize, GLsizei *length, GLint *values); +typedef void (GL_APIENTRYP PFNGLGETINTEGER64I_VPROC) (GLenum target, GLuint index, GLint64 *data); +typedef void (GL_APIENTRYP PFNGLGETBUFFERPARAMETERI64VPROC) (GLenum target, GLenum pname, GLint64 *params); +typedef void (GL_APIENTRYP PFNGLGENSAMPLERSPROC) (GLsizei count, GLuint *samplers); +typedef void (GL_APIENTRYP PFNGLDELETESAMPLERSPROC) (GLsizei count, const GLuint *samplers); +typedef GLboolean (GL_APIENTRYP PFNGLISSAMPLERPROC) (GLuint sampler); +typedef void (GL_APIENTRYP PFNGLBINDSAMPLERPROC) (GLuint unit, GLuint sampler); +typedef void (GL_APIENTRYP PFNGLSAMPLERPARAMETERIPROC) (GLuint sampler, GLenum pname, GLint param); +typedef void (GL_APIENTRYP PFNGLSAMPLERPARAMETERIVPROC) (GLuint 
sampler, GLenum pname, const GLint *param); +typedef void (GL_APIENTRYP PFNGLSAMPLERPARAMETERFPROC) (GLuint sampler, GLenum pname, GLfloat param); +typedef void (GL_APIENTRYP PFNGLSAMPLERPARAMETERFVPROC) (GLuint sampler, GLenum pname, const GLfloat *param); +typedef void (GL_APIENTRYP PFNGLGETSAMPLERPARAMETERIVPROC) (GLuint sampler, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETSAMPLERPARAMETERFVPROC) (GLuint sampler, GLenum pname, GLfloat *params); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBDIVISORPROC) (GLuint index, GLuint divisor); +typedef void (GL_APIENTRYP PFNGLBINDTRANSFORMFEEDBACKPROC) (GLenum target, GLuint id); +typedef void (GL_APIENTRYP PFNGLDELETETRANSFORMFEEDBACKSPROC) (GLsizei n, const GLuint *ids); +typedef void (GL_APIENTRYP PFNGLGENTRANSFORMFEEDBACKSPROC) (GLsizei n, GLuint *ids); +typedef GLboolean (GL_APIENTRYP PFNGLISTRANSFORMFEEDBACKPROC) (GLuint id); +typedef void (GL_APIENTRYP PFNGLPAUSETRANSFORMFEEDBACKPROC) (void); +typedef void (GL_APIENTRYP PFNGLRESUMETRANSFORMFEEDBACKPROC) (void); +typedef void (GL_APIENTRYP PFNGLGETPROGRAMBINARYPROC) (GLuint program, GLsizei bufSize, GLsizei *length, GLenum *binaryFormat, void *binary); +typedef void (GL_APIENTRYP PFNGLPROGRAMBINARYPROC) (GLuint program, GLenum binaryFormat, const void *binary, GLsizei length); +typedef void (GL_APIENTRYP PFNGLPROGRAMPARAMETERIPROC) (GLuint program, GLenum pname, GLint value); +typedef void (GL_APIENTRYP PFNGLINVALIDATEFRAMEBUFFERPROC) (GLenum target, GLsizei numAttachments, const GLenum *attachments); +typedef void (GL_APIENTRYP PFNGLINVALIDATESUBFRAMEBUFFERPROC) (GLenum target, GLsizei numAttachments, const GLenum *attachments, GLint x, GLint y, GLsizei width, GLsizei height); +typedef void (GL_APIENTRYP PFNGLTEXSTORAGE2DPROC) (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height); +typedef void (GL_APIENTRYP PFNGLTEXSTORAGE3DPROC) (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth); +typedef void (GL_APIENTRYP PFNGLGETINTERNALFORMATIVPROC) (GLenum target, GLenum internalformat, GLenum pname, GLsizei bufSize, GLint *params); +#if GL_GLES_PROTOTYPES +GL_APICALL void GL_APIENTRY glReadBuffer (GLenum src); +GL_APICALL void GL_APIENTRY glDrawRangeElements (GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum type, const void *indices); +GL_APICALL void GL_APIENTRY glTexImage3D (GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const void *pixels); +GL_APICALL void GL_APIENTRY glTexSubImage3D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const void *pixels); +GL_APICALL void GL_APIENTRY glCopyTexSubImage3D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLint x, GLint y, GLsizei width, GLsizei height); +GL_APICALL void GL_APIENTRY glCompressedTexImage3D (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLsizei imageSize, const void *data); +GL_APICALL void GL_APIENTRY glCompressedTexSubImage3D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLsizei imageSize, const void *data); +GL_APICALL void GL_APIENTRY glGenQueries (GLsizei n, GLuint *ids); +GL_APICALL void GL_APIENTRY glDeleteQueries (GLsizei n, const GLuint *ids); 
+GL_APICALL GLboolean GL_APIENTRY glIsQuery (GLuint id); +GL_APICALL void GL_APIENTRY glBeginQuery (GLenum target, GLuint id); +GL_APICALL void GL_APIENTRY glEndQuery (GLenum target); +GL_APICALL void GL_APIENTRY glGetQueryiv (GLenum target, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetQueryObjectuiv (GLuint id, GLenum pname, GLuint *params); +GL_APICALL GLboolean GL_APIENTRY glUnmapBuffer (GLenum target); +GL_APICALL void GL_APIENTRY glGetBufferPointerv (GLenum target, GLenum pname, void **params); +GL_APICALL void GL_APIENTRY glDrawBuffers (GLsizei n, const GLenum *bufs); +GL_APICALL void GL_APIENTRY glUniformMatrix2x3fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniformMatrix3x2fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniformMatrix2x4fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniformMatrix4x2fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniformMatrix3x4fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniformMatrix4x3fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glBlitFramebuffer (GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter); +GL_APICALL void GL_APIENTRY glRenderbufferStorageMultisample (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height); +GL_APICALL void GL_APIENTRY glFramebufferTextureLayer (GLenum target, GLenum attachment, GLuint texture, GLint level, GLint layer); +GL_APICALL void *GL_APIENTRY glMapBufferRange (GLenum target, GLintptr offset, GLsizeiptr length, GLbitfield access); +GL_APICALL void GL_APIENTRY glFlushMappedBufferRange (GLenum target, GLintptr offset, GLsizeiptr length); +GL_APICALL void GL_APIENTRY glBindVertexArray (GLuint array); +GL_APICALL void GL_APIENTRY glDeleteVertexArrays (GLsizei n, const GLuint *arrays); +GL_APICALL void GL_APIENTRY glGenVertexArrays (GLsizei n, GLuint *arrays); +GL_APICALL GLboolean GL_APIENTRY glIsVertexArray (GLuint array); +GL_APICALL void GL_APIENTRY glGetIntegeri_v (GLenum target, GLuint index, GLint *data); +GL_APICALL void GL_APIENTRY glBeginTransformFeedback (GLenum primitiveMode); +GL_APICALL void GL_APIENTRY glEndTransformFeedback (void); +GL_APICALL void GL_APIENTRY glBindBufferRange (GLenum target, GLuint index, GLuint buffer, GLintptr offset, GLsizeiptr size); +GL_APICALL void GL_APIENTRY glBindBufferBase (GLenum target, GLuint index, GLuint buffer); +GL_APICALL void GL_APIENTRY glTransformFeedbackVaryings (GLuint program, GLsizei count, const GLchar *const*varyings, GLenum bufferMode); +GL_APICALL void GL_APIENTRY glGetTransformFeedbackVarying (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLsizei *size, GLenum *type, GLchar *name); +GL_APICALL void GL_APIENTRY glVertexAttribIPointer (GLuint index, GLint size, GLenum type, GLsizei stride, const void *pointer); +GL_APICALL void GL_APIENTRY glGetVertexAttribIiv (GLuint index, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetVertexAttribIuiv (GLuint index, GLenum pname, GLuint *params); +GL_APICALL void GL_APIENTRY glVertexAttribI4i (GLuint index, GLint x, GLint y, GLint z, GLint w); +GL_APICALL void GL_APIENTRY 
glVertexAttribI4ui (GLuint index, GLuint x, GLuint y, GLuint z, GLuint w); +GL_APICALL void GL_APIENTRY glVertexAttribI4iv (GLuint index, const GLint *v); +GL_APICALL void GL_APIENTRY glVertexAttribI4uiv (GLuint index, const GLuint *v); +GL_APICALL void GL_APIENTRY glGetUniformuiv (GLuint program, GLint location, GLuint *params); +GL_APICALL GLint GL_APIENTRY glGetFragDataLocation (GLuint program, const GLchar *name); +GL_APICALL void GL_APIENTRY glUniform1ui (GLint location, GLuint v0); +GL_APICALL void GL_APIENTRY glUniform2ui (GLint location, GLuint v0, GLuint v1); +GL_APICALL void GL_APIENTRY glUniform3ui (GLint location, GLuint v0, GLuint v1, GLuint v2); +GL_APICALL void GL_APIENTRY glUniform4ui (GLint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3); +GL_APICALL void GL_APIENTRY glUniform1uiv (GLint location, GLsizei count, const GLuint *value); +GL_APICALL void GL_APIENTRY glUniform2uiv (GLint location, GLsizei count, const GLuint *value); +GL_APICALL void GL_APIENTRY glUniform3uiv (GLint location, GLsizei count, const GLuint *value); +GL_APICALL void GL_APIENTRY glUniform4uiv (GLint location, GLsizei count, const GLuint *value); +GL_APICALL void GL_APIENTRY glClearBufferiv (GLenum buffer, GLint drawbuffer, const GLint *value); +GL_APICALL void GL_APIENTRY glClearBufferuiv (GLenum buffer, GLint drawbuffer, const GLuint *value); +GL_APICALL void GL_APIENTRY glClearBufferfv (GLenum buffer, GLint drawbuffer, const GLfloat *value); +GL_APICALL void GL_APIENTRY glClearBufferfi (GLenum buffer, GLint drawbuffer, GLfloat depth, GLint stencil); +GL_APICALL const GLubyte *GL_APIENTRY glGetStringi (GLenum name, GLuint index); +GL_APICALL void GL_APIENTRY glCopyBufferSubData (GLenum readTarget, GLenum writeTarget, GLintptr readOffset, GLintptr writeOffset, GLsizeiptr size); +GL_APICALL void GL_APIENTRY glGetUniformIndices (GLuint program, GLsizei uniformCount, const GLchar *const*uniformNames, GLuint *uniformIndices); +GL_APICALL void GL_APIENTRY glGetActiveUniformsiv (GLuint program, GLsizei uniformCount, const GLuint *uniformIndices, GLenum pname, GLint *params); +GL_APICALL GLuint GL_APIENTRY glGetUniformBlockIndex (GLuint program, const GLchar *uniformBlockName); +GL_APICALL void GL_APIENTRY glGetActiveUniformBlockiv (GLuint program, GLuint uniformBlockIndex, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetActiveUniformBlockName (GLuint program, GLuint uniformBlockIndex, GLsizei bufSize, GLsizei *length, GLchar *uniformBlockName); +GL_APICALL void GL_APIENTRY glUniformBlockBinding (GLuint program, GLuint uniformBlockIndex, GLuint uniformBlockBinding); +GL_APICALL void GL_APIENTRY glDrawArraysInstanced (GLenum mode, GLint first, GLsizei count, GLsizei instancecount); +GL_APICALL void GL_APIENTRY glDrawElementsInstanced (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount); +GL_APICALL GLsync GL_APIENTRY glFenceSync (GLenum condition, GLbitfield flags); +GL_APICALL GLboolean GL_APIENTRY glIsSync (GLsync sync); +GL_APICALL void GL_APIENTRY glDeleteSync (GLsync sync); +GL_APICALL GLenum GL_APIENTRY glClientWaitSync (GLsync sync, GLbitfield flags, GLuint64 timeout); +GL_APICALL void GL_APIENTRY glWaitSync (GLsync sync, GLbitfield flags, GLuint64 timeout); +GL_APICALL void GL_APIENTRY glGetInteger64v (GLenum pname, GLint64 *data); +GL_APICALL void GL_APIENTRY glGetSynciv (GLsync sync, GLenum pname, GLsizei bufSize, GLsizei *length, GLint *values); +GL_APICALL void GL_APIENTRY glGetInteger64i_v (GLenum target, GLuint index, GLint64 *data); 
+GL_APICALL void GL_APIENTRY glGetBufferParameteri64v (GLenum target, GLenum pname, GLint64 *params); +GL_APICALL void GL_APIENTRY glGenSamplers (GLsizei count, GLuint *samplers); +GL_APICALL void GL_APIENTRY glDeleteSamplers (GLsizei count, const GLuint *samplers); +GL_APICALL GLboolean GL_APIENTRY glIsSampler (GLuint sampler); +GL_APICALL void GL_APIENTRY glBindSampler (GLuint unit, GLuint sampler); +GL_APICALL void GL_APIENTRY glSamplerParameteri (GLuint sampler, GLenum pname, GLint param); +GL_APICALL void GL_APIENTRY glSamplerParameteriv (GLuint sampler, GLenum pname, const GLint *param); +GL_APICALL void GL_APIENTRY glSamplerParameterf (GLuint sampler, GLenum pname, GLfloat param); +GL_APICALL void GL_APIENTRY glSamplerParameterfv (GLuint sampler, GLenum pname, const GLfloat *param); +GL_APICALL void GL_APIENTRY glGetSamplerParameteriv (GLuint sampler, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetSamplerParameterfv (GLuint sampler, GLenum pname, GLfloat *params); +GL_APICALL void GL_APIENTRY glVertexAttribDivisor (GLuint index, GLuint divisor); +GL_APICALL void GL_APIENTRY glBindTransformFeedback (GLenum target, GLuint id); +GL_APICALL void GL_APIENTRY glDeleteTransformFeedbacks (GLsizei n, const GLuint *ids); +GL_APICALL void GL_APIENTRY glGenTransformFeedbacks (GLsizei n, GLuint *ids); +GL_APICALL GLboolean GL_APIENTRY glIsTransformFeedback (GLuint id); +GL_APICALL void GL_APIENTRY glPauseTransformFeedback (void); +GL_APICALL void GL_APIENTRY glResumeTransformFeedback (void); +GL_APICALL void GL_APIENTRY glGetProgramBinary (GLuint program, GLsizei bufSize, GLsizei *length, GLenum *binaryFormat, void *binary); +GL_APICALL void GL_APIENTRY glProgramBinary (GLuint program, GLenum binaryFormat, const void *binary, GLsizei length); +GL_APICALL void GL_APIENTRY glProgramParameteri (GLuint program, GLenum pname, GLint value); +GL_APICALL void GL_APIENTRY glInvalidateFramebuffer (GLenum target, GLsizei numAttachments, const GLenum *attachments); +GL_APICALL void GL_APIENTRY glInvalidateSubFramebuffer (GLenum target, GLsizei numAttachments, const GLenum *attachments, GLint x, GLint y, GLsizei width, GLsizei height); +GL_APICALL void GL_APIENTRY glTexStorage2D (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height); +GL_APICALL void GL_APIENTRY glTexStorage3D (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth); +GL_APICALL void GL_APIENTRY glGetInternalformativ (GLenum target, GLenum internalformat, GLenum pname, GLsizei bufSize, GLint *params); +#endif +#endif /* GL_ES_VERSION_3_0 */ + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/sources/pvrsdk/Include/GLES3/gl31.h b/sources/pvrsdk/Include/GLES3/gl31.h new file mode 100755 index 00000000..64409bba --- /dev/null +++ b/sources/pvrsdk/Include/GLES3/gl31.h @@ -0,0 +1,1528 @@ +#ifndef __gl31_h_ +#define __gl31_h_ 1 + +#ifdef __cplusplus +extern "C" { +#endif + +/* +** Copyright (c) 2013-2016 The Khronos Group Inc. 
+** +** Permission is hereby granted, free of charge, to any person obtaining a +** copy of this software and/or associated documentation files (the +** "Materials"), to deal in the Materials without restriction, including +** without limitation the rights to use, copy, modify, merge, publish, +** distribute, sublicense, and/or sell copies of the Materials, and to +** permit persons to whom the Materials are furnished to do so, subject to +** the following conditions: +** +** The above copyright notice and this permission notice shall be included +** in all copies or substantial portions of the Materials. +** +** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +** MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS. +*/ +/* +** This header is generated from the Khronos OpenGL / OpenGL ES XML +** API Registry. The current version of the Registry, generator scripts +** used to make the header, and the header can be found at +** http://www.opengl.org/registry/ +** +** Khronos $Revision$ on $Date$ +*/ + +#include <GLES3/gl3platform.h> + +#ifndef GL_APIENTRYP +#define GL_APIENTRYP GL_APIENTRY* +#endif + +#ifndef GL_GLES_PROTOTYPES +#define GL_GLES_PROTOTYPES 1 +#endif + +/* Generated on date 20161024 */ + +/* Generated C header for: + * API: gles2 + * Profile: common + * Versions considered: 2\.[0-9]|3\.[01] + * Versions emitted: .* + * Default extensions included: None + * Additional extensions included: _nomatch_^ + * Extensions removed: _nomatch_^ + */ + +#ifndef GL_ES_VERSION_2_0 +#define GL_ES_VERSION_2_0 1 +#include <KHR/khrplatform.h> +typedef khronos_int8_t GLbyte; +typedef khronos_float_t GLclampf; +typedef khronos_int32_t GLfixed; +typedef short GLshort; +typedef unsigned short GLushort; +typedef void GLvoid; +typedef struct __GLsync *GLsync; +typedef khronos_int64_t GLint64; +typedef khronos_uint64_t GLuint64; +typedef unsigned int GLenum; +typedef unsigned int GLuint; +typedef char GLchar; +typedef khronos_float_t GLfloat; +typedef khronos_ssize_t GLsizeiptr; +typedef khronos_intptr_t GLintptr; +typedef unsigned int GLbitfield; +typedef int GLint; +typedef unsigned char GLboolean; +typedef int GLsizei; +typedef khronos_uint8_t GLubyte; +#define GL_DEPTH_BUFFER_BIT 0x00000100 +#define GL_STENCIL_BUFFER_BIT 0x00000400 +#define GL_COLOR_BUFFER_BIT 0x00004000 +#define GL_FALSE 0 +#define GL_TRUE 1 +#define GL_POINTS 0x0000 +#define GL_LINES 0x0001 +#define GL_LINE_LOOP 0x0002 +#define GL_LINE_STRIP 0x0003 +#define GL_TRIANGLES 0x0004 +#define GL_TRIANGLE_STRIP 0x0005 +#define GL_TRIANGLE_FAN 0x0006 +#define GL_ZERO 0 +#define GL_ONE 1 +#define GL_SRC_COLOR 0x0300 +#define GL_ONE_MINUS_SRC_COLOR 0x0301 +#define GL_SRC_ALPHA 0x0302 +#define GL_ONE_MINUS_SRC_ALPHA 0x0303 +#define GL_DST_ALPHA 0x0304 +#define GL_ONE_MINUS_DST_ALPHA 0x0305 +#define GL_DST_COLOR 0x0306 +#define GL_ONE_MINUS_DST_COLOR 0x0307 +#define GL_SRC_ALPHA_SATURATE 0x0308 +#define GL_FUNC_ADD 0x8006 +#define GL_BLEND_EQUATION 0x8009 +#define GL_BLEND_EQUATION_RGB 0x8009 +#define GL_BLEND_EQUATION_ALPHA 0x883D +#define GL_FUNC_SUBTRACT 0x800A +#define GL_FUNC_REVERSE_SUBTRACT 0x800B +#define GL_BLEND_DST_RGB 0x80C8 +#define GL_BLEND_SRC_RGB 0x80C9 +#define GL_BLEND_DST_ALPHA 0x80CA +#define 
GL_BLEND_SRC_ALPHA 0x80CB +#define GL_CONSTANT_COLOR 0x8001 +#define GL_ONE_MINUS_CONSTANT_COLOR 0x8002 +#define GL_CONSTANT_ALPHA 0x8003 +#define GL_ONE_MINUS_CONSTANT_ALPHA 0x8004 +#define GL_BLEND_COLOR 0x8005 +#define GL_ARRAY_BUFFER 0x8892 +#define GL_ELEMENT_ARRAY_BUFFER 0x8893 +#define GL_ARRAY_BUFFER_BINDING 0x8894 +#define GL_ELEMENT_ARRAY_BUFFER_BINDING 0x8895 +#define GL_STREAM_DRAW 0x88E0 +#define GL_STATIC_DRAW 0x88E4 +#define GL_DYNAMIC_DRAW 0x88E8 +#define GL_BUFFER_SIZE 0x8764 +#define GL_BUFFER_USAGE 0x8765 +#define GL_CURRENT_VERTEX_ATTRIB 0x8626 +#define GL_FRONT 0x0404 +#define GL_BACK 0x0405 +#define GL_FRONT_AND_BACK 0x0408 +#define GL_TEXTURE_2D 0x0DE1 +#define GL_CULL_FACE 0x0B44 +#define GL_BLEND 0x0BE2 +#define GL_DITHER 0x0BD0 +#define GL_STENCIL_TEST 0x0B90 +#define GL_DEPTH_TEST 0x0B71 +#define GL_SCISSOR_TEST 0x0C11 +#define GL_POLYGON_OFFSET_FILL 0x8037 +#define GL_SAMPLE_ALPHA_TO_COVERAGE 0x809E +#define GL_SAMPLE_COVERAGE 0x80A0 +#define GL_NO_ERROR 0 +#define GL_INVALID_ENUM 0x0500 +#define GL_INVALID_VALUE 0x0501 +#define GL_INVALID_OPERATION 0x0502 +#define GL_OUT_OF_MEMORY 0x0505 +#define GL_CW 0x0900 +#define GL_CCW 0x0901 +#define GL_LINE_WIDTH 0x0B21 +#define GL_ALIASED_POINT_SIZE_RANGE 0x846D +#define GL_ALIASED_LINE_WIDTH_RANGE 0x846E +#define GL_CULL_FACE_MODE 0x0B45 +#define GL_FRONT_FACE 0x0B46 +#define GL_DEPTH_RANGE 0x0B70 +#define GL_DEPTH_WRITEMASK 0x0B72 +#define GL_DEPTH_CLEAR_VALUE 0x0B73 +#define GL_DEPTH_FUNC 0x0B74 +#define GL_STENCIL_CLEAR_VALUE 0x0B91 +#define GL_STENCIL_FUNC 0x0B92 +#define GL_STENCIL_FAIL 0x0B94 +#define GL_STENCIL_PASS_DEPTH_FAIL 0x0B95 +#define GL_STENCIL_PASS_DEPTH_PASS 0x0B96 +#define GL_STENCIL_REF 0x0B97 +#define GL_STENCIL_VALUE_MASK 0x0B93 +#define GL_STENCIL_WRITEMASK 0x0B98 +#define GL_STENCIL_BACK_FUNC 0x8800 +#define GL_STENCIL_BACK_FAIL 0x8801 +#define GL_STENCIL_BACK_PASS_DEPTH_FAIL 0x8802 +#define GL_STENCIL_BACK_PASS_DEPTH_PASS 0x8803 +#define GL_STENCIL_BACK_REF 0x8CA3 +#define GL_STENCIL_BACK_VALUE_MASK 0x8CA4 +#define GL_STENCIL_BACK_WRITEMASK 0x8CA5 +#define GL_VIEWPORT 0x0BA2 +#define GL_SCISSOR_BOX 0x0C10 +#define GL_COLOR_CLEAR_VALUE 0x0C22 +#define GL_COLOR_WRITEMASK 0x0C23 +#define GL_UNPACK_ALIGNMENT 0x0CF5 +#define GL_PACK_ALIGNMENT 0x0D05 +#define GL_MAX_TEXTURE_SIZE 0x0D33 +#define GL_MAX_VIEWPORT_DIMS 0x0D3A +#define GL_SUBPIXEL_BITS 0x0D50 +#define GL_RED_BITS 0x0D52 +#define GL_GREEN_BITS 0x0D53 +#define GL_BLUE_BITS 0x0D54 +#define GL_ALPHA_BITS 0x0D55 +#define GL_DEPTH_BITS 0x0D56 +#define GL_STENCIL_BITS 0x0D57 +#define GL_POLYGON_OFFSET_UNITS 0x2A00 +#define GL_POLYGON_OFFSET_FACTOR 0x8038 +#define GL_TEXTURE_BINDING_2D 0x8069 +#define GL_SAMPLE_BUFFERS 0x80A8 +#define GL_SAMPLES 0x80A9 +#define GL_SAMPLE_COVERAGE_VALUE 0x80AA +#define GL_SAMPLE_COVERAGE_INVERT 0x80AB +#define GL_NUM_COMPRESSED_TEXTURE_FORMATS 0x86A2 +#define GL_COMPRESSED_TEXTURE_FORMATS 0x86A3 +#define GL_DONT_CARE 0x1100 +#define GL_FASTEST 0x1101 +#define GL_NICEST 0x1102 +#define GL_GENERATE_MIPMAP_HINT 0x8192 +#define GL_BYTE 0x1400 +#define GL_UNSIGNED_BYTE 0x1401 +#define GL_SHORT 0x1402 +#define GL_UNSIGNED_SHORT 0x1403 +#define GL_INT 0x1404 +#define GL_UNSIGNED_INT 0x1405 +#define GL_FLOAT 0x1406 +#define GL_FIXED 0x140C +#define GL_DEPTH_COMPONENT 0x1902 +#define GL_ALPHA 0x1906 +#define GL_RGB 0x1907 +#define GL_RGBA 0x1908 +#define GL_LUMINANCE 0x1909 +#define GL_LUMINANCE_ALPHA 0x190A +#define GL_UNSIGNED_SHORT_4_4_4_4 0x8033 +#define GL_UNSIGNED_SHORT_5_5_5_1 0x8034 +#define 
GL_UNSIGNED_SHORT_5_6_5 0x8363 +#define GL_FRAGMENT_SHADER 0x8B30 +#define GL_VERTEX_SHADER 0x8B31 +#define GL_MAX_VERTEX_ATTRIBS 0x8869 +#define GL_MAX_VERTEX_UNIFORM_VECTORS 0x8DFB +#define GL_MAX_VARYING_VECTORS 0x8DFC +#define GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS 0x8B4D +#define GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS 0x8B4C +#define GL_MAX_TEXTURE_IMAGE_UNITS 0x8872 +#define GL_MAX_FRAGMENT_UNIFORM_VECTORS 0x8DFD +#define GL_SHADER_TYPE 0x8B4F +#define GL_DELETE_STATUS 0x8B80 +#define GL_LINK_STATUS 0x8B82 +#define GL_VALIDATE_STATUS 0x8B83 +#define GL_ATTACHED_SHADERS 0x8B85 +#define GL_ACTIVE_UNIFORMS 0x8B86 +#define GL_ACTIVE_UNIFORM_MAX_LENGTH 0x8B87 +#define GL_ACTIVE_ATTRIBUTES 0x8B89 +#define GL_ACTIVE_ATTRIBUTE_MAX_LENGTH 0x8B8A +#define GL_SHADING_LANGUAGE_VERSION 0x8B8C +#define GL_CURRENT_PROGRAM 0x8B8D +#define GL_NEVER 0x0200 +#define GL_LESS 0x0201 +#define GL_EQUAL 0x0202 +#define GL_LEQUAL 0x0203 +#define GL_GREATER 0x0204 +#define GL_NOTEQUAL 0x0205 +#define GL_GEQUAL 0x0206 +#define GL_ALWAYS 0x0207 +#define GL_KEEP 0x1E00 +#define GL_REPLACE 0x1E01 +#define GL_INCR 0x1E02 +#define GL_DECR 0x1E03 +#define GL_INVERT 0x150A +#define GL_INCR_WRAP 0x8507 +#define GL_DECR_WRAP 0x8508 +#define GL_VENDOR 0x1F00 +#define GL_RENDERER 0x1F01 +#define GL_VERSION 0x1F02 +#define GL_EXTENSIONS 0x1F03 +#define GL_NEAREST 0x2600 +#define GL_LINEAR 0x2601 +#define GL_NEAREST_MIPMAP_NEAREST 0x2700 +#define GL_LINEAR_MIPMAP_NEAREST 0x2701 +#define GL_NEAREST_MIPMAP_LINEAR 0x2702 +#define GL_LINEAR_MIPMAP_LINEAR 0x2703 +#define GL_TEXTURE_MAG_FILTER 0x2800 +#define GL_TEXTURE_MIN_FILTER 0x2801 +#define GL_TEXTURE_WRAP_S 0x2802 +#define GL_TEXTURE_WRAP_T 0x2803 +#define GL_TEXTURE 0x1702 +#define GL_TEXTURE_CUBE_MAP 0x8513 +#define GL_TEXTURE_BINDING_CUBE_MAP 0x8514 +#define GL_TEXTURE_CUBE_MAP_POSITIVE_X 0x8515 +#define GL_TEXTURE_CUBE_MAP_NEGATIVE_X 0x8516 +#define GL_TEXTURE_CUBE_MAP_POSITIVE_Y 0x8517 +#define GL_TEXTURE_CUBE_MAP_NEGATIVE_Y 0x8518 +#define GL_TEXTURE_CUBE_MAP_POSITIVE_Z 0x8519 +#define GL_TEXTURE_CUBE_MAP_NEGATIVE_Z 0x851A +#define GL_MAX_CUBE_MAP_TEXTURE_SIZE 0x851C +#define GL_TEXTURE0 0x84C0 +#define GL_TEXTURE1 0x84C1 +#define GL_TEXTURE2 0x84C2 +#define GL_TEXTURE3 0x84C3 +#define GL_TEXTURE4 0x84C4 +#define GL_TEXTURE5 0x84C5 +#define GL_TEXTURE6 0x84C6 +#define GL_TEXTURE7 0x84C7 +#define GL_TEXTURE8 0x84C8 +#define GL_TEXTURE9 0x84C9 +#define GL_TEXTURE10 0x84CA +#define GL_TEXTURE11 0x84CB +#define GL_TEXTURE12 0x84CC +#define GL_TEXTURE13 0x84CD +#define GL_TEXTURE14 0x84CE +#define GL_TEXTURE15 0x84CF +#define GL_TEXTURE16 0x84D0 +#define GL_TEXTURE17 0x84D1 +#define GL_TEXTURE18 0x84D2 +#define GL_TEXTURE19 0x84D3 +#define GL_TEXTURE20 0x84D4 +#define GL_TEXTURE21 0x84D5 +#define GL_TEXTURE22 0x84D6 +#define GL_TEXTURE23 0x84D7 +#define GL_TEXTURE24 0x84D8 +#define GL_TEXTURE25 0x84D9 +#define GL_TEXTURE26 0x84DA +#define GL_TEXTURE27 0x84DB +#define GL_TEXTURE28 0x84DC +#define GL_TEXTURE29 0x84DD +#define GL_TEXTURE30 0x84DE +#define GL_TEXTURE31 0x84DF +#define GL_ACTIVE_TEXTURE 0x84E0 +#define GL_REPEAT 0x2901 +#define GL_CLAMP_TO_EDGE 0x812F +#define GL_MIRRORED_REPEAT 0x8370 +#define GL_FLOAT_VEC2 0x8B50 +#define GL_FLOAT_VEC3 0x8B51 +#define GL_FLOAT_VEC4 0x8B52 +#define GL_INT_VEC2 0x8B53 +#define GL_INT_VEC3 0x8B54 +#define GL_INT_VEC4 0x8B55 +#define GL_BOOL 0x8B56 +#define GL_BOOL_VEC2 0x8B57 +#define GL_BOOL_VEC3 0x8B58 +#define GL_BOOL_VEC4 0x8B59 +#define GL_FLOAT_MAT2 0x8B5A +#define GL_FLOAT_MAT3 0x8B5B +#define GL_FLOAT_MAT4 0x8B5C +#define 
GL_SAMPLER_2D 0x8B5E +#define GL_SAMPLER_CUBE 0x8B60 +#define GL_VERTEX_ATTRIB_ARRAY_ENABLED 0x8622 +#define GL_VERTEX_ATTRIB_ARRAY_SIZE 0x8623 +#define GL_VERTEX_ATTRIB_ARRAY_STRIDE 0x8624 +#define GL_VERTEX_ATTRIB_ARRAY_TYPE 0x8625 +#define GL_VERTEX_ATTRIB_ARRAY_NORMALIZED 0x886A +#define GL_VERTEX_ATTRIB_ARRAY_POINTER 0x8645 +#define GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING 0x889F +#define GL_IMPLEMENTATION_COLOR_READ_TYPE 0x8B9A +#define GL_IMPLEMENTATION_COLOR_READ_FORMAT 0x8B9B +#define GL_COMPILE_STATUS 0x8B81 +#define GL_INFO_LOG_LENGTH 0x8B84 +#define GL_SHADER_SOURCE_LENGTH 0x8B88 +#define GL_SHADER_COMPILER 0x8DFA +#define GL_SHADER_BINARY_FORMATS 0x8DF8 +#define GL_NUM_SHADER_BINARY_FORMATS 0x8DF9 +#define GL_LOW_FLOAT 0x8DF0 +#define GL_MEDIUM_FLOAT 0x8DF1 +#define GL_HIGH_FLOAT 0x8DF2 +#define GL_LOW_INT 0x8DF3 +#define GL_MEDIUM_INT 0x8DF4 +#define GL_HIGH_INT 0x8DF5 +#define GL_FRAMEBUFFER 0x8D40 +#define GL_RENDERBUFFER 0x8D41 +#define GL_RGBA4 0x8056 +#define GL_RGB5_A1 0x8057 +#define GL_RGB565 0x8D62 +#define GL_DEPTH_COMPONENT16 0x81A5 +#define GL_STENCIL_INDEX8 0x8D48 +#define GL_RENDERBUFFER_WIDTH 0x8D42 +#define GL_RENDERBUFFER_HEIGHT 0x8D43 +#define GL_RENDERBUFFER_INTERNAL_FORMAT 0x8D44 +#define GL_RENDERBUFFER_RED_SIZE 0x8D50 +#define GL_RENDERBUFFER_GREEN_SIZE 0x8D51 +#define GL_RENDERBUFFER_BLUE_SIZE 0x8D52 +#define GL_RENDERBUFFER_ALPHA_SIZE 0x8D53 +#define GL_RENDERBUFFER_DEPTH_SIZE 0x8D54 +#define GL_RENDERBUFFER_STENCIL_SIZE 0x8D55 +#define GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE 0x8CD0 +#define GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME 0x8CD1 +#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL 0x8CD2 +#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE 0x8CD3 +#define GL_COLOR_ATTACHMENT0 0x8CE0 +#define GL_DEPTH_ATTACHMENT 0x8D00 +#define GL_STENCIL_ATTACHMENT 0x8D20 +#define GL_NONE 0 +#define GL_FRAMEBUFFER_COMPLETE 0x8CD5 +#define GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT 0x8CD6 +#define GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT 0x8CD7 +#define GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS 0x8CD9 +#define GL_FRAMEBUFFER_UNSUPPORTED 0x8CDD +#define GL_FRAMEBUFFER_BINDING 0x8CA6 +#define GL_RENDERBUFFER_BINDING 0x8CA7 +#define GL_MAX_RENDERBUFFER_SIZE 0x84E8 +#define GL_INVALID_FRAMEBUFFER_OPERATION 0x0506 +typedef void (GL_APIENTRYP PFNGLACTIVETEXTUREPROC) (GLenum texture); +typedef void (GL_APIENTRYP PFNGLATTACHSHADERPROC) (GLuint program, GLuint shader); +typedef void (GL_APIENTRYP PFNGLBINDATTRIBLOCATIONPROC) (GLuint program, GLuint index, const GLchar *name); +typedef void (GL_APIENTRYP PFNGLBINDBUFFERPROC) (GLenum target, GLuint buffer); +typedef void (GL_APIENTRYP PFNGLBINDFRAMEBUFFERPROC) (GLenum target, GLuint framebuffer); +typedef void (GL_APIENTRYP PFNGLBINDRENDERBUFFERPROC) (GLenum target, GLuint renderbuffer); +typedef void (GL_APIENTRYP PFNGLBINDTEXTUREPROC) (GLenum target, GLuint texture); +typedef void (GL_APIENTRYP PFNGLBLENDCOLORPROC) (GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha); +typedef void (GL_APIENTRYP PFNGLBLENDEQUATIONPROC) (GLenum mode); +typedef void (GL_APIENTRYP PFNGLBLENDEQUATIONSEPARATEPROC) (GLenum modeRGB, GLenum modeAlpha); +typedef void (GL_APIENTRYP PFNGLBLENDFUNCPROC) (GLenum sfactor, GLenum dfactor); +typedef void (GL_APIENTRYP PFNGLBLENDFUNCSEPARATEPROC) (GLenum sfactorRGB, GLenum dfactorRGB, GLenum sfactorAlpha, GLenum dfactorAlpha); +typedef void (GL_APIENTRYP PFNGLBUFFERDATAPROC) (GLenum target, GLsizeiptr size, const void *data, GLenum usage); +typedef void (GL_APIENTRYP PFNGLBUFFERSUBDATAPROC) (GLenum target, 
GLintptr offset, GLsizeiptr size, const void *data); +typedef GLenum (GL_APIENTRYP PFNGLCHECKFRAMEBUFFERSTATUSPROC) (GLenum target); +typedef void (GL_APIENTRYP PFNGLCLEARPROC) (GLbitfield mask); +typedef void (GL_APIENTRYP PFNGLCLEARCOLORPROC) (GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha); +typedef void (GL_APIENTRYP PFNGLCLEARDEPTHFPROC) (GLfloat d); +typedef void (GL_APIENTRYP PFNGLCLEARSTENCILPROC) (GLint s); +typedef void (GL_APIENTRYP PFNGLCOLORMASKPROC) (GLboolean red, GLboolean green, GLboolean blue, GLboolean alpha); +typedef void (GL_APIENTRYP PFNGLCOMPILESHADERPROC) (GLuint shader); +typedef void (GL_APIENTRYP PFNGLCOMPRESSEDTEXIMAGE2DPROC) (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLsizei imageSize, const void *data); +typedef void (GL_APIENTRYP PFNGLCOMPRESSEDTEXSUBIMAGE2DPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, const void *data); +typedef void (GL_APIENTRYP PFNGLCOPYTEXIMAGE2DPROC) (GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLsizei height, GLint border); +typedef void (GL_APIENTRYP PFNGLCOPYTEXSUBIMAGE2DPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height); +typedef GLuint (GL_APIENTRYP PFNGLCREATEPROGRAMPROC) (void); +typedef GLuint (GL_APIENTRYP PFNGLCREATESHADERPROC) (GLenum type); +typedef void (GL_APIENTRYP PFNGLCULLFACEPROC) (GLenum mode); +typedef void (GL_APIENTRYP PFNGLDELETEBUFFERSPROC) (GLsizei n, const GLuint *buffers); +typedef void (GL_APIENTRYP PFNGLDELETEFRAMEBUFFERSPROC) (GLsizei n, const GLuint *framebuffers); +typedef void (GL_APIENTRYP PFNGLDELETEPROGRAMPROC) (GLuint program); +typedef void (GL_APIENTRYP PFNGLDELETERENDERBUFFERSPROC) (GLsizei n, const GLuint *renderbuffers); +typedef void (GL_APIENTRYP PFNGLDELETESHADERPROC) (GLuint shader); +typedef void (GL_APIENTRYP PFNGLDELETETEXTURESPROC) (GLsizei n, const GLuint *textures); +typedef void (GL_APIENTRYP PFNGLDEPTHFUNCPROC) (GLenum func); +typedef void (GL_APIENTRYP PFNGLDEPTHMASKPROC) (GLboolean flag); +typedef void (GL_APIENTRYP PFNGLDEPTHRANGEFPROC) (GLfloat n, GLfloat f); +typedef void (GL_APIENTRYP PFNGLDETACHSHADERPROC) (GLuint program, GLuint shader); +typedef void (GL_APIENTRYP PFNGLDISABLEPROC) (GLenum cap); +typedef void (GL_APIENTRYP PFNGLDISABLEVERTEXATTRIBARRAYPROC) (GLuint index); +typedef void (GL_APIENTRYP PFNGLDRAWARRAYSPROC) (GLenum mode, GLint first, GLsizei count); +typedef void (GL_APIENTRYP PFNGLDRAWELEMENTSPROC) (GLenum mode, GLsizei count, GLenum type, const void *indices); +typedef void (GL_APIENTRYP PFNGLENABLEPROC) (GLenum cap); +typedef void (GL_APIENTRYP PFNGLENABLEVERTEXATTRIBARRAYPROC) (GLuint index); +typedef void (GL_APIENTRYP PFNGLFINISHPROC) (void); +typedef void (GL_APIENTRYP PFNGLFLUSHPROC) (void); +typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERRENDERBUFFERPROC) (GLenum target, GLenum attachment, GLenum renderbuffertarget, GLuint renderbuffer); +typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERTEXTURE2DPROC) (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level); +typedef void (GL_APIENTRYP PFNGLFRONTFACEPROC) (GLenum mode); +typedef void (GL_APIENTRYP PFNGLGENBUFFERSPROC) (GLsizei n, GLuint *buffers); +typedef void (GL_APIENTRYP PFNGLGENERATEMIPMAPPROC) (GLenum target); +typedef void (GL_APIENTRYP PFNGLGENFRAMEBUFFERSPROC) (GLsizei n, GLuint *framebuffers); +typedef void (GL_APIENTRYP 
PFNGLGENRENDERBUFFERSPROC) (GLsizei n, GLuint *renderbuffers); +typedef void (GL_APIENTRYP PFNGLGENTEXTURESPROC) (GLsizei n, GLuint *textures); +typedef void (GL_APIENTRYP PFNGLGETACTIVEATTRIBPROC) (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLint *size, GLenum *type, GLchar *name); +typedef void (GL_APIENTRYP PFNGLGETACTIVEUNIFORMPROC) (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLint *size, GLenum *type, GLchar *name); +typedef void (GL_APIENTRYP PFNGLGETATTACHEDSHADERSPROC) (GLuint program, GLsizei maxCount, GLsizei *count, GLuint *shaders); +typedef GLint (GL_APIENTRYP PFNGLGETATTRIBLOCATIONPROC) (GLuint program, const GLchar *name); +typedef void (GL_APIENTRYP PFNGLGETBOOLEANVPROC) (GLenum pname, GLboolean *data); +typedef void (GL_APIENTRYP PFNGLGETBUFFERPARAMETERIVPROC) (GLenum target, GLenum pname, GLint *params); +typedef GLenum (GL_APIENTRYP PFNGLGETERRORPROC) (void); +typedef void (GL_APIENTRYP PFNGLGETFLOATVPROC) (GLenum pname, GLfloat *data); +typedef void (GL_APIENTRYP PFNGLGETFRAMEBUFFERATTACHMENTPARAMETERIVPROC) (GLenum target, GLenum attachment, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETINTEGERVPROC) (GLenum pname, GLint *data); +typedef void (GL_APIENTRYP PFNGLGETPROGRAMIVPROC) (GLuint program, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETPROGRAMINFOLOGPROC) (GLuint program, GLsizei bufSize, GLsizei *length, GLchar *infoLog); +typedef void (GL_APIENTRYP PFNGLGETRENDERBUFFERPARAMETERIVPROC) (GLenum target, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETSHADERIVPROC) (GLuint shader, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETSHADERINFOLOGPROC) (GLuint shader, GLsizei bufSize, GLsizei *length, GLchar *infoLog); +typedef void (GL_APIENTRYP PFNGLGETSHADERPRECISIONFORMATPROC) (GLenum shadertype, GLenum precisiontype, GLint *range, GLint *precision); +typedef void (GL_APIENTRYP PFNGLGETSHADERSOURCEPROC) (GLuint shader, GLsizei bufSize, GLsizei *length, GLchar *source); +typedef const GLubyte *(GL_APIENTRYP PFNGLGETSTRINGPROC) (GLenum name); +typedef void (GL_APIENTRYP PFNGLGETTEXPARAMETERFVPROC) (GLenum target, GLenum pname, GLfloat *params); +typedef void (GL_APIENTRYP PFNGLGETTEXPARAMETERIVPROC) (GLenum target, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETUNIFORMFVPROC) (GLuint program, GLint location, GLfloat *params); +typedef void (GL_APIENTRYP PFNGLGETUNIFORMIVPROC) (GLuint program, GLint location, GLint *params); +typedef GLint (GL_APIENTRYP PFNGLGETUNIFORMLOCATIONPROC) (GLuint program, const GLchar *name); +typedef void (GL_APIENTRYP PFNGLGETVERTEXATTRIBFVPROC) (GLuint index, GLenum pname, GLfloat *params); +typedef void (GL_APIENTRYP PFNGLGETVERTEXATTRIBIVPROC) (GLuint index, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETVERTEXATTRIBPOINTERVPROC) (GLuint index, GLenum pname, void **pointer); +typedef void (GL_APIENTRYP PFNGLHINTPROC) (GLenum target, GLenum mode); +typedef GLboolean (GL_APIENTRYP PFNGLISBUFFERPROC) (GLuint buffer); +typedef GLboolean (GL_APIENTRYP PFNGLISENABLEDPROC) (GLenum cap); +typedef GLboolean (GL_APIENTRYP PFNGLISFRAMEBUFFERPROC) (GLuint framebuffer); +typedef GLboolean (GL_APIENTRYP PFNGLISPROGRAMPROC) (GLuint program); +typedef GLboolean (GL_APIENTRYP PFNGLISRENDERBUFFERPROC) (GLuint renderbuffer); +typedef GLboolean (GL_APIENTRYP PFNGLISSHADERPROC) (GLuint shader); +typedef GLboolean (GL_APIENTRYP PFNGLISTEXTUREPROC) (GLuint texture); +typedef void (GL_APIENTRYP 
PFNGLLINEWIDTHPROC) (GLfloat width); +typedef void (GL_APIENTRYP PFNGLLINKPROGRAMPROC) (GLuint program); +typedef void (GL_APIENTRYP PFNGLPIXELSTOREIPROC) (GLenum pname, GLint param); +typedef void (GL_APIENTRYP PFNGLPOLYGONOFFSETPROC) (GLfloat factor, GLfloat units); +typedef void (GL_APIENTRYP PFNGLREADPIXELSPROC) (GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, void *pixels); +typedef void (GL_APIENTRYP PFNGLRELEASESHADERCOMPILERPROC) (void); +typedef void (GL_APIENTRYP PFNGLRENDERBUFFERSTORAGEPROC) (GLenum target, GLenum internalformat, GLsizei width, GLsizei height); +typedef void (GL_APIENTRYP PFNGLSAMPLECOVERAGEPROC) (GLfloat value, GLboolean invert); +typedef void (GL_APIENTRYP PFNGLSCISSORPROC) (GLint x, GLint y, GLsizei width, GLsizei height); +typedef void (GL_APIENTRYP PFNGLSHADERBINARYPROC) (GLsizei count, const GLuint *shaders, GLenum binaryformat, const void *binary, GLsizei length); +typedef void (GL_APIENTRYP PFNGLSHADERSOURCEPROC) (GLuint shader, GLsizei count, const GLchar *const*string, const GLint *length); +typedef void (GL_APIENTRYP PFNGLSTENCILFUNCPROC) (GLenum func, GLint ref, GLuint mask); +typedef void (GL_APIENTRYP PFNGLSTENCILFUNCSEPARATEPROC) (GLenum face, GLenum func, GLint ref, GLuint mask); +typedef void (GL_APIENTRYP PFNGLSTENCILMASKPROC) (GLuint mask); +typedef void (GL_APIENTRYP PFNGLSTENCILMASKSEPARATEPROC) (GLenum face, GLuint mask); +typedef void (GL_APIENTRYP PFNGLSTENCILOPPROC) (GLenum fail, GLenum zfail, GLenum zpass); +typedef void (GL_APIENTRYP PFNGLSTENCILOPSEPARATEPROC) (GLenum face, GLenum sfail, GLenum dpfail, GLenum dppass); +typedef void (GL_APIENTRYP PFNGLTEXIMAGE2DPROC) (GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, const void *pixels); +typedef void (GL_APIENTRYP PFNGLTEXPARAMETERFPROC) (GLenum target, GLenum pname, GLfloat param); +typedef void (GL_APIENTRYP PFNGLTEXPARAMETERFVPROC) (GLenum target, GLenum pname, const GLfloat *params); +typedef void (GL_APIENTRYP PFNGLTEXPARAMETERIPROC) (GLenum target, GLenum pname, GLint param); +typedef void (GL_APIENTRYP PFNGLTEXPARAMETERIVPROC) (GLenum target, GLenum pname, const GLint *params); +typedef void (GL_APIENTRYP PFNGLTEXSUBIMAGE2DPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, const void *pixels); +typedef void (GL_APIENTRYP PFNGLUNIFORM1FPROC) (GLint location, GLfloat v0); +typedef void (GL_APIENTRYP PFNGLUNIFORM1FVPROC) (GLint location, GLsizei count, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM1IPROC) (GLint location, GLint v0); +typedef void (GL_APIENTRYP PFNGLUNIFORM1IVPROC) (GLint location, GLsizei count, const GLint *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM2FPROC) (GLint location, GLfloat v0, GLfloat v1); +typedef void (GL_APIENTRYP PFNGLUNIFORM2FVPROC) (GLint location, GLsizei count, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM2IPROC) (GLint location, GLint v0, GLint v1); +typedef void (GL_APIENTRYP PFNGLUNIFORM2IVPROC) (GLint location, GLsizei count, const GLint *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM3FPROC) (GLint location, GLfloat v0, GLfloat v1, GLfloat v2); +typedef void (GL_APIENTRYP PFNGLUNIFORM3FVPROC) (GLint location, GLsizei count, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM3IPROC) (GLint location, GLint v0, GLint v1, GLint v2); +typedef void (GL_APIENTRYP PFNGLUNIFORM3IVPROC) (GLint location, GLsizei count, const 
GLint *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM4FPROC) (GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3); +typedef void (GL_APIENTRYP PFNGLUNIFORM4FVPROC) (GLint location, GLsizei count, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM4IPROC) (GLint location, GLint v0, GLint v1, GLint v2, GLint v3); +typedef void (GL_APIENTRYP PFNGLUNIFORM4IVPROC) (GLint location, GLsizei count, const GLint *value); +typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX2FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX3FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX4FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUSEPROGRAMPROC) (GLuint program); +typedef void (GL_APIENTRYP PFNGLVALIDATEPROGRAMPROC) (GLuint program); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB1FPROC) (GLuint index, GLfloat x); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB1FVPROC) (GLuint index, const GLfloat *v); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB2FPROC) (GLuint index, GLfloat x, GLfloat y); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB2FVPROC) (GLuint index, const GLfloat *v); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB3FPROC) (GLuint index, GLfloat x, GLfloat y, GLfloat z); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB3FVPROC) (GLuint index, const GLfloat *v); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB4FPROC) (GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB4FVPROC) (GLuint index, const GLfloat *v); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBPOINTERPROC) (GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, const void *pointer); +typedef void (GL_APIENTRYP PFNGLVIEWPORTPROC) (GLint x, GLint y, GLsizei width, GLsizei height); +#if GL_GLES_PROTOTYPES +GL_APICALL void GL_APIENTRY glActiveTexture (GLenum texture); +GL_APICALL void GL_APIENTRY glAttachShader (GLuint program, GLuint shader); +GL_APICALL void GL_APIENTRY glBindAttribLocation (GLuint program, GLuint index, const GLchar *name); +GL_APICALL void GL_APIENTRY glBindBuffer (GLenum target, GLuint buffer); +GL_APICALL void GL_APIENTRY glBindFramebuffer (GLenum target, GLuint framebuffer); +GL_APICALL void GL_APIENTRY glBindRenderbuffer (GLenum target, GLuint renderbuffer); +GL_APICALL void GL_APIENTRY glBindTexture (GLenum target, GLuint texture); +GL_APICALL void GL_APIENTRY glBlendColor (GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha); +GL_APICALL void GL_APIENTRY glBlendEquation (GLenum mode); +GL_APICALL void GL_APIENTRY glBlendEquationSeparate (GLenum modeRGB, GLenum modeAlpha); +GL_APICALL void GL_APIENTRY glBlendFunc (GLenum sfactor, GLenum dfactor); +GL_APICALL void GL_APIENTRY glBlendFuncSeparate (GLenum sfactorRGB, GLenum dfactorRGB, GLenum sfactorAlpha, GLenum dfactorAlpha); +GL_APICALL void GL_APIENTRY glBufferData (GLenum target, GLsizeiptr size, const void *data, GLenum usage); +GL_APICALL void GL_APIENTRY glBufferSubData (GLenum target, GLintptr offset, GLsizeiptr size, const void *data); +GL_APICALL GLenum GL_APIENTRY glCheckFramebufferStatus (GLenum target); +GL_APICALL void GL_APIENTRY glClear (GLbitfield mask); +GL_APICALL void GL_APIENTRY glClearColor (GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha); +GL_APICALL void GL_APIENTRY glClearDepthf (GLfloat d); +GL_APICALL void GL_APIENTRY 
glClearStencil (GLint s); +GL_APICALL void GL_APIENTRY glColorMask (GLboolean red, GLboolean green, GLboolean blue, GLboolean alpha); +GL_APICALL void GL_APIENTRY glCompileShader (GLuint shader); +GL_APICALL void GL_APIENTRY glCompressedTexImage2D (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLsizei imageSize, const void *data); +GL_APICALL void GL_APIENTRY glCompressedTexSubImage2D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, const void *data); +GL_APICALL void GL_APIENTRY glCopyTexImage2D (GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLsizei height, GLint border); +GL_APICALL void GL_APIENTRY glCopyTexSubImage2D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height); +GL_APICALL GLuint GL_APIENTRY glCreateProgram (void); +GL_APICALL GLuint GL_APIENTRY glCreateShader (GLenum type); +GL_APICALL void GL_APIENTRY glCullFace (GLenum mode); +GL_APICALL void GL_APIENTRY glDeleteBuffers (GLsizei n, const GLuint *buffers); +GL_APICALL void GL_APIENTRY glDeleteFramebuffers (GLsizei n, const GLuint *framebuffers); +GL_APICALL void GL_APIENTRY glDeleteProgram (GLuint program); +GL_APICALL void GL_APIENTRY glDeleteRenderbuffers (GLsizei n, const GLuint *renderbuffers); +GL_APICALL void GL_APIENTRY glDeleteShader (GLuint shader); +GL_APICALL void GL_APIENTRY glDeleteTextures (GLsizei n, const GLuint *textures); +GL_APICALL void GL_APIENTRY glDepthFunc (GLenum func); +GL_APICALL void GL_APIENTRY glDepthMask (GLboolean flag); +GL_APICALL void GL_APIENTRY glDepthRangef (GLfloat n, GLfloat f); +GL_APICALL void GL_APIENTRY glDetachShader (GLuint program, GLuint shader); +GL_APICALL void GL_APIENTRY glDisable (GLenum cap); +GL_APICALL void GL_APIENTRY glDisableVertexAttribArray (GLuint index); +GL_APICALL void GL_APIENTRY glDrawArrays (GLenum mode, GLint first, GLsizei count); +GL_APICALL void GL_APIENTRY glDrawElements (GLenum mode, GLsizei count, GLenum type, const void *indices); +GL_APICALL void GL_APIENTRY glEnable (GLenum cap); +GL_APICALL void GL_APIENTRY glEnableVertexAttribArray (GLuint index); +GL_APICALL void GL_APIENTRY glFinish (void); +GL_APICALL void GL_APIENTRY glFlush (void); +GL_APICALL void GL_APIENTRY glFramebufferRenderbuffer (GLenum target, GLenum attachment, GLenum renderbuffertarget, GLuint renderbuffer); +GL_APICALL void GL_APIENTRY glFramebufferTexture2D (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level); +GL_APICALL void GL_APIENTRY glFrontFace (GLenum mode); +GL_APICALL void GL_APIENTRY glGenBuffers (GLsizei n, GLuint *buffers); +GL_APICALL void GL_APIENTRY glGenerateMipmap (GLenum target); +GL_APICALL void GL_APIENTRY glGenFramebuffers (GLsizei n, GLuint *framebuffers); +GL_APICALL void GL_APIENTRY glGenRenderbuffers (GLsizei n, GLuint *renderbuffers); +GL_APICALL void GL_APIENTRY glGenTextures (GLsizei n, GLuint *textures); +GL_APICALL void GL_APIENTRY glGetActiveAttrib (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLint *size, GLenum *type, GLchar *name); +GL_APICALL void GL_APIENTRY glGetActiveUniform (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLint *size, GLenum *type, GLchar *name); +GL_APICALL void GL_APIENTRY glGetAttachedShaders (GLuint program, GLsizei maxCount, GLsizei *count, GLuint *shaders); +GL_APICALL GLint GL_APIENTRY glGetAttribLocation (GLuint program, const GLchar 
*name); +GL_APICALL void GL_APIENTRY glGetBooleanv (GLenum pname, GLboolean *data); +GL_APICALL void GL_APIENTRY glGetBufferParameteriv (GLenum target, GLenum pname, GLint *params); +GL_APICALL GLenum GL_APIENTRY glGetError (void); +GL_APICALL void GL_APIENTRY glGetFloatv (GLenum pname, GLfloat *data); +GL_APICALL void GL_APIENTRY glGetFramebufferAttachmentParameteriv (GLenum target, GLenum attachment, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetIntegerv (GLenum pname, GLint *data); +GL_APICALL void GL_APIENTRY glGetProgramiv (GLuint program, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetProgramInfoLog (GLuint program, GLsizei bufSize, GLsizei *length, GLchar *infoLog); +GL_APICALL void GL_APIENTRY glGetRenderbufferParameteriv (GLenum target, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetShaderiv (GLuint shader, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetShaderInfoLog (GLuint shader, GLsizei bufSize, GLsizei *length, GLchar *infoLog); +GL_APICALL void GL_APIENTRY glGetShaderPrecisionFormat (GLenum shadertype, GLenum precisiontype, GLint *range, GLint *precision); +GL_APICALL void GL_APIENTRY glGetShaderSource (GLuint shader, GLsizei bufSize, GLsizei *length, GLchar *source); +GL_APICALL const GLubyte *GL_APIENTRY glGetString (GLenum name); +GL_APICALL void GL_APIENTRY glGetTexParameterfv (GLenum target, GLenum pname, GLfloat *params); +GL_APICALL void GL_APIENTRY glGetTexParameteriv (GLenum target, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetUniformfv (GLuint program, GLint location, GLfloat *params); +GL_APICALL void GL_APIENTRY glGetUniformiv (GLuint program, GLint location, GLint *params); +GL_APICALL GLint GL_APIENTRY glGetUniformLocation (GLuint program, const GLchar *name); +GL_APICALL void GL_APIENTRY glGetVertexAttribfv (GLuint index, GLenum pname, GLfloat *params); +GL_APICALL void GL_APIENTRY glGetVertexAttribiv (GLuint index, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetVertexAttribPointerv (GLuint index, GLenum pname, void **pointer); +GL_APICALL void GL_APIENTRY glHint (GLenum target, GLenum mode); +GL_APICALL GLboolean GL_APIENTRY glIsBuffer (GLuint buffer); +GL_APICALL GLboolean GL_APIENTRY glIsEnabled (GLenum cap); +GL_APICALL GLboolean GL_APIENTRY glIsFramebuffer (GLuint framebuffer); +GL_APICALL GLboolean GL_APIENTRY glIsProgram (GLuint program); +GL_APICALL GLboolean GL_APIENTRY glIsRenderbuffer (GLuint renderbuffer); +GL_APICALL GLboolean GL_APIENTRY glIsShader (GLuint shader); +GL_APICALL GLboolean GL_APIENTRY glIsTexture (GLuint texture); +GL_APICALL void GL_APIENTRY glLineWidth (GLfloat width); +GL_APICALL void GL_APIENTRY glLinkProgram (GLuint program); +GL_APICALL void GL_APIENTRY glPixelStorei (GLenum pname, GLint param); +GL_APICALL void GL_APIENTRY glPolygonOffset (GLfloat factor, GLfloat units); +GL_APICALL void GL_APIENTRY glReadPixels (GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, void *pixels); +GL_APICALL void GL_APIENTRY glReleaseShaderCompiler (void); +GL_APICALL void GL_APIENTRY glRenderbufferStorage (GLenum target, GLenum internalformat, GLsizei width, GLsizei height); +GL_APICALL void GL_APIENTRY glSampleCoverage (GLfloat value, GLboolean invert); +GL_APICALL void GL_APIENTRY glScissor (GLint x, GLint y, GLsizei width, GLsizei height); +GL_APICALL void GL_APIENTRY glShaderBinary (GLsizei count, const GLuint *shaders, GLenum binaryformat, const void *binary, GLsizei length); +GL_APICALL void GL_APIENTRY 
glShaderSource (GLuint shader, GLsizei count, const GLchar *const*string, const GLint *length); +GL_APICALL void GL_APIENTRY glStencilFunc (GLenum func, GLint ref, GLuint mask); +GL_APICALL void GL_APIENTRY glStencilFuncSeparate (GLenum face, GLenum func, GLint ref, GLuint mask); +GL_APICALL void GL_APIENTRY glStencilMask (GLuint mask); +GL_APICALL void GL_APIENTRY glStencilMaskSeparate (GLenum face, GLuint mask); +GL_APICALL void GL_APIENTRY glStencilOp (GLenum fail, GLenum zfail, GLenum zpass); +GL_APICALL void GL_APIENTRY glStencilOpSeparate (GLenum face, GLenum sfail, GLenum dpfail, GLenum dppass); +GL_APICALL void GL_APIENTRY glTexImage2D (GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, const void *pixels); +GL_APICALL void GL_APIENTRY glTexParameterf (GLenum target, GLenum pname, GLfloat param); +GL_APICALL void GL_APIENTRY glTexParameterfv (GLenum target, GLenum pname, const GLfloat *params); +GL_APICALL void GL_APIENTRY glTexParameteri (GLenum target, GLenum pname, GLint param); +GL_APICALL void GL_APIENTRY glTexParameteriv (GLenum target, GLenum pname, const GLint *params); +GL_APICALL void GL_APIENTRY glTexSubImage2D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, const void *pixels); +GL_APICALL void GL_APIENTRY glUniform1f (GLint location, GLfloat v0); +GL_APICALL void GL_APIENTRY glUniform1fv (GLint location, GLsizei count, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniform1i (GLint location, GLint v0); +GL_APICALL void GL_APIENTRY glUniform1iv (GLint location, GLsizei count, const GLint *value); +GL_APICALL void GL_APIENTRY glUniform2f (GLint location, GLfloat v0, GLfloat v1); +GL_APICALL void GL_APIENTRY glUniform2fv (GLint location, GLsizei count, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniform2i (GLint location, GLint v0, GLint v1); +GL_APICALL void GL_APIENTRY glUniform2iv (GLint location, GLsizei count, const GLint *value); +GL_APICALL void GL_APIENTRY glUniform3f (GLint location, GLfloat v0, GLfloat v1, GLfloat v2); +GL_APICALL void GL_APIENTRY glUniform3fv (GLint location, GLsizei count, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniform3i (GLint location, GLint v0, GLint v1, GLint v2); +GL_APICALL void GL_APIENTRY glUniform3iv (GLint location, GLsizei count, const GLint *value); +GL_APICALL void GL_APIENTRY glUniform4f (GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3); +GL_APICALL void GL_APIENTRY glUniform4fv (GLint location, GLsizei count, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniform4i (GLint location, GLint v0, GLint v1, GLint v2, GLint v3); +GL_APICALL void GL_APIENTRY glUniform4iv (GLint location, GLsizei count, const GLint *value); +GL_APICALL void GL_APIENTRY glUniformMatrix2fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniformMatrix3fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniformMatrix4fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUseProgram (GLuint program); +GL_APICALL void GL_APIENTRY glValidateProgram (GLuint program); +GL_APICALL void GL_APIENTRY glVertexAttrib1f (GLuint index, GLfloat x); +GL_APICALL void GL_APIENTRY glVertexAttrib1fv (GLuint index, const GLfloat *v); +GL_APICALL void GL_APIENTRY glVertexAttrib2f (GLuint index, GLfloat x, GLfloat y); 
+GL_APICALL void GL_APIENTRY glVertexAttrib2fv (GLuint index, const GLfloat *v); +GL_APICALL void GL_APIENTRY glVertexAttrib3f (GLuint index, GLfloat x, GLfloat y, GLfloat z); +GL_APICALL void GL_APIENTRY glVertexAttrib3fv (GLuint index, const GLfloat *v); +GL_APICALL void GL_APIENTRY glVertexAttrib4f (GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w); +GL_APICALL void GL_APIENTRY glVertexAttrib4fv (GLuint index, const GLfloat *v); +GL_APICALL void GL_APIENTRY glVertexAttribPointer (GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, const void *pointer); +GL_APICALL void GL_APIENTRY glViewport (GLint x, GLint y, GLsizei width, GLsizei height); +#endif +#endif /* GL_ES_VERSION_2_0 */ + +#ifndef GL_ES_VERSION_3_0 +#define GL_ES_VERSION_3_0 1 +typedef unsigned short GLhalf; +#define GL_READ_BUFFER 0x0C02 +#define GL_UNPACK_ROW_LENGTH 0x0CF2 +#define GL_UNPACK_SKIP_ROWS 0x0CF3 +#define GL_UNPACK_SKIP_PIXELS 0x0CF4 +#define GL_PACK_ROW_LENGTH 0x0D02 +#define GL_PACK_SKIP_ROWS 0x0D03 +#define GL_PACK_SKIP_PIXELS 0x0D04 +#define GL_COLOR 0x1800 +#define GL_DEPTH 0x1801 +#define GL_STENCIL 0x1802 +#define GL_RED 0x1903 +#define GL_RGB8 0x8051 +#define GL_RGBA8 0x8058 +#define GL_RGB10_A2 0x8059 +#define GL_TEXTURE_BINDING_3D 0x806A +#define GL_UNPACK_SKIP_IMAGES 0x806D +#define GL_UNPACK_IMAGE_HEIGHT 0x806E +#define GL_TEXTURE_3D 0x806F +#define GL_TEXTURE_WRAP_R 0x8072 +#define GL_MAX_3D_TEXTURE_SIZE 0x8073 +#define GL_UNSIGNED_INT_2_10_10_10_REV 0x8368 +#define GL_MAX_ELEMENTS_VERTICES 0x80E8 +#define GL_MAX_ELEMENTS_INDICES 0x80E9 +#define GL_TEXTURE_MIN_LOD 0x813A +#define GL_TEXTURE_MAX_LOD 0x813B +#define GL_TEXTURE_BASE_LEVEL 0x813C +#define GL_TEXTURE_MAX_LEVEL 0x813D +#define GL_MIN 0x8007 +#define GL_MAX 0x8008 +#define GL_DEPTH_COMPONENT24 0x81A6 +#define GL_MAX_TEXTURE_LOD_BIAS 0x84FD +#define GL_TEXTURE_COMPARE_MODE 0x884C +#define GL_TEXTURE_COMPARE_FUNC 0x884D +#define GL_CURRENT_QUERY 0x8865 +#define GL_QUERY_RESULT 0x8866 +#define GL_QUERY_RESULT_AVAILABLE 0x8867 +#define GL_BUFFER_MAPPED 0x88BC +#define GL_BUFFER_MAP_POINTER 0x88BD +#define GL_STREAM_READ 0x88E1 +#define GL_STREAM_COPY 0x88E2 +#define GL_STATIC_READ 0x88E5 +#define GL_STATIC_COPY 0x88E6 +#define GL_DYNAMIC_READ 0x88E9 +#define GL_DYNAMIC_COPY 0x88EA +#define GL_MAX_DRAW_BUFFERS 0x8824 +#define GL_DRAW_BUFFER0 0x8825 +#define GL_DRAW_BUFFER1 0x8826 +#define GL_DRAW_BUFFER2 0x8827 +#define GL_DRAW_BUFFER3 0x8828 +#define GL_DRAW_BUFFER4 0x8829 +#define GL_DRAW_BUFFER5 0x882A +#define GL_DRAW_BUFFER6 0x882B +#define GL_DRAW_BUFFER7 0x882C +#define GL_DRAW_BUFFER8 0x882D +#define GL_DRAW_BUFFER9 0x882E +#define GL_DRAW_BUFFER10 0x882F +#define GL_DRAW_BUFFER11 0x8830 +#define GL_DRAW_BUFFER12 0x8831 +#define GL_DRAW_BUFFER13 0x8832 +#define GL_DRAW_BUFFER14 0x8833 +#define GL_DRAW_BUFFER15 0x8834 +#define GL_MAX_FRAGMENT_UNIFORM_COMPONENTS 0x8B49 +#define GL_MAX_VERTEX_UNIFORM_COMPONENTS 0x8B4A +#define GL_SAMPLER_3D 0x8B5F +#define GL_SAMPLER_2D_SHADOW 0x8B62 +#define GL_FRAGMENT_SHADER_DERIVATIVE_HINT 0x8B8B +#define GL_PIXEL_PACK_BUFFER 0x88EB +#define GL_PIXEL_UNPACK_BUFFER 0x88EC +#define GL_PIXEL_PACK_BUFFER_BINDING 0x88ED +#define GL_PIXEL_UNPACK_BUFFER_BINDING 0x88EF +#define GL_FLOAT_MAT2x3 0x8B65 +#define GL_FLOAT_MAT2x4 0x8B66 +#define GL_FLOAT_MAT3x2 0x8B67 +#define GL_FLOAT_MAT3x4 0x8B68 +#define GL_FLOAT_MAT4x2 0x8B69 +#define GL_FLOAT_MAT4x3 0x8B6A +#define GL_SRGB 0x8C40 +#define GL_SRGB8 0x8C41 +#define GL_SRGB8_ALPHA8 0x8C43 +#define GL_COMPARE_REF_TO_TEXTURE 0x884E 
+#define GL_MAJOR_VERSION 0x821B +#define GL_MINOR_VERSION 0x821C +#define GL_NUM_EXTENSIONS 0x821D +#define GL_RGBA32F 0x8814 +#define GL_RGB32F 0x8815 +#define GL_RGBA16F 0x881A +#define GL_RGB16F 0x881B +#define GL_VERTEX_ATTRIB_ARRAY_INTEGER 0x88FD +#define GL_MAX_ARRAY_TEXTURE_LAYERS 0x88FF +#define GL_MIN_PROGRAM_TEXEL_OFFSET 0x8904 +#define GL_MAX_PROGRAM_TEXEL_OFFSET 0x8905 +#define GL_MAX_VARYING_COMPONENTS 0x8B4B +#define GL_TEXTURE_2D_ARRAY 0x8C1A +#define GL_TEXTURE_BINDING_2D_ARRAY 0x8C1D +#define GL_R11F_G11F_B10F 0x8C3A +#define GL_UNSIGNED_INT_10F_11F_11F_REV 0x8C3B +#define GL_RGB9_E5 0x8C3D +#define GL_UNSIGNED_INT_5_9_9_9_REV 0x8C3E +#define GL_TRANSFORM_FEEDBACK_VARYING_MAX_LENGTH 0x8C76 +#define GL_TRANSFORM_FEEDBACK_BUFFER_MODE 0x8C7F +#define GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_COMPONENTS 0x8C80 +#define GL_TRANSFORM_FEEDBACK_VARYINGS 0x8C83 +#define GL_TRANSFORM_FEEDBACK_BUFFER_START 0x8C84 +#define GL_TRANSFORM_FEEDBACK_BUFFER_SIZE 0x8C85 +#define GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN 0x8C88 +#define GL_RASTERIZER_DISCARD 0x8C89 +#define GL_MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS 0x8C8A +#define GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_ATTRIBS 0x8C8B +#define GL_INTERLEAVED_ATTRIBS 0x8C8C +#define GL_SEPARATE_ATTRIBS 0x8C8D +#define GL_TRANSFORM_FEEDBACK_BUFFER 0x8C8E +#define GL_TRANSFORM_FEEDBACK_BUFFER_BINDING 0x8C8F +#define GL_RGBA32UI 0x8D70 +#define GL_RGB32UI 0x8D71 +#define GL_RGBA16UI 0x8D76 +#define GL_RGB16UI 0x8D77 +#define GL_RGBA8UI 0x8D7C +#define GL_RGB8UI 0x8D7D +#define GL_RGBA32I 0x8D82 +#define GL_RGB32I 0x8D83 +#define GL_RGBA16I 0x8D88 +#define GL_RGB16I 0x8D89 +#define GL_RGBA8I 0x8D8E +#define GL_RGB8I 0x8D8F +#define GL_RED_INTEGER 0x8D94 +#define GL_RGB_INTEGER 0x8D98 +#define GL_RGBA_INTEGER 0x8D99 +#define GL_SAMPLER_2D_ARRAY 0x8DC1 +#define GL_SAMPLER_2D_ARRAY_SHADOW 0x8DC4 +#define GL_SAMPLER_CUBE_SHADOW 0x8DC5 +#define GL_UNSIGNED_INT_VEC2 0x8DC6 +#define GL_UNSIGNED_INT_VEC3 0x8DC7 +#define GL_UNSIGNED_INT_VEC4 0x8DC8 +#define GL_INT_SAMPLER_2D 0x8DCA +#define GL_INT_SAMPLER_3D 0x8DCB +#define GL_INT_SAMPLER_CUBE 0x8DCC +#define GL_INT_SAMPLER_2D_ARRAY 0x8DCF +#define GL_UNSIGNED_INT_SAMPLER_2D 0x8DD2 +#define GL_UNSIGNED_INT_SAMPLER_3D 0x8DD3 +#define GL_UNSIGNED_INT_SAMPLER_CUBE 0x8DD4 +#define GL_UNSIGNED_INT_SAMPLER_2D_ARRAY 0x8DD7 +#define GL_BUFFER_ACCESS_FLAGS 0x911F +#define GL_BUFFER_MAP_LENGTH 0x9120 +#define GL_BUFFER_MAP_OFFSET 0x9121 +#define GL_DEPTH_COMPONENT32F 0x8CAC +#define GL_DEPTH32F_STENCIL8 0x8CAD +#define GL_FLOAT_32_UNSIGNED_INT_24_8_REV 0x8DAD +#define GL_FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING 0x8210 +#define GL_FRAMEBUFFER_ATTACHMENT_COMPONENT_TYPE 0x8211 +#define GL_FRAMEBUFFER_ATTACHMENT_RED_SIZE 0x8212 +#define GL_FRAMEBUFFER_ATTACHMENT_GREEN_SIZE 0x8213 +#define GL_FRAMEBUFFER_ATTACHMENT_BLUE_SIZE 0x8214 +#define GL_FRAMEBUFFER_ATTACHMENT_ALPHA_SIZE 0x8215 +#define GL_FRAMEBUFFER_ATTACHMENT_DEPTH_SIZE 0x8216 +#define GL_FRAMEBUFFER_ATTACHMENT_STENCIL_SIZE 0x8217 +#define GL_FRAMEBUFFER_DEFAULT 0x8218 +#define GL_FRAMEBUFFER_UNDEFINED 0x8219 +#define GL_DEPTH_STENCIL_ATTACHMENT 0x821A +#define GL_DEPTH_STENCIL 0x84F9 +#define GL_UNSIGNED_INT_24_8 0x84FA +#define GL_DEPTH24_STENCIL8 0x88F0 +#define GL_UNSIGNED_NORMALIZED 0x8C17 +#define GL_DRAW_FRAMEBUFFER_BINDING 0x8CA6 +#define GL_READ_FRAMEBUFFER 0x8CA8 +#define GL_DRAW_FRAMEBUFFER 0x8CA9 +#define GL_READ_FRAMEBUFFER_BINDING 0x8CAA +#define GL_RENDERBUFFER_SAMPLES 0x8CAB +#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LAYER 0x8CD4 +#define 
GL_MAX_COLOR_ATTACHMENTS 0x8CDF +#define GL_COLOR_ATTACHMENT1 0x8CE1 +#define GL_COLOR_ATTACHMENT2 0x8CE2 +#define GL_COLOR_ATTACHMENT3 0x8CE3 +#define GL_COLOR_ATTACHMENT4 0x8CE4 +#define GL_COLOR_ATTACHMENT5 0x8CE5 +#define GL_COLOR_ATTACHMENT6 0x8CE6 +#define GL_COLOR_ATTACHMENT7 0x8CE7 +#define GL_COLOR_ATTACHMENT8 0x8CE8 +#define GL_COLOR_ATTACHMENT9 0x8CE9 +#define GL_COLOR_ATTACHMENT10 0x8CEA +#define GL_COLOR_ATTACHMENT11 0x8CEB +#define GL_COLOR_ATTACHMENT12 0x8CEC +#define GL_COLOR_ATTACHMENT13 0x8CED +#define GL_COLOR_ATTACHMENT14 0x8CEE +#define GL_COLOR_ATTACHMENT15 0x8CEF +#define GL_COLOR_ATTACHMENT16 0x8CF0 +#define GL_COLOR_ATTACHMENT17 0x8CF1 +#define GL_COLOR_ATTACHMENT18 0x8CF2 +#define GL_COLOR_ATTACHMENT19 0x8CF3 +#define GL_COLOR_ATTACHMENT20 0x8CF4 +#define GL_COLOR_ATTACHMENT21 0x8CF5 +#define GL_COLOR_ATTACHMENT22 0x8CF6 +#define GL_COLOR_ATTACHMENT23 0x8CF7 +#define GL_COLOR_ATTACHMENT24 0x8CF8 +#define GL_COLOR_ATTACHMENT25 0x8CF9 +#define GL_COLOR_ATTACHMENT26 0x8CFA +#define GL_COLOR_ATTACHMENT27 0x8CFB +#define GL_COLOR_ATTACHMENT28 0x8CFC +#define GL_COLOR_ATTACHMENT29 0x8CFD +#define GL_COLOR_ATTACHMENT30 0x8CFE +#define GL_COLOR_ATTACHMENT31 0x8CFF +#define GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE 0x8D56 +#define GL_MAX_SAMPLES 0x8D57 +#define GL_HALF_FLOAT 0x140B +#define GL_MAP_READ_BIT 0x0001 +#define GL_MAP_WRITE_BIT 0x0002 +#define GL_MAP_INVALIDATE_RANGE_BIT 0x0004 +#define GL_MAP_INVALIDATE_BUFFER_BIT 0x0008 +#define GL_MAP_FLUSH_EXPLICIT_BIT 0x0010 +#define GL_MAP_UNSYNCHRONIZED_BIT 0x0020 +#define GL_RG 0x8227 +#define GL_RG_INTEGER 0x8228 +#define GL_R8 0x8229 +#define GL_RG8 0x822B +#define GL_R16F 0x822D +#define GL_R32F 0x822E +#define GL_RG16F 0x822F +#define GL_RG32F 0x8230 +#define GL_R8I 0x8231 +#define GL_R8UI 0x8232 +#define GL_R16I 0x8233 +#define GL_R16UI 0x8234 +#define GL_R32I 0x8235 +#define GL_R32UI 0x8236 +#define GL_RG8I 0x8237 +#define GL_RG8UI 0x8238 +#define GL_RG16I 0x8239 +#define GL_RG16UI 0x823A +#define GL_RG32I 0x823B +#define GL_RG32UI 0x823C +#define GL_VERTEX_ARRAY_BINDING 0x85B5 +#define GL_R8_SNORM 0x8F94 +#define GL_RG8_SNORM 0x8F95 +#define GL_RGB8_SNORM 0x8F96 +#define GL_RGBA8_SNORM 0x8F97 +#define GL_SIGNED_NORMALIZED 0x8F9C +#define GL_PRIMITIVE_RESTART_FIXED_INDEX 0x8D69 +#define GL_COPY_READ_BUFFER 0x8F36 +#define GL_COPY_WRITE_BUFFER 0x8F37 +#define GL_COPY_READ_BUFFER_BINDING 0x8F36 +#define GL_COPY_WRITE_BUFFER_BINDING 0x8F37 +#define GL_UNIFORM_BUFFER 0x8A11 +#define GL_UNIFORM_BUFFER_BINDING 0x8A28 +#define GL_UNIFORM_BUFFER_START 0x8A29 +#define GL_UNIFORM_BUFFER_SIZE 0x8A2A +#define GL_MAX_VERTEX_UNIFORM_BLOCKS 0x8A2B +#define GL_MAX_FRAGMENT_UNIFORM_BLOCKS 0x8A2D +#define GL_MAX_COMBINED_UNIFORM_BLOCKS 0x8A2E +#define GL_MAX_UNIFORM_BUFFER_BINDINGS 0x8A2F +#define GL_MAX_UNIFORM_BLOCK_SIZE 0x8A30 +#define GL_MAX_COMBINED_VERTEX_UNIFORM_COMPONENTS 0x8A31 +#define GL_MAX_COMBINED_FRAGMENT_UNIFORM_COMPONENTS 0x8A33 +#define GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT 0x8A34 +#define GL_ACTIVE_UNIFORM_BLOCK_MAX_NAME_LENGTH 0x8A35 +#define GL_ACTIVE_UNIFORM_BLOCKS 0x8A36 +#define GL_UNIFORM_TYPE 0x8A37 +#define GL_UNIFORM_SIZE 0x8A38 +#define GL_UNIFORM_NAME_LENGTH 0x8A39 +#define GL_UNIFORM_BLOCK_INDEX 0x8A3A +#define GL_UNIFORM_OFFSET 0x8A3B +#define GL_UNIFORM_ARRAY_STRIDE 0x8A3C +#define GL_UNIFORM_MATRIX_STRIDE 0x8A3D +#define GL_UNIFORM_IS_ROW_MAJOR 0x8A3E +#define GL_UNIFORM_BLOCK_BINDING 0x8A3F +#define GL_UNIFORM_BLOCK_DATA_SIZE 0x8A40 +#define GL_UNIFORM_BLOCK_NAME_LENGTH 0x8A41 +#define 
GL_UNIFORM_BLOCK_ACTIVE_UNIFORMS 0x8A42 +#define GL_UNIFORM_BLOCK_ACTIVE_UNIFORM_INDICES 0x8A43 +#define GL_UNIFORM_BLOCK_REFERENCED_BY_VERTEX_SHADER 0x8A44 +#define GL_UNIFORM_BLOCK_REFERENCED_BY_FRAGMENT_SHADER 0x8A46 +#define GL_INVALID_INDEX 0xFFFFFFFFu +#define GL_MAX_VERTEX_OUTPUT_COMPONENTS 0x9122 +#define GL_MAX_FRAGMENT_INPUT_COMPONENTS 0x9125 +#define GL_MAX_SERVER_WAIT_TIMEOUT 0x9111 +#define GL_OBJECT_TYPE 0x9112 +#define GL_SYNC_CONDITION 0x9113 +#define GL_SYNC_STATUS 0x9114 +#define GL_SYNC_FLAGS 0x9115 +#define GL_SYNC_FENCE 0x9116 +#define GL_SYNC_GPU_COMMANDS_COMPLETE 0x9117 +#define GL_UNSIGNALED 0x9118 +#define GL_SIGNALED 0x9119 +#define GL_ALREADY_SIGNALED 0x911A +#define GL_TIMEOUT_EXPIRED 0x911B +#define GL_CONDITION_SATISFIED 0x911C +#define GL_WAIT_FAILED 0x911D +#define GL_SYNC_FLUSH_COMMANDS_BIT 0x00000001 +#define GL_TIMEOUT_IGNORED 0xFFFFFFFFFFFFFFFFull +#define GL_VERTEX_ATTRIB_ARRAY_DIVISOR 0x88FE +#define GL_ANY_SAMPLES_PASSED 0x8C2F +#define GL_ANY_SAMPLES_PASSED_CONSERVATIVE 0x8D6A +#define GL_SAMPLER_BINDING 0x8919 +#define GL_RGB10_A2UI 0x906F +#define GL_TEXTURE_SWIZZLE_R 0x8E42 +#define GL_TEXTURE_SWIZZLE_G 0x8E43 +#define GL_TEXTURE_SWIZZLE_B 0x8E44 +#define GL_TEXTURE_SWIZZLE_A 0x8E45 +#define GL_GREEN 0x1904 +#define GL_BLUE 0x1905 +#define GL_INT_2_10_10_10_REV 0x8D9F +#define GL_TRANSFORM_FEEDBACK 0x8E22 +#define GL_TRANSFORM_FEEDBACK_PAUSED 0x8E23 +#define GL_TRANSFORM_FEEDBACK_ACTIVE 0x8E24 +#define GL_TRANSFORM_FEEDBACK_BINDING 0x8E25 +#define GL_PROGRAM_BINARY_RETRIEVABLE_HINT 0x8257 +#define GL_PROGRAM_BINARY_LENGTH 0x8741 +#define GL_NUM_PROGRAM_BINARY_FORMATS 0x87FE +#define GL_PROGRAM_BINARY_FORMATS 0x87FF +#define GL_COMPRESSED_R11_EAC 0x9270 +#define GL_COMPRESSED_SIGNED_R11_EAC 0x9271 +#define GL_COMPRESSED_RG11_EAC 0x9272 +#define GL_COMPRESSED_SIGNED_RG11_EAC 0x9273 +#define GL_COMPRESSED_RGB8_ETC2 0x9274 +#define GL_COMPRESSED_SRGB8_ETC2 0x9275 +#define GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2 0x9276 +#define GL_COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2 0x9277 +#define GL_COMPRESSED_RGBA8_ETC2_EAC 0x9278 +#define GL_COMPRESSED_SRGB8_ALPHA8_ETC2_EAC 0x9279 +#define GL_TEXTURE_IMMUTABLE_FORMAT 0x912F +#define GL_MAX_ELEMENT_INDEX 0x8D6B +#define GL_NUM_SAMPLE_COUNTS 0x9380 +#define GL_TEXTURE_IMMUTABLE_LEVELS 0x82DF +typedef void (GL_APIENTRYP PFNGLREADBUFFERPROC) (GLenum src); +typedef void (GL_APIENTRYP PFNGLDRAWRANGEELEMENTSPROC) (GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum type, const void *indices); +typedef void (GL_APIENTRYP PFNGLTEXIMAGE3DPROC) (GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const void *pixels); +typedef void (GL_APIENTRYP PFNGLTEXSUBIMAGE3DPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const void *pixels); +typedef void (GL_APIENTRYP PFNGLCOPYTEXSUBIMAGE3DPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLint x, GLint y, GLsizei width, GLsizei height); +typedef void (GL_APIENTRYP PFNGLCOMPRESSEDTEXIMAGE3DPROC) (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLsizei imageSize, const void *data); +typedef void (GL_APIENTRYP PFNGLCOMPRESSEDTEXSUBIMAGE3DPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLsizei 
imageSize, const void *data); +typedef void (GL_APIENTRYP PFNGLGENQUERIESPROC) (GLsizei n, GLuint *ids); +typedef void (GL_APIENTRYP PFNGLDELETEQUERIESPROC) (GLsizei n, const GLuint *ids); +typedef GLboolean (GL_APIENTRYP PFNGLISQUERYPROC) (GLuint id); +typedef void (GL_APIENTRYP PFNGLBEGINQUERYPROC) (GLenum target, GLuint id); +typedef void (GL_APIENTRYP PFNGLENDQUERYPROC) (GLenum target); +typedef void (GL_APIENTRYP PFNGLGETQUERYIVPROC) (GLenum target, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETQUERYOBJECTUIVPROC) (GLuint id, GLenum pname, GLuint *params); +typedef GLboolean (GL_APIENTRYP PFNGLUNMAPBUFFERPROC) (GLenum target); +typedef void (GL_APIENTRYP PFNGLGETBUFFERPOINTERVPROC) (GLenum target, GLenum pname, void **params); +typedef void (GL_APIENTRYP PFNGLDRAWBUFFERSPROC) (GLsizei n, const GLenum *bufs); +typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX2X3FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX3X2FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX2X4FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX4X2FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX3X4FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX4X3FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLBLITFRAMEBUFFERPROC) (GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter); +typedef void (GL_APIENTRYP PFNGLRENDERBUFFERSTORAGEMULTISAMPLEPROC) (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height); +typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERTEXTURELAYERPROC) (GLenum target, GLenum attachment, GLuint texture, GLint level, GLint layer); +typedef void *(GL_APIENTRYP PFNGLMAPBUFFERRANGEPROC) (GLenum target, GLintptr offset, GLsizeiptr length, GLbitfield access); +typedef void (GL_APIENTRYP PFNGLFLUSHMAPPEDBUFFERRANGEPROC) (GLenum target, GLintptr offset, GLsizeiptr length); +typedef void (GL_APIENTRYP PFNGLBINDVERTEXARRAYPROC) (GLuint array); +typedef void (GL_APIENTRYP PFNGLDELETEVERTEXARRAYSPROC) (GLsizei n, const GLuint *arrays); +typedef void (GL_APIENTRYP PFNGLGENVERTEXARRAYSPROC) (GLsizei n, GLuint *arrays); +typedef GLboolean (GL_APIENTRYP PFNGLISVERTEXARRAYPROC) (GLuint array); +typedef void (GL_APIENTRYP PFNGLGETINTEGERI_VPROC) (GLenum target, GLuint index, GLint *data); +typedef void (GL_APIENTRYP PFNGLBEGINTRANSFORMFEEDBACKPROC) (GLenum primitiveMode); +typedef void (GL_APIENTRYP PFNGLENDTRANSFORMFEEDBACKPROC) (void); +typedef void (GL_APIENTRYP PFNGLBINDBUFFERRANGEPROC) (GLenum target, GLuint index, GLuint buffer, GLintptr offset, GLsizeiptr size); +typedef void (GL_APIENTRYP PFNGLBINDBUFFERBASEPROC) (GLenum target, GLuint index, GLuint buffer); +typedef void (GL_APIENTRYP PFNGLTRANSFORMFEEDBACKVARYINGSPROC) (GLuint program, GLsizei count, const GLchar *const*varyings, GLenum bufferMode); +typedef void (GL_APIENTRYP PFNGLGETTRANSFORMFEEDBACKVARYINGPROC) (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLsizei *size, GLenum *type, GLchar *name); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBIPOINTERPROC) 
(GLuint index, GLint size, GLenum type, GLsizei stride, const void *pointer); +typedef void (GL_APIENTRYP PFNGLGETVERTEXATTRIBIIVPROC) (GLuint index, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETVERTEXATTRIBIUIVPROC) (GLuint index, GLenum pname, GLuint *params); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBI4IPROC) (GLuint index, GLint x, GLint y, GLint z, GLint w); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBI4UIPROC) (GLuint index, GLuint x, GLuint y, GLuint z, GLuint w); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBI4IVPROC) (GLuint index, const GLint *v); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBI4UIVPROC) (GLuint index, const GLuint *v); +typedef void (GL_APIENTRYP PFNGLGETUNIFORMUIVPROC) (GLuint program, GLint location, GLuint *params); +typedef GLint (GL_APIENTRYP PFNGLGETFRAGDATALOCATIONPROC) (GLuint program, const GLchar *name); +typedef void (GL_APIENTRYP PFNGLUNIFORM1UIPROC) (GLint location, GLuint v0); +typedef void (GL_APIENTRYP PFNGLUNIFORM2UIPROC) (GLint location, GLuint v0, GLuint v1); +typedef void (GL_APIENTRYP PFNGLUNIFORM3UIPROC) (GLint location, GLuint v0, GLuint v1, GLuint v2); +typedef void (GL_APIENTRYP PFNGLUNIFORM4UIPROC) (GLint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3); +typedef void (GL_APIENTRYP PFNGLUNIFORM1UIVPROC) (GLint location, GLsizei count, const GLuint *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM2UIVPROC) (GLint location, GLsizei count, const GLuint *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM3UIVPROC) (GLint location, GLsizei count, const GLuint *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM4UIVPROC) (GLint location, GLsizei count, const GLuint *value); +typedef void (GL_APIENTRYP PFNGLCLEARBUFFERIVPROC) (GLenum buffer, GLint drawbuffer, const GLint *value); +typedef void (GL_APIENTRYP PFNGLCLEARBUFFERUIVPROC) (GLenum buffer, GLint drawbuffer, const GLuint *value); +typedef void (GL_APIENTRYP PFNGLCLEARBUFFERFVPROC) (GLenum buffer, GLint drawbuffer, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLCLEARBUFFERFIPROC) (GLenum buffer, GLint drawbuffer, GLfloat depth, GLint stencil); +typedef const GLubyte *(GL_APIENTRYP PFNGLGETSTRINGIPROC) (GLenum name, GLuint index); +typedef void (GL_APIENTRYP PFNGLCOPYBUFFERSUBDATAPROC) (GLenum readTarget, GLenum writeTarget, GLintptr readOffset, GLintptr writeOffset, GLsizeiptr size); +typedef void (GL_APIENTRYP PFNGLGETUNIFORMINDICESPROC) (GLuint program, GLsizei uniformCount, const GLchar *const*uniformNames, GLuint *uniformIndices); +typedef void (GL_APIENTRYP PFNGLGETACTIVEUNIFORMSIVPROC) (GLuint program, GLsizei uniformCount, const GLuint *uniformIndices, GLenum pname, GLint *params); +typedef GLuint (GL_APIENTRYP PFNGLGETUNIFORMBLOCKINDEXPROC) (GLuint program, const GLchar *uniformBlockName); +typedef void (GL_APIENTRYP PFNGLGETACTIVEUNIFORMBLOCKIVPROC) (GLuint program, GLuint uniformBlockIndex, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETACTIVEUNIFORMBLOCKNAMEPROC) (GLuint program, GLuint uniformBlockIndex, GLsizei bufSize, GLsizei *length, GLchar *uniformBlockName); +typedef void (GL_APIENTRYP PFNGLUNIFORMBLOCKBINDINGPROC) (GLuint program, GLuint uniformBlockIndex, GLuint uniformBlockBinding); +typedef void (GL_APIENTRYP PFNGLDRAWARRAYSINSTANCEDPROC) (GLenum mode, GLint first, GLsizei count, GLsizei instancecount); +typedef void (GL_APIENTRYP PFNGLDRAWELEMENTSINSTANCEDPROC) (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount); +typedef GLsync (GL_APIENTRYP PFNGLFENCESYNCPROC) (GLenum condition, 
GLbitfield flags); +typedef GLboolean (GL_APIENTRYP PFNGLISSYNCPROC) (GLsync sync); +typedef void (GL_APIENTRYP PFNGLDELETESYNCPROC) (GLsync sync); +typedef GLenum (GL_APIENTRYP PFNGLCLIENTWAITSYNCPROC) (GLsync sync, GLbitfield flags, GLuint64 timeout); +typedef void (GL_APIENTRYP PFNGLWAITSYNCPROC) (GLsync sync, GLbitfield flags, GLuint64 timeout); +typedef void (GL_APIENTRYP PFNGLGETINTEGER64VPROC) (GLenum pname, GLint64 *data); +typedef void (GL_APIENTRYP PFNGLGETSYNCIVPROC) (GLsync sync, GLenum pname, GLsizei bufSize, GLsizei *length, GLint *values); +typedef void (GL_APIENTRYP PFNGLGETINTEGER64I_VPROC) (GLenum target, GLuint index, GLint64 *data); +typedef void (GL_APIENTRYP PFNGLGETBUFFERPARAMETERI64VPROC) (GLenum target, GLenum pname, GLint64 *params); +typedef void (GL_APIENTRYP PFNGLGENSAMPLERSPROC) (GLsizei count, GLuint *samplers); +typedef void (GL_APIENTRYP PFNGLDELETESAMPLERSPROC) (GLsizei count, const GLuint *samplers); +typedef GLboolean (GL_APIENTRYP PFNGLISSAMPLERPROC) (GLuint sampler); +typedef void (GL_APIENTRYP PFNGLBINDSAMPLERPROC) (GLuint unit, GLuint sampler); +typedef void (GL_APIENTRYP PFNGLSAMPLERPARAMETERIPROC) (GLuint sampler, GLenum pname, GLint param); +typedef void (GL_APIENTRYP PFNGLSAMPLERPARAMETERIVPROC) (GLuint sampler, GLenum pname, const GLint *param); +typedef void (GL_APIENTRYP PFNGLSAMPLERPARAMETERFPROC) (GLuint sampler, GLenum pname, GLfloat param); +typedef void (GL_APIENTRYP PFNGLSAMPLERPARAMETERFVPROC) (GLuint sampler, GLenum pname, const GLfloat *param); +typedef void (GL_APIENTRYP PFNGLGETSAMPLERPARAMETERIVPROC) (GLuint sampler, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETSAMPLERPARAMETERFVPROC) (GLuint sampler, GLenum pname, GLfloat *params); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBDIVISORPROC) (GLuint index, GLuint divisor); +typedef void (GL_APIENTRYP PFNGLBINDTRANSFORMFEEDBACKPROC) (GLenum target, GLuint id); +typedef void (GL_APIENTRYP PFNGLDELETETRANSFORMFEEDBACKSPROC) (GLsizei n, const GLuint *ids); +typedef void (GL_APIENTRYP PFNGLGENTRANSFORMFEEDBACKSPROC) (GLsizei n, GLuint *ids); +typedef GLboolean (GL_APIENTRYP PFNGLISTRANSFORMFEEDBACKPROC) (GLuint id); +typedef void (GL_APIENTRYP PFNGLPAUSETRANSFORMFEEDBACKPROC) (void); +typedef void (GL_APIENTRYP PFNGLRESUMETRANSFORMFEEDBACKPROC) (void); +typedef void (GL_APIENTRYP PFNGLGETPROGRAMBINARYPROC) (GLuint program, GLsizei bufSize, GLsizei *length, GLenum *binaryFormat, void *binary); +typedef void (GL_APIENTRYP PFNGLPROGRAMBINARYPROC) (GLuint program, GLenum binaryFormat, const void *binary, GLsizei length); +typedef void (GL_APIENTRYP PFNGLPROGRAMPARAMETERIPROC) (GLuint program, GLenum pname, GLint value); +typedef void (GL_APIENTRYP PFNGLINVALIDATEFRAMEBUFFERPROC) (GLenum target, GLsizei numAttachments, const GLenum *attachments); +typedef void (GL_APIENTRYP PFNGLINVALIDATESUBFRAMEBUFFERPROC) (GLenum target, GLsizei numAttachments, const GLenum *attachments, GLint x, GLint y, GLsizei width, GLsizei height); +typedef void (GL_APIENTRYP PFNGLTEXSTORAGE2DPROC) (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height); +typedef void (GL_APIENTRYP PFNGLTEXSTORAGE3DPROC) (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth); +typedef void (GL_APIENTRYP PFNGLGETINTERNALFORMATIVPROC) (GLenum target, GLenum internalformat, GLenum pname, GLsizei bufSize, GLint *params); +#if GL_GLES_PROTOTYPES +GL_APICALL void GL_APIENTRY glReadBuffer (GLenum src); +GL_APICALL void GL_APIENTRY 
glDrawRangeElements (GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum type, const void *indices); +GL_APICALL void GL_APIENTRY glTexImage3D (GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const void *pixels); +GL_APICALL void GL_APIENTRY glTexSubImage3D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const void *pixels); +GL_APICALL void GL_APIENTRY glCopyTexSubImage3D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLint x, GLint y, GLsizei width, GLsizei height); +GL_APICALL void GL_APIENTRY glCompressedTexImage3D (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLsizei imageSize, const void *data); +GL_APICALL void GL_APIENTRY glCompressedTexSubImage3D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLsizei imageSize, const void *data); +GL_APICALL void GL_APIENTRY glGenQueries (GLsizei n, GLuint *ids); +GL_APICALL void GL_APIENTRY glDeleteQueries (GLsizei n, const GLuint *ids); +GL_APICALL GLboolean GL_APIENTRY glIsQuery (GLuint id); +GL_APICALL void GL_APIENTRY glBeginQuery (GLenum target, GLuint id); +GL_APICALL void GL_APIENTRY glEndQuery (GLenum target); +GL_APICALL void GL_APIENTRY glGetQueryiv (GLenum target, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetQueryObjectuiv (GLuint id, GLenum pname, GLuint *params); +GL_APICALL GLboolean GL_APIENTRY glUnmapBuffer (GLenum target); +GL_APICALL void GL_APIENTRY glGetBufferPointerv (GLenum target, GLenum pname, void **params); +GL_APICALL void GL_APIENTRY glDrawBuffers (GLsizei n, const GLenum *bufs); +GL_APICALL void GL_APIENTRY glUniformMatrix2x3fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniformMatrix3x2fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniformMatrix2x4fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniformMatrix4x2fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniformMatrix3x4fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniformMatrix4x3fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glBlitFramebuffer (GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter); +GL_APICALL void GL_APIENTRY glRenderbufferStorageMultisample (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height); +GL_APICALL void GL_APIENTRY glFramebufferTextureLayer (GLenum target, GLenum attachment, GLuint texture, GLint level, GLint layer); +GL_APICALL void *GL_APIENTRY glMapBufferRange (GLenum target, GLintptr offset, GLsizeiptr length, GLbitfield access); +GL_APICALL void GL_APIENTRY glFlushMappedBufferRange (GLenum target, GLintptr offset, GLsizeiptr length); +GL_APICALL void GL_APIENTRY glBindVertexArray (GLuint array); +GL_APICALL void GL_APIENTRY glDeleteVertexArrays (GLsizei n, const GLuint *arrays); +GL_APICALL void GL_APIENTRY glGenVertexArrays (GLsizei n, GLuint *arrays); 
+GL_APICALL GLboolean GL_APIENTRY glIsVertexArray (GLuint array); +GL_APICALL void GL_APIENTRY glGetIntegeri_v (GLenum target, GLuint index, GLint *data); +GL_APICALL void GL_APIENTRY glBeginTransformFeedback (GLenum primitiveMode); +GL_APICALL void GL_APIENTRY glEndTransformFeedback (void); +GL_APICALL void GL_APIENTRY glBindBufferRange (GLenum target, GLuint index, GLuint buffer, GLintptr offset, GLsizeiptr size); +GL_APICALL void GL_APIENTRY glBindBufferBase (GLenum target, GLuint index, GLuint buffer); +GL_APICALL void GL_APIENTRY glTransformFeedbackVaryings (GLuint program, GLsizei count, const GLchar *const*varyings, GLenum bufferMode); +GL_APICALL void GL_APIENTRY glGetTransformFeedbackVarying (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLsizei *size, GLenum *type, GLchar *name); +GL_APICALL void GL_APIENTRY glVertexAttribIPointer (GLuint index, GLint size, GLenum type, GLsizei stride, const void *pointer); +GL_APICALL void GL_APIENTRY glGetVertexAttribIiv (GLuint index, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetVertexAttribIuiv (GLuint index, GLenum pname, GLuint *params); +GL_APICALL void GL_APIENTRY glVertexAttribI4i (GLuint index, GLint x, GLint y, GLint z, GLint w); +GL_APICALL void GL_APIENTRY glVertexAttribI4ui (GLuint index, GLuint x, GLuint y, GLuint z, GLuint w); +GL_APICALL void GL_APIENTRY glVertexAttribI4iv (GLuint index, const GLint *v); +GL_APICALL void GL_APIENTRY glVertexAttribI4uiv (GLuint index, const GLuint *v); +GL_APICALL void GL_APIENTRY glGetUniformuiv (GLuint program, GLint location, GLuint *params); +GL_APICALL GLint GL_APIENTRY glGetFragDataLocation (GLuint program, const GLchar *name); +GL_APICALL void GL_APIENTRY glUniform1ui (GLint location, GLuint v0); +GL_APICALL void GL_APIENTRY glUniform2ui (GLint location, GLuint v0, GLuint v1); +GL_APICALL void GL_APIENTRY glUniform3ui (GLint location, GLuint v0, GLuint v1, GLuint v2); +GL_APICALL void GL_APIENTRY glUniform4ui (GLint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3); +GL_APICALL void GL_APIENTRY glUniform1uiv (GLint location, GLsizei count, const GLuint *value); +GL_APICALL void GL_APIENTRY glUniform2uiv (GLint location, GLsizei count, const GLuint *value); +GL_APICALL void GL_APIENTRY glUniform3uiv (GLint location, GLsizei count, const GLuint *value); +GL_APICALL void GL_APIENTRY glUniform4uiv (GLint location, GLsizei count, const GLuint *value); +GL_APICALL void GL_APIENTRY glClearBufferiv (GLenum buffer, GLint drawbuffer, const GLint *value); +GL_APICALL void GL_APIENTRY glClearBufferuiv (GLenum buffer, GLint drawbuffer, const GLuint *value); +GL_APICALL void GL_APIENTRY glClearBufferfv (GLenum buffer, GLint drawbuffer, const GLfloat *value); +GL_APICALL void GL_APIENTRY glClearBufferfi (GLenum buffer, GLint drawbuffer, GLfloat depth, GLint stencil); +GL_APICALL const GLubyte *GL_APIENTRY glGetStringi (GLenum name, GLuint index); +GL_APICALL void GL_APIENTRY glCopyBufferSubData (GLenum readTarget, GLenum writeTarget, GLintptr readOffset, GLintptr writeOffset, GLsizeiptr size); +GL_APICALL void GL_APIENTRY glGetUniformIndices (GLuint program, GLsizei uniformCount, const GLchar *const*uniformNames, GLuint *uniformIndices); +GL_APICALL void GL_APIENTRY glGetActiveUniformsiv (GLuint program, GLsizei uniformCount, const GLuint *uniformIndices, GLenum pname, GLint *params); +GL_APICALL GLuint GL_APIENTRY glGetUniformBlockIndex (GLuint program, const GLchar *uniformBlockName); +GL_APICALL void GL_APIENTRY glGetActiveUniformBlockiv (GLuint program, GLuint 
uniformBlockIndex, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetActiveUniformBlockName (GLuint program, GLuint uniformBlockIndex, GLsizei bufSize, GLsizei *length, GLchar *uniformBlockName); +GL_APICALL void GL_APIENTRY glUniformBlockBinding (GLuint program, GLuint uniformBlockIndex, GLuint uniformBlockBinding); +GL_APICALL void GL_APIENTRY glDrawArraysInstanced (GLenum mode, GLint first, GLsizei count, GLsizei instancecount); +GL_APICALL void GL_APIENTRY glDrawElementsInstanced (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount); +GL_APICALL GLsync GL_APIENTRY glFenceSync (GLenum condition, GLbitfield flags); +GL_APICALL GLboolean GL_APIENTRY glIsSync (GLsync sync); +GL_APICALL void GL_APIENTRY glDeleteSync (GLsync sync); +GL_APICALL GLenum GL_APIENTRY glClientWaitSync (GLsync sync, GLbitfield flags, GLuint64 timeout); +GL_APICALL void GL_APIENTRY glWaitSync (GLsync sync, GLbitfield flags, GLuint64 timeout); +GL_APICALL void GL_APIENTRY glGetInteger64v (GLenum pname, GLint64 *data); +GL_APICALL void GL_APIENTRY glGetSynciv (GLsync sync, GLenum pname, GLsizei bufSize, GLsizei *length, GLint *values); +GL_APICALL void GL_APIENTRY glGetInteger64i_v (GLenum target, GLuint index, GLint64 *data); +GL_APICALL void GL_APIENTRY glGetBufferParameteri64v (GLenum target, GLenum pname, GLint64 *params); +GL_APICALL void GL_APIENTRY glGenSamplers (GLsizei count, GLuint *samplers); +GL_APICALL void GL_APIENTRY glDeleteSamplers (GLsizei count, const GLuint *samplers); +GL_APICALL GLboolean GL_APIENTRY glIsSampler (GLuint sampler); +GL_APICALL void GL_APIENTRY glBindSampler (GLuint unit, GLuint sampler); +GL_APICALL void GL_APIENTRY glSamplerParameteri (GLuint sampler, GLenum pname, GLint param); +GL_APICALL void GL_APIENTRY glSamplerParameteriv (GLuint sampler, GLenum pname, const GLint *param); +GL_APICALL void GL_APIENTRY glSamplerParameterf (GLuint sampler, GLenum pname, GLfloat param); +GL_APICALL void GL_APIENTRY glSamplerParameterfv (GLuint sampler, GLenum pname, const GLfloat *param); +GL_APICALL void GL_APIENTRY glGetSamplerParameteriv (GLuint sampler, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetSamplerParameterfv (GLuint sampler, GLenum pname, GLfloat *params); +GL_APICALL void GL_APIENTRY glVertexAttribDivisor (GLuint index, GLuint divisor); +GL_APICALL void GL_APIENTRY glBindTransformFeedback (GLenum target, GLuint id); +GL_APICALL void GL_APIENTRY glDeleteTransformFeedbacks (GLsizei n, const GLuint *ids); +GL_APICALL void GL_APIENTRY glGenTransformFeedbacks (GLsizei n, GLuint *ids); +GL_APICALL GLboolean GL_APIENTRY glIsTransformFeedback (GLuint id); +GL_APICALL void GL_APIENTRY glPauseTransformFeedback (void); +GL_APICALL void GL_APIENTRY glResumeTransformFeedback (void); +GL_APICALL void GL_APIENTRY glGetProgramBinary (GLuint program, GLsizei bufSize, GLsizei *length, GLenum *binaryFormat, void *binary); +GL_APICALL void GL_APIENTRY glProgramBinary (GLuint program, GLenum binaryFormat, const void *binary, GLsizei length); +GL_APICALL void GL_APIENTRY glProgramParameteri (GLuint program, GLenum pname, GLint value); +GL_APICALL void GL_APIENTRY glInvalidateFramebuffer (GLenum target, GLsizei numAttachments, const GLenum *attachments); +GL_APICALL void GL_APIENTRY glInvalidateSubFramebuffer (GLenum target, GLsizei numAttachments, const GLenum *attachments, GLint x, GLint y, GLsizei width, GLsizei height); +GL_APICALL void GL_APIENTRY glTexStorage2D (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, 
GLsizei height); +GL_APICALL void GL_APIENTRY glTexStorage3D (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth); +GL_APICALL void GL_APIENTRY glGetInternalformativ (GLenum target, GLenum internalformat, GLenum pname, GLsizei bufSize, GLint *params); +#endif +#endif /* GL_ES_VERSION_3_0 */ + +#ifndef GL_ES_VERSION_3_1 +#define GL_ES_VERSION_3_1 1 +#define GL_COMPUTE_SHADER 0x91B9 +#define GL_MAX_COMPUTE_UNIFORM_BLOCKS 0x91BB +#define GL_MAX_COMPUTE_TEXTURE_IMAGE_UNITS 0x91BC +#define GL_MAX_COMPUTE_IMAGE_UNIFORMS 0x91BD +#define GL_MAX_COMPUTE_SHARED_MEMORY_SIZE 0x8262 +#define GL_MAX_COMPUTE_UNIFORM_COMPONENTS 0x8263 +#define GL_MAX_COMPUTE_ATOMIC_COUNTER_BUFFERS 0x8264 +#define GL_MAX_COMPUTE_ATOMIC_COUNTERS 0x8265 +#define GL_MAX_COMBINED_COMPUTE_UNIFORM_COMPONENTS 0x8266 +#define GL_MAX_COMPUTE_WORK_GROUP_INVOCATIONS 0x90EB +#define GL_MAX_COMPUTE_WORK_GROUP_COUNT 0x91BE +#define GL_MAX_COMPUTE_WORK_GROUP_SIZE 0x91BF +#define GL_COMPUTE_WORK_GROUP_SIZE 0x8267 +#define GL_DISPATCH_INDIRECT_BUFFER 0x90EE +#define GL_DISPATCH_INDIRECT_BUFFER_BINDING 0x90EF +#define GL_COMPUTE_SHADER_BIT 0x00000020 +#define GL_DRAW_INDIRECT_BUFFER 0x8F3F +#define GL_DRAW_INDIRECT_BUFFER_BINDING 0x8F43 +#define GL_MAX_UNIFORM_LOCATIONS 0x826E +#define GL_FRAMEBUFFER_DEFAULT_WIDTH 0x9310 +#define GL_FRAMEBUFFER_DEFAULT_HEIGHT 0x9311 +#define GL_FRAMEBUFFER_DEFAULT_SAMPLES 0x9313 +#define GL_FRAMEBUFFER_DEFAULT_FIXED_SAMPLE_LOCATIONS 0x9314 +#define GL_MAX_FRAMEBUFFER_WIDTH 0x9315 +#define GL_MAX_FRAMEBUFFER_HEIGHT 0x9316 +#define GL_MAX_FRAMEBUFFER_SAMPLES 0x9318 +#define GL_UNIFORM 0x92E1 +#define GL_UNIFORM_BLOCK 0x92E2 +#define GL_PROGRAM_INPUT 0x92E3 +#define GL_PROGRAM_OUTPUT 0x92E4 +#define GL_BUFFER_VARIABLE 0x92E5 +#define GL_SHADER_STORAGE_BLOCK 0x92E6 +#define GL_ATOMIC_COUNTER_BUFFER 0x92C0 +#define GL_TRANSFORM_FEEDBACK_VARYING 0x92F4 +#define GL_ACTIVE_RESOURCES 0x92F5 +#define GL_MAX_NAME_LENGTH 0x92F6 +#define GL_MAX_NUM_ACTIVE_VARIABLES 0x92F7 +#define GL_NAME_LENGTH 0x92F9 +#define GL_TYPE 0x92FA +#define GL_ARRAY_SIZE 0x92FB +#define GL_OFFSET 0x92FC +#define GL_BLOCK_INDEX 0x92FD +#define GL_ARRAY_STRIDE 0x92FE +#define GL_MATRIX_STRIDE 0x92FF +#define GL_IS_ROW_MAJOR 0x9300 +#define GL_ATOMIC_COUNTER_BUFFER_INDEX 0x9301 +#define GL_BUFFER_BINDING 0x9302 +#define GL_BUFFER_DATA_SIZE 0x9303 +#define GL_NUM_ACTIVE_VARIABLES 0x9304 +#define GL_ACTIVE_VARIABLES 0x9305 +#define GL_REFERENCED_BY_VERTEX_SHADER 0x9306 +#define GL_REFERENCED_BY_FRAGMENT_SHADER 0x930A +#define GL_REFERENCED_BY_COMPUTE_SHADER 0x930B +#define GL_TOP_LEVEL_ARRAY_SIZE 0x930C +#define GL_TOP_LEVEL_ARRAY_STRIDE 0x930D +#define GL_LOCATION 0x930E +#define GL_VERTEX_SHADER_BIT 0x00000001 +#define GL_FRAGMENT_SHADER_BIT 0x00000002 +#define GL_ALL_SHADER_BITS 0xFFFFFFFF +#define GL_PROGRAM_SEPARABLE 0x8258 +#define GL_ACTIVE_PROGRAM 0x8259 +#define GL_PROGRAM_PIPELINE_BINDING 0x825A +#define GL_ATOMIC_COUNTER_BUFFER_BINDING 0x92C1 +#define GL_ATOMIC_COUNTER_BUFFER_START 0x92C2 +#define GL_ATOMIC_COUNTER_BUFFER_SIZE 0x92C3 +#define GL_MAX_VERTEX_ATOMIC_COUNTER_BUFFERS 0x92CC +#define GL_MAX_FRAGMENT_ATOMIC_COUNTER_BUFFERS 0x92D0 +#define GL_MAX_COMBINED_ATOMIC_COUNTER_BUFFERS 0x92D1 +#define GL_MAX_VERTEX_ATOMIC_COUNTERS 0x92D2 +#define GL_MAX_FRAGMENT_ATOMIC_COUNTERS 0x92D6 +#define GL_MAX_COMBINED_ATOMIC_COUNTERS 0x92D7 +#define GL_MAX_ATOMIC_COUNTER_BUFFER_SIZE 0x92D8 +#define GL_MAX_ATOMIC_COUNTER_BUFFER_BINDINGS 0x92DC +#define GL_ACTIVE_ATOMIC_COUNTER_BUFFERS 0x92D9 
+#define GL_UNSIGNED_INT_ATOMIC_COUNTER 0x92DB +#define GL_MAX_IMAGE_UNITS 0x8F38 +#define GL_MAX_VERTEX_IMAGE_UNIFORMS 0x90CA +#define GL_MAX_FRAGMENT_IMAGE_UNIFORMS 0x90CE +#define GL_MAX_COMBINED_IMAGE_UNIFORMS 0x90CF +#define GL_IMAGE_BINDING_NAME 0x8F3A +#define GL_IMAGE_BINDING_LEVEL 0x8F3B +#define GL_IMAGE_BINDING_LAYERED 0x8F3C +#define GL_IMAGE_BINDING_LAYER 0x8F3D +#define GL_IMAGE_BINDING_ACCESS 0x8F3E +#define GL_IMAGE_BINDING_FORMAT 0x906E +#define GL_VERTEX_ATTRIB_ARRAY_BARRIER_BIT 0x00000001 +#define GL_ELEMENT_ARRAY_BARRIER_BIT 0x00000002 +#define GL_UNIFORM_BARRIER_BIT 0x00000004 +#define GL_TEXTURE_FETCH_BARRIER_BIT 0x00000008 +#define GL_SHADER_IMAGE_ACCESS_BARRIER_BIT 0x00000020 +#define GL_COMMAND_BARRIER_BIT 0x00000040 +#define GL_PIXEL_BUFFER_BARRIER_BIT 0x00000080 +#define GL_TEXTURE_UPDATE_BARRIER_BIT 0x00000100 +#define GL_BUFFER_UPDATE_BARRIER_BIT 0x00000200 +#define GL_FRAMEBUFFER_BARRIER_BIT 0x00000400 +#define GL_TRANSFORM_FEEDBACK_BARRIER_BIT 0x00000800 +#define GL_ATOMIC_COUNTER_BARRIER_BIT 0x00001000 +#define GL_ALL_BARRIER_BITS 0xFFFFFFFF +#define GL_IMAGE_2D 0x904D +#define GL_IMAGE_3D 0x904E +#define GL_IMAGE_CUBE 0x9050 +#define GL_IMAGE_2D_ARRAY 0x9053 +#define GL_INT_IMAGE_2D 0x9058 +#define GL_INT_IMAGE_3D 0x9059 +#define GL_INT_IMAGE_CUBE 0x905B +#define GL_INT_IMAGE_2D_ARRAY 0x905E +#define GL_UNSIGNED_INT_IMAGE_2D 0x9063 +#define GL_UNSIGNED_INT_IMAGE_3D 0x9064 +#define GL_UNSIGNED_INT_IMAGE_CUBE 0x9066 +#define GL_UNSIGNED_INT_IMAGE_2D_ARRAY 0x9069 +#define GL_IMAGE_FORMAT_COMPATIBILITY_TYPE 0x90C7 +#define GL_IMAGE_FORMAT_COMPATIBILITY_BY_SIZE 0x90C8 +#define GL_IMAGE_FORMAT_COMPATIBILITY_BY_CLASS 0x90C9 +#define GL_READ_ONLY 0x88B8 +#define GL_WRITE_ONLY 0x88B9 +#define GL_READ_WRITE 0x88BA +#define GL_SHADER_STORAGE_BUFFER 0x90D2 +#define GL_SHADER_STORAGE_BUFFER_BINDING 0x90D3 +#define GL_SHADER_STORAGE_BUFFER_START 0x90D4 +#define GL_SHADER_STORAGE_BUFFER_SIZE 0x90D5 +#define GL_MAX_VERTEX_SHADER_STORAGE_BLOCKS 0x90D6 +#define GL_MAX_FRAGMENT_SHADER_STORAGE_BLOCKS 0x90DA +#define GL_MAX_COMPUTE_SHADER_STORAGE_BLOCKS 0x90DB +#define GL_MAX_COMBINED_SHADER_STORAGE_BLOCKS 0x90DC +#define GL_MAX_SHADER_STORAGE_BUFFER_BINDINGS 0x90DD +#define GL_MAX_SHADER_STORAGE_BLOCK_SIZE 0x90DE +#define GL_SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT 0x90DF +#define GL_SHADER_STORAGE_BARRIER_BIT 0x00002000 +#define GL_MAX_COMBINED_SHADER_OUTPUT_RESOURCES 0x8F39 +#define GL_DEPTH_STENCIL_TEXTURE_MODE 0x90EA +#define GL_STENCIL_INDEX 0x1901 +#define GL_MIN_PROGRAM_TEXTURE_GATHER_OFFSET 0x8E5E +#define GL_MAX_PROGRAM_TEXTURE_GATHER_OFFSET 0x8E5F +#define GL_SAMPLE_POSITION 0x8E50 +#define GL_SAMPLE_MASK 0x8E51 +#define GL_SAMPLE_MASK_VALUE 0x8E52 +#define GL_TEXTURE_2D_MULTISAMPLE 0x9100 +#define GL_MAX_SAMPLE_MASK_WORDS 0x8E59 +#define GL_MAX_COLOR_TEXTURE_SAMPLES 0x910E +#define GL_MAX_DEPTH_TEXTURE_SAMPLES 0x910F +#define GL_MAX_INTEGER_SAMPLES 0x9110 +#define GL_TEXTURE_BINDING_2D_MULTISAMPLE 0x9104 +#define GL_TEXTURE_SAMPLES 0x9106 +#define GL_TEXTURE_FIXED_SAMPLE_LOCATIONS 0x9107 +#define GL_TEXTURE_WIDTH 0x1000 +#define GL_TEXTURE_HEIGHT 0x1001 +#define GL_TEXTURE_DEPTH 0x8071 +#define GL_TEXTURE_INTERNAL_FORMAT 0x1003 +#define GL_TEXTURE_RED_SIZE 0x805C +#define GL_TEXTURE_GREEN_SIZE 0x805D +#define GL_TEXTURE_BLUE_SIZE 0x805E +#define GL_TEXTURE_ALPHA_SIZE 0x805F +#define GL_TEXTURE_DEPTH_SIZE 0x884A +#define GL_TEXTURE_STENCIL_SIZE 0x88F1 +#define GL_TEXTURE_SHARED_SIZE 0x8C3F +#define GL_TEXTURE_RED_TYPE 0x8C10 +#define GL_TEXTURE_GREEN_TYPE 0x8C11 
+#define GL_TEXTURE_BLUE_TYPE 0x8C12 +#define GL_TEXTURE_ALPHA_TYPE 0x8C13 +#define GL_TEXTURE_DEPTH_TYPE 0x8C16 +#define GL_TEXTURE_COMPRESSED 0x86A1 +#define GL_SAMPLER_2D_MULTISAMPLE 0x9108 +#define GL_INT_SAMPLER_2D_MULTISAMPLE 0x9109 +#define GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE 0x910A +#define GL_VERTEX_ATTRIB_BINDING 0x82D4 +#define GL_VERTEX_ATTRIB_RELATIVE_OFFSET 0x82D5 +#define GL_VERTEX_BINDING_DIVISOR 0x82D6 +#define GL_VERTEX_BINDING_OFFSET 0x82D7 +#define GL_VERTEX_BINDING_STRIDE 0x82D8 +#define GL_VERTEX_BINDING_BUFFER 0x8F4F +#define GL_MAX_VERTEX_ATTRIB_RELATIVE_OFFSET 0x82D9 +#define GL_MAX_VERTEX_ATTRIB_BINDINGS 0x82DA +#define GL_MAX_VERTEX_ATTRIB_STRIDE 0x82E5 +typedef void (GL_APIENTRYP PFNGLDISPATCHCOMPUTEPROC) (GLuint num_groups_x, GLuint num_groups_y, GLuint num_groups_z); +typedef void (GL_APIENTRYP PFNGLDISPATCHCOMPUTEINDIRECTPROC) (GLintptr indirect); +typedef void (GL_APIENTRYP PFNGLDRAWARRAYSINDIRECTPROC) (GLenum mode, const void *indirect); +typedef void (GL_APIENTRYP PFNGLDRAWELEMENTSINDIRECTPROC) (GLenum mode, GLenum type, const void *indirect); +typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERPARAMETERIPROC) (GLenum target, GLenum pname, GLint param); +typedef void (GL_APIENTRYP PFNGLGETFRAMEBUFFERPARAMETERIVPROC) (GLenum target, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETPROGRAMINTERFACEIVPROC) (GLuint program, GLenum programInterface, GLenum pname, GLint *params); +typedef GLuint (GL_APIENTRYP PFNGLGETPROGRAMRESOURCEINDEXPROC) (GLuint program, GLenum programInterface, const GLchar *name); +typedef void (GL_APIENTRYP PFNGLGETPROGRAMRESOURCENAMEPROC) (GLuint program, GLenum programInterface, GLuint index, GLsizei bufSize, GLsizei *length, GLchar *name); +typedef void (GL_APIENTRYP PFNGLGETPROGRAMRESOURCEIVPROC) (GLuint program, GLenum programInterface, GLuint index, GLsizei propCount, const GLenum *props, GLsizei bufSize, GLsizei *length, GLint *params); +typedef GLint (GL_APIENTRYP PFNGLGETPROGRAMRESOURCELOCATIONPROC) (GLuint program, GLenum programInterface, const GLchar *name); +typedef void (GL_APIENTRYP PFNGLUSEPROGRAMSTAGESPROC) (GLuint pipeline, GLbitfield stages, GLuint program); +typedef void (GL_APIENTRYP PFNGLACTIVESHADERPROGRAMPROC) (GLuint pipeline, GLuint program); +typedef GLuint (GL_APIENTRYP PFNGLCREATESHADERPROGRAMVPROC) (GLenum type, GLsizei count, const GLchar *const*strings); +typedef void (GL_APIENTRYP PFNGLBINDPROGRAMPIPELINEPROC) (GLuint pipeline); +typedef void (GL_APIENTRYP PFNGLDELETEPROGRAMPIPELINESPROC) (GLsizei n, const GLuint *pipelines); +typedef void (GL_APIENTRYP PFNGLGENPROGRAMPIPELINESPROC) (GLsizei n, GLuint *pipelines); +typedef GLboolean (GL_APIENTRYP PFNGLISPROGRAMPIPELINEPROC) (GLuint pipeline); +typedef void (GL_APIENTRYP PFNGLGETPROGRAMPIPELINEIVPROC) (GLuint pipeline, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM1IPROC) (GLuint program, GLint location, GLint v0); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM2IPROC) (GLuint program, GLint location, GLint v0, GLint v1); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM3IPROC) (GLuint program, GLint location, GLint v0, GLint v1, GLint v2); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM4IPROC) (GLuint program, GLint location, GLint v0, GLint v1, GLint v2, GLint v3); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM1UIPROC) (GLuint program, GLint location, GLuint v0); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM2UIPROC) (GLuint program, GLint location, GLuint v0, GLuint v1); +typedef void (GL_APIENTRYP 
PFNGLPROGRAMUNIFORM3UIPROC) (GLuint program, GLint location, GLuint v0, GLuint v1, GLuint v2); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM4UIPROC) (GLuint program, GLint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM1FPROC) (GLuint program, GLint location, GLfloat v0); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM2FPROC) (GLuint program, GLint location, GLfloat v0, GLfloat v1); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM3FPROC) (GLuint program, GLint location, GLfloat v0, GLfloat v1, GLfloat v2); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM4FPROC) (GLuint program, GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM1IVPROC) (GLuint program, GLint location, GLsizei count, const GLint *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM2IVPROC) (GLuint program, GLint location, GLsizei count, const GLint *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM3IVPROC) (GLuint program, GLint location, GLsizei count, const GLint *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM4IVPROC) (GLuint program, GLint location, GLsizei count, const GLint *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM1UIVPROC) (GLuint program, GLint location, GLsizei count, const GLuint *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM2UIVPROC) (GLuint program, GLint location, GLsizei count, const GLuint *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM3UIVPROC) (GLuint program, GLint location, GLsizei count, const GLuint *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM4UIVPROC) (GLuint program, GLint location, GLsizei count, const GLuint *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM1FVPROC) (GLuint program, GLint location, GLsizei count, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM2FVPROC) (GLuint program, GLint location, GLsizei count, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM3FVPROC) (GLuint program, GLint location, GLsizei count, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM4FVPROC) (GLuint program, GLint location, GLsizei count, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX2FVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX3FVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX4FVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX2X3FVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX3X2FVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX2X4FVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX4X2FVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX3X4FVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX4X3FVPROC) (GLuint program, GLint 
location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLVALIDATEPROGRAMPIPELINEPROC) (GLuint pipeline); +typedef void (GL_APIENTRYP PFNGLGETPROGRAMPIPELINEINFOLOGPROC) (GLuint pipeline, GLsizei bufSize, GLsizei *length, GLchar *infoLog); +typedef void (GL_APIENTRYP PFNGLBINDIMAGETEXTUREPROC) (GLuint unit, GLuint texture, GLint level, GLboolean layered, GLint layer, GLenum access, GLenum format); +typedef void (GL_APIENTRYP PFNGLGETBOOLEANI_VPROC) (GLenum target, GLuint index, GLboolean *data); +typedef void (GL_APIENTRYP PFNGLMEMORYBARRIERPROC) (GLbitfield barriers); +typedef void (GL_APIENTRYP PFNGLMEMORYBARRIERBYREGIONPROC) (GLbitfield barriers); +typedef void (GL_APIENTRYP PFNGLTEXSTORAGE2DMULTISAMPLEPROC) (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLboolean fixedsamplelocations); +typedef void (GL_APIENTRYP PFNGLGETMULTISAMPLEFVPROC) (GLenum pname, GLuint index, GLfloat *val); +typedef void (GL_APIENTRYP PFNGLSAMPLEMASKIPROC) (GLuint maskNumber, GLbitfield mask); +typedef void (GL_APIENTRYP PFNGLGETTEXLEVELPARAMETERIVPROC) (GLenum target, GLint level, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETTEXLEVELPARAMETERFVPROC) (GLenum target, GLint level, GLenum pname, GLfloat *params); +typedef void (GL_APIENTRYP PFNGLBINDVERTEXBUFFERPROC) (GLuint bindingindex, GLuint buffer, GLintptr offset, GLsizei stride); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBFORMATPROC) (GLuint attribindex, GLint size, GLenum type, GLboolean normalized, GLuint relativeoffset); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBIFORMATPROC) (GLuint attribindex, GLint size, GLenum type, GLuint relativeoffset); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBBINDINGPROC) (GLuint attribindex, GLuint bindingindex); +typedef void (GL_APIENTRYP PFNGLVERTEXBINDINGDIVISORPROC) (GLuint bindingindex, GLuint divisor); +#if GL_GLES_PROTOTYPES +GL_APICALL void GL_APIENTRY glDispatchCompute (GLuint num_groups_x, GLuint num_groups_y, GLuint num_groups_z); +GL_APICALL void GL_APIENTRY glDispatchComputeIndirect (GLintptr indirect); +GL_APICALL void GL_APIENTRY glDrawArraysIndirect (GLenum mode, const void *indirect); +GL_APICALL void GL_APIENTRY glDrawElementsIndirect (GLenum mode, GLenum type, const void *indirect); +GL_APICALL void GL_APIENTRY glFramebufferParameteri (GLenum target, GLenum pname, GLint param); +GL_APICALL void GL_APIENTRY glGetFramebufferParameteriv (GLenum target, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetProgramInterfaceiv (GLuint program, GLenum programInterface, GLenum pname, GLint *params); +GL_APICALL GLuint GL_APIENTRY glGetProgramResourceIndex (GLuint program, GLenum programInterface, const GLchar *name); +GL_APICALL void GL_APIENTRY glGetProgramResourceName (GLuint program, GLenum programInterface, GLuint index, GLsizei bufSize, GLsizei *length, GLchar *name); +GL_APICALL void GL_APIENTRY glGetProgramResourceiv (GLuint program, GLenum programInterface, GLuint index, GLsizei propCount, const GLenum *props, GLsizei bufSize, GLsizei *length, GLint *params); +GL_APICALL GLint GL_APIENTRY glGetProgramResourceLocation (GLuint program, GLenum programInterface, const GLchar *name); +GL_APICALL void GL_APIENTRY glUseProgramStages (GLuint pipeline, GLbitfield stages, GLuint program); +GL_APICALL void GL_APIENTRY glActiveShaderProgram (GLuint pipeline, GLuint program); +GL_APICALL GLuint GL_APIENTRY glCreateShaderProgramv (GLenum type, GLsizei count, const GLchar *const*strings); +GL_APICALL 
void GL_APIENTRY glBindProgramPipeline (GLuint pipeline); +GL_APICALL void GL_APIENTRY glDeleteProgramPipelines (GLsizei n, const GLuint *pipelines); +GL_APICALL void GL_APIENTRY glGenProgramPipelines (GLsizei n, GLuint *pipelines); +GL_APICALL GLboolean GL_APIENTRY glIsProgramPipeline (GLuint pipeline); +GL_APICALL void GL_APIENTRY glGetProgramPipelineiv (GLuint pipeline, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glProgramUniform1i (GLuint program, GLint location, GLint v0); +GL_APICALL void GL_APIENTRY glProgramUniform2i (GLuint program, GLint location, GLint v0, GLint v1); +GL_APICALL void GL_APIENTRY glProgramUniform3i (GLuint program, GLint location, GLint v0, GLint v1, GLint v2); +GL_APICALL void GL_APIENTRY glProgramUniform4i (GLuint program, GLint location, GLint v0, GLint v1, GLint v2, GLint v3); +GL_APICALL void GL_APIENTRY glProgramUniform1ui (GLuint program, GLint location, GLuint v0); +GL_APICALL void GL_APIENTRY glProgramUniform2ui (GLuint program, GLint location, GLuint v0, GLuint v1); +GL_APICALL void GL_APIENTRY glProgramUniform3ui (GLuint program, GLint location, GLuint v0, GLuint v1, GLuint v2); +GL_APICALL void GL_APIENTRY glProgramUniform4ui (GLuint program, GLint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3); +GL_APICALL void GL_APIENTRY glProgramUniform1f (GLuint program, GLint location, GLfloat v0); +GL_APICALL void GL_APIENTRY glProgramUniform2f (GLuint program, GLint location, GLfloat v0, GLfloat v1); +GL_APICALL void GL_APIENTRY glProgramUniform3f (GLuint program, GLint location, GLfloat v0, GLfloat v1, GLfloat v2); +GL_APICALL void GL_APIENTRY glProgramUniform4f (GLuint program, GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3); +GL_APICALL void GL_APIENTRY glProgramUniform1iv (GLuint program, GLint location, GLsizei count, const GLint *value); +GL_APICALL void GL_APIENTRY glProgramUniform2iv (GLuint program, GLint location, GLsizei count, const GLint *value); +GL_APICALL void GL_APIENTRY glProgramUniform3iv (GLuint program, GLint location, GLsizei count, const GLint *value); +GL_APICALL void GL_APIENTRY glProgramUniform4iv (GLuint program, GLint location, GLsizei count, const GLint *value); +GL_APICALL void GL_APIENTRY glProgramUniform1uiv (GLuint program, GLint location, GLsizei count, const GLuint *value); +GL_APICALL void GL_APIENTRY glProgramUniform2uiv (GLuint program, GLint location, GLsizei count, const GLuint *value); +GL_APICALL void GL_APIENTRY glProgramUniform3uiv (GLuint program, GLint location, GLsizei count, const GLuint *value); +GL_APICALL void GL_APIENTRY glProgramUniform4uiv (GLuint program, GLint location, GLsizei count, const GLuint *value); +GL_APICALL void GL_APIENTRY glProgramUniform1fv (GLuint program, GLint location, GLsizei count, const GLfloat *value); +GL_APICALL void GL_APIENTRY glProgramUniform2fv (GLuint program, GLint location, GLsizei count, const GLfloat *value); +GL_APICALL void GL_APIENTRY glProgramUniform3fv (GLuint program, GLint location, GLsizei count, const GLfloat *value); +GL_APICALL void GL_APIENTRY glProgramUniform4fv (GLuint program, GLint location, GLsizei count, const GLfloat *value); +GL_APICALL void GL_APIENTRY glProgramUniformMatrix2fv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glProgramUniformMatrix3fv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glProgramUniformMatrix4fv (GLuint program, GLint location, GLsizei count, 
GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glProgramUniformMatrix2x3fv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glProgramUniformMatrix3x2fv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glProgramUniformMatrix2x4fv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glProgramUniformMatrix4x2fv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glProgramUniformMatrix3x4fv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glProgramUniformMatrix4x3fv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glValidateProgramPipeline (GLuint pipeline); +GL_APICALL void GL_APIENTRY glGetProgramPipelineInfoLog (GLuint pipeline, GLsizei bufSize, GLsizei *length, GLchar *infoLog); +GL_APICALL void GL_APIENTRY glBindImageTexture (GLuint unit, GLuint texture, GLint level, GLboolean layered, GLint layer, GLenum access, GLenum format); +GL_APICALL void GL_APIENTRY glGetBooleani_v (GLenum target, GLuint index, GLboolean *data); +GL_APICALL void GL_APIENTRY glMemoryBarrier (GLbitfield barriers); +GL_APICALL void GL_APIENTRY glMemoryBarrierByRegion (GLbitfield barriers); +GL_APICALL void GL_APIENTRY glTexStorage2DMultisample (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLboolean fixedsamplelocations); +GL_APICALL void GL_APIENTRY glGetMultisamplefv (GLenum pname, GLuint index, GLfloat *val); +GL_APICALL void GL_APIENTRY glSampleMaski (GLuint maskNumber, GLbitfield mask); +GL_APICALL void GL_APIENTRY glGetTexLevelParameteriv (GLenum target, GLint level, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetTexLevelParameterfv (GLenum target, GLint level, GLenum pname, GLfloat *params); +GL_APICALL void GL_APIENTRY glBindVertexBuffer (GLuint bindingindex, GLuint buffer, GLintptr offset, GLsizei stride); +GL_APICALL void GL_APIENTRY glVertexAttribFormat (GLuint attribindex, GLint size, GLenum type, GLboolean normalized, GLuint relativeoffset); +GL_APICALL void GL_APIENTRY glVertexAttribIFormat (GLuint attribindex, GLint size, GLenum type, GLuint relativeoffset); +GL_APICALL void GL_APIENTRY glVertexAttribBinding (GLuint attribindex, GLuint bindingindex); +GL_APICALL void GL_APIENTRY glVertexBindingDivisor (GLuint bindingindex, GLuint divisor); +#endif +#endif /* GL_ES_VERSION_3_1 */ + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/sources/pvrsdk/Include/GLES3/gl32.h b/sources/pvrsdk/Include/GLES3/gl32.h new file mode 100755 index 00000000..4930d6fe --- /dev/null +++ b/sources/pvrsdk/Include/GLES3/gl32.h @@ -0,0 +1,1825 @@ +#ifndef __gl32_h_ +#define __gl32_h_ 1 + +#ifdef __cplusplus +extern "C" { +#endif + +/* +** Copyright (c) 2013-2015 The Khronos Group Inc. 
+** +** Permission is hereby granted, free of charge, to any person obtaining a +** copy of this software and/or associated documentation files (the +** "Materials"), to deal in the Materials without restriction, including +** without limitation the rights to use, copy, modify, merge, publish, +** distribute, sublicense, and/or sell copies of the Materials, and to +** permit persons to whom the Materials are furnished to do so, subject to +** the following conditions: +** +** The above copyright notice and this permission notice shall be included +** in all copies or substantial portions of the Materials. +** +** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +** MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS. +*/ +/* +** This header is generated from the Khronos OpenGL / OpenGL ES XML +** API Registry. The current version of the Registry, generator scripts +** used to make the header, and the header can be found at +** http://www.opengl.org/registry/ +** +** Khronos $Revision$ on $Date$ +*/ + +#include "GLES3/gl3platform.h" + +#ifndef GL_APIENTRYP +#define GL_APIENTRYP GL_APIENTRY* +#endif + +/* Generated on date 20150809 */ + +/* Generated C header for: + * API: gles2 + * Profile: common + * Versions considered: 2\.[0-9]|3\.[012] + * Versions emitted: .* + * Default extensions included: None + * Additional extensions included: _nomatch_^ + * Extensions removed: _nomatch_^ + */ + +#ifndef GL_ES_VERSION_2_0 +#define GL_ES_VERSION_2_0 1 +#include <KHR/khrplatform.h> +typedef khronos_int8_t GLbyte; +typedef khronos_float_t GLclampf; +typedef khronos_int32_t GLfixed; +typedef short GLshort; +typedef unsigned short GLushort; +typedef void GLvoid; +typedef struct __GLsync *GLsync; +typedef khronos_int64_t GLint64; +typedef khronos_uint64_t GLuint64; +typedef unsigned int GLenum; +typedef unsigned int GLuint; +typedef char GLchar; +typedef khronos_float_t GLfloat; +typedef khronos_ssize_t GLsizeiptr; +typedef khronos_intptr_t GLintptr; +typedef unsigned int GLbitfield; +typedef int GLint; +typedef unsigned char GLboolean; +typedef int GLsizei; +typedef khronos_uint8_t GLubyte; +#define GL_DEPTH_BUFFER_BIT 0x00000100 +#define GL_STENCIL_BUFFER_BIT 0x00000400 +#define GL_COLOR_BUFFER_BIT 0x00004000 +#define GL_FALSE 0 +#define GL_TRUE 1 +#define GL_POINTS 0x0000 +#define GL_LINES 0x0001 +#define GL_LINE_LOOP 0x0002 +#define GL_LINE_STRIP 0x0003 +#define GL_TRIANGLES 0x0004 +#define GL_TRIANGLE_STRIP 0x0005 +#define GL_TRIANGLE_FAN 0x0006 +#define GL_ZERO 0 +#define GL_ONE 1 +#define GL_SRC_COLOR 0x0300 +#define GL_ONE_MINUS_SRC_COLOR 0x0301 +#define GL_SRC_ALPHA 0x0302 +#define GL_ONE_MINUS_SRC_ALPHA 0x0303 +#define GL_DST_ALPHA 0x0304 +#define GL_ONE_MINUS_DST_ALPHA 0x0305 +#define GL_DST_COLOR 0x0306 +#define GL_ONE_MINUS_DST_COLOR 0x0307 +#define GL_SRC_ALPHA_SATURATE 0x0308 +#define GL_FUNC_ADD 0x8006 +#define GL_BLEND_EQUATION 0x8009 +#define GL_BLEND_EQUATION_RGB 0x8009 +#define GL_BLEND_EQUATION_ALPHA 0x883D +#define GL_FUNC_SUBTRACT 0x800A +#define GL_FUNC_REVERSE_SUBTRACT 0x800B +#define GL_BLEND_DST_RGB 0x80C8 +#define GL_BLEND_SRC_RGB 0x80C9 +#define GL_BLEND_DST_ALPHA 0x80CA +#define GL_BLEND_SRC_ALPHA 0x80CB +#define
GL_CONSTANT_COLOR 0x8001 +#define GL_ONE_MINUS_CONSTANT_COLOR 0x8002 +#define GL_CONSTANT_ALPHA 0x8003 +#define GL_ONE_MINUS_CONSTANT_ALPHA 0x8004 +#define GL_BLEND_COLOR 0x8005 +#define GL_ARRAY_BUFFER 0x8892 +#define GL_ELEMENT_ARRAY_BUFFER 0x8893 +#define GL_ARRAY_BUFFER_BINDING 0x8894 +#define GL_ELEMENT_ARRAY_BUFFER_BINDING 0x8895 +#define GL_STREAM_DRAW 0x88E0 +#define GL_STATIC_DRAW 0x88E4 +#define GL_DYNAMIC_DRAW 0x88E8 +#define GL_BUFFER_SIZE 0x8764 +#define GL_BUFFER_USAGE 0x8765 +#define GL_CURRENT_VERTEX_ATTRIB 0x8626 +#define GL_FRONT 0x0404 +#define GL_BACK 0x0405 +#define GL_FRONT_AND_BACK 0x0408 +#define GL_TEXTURE_2D 0x0DE1 +#define GL_CULL_FACE 0x0B44 +#define GL_BLEND 0x0BE2 +#define GL_DITHER 0x0BD0 +#define GL_STENCIL_TEST 0x0B90 +#define GL_DEPTH_TEST 0x0B71 +#define GL_SCISSOR_TEST 0x0C11 +#define GL_POLYGON_OFFSET_FILL 0x8037 +#define GL_SAMPLE_ALPHA_TO_COVERAGE 0x809E +#define GL_SAMPLE_COVERAGE 0x80A0 +#define GL_NO_ERROR 0 +#define GL_INVALID_ENUM 0x0500 +#define GL_INVALID_VALUE 0x0501 +#define GL_INVALID_OPERATION 0x0502 +#define GL_OUT_OF_MEMORY 0x0505 +#define GL_CW 0x0900 +#define GL_CCW 0x0901 +#define GL_LINE_WIDTH 0x0B21 +#define GL_ALIASED_POINT_SIZE_RANGE 0x846D +#define GL_ALIASED_LINE_WIDTH_RANGE 0x846E +#define GL_CULL_FACE_MODE 0x0B45 +#define GL_FRONT_FACE 0x0B46 +#define GL_DEPTH_RANGE 0x0B70 +#define GL_DEPTH_WRITEMASK 0x0B72 +#define GL_DEPTH_CLEAR_VALUE 0x0B73 +#define GL_DEPTH_FUNC 0x0B74 +#define GL_STENCIL_CLEAR_VALUE 0x0B91 +#define GL_STENCIL_FUNC 0x0B92 +#define GL_STENCIL_FAIL 0x0B94 +#define GL_STENCIL_PASS_DEPTH_FAIL 0x0B95 +#define GL_STENCIL_PASS_DEPTH_PASS 0x0B96 +#define GL_STENCIL_REF 0x0B97 +#define GL_STENCIL_VALUE_MASK 0x0B93 +#define GL_STENCIL_WRITEMASK 0x0B98 +#define GL_STENCIL_BACK_FUNC 0x8800 +#define GL_STENCIL_BACK_FAIL 0x8801 +#define GL_STENCIL_BACK_PASS_DEPTH_FAIL 0x8802 +#define GL_STENCIL_BACK_PASS_DEPTH_PASS 0x8803 +#define GL_STENCIL_BACK_REF 0x8CA3 +#define GL_STENCIL_BACK_VALUE_MASK 0x8CA4 +#define GL_STENCIL_BACK_WRITEMASK 0x8CA5 +#define GL_VIEWPORT 0x0BA2 +#define GL_SCISSOR_BOX 0x0C10 +#define GL_COLOR_CLEAR_VALUE 0x0C22 +#define GL_COLOR_WRITEMASK 0x0C23 +#define GL_UNPACK_ALIGNMENT 0x0CF5 +#define GL_PACK_ALIGNMENT 0x0D05 +#define GL_MAX_TEXTURE_SIZE 0x0D33 +#define GL_MAX_VIEWPORT_DIMS 0x0D3A +#define GL_SUBPIXEL_BITS 0x0D50 +#define GL_RED_BITS 0x0D52 +#define GL_GREEN_BITS 0x0D53 +#define GL_BLUE_BITS 0x0D54 +#define GL_ALPHA_BITS 0x0D55 +#define GL_DEPTH_BITS 0x0D56 +#define GL_STENCIL_BITS 0x0D57 +#define GL_POLYGON_OFFSET_UNITS 0x2A00 +#define GL_POLYGON_OFFSET_FACTOR 0x8038 +#define GL_TEXTURE_BINDING_2D 0x8069 +#define GL_SAMPLE_BUFFERS 0x80A8 +#define GL_SAMPLES 0x80A9 +#define GL_SAMPLE_COVERAGE_VALUE 0x80AA +#define GL_SAMPLE_COVERAGE_INVERT 0x80AB +#define GL_NUM_COMPRESSED_TEXTURE_FORMATS 0x86A2 +#define GL_COMPRESSED_TEXTURE_FORMATS 0x86A3 +#define GL_DONT_CARE 0x1100 +#define GL_FASTEST 0x1101 +#define GL_NICEST 0x1102 +#define GL_GENERATE_MIPMAP_HINT 0x8192 +#define GL_BYTE 0x1400 +#define GL_UNSIGNED_BYTE 0x1401 +#define GL_SHORT 0x1402 +#define GL_UNSIGNED_SHORT 0x1403 +#define GL_INT 0x1404 +#define GL_UNSIGNED_INT 0x1405 +#define GL_FLOAT 0x1406 +#define GL_FIXED 0x140C +#define GL_DEPTH_COMPONENT 0x1902 +#define GL_ALPHA 0x1906 +#define GL_RGB 0x1907 +#define GL_RGBA 0x1908 +#define GL_LUMINANCE 0x1909 +#define GL_LUMINANCE_ALPHA 0x190A +#define GL_UNSIGNED_SHORT_4_4_4_4 0x8033 +#define GL_UNSIGNED_SHORT_5_5_5_1 0x8034 +#define GL_UNSIGNED_SHORT_5_6_5 0x8363 +#define 
GL_FRAGMENT_SHADER 0x8B30 +#define GL_VERTEX_SHADER 0x8B31 +#define GL_MAX_VERTEX_ATTRIBS 0x8869 +#define GL_MAX_VERTEX_UNIFORM_VECTORS 0x8DFB +#define GL_MAX_VARYING_VECTORS 0x8DFC +#define GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS 0x8B4D +#define GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS 0x8B4C +#define GL_MAX_TEXTURE_IMAGE_UNITS 0x8872 +#define GL_MAX_FRAGMENT_UNIFORM_VECTORS 0x8DFD +#define GL_SHADER_TYPE 0x8B4F +#define GL_DELETE_STATUS 0x8B80 +#define GL_LINK_STATUS 0x8B82 +#define GL_VALIDATE_STATUS 0x8B83 +#define GL_ATTACHED_SHADERS 0x8B85 +#define GL_ACTIVE_UNIFORMS 0x8B86 +#define GL_ACTIVE_UNIFORM_MAX_LENGTH 0x8B87 +#define GL_ACTIVE_ATTRIBUTES 0x8B89 +#define GL_ACTIVE_ATTRIBUTE_MAX_LENGTH 0x8B8A +#define GL_SHADING_LANGUAGE_VERSION 0x8B8C +#define GL_CURRENT_PROGRAM 0x8B8D +#define GL_NEVER 0x0200 +#define GL_LESS 0x0201 +#define GL_EQUAL 0x0202 +#define GL_LEQUAL 0x0203 +#define GL_GREATER 0x0204 +#define GL_NOTEQUAL 0x0205 +#define GL_GEQUAL 0x0206 +#define GL_ALWAYS 0x0207 +#define GL_KEEP 0x1E00 +#define GL_REPLACE 0x1E01 +#define GL_INCR 0x1E02 +#define GL_DECR 0x1E03 +#define GL_INVERT 0x150A +#define GL_INCR_WRAP 0x8507 +#define GL_DECR_WRAP 0x8508 +#define GL_VENDOR 0x1F00 +#define GL_RENDERER 0x1F01 +#define GL_VERSION 0x1F02 +#define GL_EXTENSIONS 0x1F03 +#define GL_NEAREST 0x2600 +#define GL_LINEAR 0x2601 +#define GL_NEAREST_MIPMAP_NEAREST 0x2700 +#define GL_LINEAR_MIPMAP_NEAREST 0x2701 +#define GL_NEAREST_MIPMAP_LINEAR 0x2702 +#define GL_LINEAR_MIPMAP_LINEAR 0x2703 +#define GL_TEXTURE_MAG_FILTER 0x2800 +#define GL_TEXTURE_MIN_FILTER 0x2801 +#define GL_TEXTURE_WRAP_S 0x2802 +#define GL_TEXTURE_WRAP_T 0x2803 +#define GL_TEXTURE 0x1702 +#define GL_TEXTURE_CUBE_MAP 0x8513 +#define GL_TEXTURE_BINDING_CUBE_MAP 0x8514 +#define GL_TEXTURE_CUBE_MAP_POSITIVE_X 0x8515 +#define GL_TEXTURE_CUBE_MAP_NEGATIVE_X 0x8516 +#define GL_TEXTURE_CUBE_MAP_POSITIVE_Y 0x8517 +#define GL_TEXTURE_CUBE_MAP_NEGATIVE_Y 0x8518 +#define GL_TEXTURE_CUBE_MAP_POSITIVE_Z 0x8519 +#define GL_TEXTURE_CUBE_MAP_NEGATIVE_Z 0x851A +#define GL_MAX_CUBE_MAP_TEXTURE_SIZE 0x851C +#define GL_TEXTURE0 0x84C0 +#define GL_TEXTURE1 0x84C1 +#define GL_TEXTURE2 0x84C2 +#define GL_TEXTURE3 0x84C3 +#define GL_TEXTURE4 0x84C4 +#define GL_TEXTURE5 0x84C5 +#define GL_TEXTURE6 0x84C6 +#define GL_TEXTURE7 0x84C7 +#define GL_TEXTURE8 0x84C8 +#define GL_TEXTURE9 0x84C9 +#define GL_TEXTURE10 0x84CA +#define GL_TEXTURE11 0x84CB +#define GL_TEXTURE12 0x84CC +#define GL_TEXTURE13 0x84CD +#define GL_TEXTURE14 0x84CE +#define GL_TEXTURE15 0x84CF +#define GL_TEXTURE16 0x84D0 +#define GL_TEXTURE17 0x84D1 +#define GL_TEXTURE18 0x84D2 +#define GL_TEXTURE19 0x84D3 +#define GL_TEXTURE20 0x84D4 +#define GL_TEXTURE21 0x84D5 +#define GL_TEXTURE22 0x84D6 +#define GL_TEXTURE23 0x84D7 +#define GL_TEXTURE24 0x84D8 +#define GL_TEXTURE25 0x84D9 +#define GL_TEXTURE26 0x84DA +#define GL_TEXTURE27 0x84DB +#define GL_TEXTURE28 0x84DC +#define GL_TEXTURE29 0x84DD +#define GL_TEXTURE30 0x84DE +#define GL_TEXTURE31 0x84DF +#define GL_ACTIVE_TEXTURE 0x84E0 +#define GL_REPEAT 0x2901 +#define GL_CLAMP_TO_EDGE 0x812F +#define GL_MIRRORED_REPEAT 0x8370 +#define GL_FLOAT_VEC2 0x8B50 +#define GL_FLOAT_VEC3 0x8B51 +#define GL_FLOAT_VEC4 0x8B52 +#define GL_INT_VEC2 0x8B53 +#define GL_INT_VEC3 0x8B54 +#define GL_INT_VEC4 0x8B55 +#define GL_BOOL 0x8B56 +#define GL_BOOL_VEC2 0x8B57 +#define GL_BOOL_VEC3 0x8B58 +#define GL_BOOL_VEC4 0x8B59 +#define GL_FLOAT_MAT2 0x8B5A +#define GL_FLOAT_MAT3 0x8B5B +#define GL_FLOAT_MAT4 0x8B5C +#define GL_SAMPLER_2D 0x8B5E +#define 
GL_SAMPLER_CUBE 0x8B60 +#define GL_VERTEX_ATTRIB_ARRAY_ENABLED 0x8622 +#define GL_VERTEX_ATTRIB_ARRAY_SIZE 0x8623 +#define GL_VERTEX_ATTRIB_ARRAY_STRIDE 0x8624 +#define GL_VERTEX_ATTRIB_ARRAY_TYPE 0x8625 +#define GL_VERTEX_ATTRIB_ARRAY_NORMALIZED 0x886A +#define GL_VERTEX_ATTRIB_ARRAY_POINTER 0x8645 +#define GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING 0x889F +#define GL_IMPLEMENTATION_COLOR_READ_TYPE 0x8B9A +#define GL_IMPLEMENTATION_COLOR_READ_FORMAT 0x8B9B +#define GL_COMPILE_STATUS 0x8B81 +#define GL_INFO_LOG_LENGTH 0x8B84 +#define GL_SHADER_SOURCE_LENGTH 0x8B88 +#define GL_SHADER_COMPILER 0x8DFA +#define GL_SHADER_BINARY_FORMATS 0x8DF8 +#define GL_NUM_SHADER_BINARY_FORMATS 0x8DF9 +#define GL_LOW_FLOAT 0x8DF0 +#define GL_MEDIUM_FLOAT 0x8DF1 +#define GL_HIGH_FLOAT 0x8DF2 +#define GL_LOW_INT 0x8DF3 +#define GL_MEDIUM_INT 0x8DF4 +#define GL_HIGH_INT 0x8DF5 +#define GL_FRAMEBUFFER 0x8D40 +#define GL_RENDERBUFFER 0x8D41 +#define GL_RGBA4 0x8056 +#define GL_RGB5_A1 0x8057 +#define GL_RGB565 0x8D62 +#define GL_DEPTH_COMPONENT16 0x81A5 +#define GL_STENCIL_INDEX8 0x8D48 +#define GL_RENDERBUFFER_WIDTH 0x8D42 +#define GL_RENDERBUFFER_HEIGHT 0x8D43 +#define GL_RENDERBUFFER_INTERNAL_FORMAT 0x8D44 +#define GL_RENDERBUFFER_RED_SIZE 0x8D50 +#define GL_RENDERBUFFER_GREEN_SIZE 0x8D51 +#define GL_RENDERBUFFER_BLUE_SIZE 0x8D52 +#define GL_RENDERBUFFER_ALPHA_SIZE 0x8D53 +#define GL_RENDERBUFFER_DEPTH_SIZE 0x8D54 +#define GL_RENDERBUFFER_STENCIL_SIZE 0x8D55 +#define GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE 0x8CD0 +#define GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME 0x8CD1 +#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL 0x8CD2 +#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE 0x8CD3 +#define GL_COLOR_ATTACHMENT0 0x8CE0 +#define GL_DEPTH_ATTACHMENT 0x8D00 +#define GL_STENCIL_ATTACHMENT 0x8D20 +#define GL_NONE 0 +#define GL_FRAMEBUFFER_COMPLETE 0x8CD5 +#define GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT 0x8CD6 +#define GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT 0x8CD7 +#define GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS 0x8CD9 +#define GL_FRAMEBUFFER_UNSUPPORTED 0x8CDD +#define GL_FRAMEBUFFER_BINDING 0x8CA6 +#define GL_RENDERBUFFER_BINDING 0x8CA7 +#define GL_MAX_RENDERBUFFER_SIZE 0x84E8 +#define GL_INVALID_FRAMEBUFFER_OPERATION 0x0506 +typedef void (GL_APIENTRYP PFNGLACTIVETEXTUREPROC) (GLenum texture); +typedef void (GL_APIENTRYP PFNGLATTACHSHADERPROC) (GLuint program, GLuint shader); +typedef void (GL_APIENTRYP PFNGLBINDATTRIBLOCATIONPROC) (GLuint program, GLuint index, const GLchar *name); +typedef void (GL_APIENTRYP PFNGLBINDBUFFERPROC) (GLenum target, GLuint buffer); +typedef void (GL_APIENTRYP PFNGLBINDFRAMEBUFFERPROC) (GLenum target, GLuint framebuffer); +typedef void (GL_APIENTRYP PFNGLBINDRENDERBUFFERPROC) (GLenum target, GLuint renderbuffer); +typedef void (GL_APIENTRYP PFNGLBINDTEXTUREPROC) (GLenum target, GLuint texture); +typedef void (GL_APIENTRYP PFNGLBLENDCOLORPROC) (GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha); +typedef void (GL_APIENTRYP PFNGLBLENDEQUATIONPROC) (GLenum mode); +typedef void (GL_APIENTRYP PFNGLBLENDEQUATIONSEPARATEPROC) (GLenum modeRGB, GLenum modeAlpha); +typedef void (GL_APIENTRYP PFNGLBLENDFUNCPROC) (GLenum sfactor, GLenum dfactor); +typedef void (GL_APIENTRYP PFNGLBLENDFUNCSEPARATEPROC) (GLenum sfactorRGB, GLenum dfactorRGB, GLenum sfactorAlpha, GLenum dfactorAlpha); +typedef void (GL_APIENTRYP PFNGLBUFFERDATAPROC) (GLenum target, GLsizeiptr size, const void *data, GLenum usage); +typedef void (GL_APIENTRYP PFNGLBUFFERSUBDATAPROC) (GLenum target, GLintptr offset, GLsizeiptr size, 
const void *data); +typedef GLenum (GL_APIENTRYP PFNGLCHECKFRAMEBUFFERSTATUSPROC) (GLenum target); +typedef void (GL_APIENTRYP PFNGLCLEARPROC) (GLbitfield mask); +typedef void (GL_APIENTRYP PFNGLCLEARCOLORPROC) (GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha); +typedef void (GL_APIENTRYP PFNGLCLEARDEPTHFPROC) (GLfloat d); +typedef void (GL_APIENTRYP PFNGLCLEARSTENCILPROC) (GLint s); +typedef void (GL_APIENTRYP PFNGLCOLORMASKPROC) (GLboolean red, GLboolean green, GLboolean blue, GLboolean alpha); +typedef void (GL_APIENTRYP PFNGLCOMPILESHADERPROC) (GLuint shader); +typedef void (GL_APIENTRYP PFNGLCOMPRESSEDTEXIMAGE2DPROC) (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLsizei imageSize, const void *data); +typedef void (GL_APIENTRYP PFNGLCOMPRESSEDTEXSUBIMAGE2DPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, const void *data); +typedef void (GL_APIENTRYP PFNGLCOPYTEXIMAGE2DPROC) (GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLsizei height, GLint border); +typedef void (GL_APIENTRYP PFNGLCOPYTEXSUBIMAGE2DPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height); +typedef GLuint (GL_APIENTRYP PFNGLCREATEPROGRAMPROC) (void); +typedef GLuint (GL_APIENTRYP PFNGLCREATESHADERPROC) (GLenum type); +typedef void (GL_APIENTRYP PFNGLCULLFACEPROC) (GLenum mode); +typedef void (GL_APIENTRYP PFNGLDELETEBUFFERSPROC) (GLsizei n, const GLuint *buffers); +typedef void (GL_APIENTRYP PFNGLDELETEFRAMEBUFFERSPROC) (GLsizei n, const GLuint *framebuffers); +typedef void (GL_APIENTRYP PFNGLDELETEPROGRAMPROC) (GLuint program); +typedef void (GL_APIENTRYP PFNGLDELETERENDERBUFFERSPROC) (GLsizei n, const GLuint *renderbuffers); +typedef void (GL_APIENTRYP PFNGLDELETESHADERPROC) (GLuint shader); +typedef void (GL_APIENTRYP PFNGLDELETETEXTURESPROC) (GLsizei n, const GLuint *textures); +typedef void (GL_APIENTRYP PFNGLDEPTHFUNCPROC) (GLenum func); +typedef void (GL_APIENTRYP PFNGLDEPTHMASKPROC) (GLboolean flag); +typedef void (GL_APIENTRYP PFNGLDEPTHRANGEFPROC) (GLfloat n, GLfloat f); +typedef void (GL_APIENTRYP PFNGLDETACHSHADERPROC) (GLuint program, GLuint shader); +typedef void (GL_APIENTRYP PFNGLDISABLEPROC) (GLenum cap); +typedef void (GL_APIENTRYP PFNGLDISABLEVERTEXATTRIBARRAYPROC) (GLuint index); +typedef void (GL_APIENTRYP PFNGLDRAWARRAYSPROC) (GLenum mode, GLint first, GLsizei count); +typedef void (GL_APIENTRYP PFNGLDRAWELEMENTSPROC) (GLenum mode, GLsizei count, GLenum type, const void *indices); +typedef void (GL_APIENTRYP PFNGLENABLEPROC) (GLenum cap); +typedef void (GL_APIENTRYP PFNGLENABLEVERTEXATTRIBARRAYPROC) (GLuint index); +typedef void (GL_APIENTRYP PFNGLFINISHPROC) (void); +typedef void (GL_APIENTRYP PFNGLFLUSHPROC) (void); +typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERRENDERBUFFERPROC) (GLenum target, GLenum attachment, GLenum renderbuffertarget, GLuint renderbuffer); +typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERTEXTURE2DPROC) (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level); +typedef void (GL_APIENTRYP PFNGLFRONTFACEPROC) (GLenum mode); +typedef void (GL_APIENTRYP PFNGLGENBUFFERSPROC) (GLsizei n, GLuint *buffers); +typedef void (GL_APIENTRYP PFNGLGENERATEMIPMAPPROC) (GLenum target); +typedef void (GL_APIENTRYP PFNGLGENFRAMEBUFFERSPROC) (GLsizei n, GLuint *framebuffers); +typedef void (GL_APIENTRYP PFNGLGENRENDERBUFFERSPROC) (GLsizei n, 
GLuint *renderbuffers); +typedef void (GL_APIENTRYP PFNGLGENTEXTURESPROC) (GLsizei n, GLuint *textures); +typedef void (GL_APIENTRYP PFNGLGETACTIVEATTRIBPROC) (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLint *size, GLenum *type, GLchar *name); +typedef void (GL_APIENTRYP PFNGLGETACTIVEUNIFORMPROC) (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLint *size, GLenum *type, GLchar *name); +typedef void (GL_APIENTRYP PFNGLGETATTACHEDSHADERSPROC) (GLuint program, GLsizei maxCount, GLsizei *count, GLuint *shaders); +typedef GLint (GL_APIENTRYP PFNGLGETATTRIBLOCATIONPROC) (GLuint program, const GLchar *name); +typedef void (GL_APIENTRYP PFNGLGETBOOLEANVPROC) (GLenum pname, GLboolean *data); +typedef void (GL_APIENTRYP PFNGLGETBUFFERPARAMETERIVPROC) (GLenum target, GLenum pname, GLint *params); +typedef GLenum (GL_APIENTRYP PFNGLGETERRORPROC) (void); +typedef void (GL_APIENTRYP PFNGLGETFLOATVPROC) (GLenum pname, GLfloat *data); +typedef void (GL_APIENTRYP PFNGLGETFRAMEBUFFERATTACHMENTPARAMETERIVPROC) (GLenum target, GLenum attachment, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETINTEGERVPROC) (GLenum pname, GLint *data); +typedef void (GL_APIENTRYP PFNGLGETPROGRAMIVPROC) (GLuint program, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETPROGRAMINFOLOGPROC) (GLuint program, GLsizei bufSize, GLsizei *length, GLchar *infoLog); +typedef void (GL_APIENTRYP PFNGLGETRENDERBUFFERPARAMETERIVPROC) (GLenum target, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETSHADERIVPROC) (GLuint shader, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETSHADERINFOLOGPROC) (GLuint shader, GLsizei bufSize, GLsizei *length, GLchar *infoLog); +typedef void (GL_APIENTRYP PFNGLGETSHADERPRECISIONFORMATPROC) (GLenum shadertype, GLenum precisiontype, GLint *range, GLint *precision); +typedef void (GL_APIENTRYP PFNGLGETSHADERSOURCEPROC) (GLuint shader, GLsizei bufSize, GLsizei *length, GLchar *source); +typedef const GLubyte *(GL_APIENTRYP PFNGLGETSTRINGPROC) (GLenum name); +typedef void (GL_APIENTRYP PFNGLGETTEXPARAMETERFVPROC) (GLenum target, GLenum pname, GLfloat *params); +typedef void (GL_APIENTRYP PFNGLGETTEXPARAMETERIVPROC) (GLenum target, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETUNIFORMFVPROC) (GLuint program, GLint location, GLfloat *params); +typedef void (GL_APIENTRYP PFNGLGETUNIFORMIVPROC) (GLuint program, GLint location, GLint *params); +typedef GLint (GL_APIENTRYP PFNGLGETUNIFORMLOCATIONPROC) (GLuint program, const GLchar *name); +typedef void (GL_APIENTRYP PFNGLGETVERTEXATTRIBFVPROC) (GLuint index, GLenum pname, GLfloat *params); +typedef void (GL_APIENTRYP PFNGLGETVERTEXATTRIBIVPROC) (GLuint index, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETVERTEXATTRIBPOINTERVPROC) (GLuint index, GLenum pname, void **pointer); +typedef void (GL_APIENTRYP PFNGLHINTPROC) (GLenum target, GLenum mode); +typedef GLboolean (GL_APIENTRYP PFNGLISBUFFERPROC) (GLuint buffer); +typedef GLboolean (GL_APIENTRYP PFNGLISENABLEDPROC) (GLenum cap); +typedef GLboolean (GL_APIENTRYP PFNGLISFRAMEBUFFERPROC) (GLuint framebuffer); +typedef GLboolean (GL_APIENTRYP PFNGLISPROGRAMPROC) (GLuint program); +typedef GLboolean (GL_APIENTRYP PFNGLISRENDERBUFFERPROC) (GLuint renderbuffer); +typedef GLboolean (GL_APIENTRYP PFNGLISSHADERPROC) (GLuint shader); +typedef GLboolean (GL_APIENTRYP PFNGLISTEXTUREPROC) (GLuint texture); +typedef void (GL_APIENTRYP PFNGLLINEWIDTHPROC) (GLfloat width); +typedef void 
(GL_APIENTRYP PFNGLLINKPROGRAMPROC) (GLuint program); +typedef void (GL_APIENTRYP PFNGLPIXELSTOREIPROC) (GLenum pname, GLint param); +typedef void (GL_APIENTRYP PFNGLPOLYGONOFFSETPROC) (GLfloat factor, GLfloat units); +typedef void (GL_APIENTRYP PFNGLREADPIXELSPROC) (GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, void *pixels); +typedef void (GL_APIENTRYP PFNGLRELEASESHADERCOMPILERPROC) (void); +typedef void (GL_APIENTRYP PFNGLRENDERBUFFERSTORAGEPROC) (GLenum target, GLenum internalformat, GLsizei width, GLsizei height); +typedef void (GL_APIENTRYP PFNGLSAMPLECOVERAGEPROC) (GLfloat value, GLboolean invert); +typedef void (GL_APIENTRYP PFNGLSCISSORPROC) (GLint x, GLint y, GLsizei width, GLsizei height); +typedef void (GL_APIENTRYP PFNGLSHADERBINARYPROC) (GLsizei count, const GLuint *shaders, GLenum binaryformat, const void *binary, GLsizei length); +typedef void (GL_APIENTRYP PFNGLSHADERSOURCEPROC) (GLuint shader, GLsizei count, const GLchar *const*string, const GLint *length); +typedef void (GL_APIENTRYP PFNGLSTENCILFUNCPROC) (GLenum func, GLint ref, GLuint mask); +typedef void (GL_APIENTRYP PFNGLSTENCILFUNCSEPARATEPROC) (GLenum face, GLenum func, GLint ref, GLuint mask); +typedef void (GL_APIENTRYP PFNGLSTENCILMASKPROC) (GLuint mask); +typedef void (GL_APIENTRYP PFNGLSTENCILMASKSEPARATEPROC) (GLenum face, GLuint mask); +typedef void (GL_APIENTRYP PFNGLSTENCILOPPROC) (GLenum fail, GLenum zfail, GLenum zpass); +typedef void (GL_APIENTRYP PFNGLSTENCILOPSEPARATEPROC) (GLenum face, GLenum sfail, GLenum dpfail, GLenum dppass); +typedef void (GL_APIENTRYP PFNGLTEXIMAGE2DPROC) (GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, const void *pixels); +typedef void (GL_APIENTRYP PFNGLTEXPARAMETERFPROC) (GLenum target, GLenum pname, GLfloat param); +typedef void (GL_APIENTRYP PFNGLTEXPARAMETERFVPROC) (GLenum target, GLenum pname, const GLfloat *params); +typedef void (GL_APIENTRYP PFNGLTEXPARAMETERIPROC) (GLenum target, GLenum pname, GLint param); +typedef void (GL_APIENTRYP PFNGLTEXPARAMETERIVPROC) (GLenum target, GLenum pname, const GLint *params); +typedef void (GL_APIENTRYP PFNGLTEXSUBIMAGE2DPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, const void *pixels); +typedef void (GL_APIENTRYP PFNGLUNIFORM1FPROC) (GLint location, GLfloat v0); +typedef void (GL_APIENTRYP PFNGLUNIFORM1FVPROC) (GLint location, GLsizei count, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM1IPROC) (GLint location, GLint v0); +typedef void (GL_APIENTRYP PFNGLUNIFORM1IVPROC) (GLint location, GLsizei count, const GLint *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM2FPROC) (GLint location, GLfloat v0, GLfloat v1); +typedef void (GL_APIENTRYP PFNGLUNIFORM2FVPROC) (GLint location, GLsizei count, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM2IPROC) (GLint location, GLint v0, GLint v1); +typedef void (GL_APIENTRYP PFNGLUNIFORM2IVPROC) (GLint location, GLsizei count, const GLint *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM3FPROC) (GLint location, GLfloat v0, GLfloat v1, GLfloat v2); +typedef void (GL_APIENTRYP PFNGLUNIFORM3FVPROC) (GLint location, GLsizei count, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM3IPROC) (GLint location, GLint v0, GLint v1, GLint v2); +typedef void (GL_APIENTRYP PFNGLUNIFORM3IVPROC) (GLint location, GLsizei count, const GLint *value); +typedef void (GL_APIENTRYP 
PFNGLUNIFORM4FPROC) (GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3); +typedef void (GL_APIENTRYP PFNGLUNIFORM4FVPROC) (GLint location, GLsizei count, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM4IPROC) (GLint location, GLint v0, GLint v1, GLint v2, GLint v3); +typedef void (GL_APIENTRYP PFNGLUNIFORM4IVPROC) (GLint location, GLsizei count, const GLint *value); +typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX2FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX3FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX4FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUSEPROGRAMPROC) (GLuint program); +typedef void (GL_APIENTRYP PFNGLVALIDATEPROGRAMPROC) (GLuint program); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB1FPROC) (GLuint index, GLfloat x); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB1FVPROC) (GLuint index, const GLfloat *v); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB2FPROC) (GLuint index, GLfloat x, GLfloat y); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB2FVPROC) (GLuint index, const GLfloat *v); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB3FPROC) (GLuint index, GLfloat x, GLfloat y, GLfloat z); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB3FVPROC) (GLuint index, const GLfloat *v); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB4FPROC) (GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIB4FVPROC) (GLuint index, const GLfloat *v); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBPOINTERPROC) (GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, const void *pointer); +typedef void (GL_APIENTRYP PFNGLVIEWPORTPROC) (GLint x, GLint y, GLsizei width, GLsizei height); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glActiveTexture (GLenum texture); +GL_APICALL void GL_APIENTRY glAttachShader (GLuint program, GLuint shader); +GL_APICALL void GL_APIENTRY glBindAttribLocation (GLuint program, GLuint index, const GLchar *name); +GL_APICALL void GL_APIENTRY glBindBuffer (GLenum target, GLuint buffer); +GL_APICALL void GL_APIENTRY glBindFramebuffer (GLenum target, GLuint framebuffer); +GL_APICALL void GL_APIENTRY glBindRenderbuffer (GLenum target, GLuint renderbuffer); +GL_APICALL void GL_APIENTRY glBindTexture (GLenum target, GLuint texture); +GL_APICALL void GL_APIENTRY glBlendColor (GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha); +GL_APICALL void GL_APIENTRY glBlendEquation (GLenum mode); +GL_APICALL void GL_APIENTRY glBlendEquationSeparate (GLenum modeRGB, GLenum modeAlpha); +GL_APICALL void GL_APIENTRY glBlendFunc (GLenum sfactor, GLenum dfactor); +GL_APICALL void GL_APIENTRY glBlendFuncSeparate (GLenum sfactorRGB, GLenum dfactorRGB, GLenum sfactorAlpha, GLenum dfactorAlpha); +GL_APICALL void GL_APIENTRY glBufferData (GLenum target, GLsizeiptr size, const void *data, GLenum usage); +GL_APICALL void GL_APIENTRY glBufferSubData (GLenum target, GLintptr offset, GLsizeiptr size, const void *data); +GL_APICALL GLenum GL_APIENTRY glCheckFramebufferStatus (GLenum target); +GL_APICALL void GL_APIENTRY glClear (GLbitfield mask); +GL_APICALL void GL_APIENTRY glClearColor (GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha); +GL_APICALL void GL_APIENTRY glClearDepthf (GLfloat d); +GL_APICALL void GL_APIENTRY glClearStencil (GLint s); +GL_APICALL void 
GL_APIENTRY glColorMask (GLboolean red, GLboolean green, GLboolean blue, GLboolean alpha); +GL_APICALL void GL_APIENTRY glCompileShader (GLuint shader); +GL_APICALL void GL_APIENTRY glCompressedTexImage2D (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLsizei imageSize, const void *data); +GL_APICALL void GL_APIENTRY glCompressedTexSubImage2D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, const void *data); +GL_APICALL void GL_APIENTRY glCopyTexImage2D (GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLsizei height, GLint border); +GL_APICALL void GL_APIENTRY glCopyTexSubImage2D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height); +GL_APICALL GLuint GL_APIENTRY glCreateProgram (void); +GL_APICALL GLuint GL_APIENTRY glCreateShader (GLenum type); +GL_APICALL void GL_APIENTRY glCullFace (GLenum mode); +GL_APICALL void GL_APIENTRY glDeleteBuffers (GLsizei n, const GLuint *buffers); +GL_APICALL void GL_APIENTRY glDeleteFramebuffers (GLsizei n, const GLuint *framebuffers); +GL_APICALL void GL_APIENTRY glDeleteProgram (GLuint program); +GL_APICALL void GL_APIENTRY glDeleteRenderbuffers (GLsizei n, const GLuint *renderbuffers); +GL_APICALL void GL_APIENTRY glDeleteShader (GLuint shader); +GL_APICALL void GL_APIENTRY glDeleteTextures (GLsizei n, const GLuint *textures); +GL_APICALL void GL_APIENTRY glDepthFunc (GLenum func); +GL_APICALL void GL_APIENTRY glDepthMask (GLboolean flag); +GL_APICALL void GL_APIENTRY glDepthRangef (GLfloat n, GLfloat f); +GL_APICALL void GL_APIENTRY glDetachShader (GLuint program, GLuint shader); +GL_APICALL void GL_APIENTRY glDisable (GLenum cap); +GL_APICALL void GL_APIENTRY glDisableVertexAttribArray (GLuint index); +GL_APICALL void GL_APIENTRY glDrawArrays (GLenum mode, GLint first, GLsizei count); +GL_APICALL void GL_APIENTRY glDrawElements (GLenum mode, GLsizei count, GLenum type, const void *indices); +GL_APICALL void GL_APIENTRY glEnable (GLenum cap); +GL_APICALL void GL_APIENTRY glEnableVertexAttribArray (GLuint index); +GL_APICALL void GL_APIENTRY glFinish (void); +GL_APICALL void GL_APIENTRY glFlush (void); +GL_APICALL void GL_APIENTRY glFramebufferRenderbuffer (GLenum target, GLenum attachment, GLenum renderbuffertarget, GLuint renderbuffer); +GL_APICALL void GL_APIENTRY glFramebufferTexture2D (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level); +GL_APICALL void GL_APIENTRY glFrontFace (GLenum mode); +GL_APICALL void GL_APIENTRY glGenBuffers (GLsizei n, GLuint *buffers); +GL_APICALL void GL_APIENTRY glGenerateMipmap (GLenum target); +GL_APICALL void GL_APIENTRY glGenFramebuffers (GLsizei n, GLuint *framebuffers); +GL_APICALL void GL_APIENTRY glGenRenderbuffers (GLsizei n, GLuint *renderbuffers); +GL_APICALL void GL_APIENTRY glGenTextures (GLsizei n, GLuint *textures); +GL_APICALL void GL_APIENTRY glGetActiveAttrib (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLint *size, GLenum *type, GLchar *name); +GL_APICALL void GL_APIENTRY glGetActiveUniform (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLint *size, GLenum *type, GLchar *name); +GL_APICALL void GL_APIENTRY glGetAttachedShaders (GLuint program, GLsizei maxCount, GLsizei *count, GLuint *shaders); +GL_APICALL GLint GL_APIENTRY glGetAttribLocation (GLuint program, const GLchar *name); +GL_APICALL void GL_APIENTRY 
glGetBooleanv (GLenum pname, GLboolean *data); +GL_APICALL void GL_APIENTRY glGetBufferParameteriv (GLenum target, GLenum pname, GLint *params); +GL_APICALL GLenum GL_APIENTRY glGetError (void); +GL_APICALL void GL_APIENTRY glGetFloatv (GLenum pname, GLfloat *data); +GL_APICALL void GL_APIENTRY glGetFramebufferAttachmentParameteriv (GLenum target, GLenum attachment, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetIntegerv (GLenum pname, GLint *data); +GL_APICALL void GL_APIENTRY glGetProgramiv (GLuint program, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetProgramInfoLog (GLuint program, GLsizei bufSize, GLsizei *length, GLchar *infoLog); +GL_APICALL void GL_APIENTRY glGetRenderbufferParameteriv (GLenum target, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetShaderiv (GLuint shader, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetShaderInfoLog (GLuint shader, GLsizei bufSize, GLsizei *length, GLchar *infoLog); +GL_APICALL void GL_APIENTRY glGetShaderPrecisionFormat (GLenum shadertype, GLenum precisiontype, GLint *range, GLint *precision); +GL_APICALL void GL_APIENTRY glGetShaderSource (GLuint shader, GLsizei bufSize, GLsizei *length, GLchar *source); +GL_APICALL const GLubyte *GL_APIENTRY glGetString (GLenum name); +GL_APICALL void GL_APIENTRY glGetTexParameterfv (GLenum target, GLenum pname, GLfloat *params); +GL_APICALL void GL_APIENTRY glGetTexParameteriv (GLenum target, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetUniformfv (GLuint program, GLint location, GLfloat *params); +GL_APICALL void GL_APIENTRY glGetUniformiv (GLuint program, GLint location, GLint *params); +GL_APICALL GLint GL_APIENTRY glGetUniformLocation (GLuint program, const GLchar *name); +GL_APICALL void GL_APIENTRY glGetVertexAttribfv (GLuint index, GLenum pname, GLfloat *params); +GL_APICALL void GL_APIENTRY glGetVertexAttribiv (GLuint index, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetVertexAttribPointerv (GLuint index, GLenum pname, void **pointer); +GL_APICALL void GL_APIENTRY glHint (GLenum target, GLenum mode); +GL_APICALL GLboolean GL_APIENTRY glIsBuffer (GLuint buffer); +GL_APICALL GLboolean GL_APIENTRY glIsEnabled (GLenum cap); +GL_APICALL GLboolean GL_APIENTRY glIsFramebuffer (GLuint framebuffer); +GL_APICALL GLboolean GL_APIENTRY glIsProgram (GLuint program); +GL_APICALL GLboolean GL_APIENTRY glIsRenderbuffer (GLuint renderbuffer); +GL_APICALL GLboolean GL_APIENTRY glIsShader (GLuint shader); +GL_APICALL GLboolean GL_APIENTRY glIsTexture (GLuint texture); +GL_APICALL void GL_APIENTRY glLineWidth (GLfloat width); +GL_APICALL void GL_APIENTRY glLinkProgram (GLuint program); +GL_APICALL void GL_APIENTRY glPixelStorei (GLenum pname, GLint param); +GL_APICALL void GL_APIENTRY glPolygonOffset (GLfloat factor, GLfloat units); +GL_APICALL void GL_APIENTRY glReadPixels (GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, void *pixels); +GL_APICALL void GL_APIENTRY glReleaseShaderCompiler (void); +GL_APICALL void GL_APIENTRY glRenderbufferStorage (GLenum target, GLenum internalformat, GLsizei width, GLsizei height); +GL_APICALL void GL_APIENTRY glSampleCoverage (GLfloat value, GLboolean invert); +GL_APICALL void GL_APIENTRY glScissor (GLint x, GLint y, GLsizei width, GLsizei height); +GL_APICALL void GL_APIENTRY glShaderBinary (GLsizei count, const GLuint *shaders, GLenum binaryformat, const void *binary, GLsizei length); +GL_APICALL void GL_APIENTRY glShaderSource (GLuint shader, GLsizei 
count, const GLchar *const*string, const GLint *length); +GL_APICALL void GL_APIENTRY glStencilFunc (GLenum func, GLint ref, GLuint mask); +GL_APICALL void GL_APIENTRY glStencilFuncSeparate (GLenum face, GLenum func, GLint ref, GLuint mask); +GL_APICALL void GL_APIENTRY glStencilMask (GLuint mask); +GL_APICALL void GL_APIENTRY glStencilMaskSeparate (GLenum face, GLuint mask); +GL_APICALL void GL_APIENTRY glStencilOp (GLenum fail, GLenum zfail, GLenum zpass); +GL_APICALL void GL_APIENTRY glStencilOpSeparate (GLenum face, GLenum sfail, GLenum dpfail, GLenum dppass); +GL_APICALL void GL_APIENTRY glTexImage2D (GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, const void *pixels); +GL_APICALL void GL_APIENTRY glTexParameterf (GLenum target, GLenum pname, GLfloat param); +GL_APICALL void GL_APIENTRY glTexParameterfv (GLenum target, GLenum pname, const GLfloat *params); +GL_APICALL void GL_APIENTRY glTexParameteri (GLenum target, GLenum pname, GLint param); +GL_APICALL void GL_APIENTRY glTexParameteriv (GLenum target, GLenum pname, const GLint *params); +GL_APICALL void GL_APIENTRY glTexSubImage2D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, const void *pixels); +GL_APICALL void GL_APIENTRY glUniform1f (GLint location, GLfloat v0); +GL_APICALL void GL_APIENTRY glUniform1fv (GLint location, GLsizei count, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniform1i (GLint location, GLint v0); +GL_APICALL void GL_APIENTRY glUniform1iv (GLint location, GLsizei count, const GLint *value); +GL_APICALL void GL_APIENTRY glUniform2f (GLint location, GLfloat v0, GLfloat v1); +GL_APICALL void GL_APIENTRY glUniform2fv (GLint location, GLsizei count, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniform2i (GLint location, GLint v0, GLint v1); +GL_APICALL void GL_APIENTRY glUniform2iv (GLint location, GLsizei count, const GLint *value); +GL_APICALL void GL_APIENTRY glUniform3f (GLint location, GLfloat v0, GLfloat v1, GLfloat v2); +GL_APICALL void GL_APIENTRY glUniform3fv (GLint location, GLsizei count, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniform3i (GLint location, GLint v0, GLint v1, GLint v2); +GL_APICALL void GL_APIENTRY glUniform3iv (GLint location, GLsizei count, const GLint *value); +GL_APICALL void GL_APIENTRY glUniform4f (GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3); +GL_APICALL void GL_APIENTRY glUniform4fv (GLint location, GLsizei count, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniform4i (GLint location, GLint v0, GLint v1, GLint v2, GLint v3); +GL_APICALL void GL_APIENTRY glUniform4iv (GLint location, GLsizei count, const GLint *value); +GL_APICALL void GL_APIENTRY glUniformMatrix2fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniformMatrix3fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniformMatrix4fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUseProgram (GLuint program); +GL_APICALL void GL_APIENTRY glValidateProgram (GLuint program); +GL_APICALL void GL_APIENTRY glVertexAttrib1f (GLuint index, GLfloat x); +GL_APICALL void GL_APIENTRY glVertexAttrib1fv (GLuint index, const GLfloat *v); +GL_APICALL void GL_APIENTRY glVertexAttrib2f (GLuint index, GLfloat x, GLfloat y); +GL_APICALL void GL_APIENTRY 
glVertexAttrib2fv (GLuint index, const GLfloat *v); +GL_APICALL void GL_APIENTRY glVertexAttrib3f (GLuint index, GLfloat x, GLfloat y, GLfloat z); +GL_APICALL void GL_APIENTRY glVertexAttrib3fv (GLuint index, const GLfloat *v); +GL_APICALL void GL_APIENTRY glVertexAttrib4f (GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w); +GL_APICALL void GL_APIENTRY glVertexAttrib4fv (GLuint index, const GLfloat *v); +GL_APICALL void GL_APIENTRY glVertexAttribPointer (GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, const void *pointer); +GL_APICALL void GL_APIENTRY glViewport (GLint x, GLint y, GLsizei width, GLsizei height); +#endif +#endif /* GL_ES_VERSION_2_0 */ + +#ifndef GL_ES_VERSION_3_0 +#define GL_ES_VERSION_3_0 1 +typedef unsigned short GLhalf; +#define GL_READ_BUFFER 0x0C02 +#define GL_UNPACK_ROW_LENGTH 0x0CF2 +#define GL_UNPACK_SKIP_ROWS 0x0CF3 +#define GL_UNPACK_SKIP_PIXELS 0x0CF4 +#define GL_PACK_ROW_LENGTH 0x0D02 +#define GL_PACK_SKIP_ROWS 0x0D03 +#define GL_PACK_SKIP_PIXELS 0x0D04 +#define GL_COLOR 0x1800 +#define GL_DEPTH 0x1801 +#define GL_STENCIL 0x1802 +#define GL_RED 0x1903 +#define GL_RGB8 0x8051 +#define GL_RGBA8 0x8058 +#define GL_RGB10_A2 0x8059 +#define GL_TEXTURE_BINDING_3D 0x806A +#define GL_UNPACK_SKIP_IMAGES 0x806D +#define GL_UNPACK_IMAGE_HEIGHT 0x806E +#define GL_TEXTURE_3D 0x806F +#define GL_TEXTURE_WRAP_R 0x8072 +#define GL_MAX_3D_TEXTURE_SIZE 0x8073 +#define GL_UNSIGNED_INT_2_10_10_10_REV 0x8368 +#define GL_MAX_ELEMENTS_VERTICES 0x80E8 +#define GL_MAX_ELEMENTS_INDICES 0x80E9 +#define GL_TEXTURE_MIN_LOD 0x813A +#define GL_TEXTURE_MAX_LOD 0x813B +#define GL_TEXTURE_BASE_LEVEL 0x813C +#define GL_TEXTURE_MAX_LEVEL 0x813D +#define GL_MIN 0x8007 +#define GL_MAX 0x8008 +#define GL_DEPTH_COMPONENT24 0x81A6 +#define GL_MAX_TEXTURE_LOD_BIAS 0x84FD +#define GL_TEXTURE_COMPARE_MODE 0x884C +#define GL_TEXTURE_COMPARE_FUNC 0x884D +#define GL_CURRENT_QUERY 0x8865 +#define GL_QUERY_RESULT 0x8866 +#define GL_QUERY_RESULT_AVAILABLE 0x8867 +#define GL_BUFFER_MAPPED 0x88BC +#define GL_BUFFER_MAP_POINTER 0x88BD +#define GL_STREAM_READ 0x88E1 +#define GL_STREAM_COPY 0x88E2 +#define GL_STATIC_READ 0x88E5 +#define GL_STATIC_COPY 0x88E6 +#define GL_DYNAMIC_READ 0x88E9 +#define GL_DYNAMIC_COPY 0x88EA +#define GL_MAX_DRAW_BUFFERS 0x8824 +#define GL_DRAW_BUFFER0 0x8825 +#define GL_DRAW_BUFFER1 0x8826 +#define GL_DRAW_BUFFER2 0x8827 +#define GL_DRAW_BUFFER3 0x8828 +#define GL_DRAW_BUFFER4 0x8829 +#define GL_DRAW_BUFFER5 0x882A +#define GL_DRAW_BUFFER6 0x882B +#define GL_DRAW_BUFFER7 0x882C +#define GL_DRAW_BUFFER8 0x882D +#define GL_DRAW_BUFFER9 0x882E +#define GL_DRAW_BUFFER10 0x882F +#define GL_DRAW_BUFFER11 0x8830 +#define GL_DRAW_BUFFER12 0x8831 +#define GL_DRAW_BUFFER13 0x8832 +#define GL_DRAW_BUFFER14 0x8833 +#define GL_DRAW_BUFFER15 0x8834 +#define GL_MAX_FRAGMENT_UNIFORM_COMPONENTS 0x8B49 +#define GL_MAX_VERTEX_UNIFORM_COMPONENTS 0x8B4A +#define GL_SAMPLER_3D 0x8B5F +#define GL_SAMPLER_2D_SHADOW 0x8B62 +#define GL_FRAGMENT_SHADER_DERIVATIVE_HINT 0x8B8B +#define GL_PIXEL_PACK_BUFFER 0x88EB +#define GL_PIXEL_UNPACK_BUFFER 0x88EC +#define GL_PIXEL_PACK_BUFFER_BINDING 0x88ED +#define GL_PIXEL_UNPACK_BUFFER_BINDING 0x88EF +#define GL_FLOAT_MAT2x3 0x8B65 +#define GL_FLOAT_MAT2x4 0x8B66 +#define GL_FLOAT_MAT3x2 0x8B67 +#define GL_FLOAT_MAT3x4 0x8B68 +#define GL_FLOAT_MAT4x2 0x8B69 +#define GL_FLOAT_MAT4x3 0x8B6A +#define GL_SRGB 0x8C40 +#define GL_SRGB8 0x8C41 +#define GL_SRGB8_ALPHA8 0x8C43 +#define GL_COMPARE_REF_TO_TEXTURE 0x884E +#define GL_MAJOR_VERSION 
0x821B +#define GL_MINOR_VERSION 0x821C +#define GL_NUM_EXTENSIONS 0x821D +#define GL_RGBA32F 0x8814 +#define GL_RGB32F 0x8815 +#define GL_RGBA16F 0x881A +#define GL_RGB16F 0x881B +#define GL_VERTEX_ATTRIB_ARRAY_INTEGER 0x88FD +#define GL_MAX_ARRAY_TEXTURE_LAYERS 0x88FF +#define GL_MIN_PROGRAM_TEXEL_OFFSET 0x8904 +#define GL_MAX_PROGRAM_TEXEL_OFFSET 0x8905 +#define GL_MAX_VARYING_COMPONENTS 0x8B4B +#define GL_TEXTURE_2D_ARRAY 0x8C1A +#define GL_TEXTURE_BINDING_2D_ARRAY 0x8C1D +#define GL_R11F_G11F_B10F 0x8C3A +#define GL_UNSIGNED_INT_10F_11F_11F_REV 0x8C3B +#define GL_RGB9_E5 0x8C3D +#define GL_UNSIGNED_INT_5_9_9_9_REV 0x8C3E +#define GL_TRANSFORM_FEEDBACK_VARYING_MAX_LENGTH 0x8C76 +#define GL_TRANSFORM_FEEDBACK_BUFFER_MODE 0x8C7F +#define GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_COMPONENTS 0x8C80 +#define GL_TRANSFORM_FEEDBACK_VARYINGS 0x8C83 +#define GL_TRANSFORM_FEEDBACK_BUFFER_START 0x8C84 +#define GL_TRANSFORM_FEEDBACK_BUFFER_SIZE 0x8C85 +#define GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN 0x8C88 +#define GL_RASTERIZER_DISCARD 0x8C89 +#define GL_MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS 0x8C8A +#define GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_ATTRIBS 0x8C8B +#define GL_INTERLEAVED_ATTRIBS 0x8C8C +#define GL_SEPARATE_ATTRIBS 0x8C8D +#define GL_TRANSFORM_FEEDBACK_BUFFER 0x8C8E +#define GL_TRANSFORM_FEEDBACK_BUFFER_BINDING 0x8C8F +#define GL_RGBA32UI 0x8D70 +#define GL_RGB32UI 0x8D71 +#define GL_RGBA16UI 0x8D76 +#define GL_RGB16UI 0x8D77 +#define GL_RGBA8UI 0x8D7C +#define GL_RGB8UI 0x8D7D +#define GL_RGBA32I 0x8D82 +#define GL_RGB32I 0x8D83 +#define GL_RGBA16I 0x8D88 +#define GL_RGB16I 0x8D89 +#define GL_RGBA8I 0x8D8E +#define GL_RGB8I 0x8D8F +#define GL_RED_INTEGER 0x8D94 +#define GL_RGB_INTEGER 0x8D98 +#define GL_RGBA_INTEGER 0x8D99 +#define GL_SAMPLER_2D_ARRAY 0x8DC1 +#define GL_SAMPLER_2D_ARRAY_SHADOW 0x8DC4 +#define GL_SAMPLER_CUBE_SHADOW 0x8DC5 +#define GL_UNSIGNED_INT_VEC2 0x8DC6 +#define GL_UNSIGNED_INT_VEC3 0x8DC7 +#define GL_UNSIGNED_INT_VEC4 0x8DC8 +#define GL_INT_SAMPLER_2D 0x8DCA +#define GL_INT_SAMPLER_3D 0x8DCB +#define GL_INT_SAMPLER_CUBE 0x8DCC +#define GL_INT_SAMPLER_2D_ARRAY 0x8DCF +#define GL_UNSIGNED_INT_SAMPLER_2D 0x8DD2 +#define GL_UNSIGNED_INT_SAMPLER_3D 0x8DD3 +#define GL_UNSIGNED_INT_SAMPLER_CUBE 0x8DD4 +#define GL_UNSIGNED_INT_SAMPLER_2D_ARRAY 0x8DD7 +#define GL_BUFFER_ACCESS_FLAGS 0x911F +#define GL_BUFFER_MAP_LENGTH 0x9120 +#define GL_BUFFER_MAP_OFFSET 0x9121 +#define GL_DEPTH_COMPONENT32F 0x8CAC +#define GL_DEPTH32F_STENCIL8 0x8CAD +#define GL_FLOAT_32_UNSIGNED_INT_24_8_REV 0x8DAD +#define GL_FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING 0x8210 +#define GL_FRAMEBUFFER_ATTACHMENT_COMPONENT_TYPE 0x8211 +#define GL_FRAMEBUFFER_ATTACHMENT_RED_SIZE 0x8212 +#define GL_FRAMEBUFFER_ATTACHMENT_GREEN_SIZE 0x8213 +#define GL_FRAMEBUFFER_ATTACHMENT_BLUE_SIZE 0x8214 +#define GL_FRAMEBUFFER_ATTACHMENT_ALPHA_SIZE 0x8215 +#define GL_FRAMEBUFFER_ATTACHMENT_DEPTH_SIZE 0x8216 +#define GL_FRAMEBUFFER_ATTACHMENT_STENCIL_SIZE 0x8217 +#define GL_FRAMEBUFFER_DEFAULT 0x8218 +#define GL_FRAMEBUFFER_UNDEFINED 0x8219 +#define GL_DEPTH_STENCIL_ATTACHMENT 0x821A +#define GL_DEPTH_STENCIL 0x84F9 +#define GL_UNSIGNED_INT_24_8 0x84FA +#define GL_DEPTH24_STENCIL8 0x88F0 +#define GL_UNSIGNED_NORMALIZED 0x8C17 +#define GL_DRAW_FRAMEBUFFER_BINDING 0x8CA6 +#define GL_READ_FRAMEBUFFER 0x8CA8 +#define GL_DRAW_FRAMEBUFFER 0x8CA9 +#define GL_READ_FRAMEBUFFER_BINDING 0x8CAA +#define GL_RENDERBUFFER_SAMPLES 0x8CAB +#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LAYER 0x8CD4 +#define GL_MAX_COLOR_ATTACHMENTS 0x8CDF +#define 
GL_COLOR_ATTACHMENT1 0x8CE1 +#define GL_COLOR_ATTACHMENT2 0x8CE2 +#define GL_COLOR_ATTACHMENT3 0x8CE3 +#define GL_COLOR_ATTACHMENT4 0x8CE4 +#define GL_COLOR_ATTACHMENT5 0x8CE5 +#define GL_COLOR_ATTACHMENT6 0x8CE6 +#define GL_COLOR_ATTACHMENT7 0x8CE7 +#define GL_COLOR_ATTACHMENT8 0x8CE8 +#define GL_COLOR_ATTACHMENT9 0x8CE9 +#define GL_COLOR_ATTACHMENT10 0x8CEA +#define GL_COLOR_ATTACHMENT11 0x8CEB +#define GL_COLOR_ATTACHMENT12 0x8CEC +#define GL_COLOR_ATTACHMENT13 0x8CED +#define GL_COLOR_ATTACHMENT14 0x8CEE +#define GL_COLOR_ATTACHMENT15 0x8CEF +#define GL_COLOR_ATTACHMENT16 0x8CF0 +#define GL_COLOR_ATTACHMENT17 0x8CF1 +#define GL_COLOR_ATTACHMENT18 0x8CF2 +#define GL_COLOR_ATTACHMENT19 0x8CF3 +#define GL_COLOR_ATTACHMENT20 0x8CF4 +#define GL_COLOR_ATTACHMENT21 0x8CF5 +#define GL_COLOR_ATTACHMENT22 0x8CF6 +#define GL_COLOR_ATTACHMENT23 0x8CF7 +#define GL_COLOR_ATTACHMENT24 0x8CF8 +#define GL_COLOR_ATTACHMENT25 0x8CF9 +#define GL_COLOR_ATTACHMENT26 0x8CFA +#define GL_COLOR_ATTACHMENT27 0x8CFB +#define GL_COLOR_ATTACHMENT28 0x8CFC +#define GL_COLOR_ATTACHMENT29 0x8CFD +#define GL_COLOR_ATTACHMENT30 0x8CFE +#define GL_COLOR_ATTACHMENT31 0x8CFF +#define GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE 0x8D56 +#define GL_MAX_SAMPLES 0x8D57 +#define GL_HALF_FLOAT 0x140B +#define GL_MAP_READ_BIT 0x0001 +#define GL_MAP_WRITE_BIT 0x0002 +#define GL_MAP_INVALIDATE_RANGE_BIT 0x0004 +#define GL_MAP_INVALIDATE_BUFFER_BIT 0x0008 +#define GL_MAP_FLUSH_EXPLICIT_BIT 0x0010 +#define GL_MAP_UNSYNCHRONIZED_BIT 0x0020 +#define GL_RG 0x8227 +#define GL_RG_INTEGER 0x8228 +#define GL_R8 0x8229 +#define GL_RG8 0x822B +#define GL_R16F 0x822D +#define GL_R32F 0x822E +#define GL_RG16F 0x822F +#define GL_RG32F 0x8230 +#define GL_R8I 0x8231 +#define GL_R8UI 0x8232 +#define GL_R16I 0x8233 +#define GL_R16UI 0x8234 +#define GL_R32I 0x8235 +#define GL_R32UI 0x8236 +#define GL_RG8I 0x8237 +#define GL_RG8UI 0x8238 +#define GL_RG16I 0x8239 +#define GL_RG16UI 0x823A +#define GL_RG32I 0x823B +#define GL_RG32UI 0x823C +#define GL_VERTEX_ARRAY_BINDING 0x85B5 +#define GL_R8_SNORM 0x8F94 +#define GL_RG8_SNORM 0x8F95 +#define GL_RGB8_SNORM 0x8F96 +#define GL_RGBA8_SNORM 0x8F97 +#define GL_SIGNED_NORMALIZED 0x8F9C +#define GL_PRIMITIVE_RESTART_FIXED_INDEX 0x8D69 +#define GL_COPY_READ_BUFFER 0x8F36 +#define GL_COPY_WRITE_BUFFER 0x8F37 +#define GL_COPY_READ_BUFFER_BINDING 0x8F36 +#define GL_COPY_WRITE_BUFFER_BINDING 0x8F37 +#define GL_UNIFORM_BUFFER 0x8A11 +#define GL_UNIFORM_BUFFER_BINDING 0x8A28 +#define GL_UNIFORM_BUFFER_START 0x8A29 +#define GL_UNIFORM_BUFFER_SIZE 0x8A2A +#define GL_MAX_VERTEX_UNIFORM_BLOCKS 0x8A2B +#define GL_MAX_FRAGMENT_UNIFORM_BLOCKS 0x8A2D +#define GL_MAX_COMBINED_UNIFORM_BLOCKS 0x8A2E +#define GL_MAX_UNIFORM_BUFFER_BINDINGS 0x8A2F +#define GL_MAX_UNIFORM_BLOCK_SIZE 0x8A30 +#define GL_MAX_COMBINED_VERTEX_UNIFORM_COMPONENTS 0x8A31 +#define GL_MAX_COMBINED_FRAGMENT_UNIFORM_COMPONENTS 0x8A33 +#define GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT 0x8A34 +#define GL_ACTIVE_UNIFORM_BLOCK_MAX_NAME_LENGTH 0x8A35 +#define GL_ACTIVE_UNIFORM_BLOCKS 0x8A36 +#define GL_UNIFORM_TYPE 0x8A37 +#define GL_UNIFORM_SIZE 0x8A38 +#define GL_UNIFORM_NAME_LENGTH 0x8A39 +#define GL_UNIFORM_BLOCK_INDEX 0x8A3A +#define GL_UNIFORM_OFFSET 0x8A3B +#define GL_UNIFORM_ARRAY_STRIDE 0x8A3C +#define GL_UNIFORM_MATRIX_STRIDE 0x8A3D +#define GL_UNIFORM_IS_ROW_MAJOR 0x8A3E +#define GL_UNIFORM_BLOCK_BINDING 0x8A3F +#define GL_UNIFORM_BLOCK_DATA_SIZE 0x8A40 +#define GL_UNIFORM_BLOCK_NAME_LENGTH 0x8A41 +#define GL_UNIFORM_BLOCK_ACTIVE_UNIFORMS 0x8A42 +#define 
GL_UNIFORM_BLOCK_ACTIVE_UNIFORM_INDICES 0x8A43 +#define GL_UNIFORM_BLOCK_REFERENCED_BY_VERTEX_SHADER 0x8A44 +#define GL_UNIFORM_BLOCK_REFERENCED_BY_FRAGMENT_SHADER 0x8A46 +#define GL_INVALID_INDEX 0xFFFFFFFFu +#define GL_MAX_VERTEX_OUTPUT_COMPONENTS 0x9122 +#define GL_MAX_FRAGMENT_INPUT_COMPONENTS 0x9125 +#define GL_MAX_SERVER_WAIT_TIMEOUT 0x9111 +#define GL_OBJECT_TYPE 0x9112 +#define GL_SYNC_CONDITION 0x9113 +#define GL_SYNC_STATUS 0x9114 +#define GL_SYNC_FLAGS 0x9115 +#define GL_SYNC_FENCE 0x9116 +#define GL_SYNC_GPU_COMMANDS_COMPLETE 0x9117 +#define GL_UNSIGNALED 0x9118 +#define GL_SIGNALED 0x9119 +#define GL_ALREADY_SIGNALED 0x911A +#define GL_TIMEOUT_EXPIRED 0x911B +#define GL_CONDITION_SATISFIED 0x911C +#define GL_WAIT_FAILED 0x911D +#define GL_SYNC_FLUSH_COMMANDS_BIT 0x00000001 +#define GL_TIMEOUT_IGNORED 0xFFFFFFFFFFFFFFFFull +#define GL_VERTEX_ATTRIB_ARRAY_DIVISOR 0x88FE +#define GL_ANY_SAMPLES_PASSED 0x8C2F +#define GL_ANY_SAMPLES_PASSED_CONSERVATIVE 0x8D6A +#define GL_SAMPLER_BINDING 0x8919 +#define GL_RGB10_A2UI 0x906F +#define GL_TEXTURE_SWIZZLE_R 0x8E42 +#define GL_TEXTURE_SWIZZLE_G 0x8E43 +#define GL_TEXTURE_SWIZZLE_B 0x8E44 +#define GL_TEXTURE_SWIZZLE_A 0x8E45 +#define GL_GREEN 0x1904 +#define GL_BLUE 0x1905 +#define GL_INT_2_10_10_10_REV 0x8D9F +#define GL_TRANSFORM_FEEDBACK 0x8E22 +#define GL_TRANSFORM_FEEDBACK_PAUSED 0x8E23 +#define GL_TRANSFORM_FEEDBACK_ACTIVE 0x8E24 +#define GL_TRANSFORM_FEEDBACK_BINDING 0x8E25 +#define GL_PROGRAM_BINARY_RETRIEVABLE_HINT 0x8257 +#define GL_PROGRAM_BINARY_LENGTH 0x8741 +#define GL_NUM_PROGRAM_BINARY_FORMATS 0x87FE +#define GL_PROGRAM_BINARY_FORMATS 0x87FF +#define GL_COMPRESSED_R11_EAC 0x9270 +#define GL_COMPRESSED_SIGNED_R11_EAC 0x9271 +#define GL_COMPRESSED_RG11_EAC 0x9272 +#define GL_COMPRESSED_SIGNED_RG11_EAC 0x9273 +#define GL_COMPRESSED_RGB8_ETC2 0x9274 +#define GL_COMPRESSED_SRGB8_ETC2 0x9275 +#define GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2 0x9276 +#define GL_COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2 0x9277 +#define GL_COMPRESSED_RGBA8_ETC2_EAC 0x9278 +#define GL_COMPRESSED_SRGB8_ALPHA8_ETC2_EAC 0x9279 +#define GL_TEXTURE_IMMUTABLE_FORMAT 0x912F +#define GL_MAX_ELEMENT_INDEX 0x8D6B +#define GL_NUM_SAMPLE_COUNTS 0x9380 +#define GL_TEXTURE_IMMUTABLE_LEVELS 0x82DF +typedef void (GL_APIENTRYP PFNGLREADBUFFERPROC) (GLenum src); +typedef void (GL_APIENTRYP PFNGLDRAWRANGEELEMENTSPROC) (GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum type, const void *indices); +typedef void (GL_APIENTRYP PFNGLTEXIMAGE3DPROC) (GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const void *pixels); +typedef void (GL_APIENTRYP PFNGLTEXSUBIMAGE3DPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const void *pixels); +typedef void (GL_APIENTRYP PFNGLCOPYTEXSUBIMAGE3DPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLint x, GLint y, GLsizei width, GLsizei height); +typedef void (GL_APIENTRYP PFNGLCOMPRESSEDTEXIMAGE3DPROC) (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLsizei imageSize, const void *data); +typedef void (GL_APIENTRYP PFNGLCOMPRESSEDTEXSUBIMAGE3DPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLsizei imageSize, const void *data); +typedef void 
(GL_APIENTRYP PFNGLGENQUERIESPROC) (GLsizei n, GLuint *ids); +typedef void (GL_APIENTRYP PFNGLDELETEQUERIESPROC) (GLsizei n, const GLuint *ids); +typedef GLboolean (GL_APIENTRYP PFNGLISQUERYPROC) (GLuint id); +typedef void (GL_APIENTRYP PFNGLBEGINQUERYPROC) (GLenum target, GLuint id); +typedef void (GL_APIENTRYP PFNGLENDQUERYPROC) (GLenum target); +typedef void (GL_APIENTRYP PFNGLGETQUERYIVPROC) (GLenum target, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETQUERYOBJECTUIVPROC) (GLuint id, GLenum pname, GLuint *params); +typedef GLboolean (GL_APIENTRYP PFNGLUNMAPBUFFERPROC) (GLenum target); +typedef void (GL_APIENTRYP PFNGLGETBUFFERPOINTERVPROC) (GLenum target, GLenum pname, void **params); +typedef void (GL_APIENTRYP PFNGLDRAWBUFFERSPROC) (GLsizei n, const GLenum *bufs); +typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX2X3FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX3X2FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX2X4FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX4X2FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX3X4FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLUNIFORMMATRIX4X3FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLBLITFRAMEBUFFERPROC) (GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter); +typedef void (GL_APIENTRYP PFNGLRENDERBUFFERSTORAGEMULTISAMPLEPROC) (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height); +typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERTEXTURELAYERPROC) (GLenum target, GLenum attachment, GLuint texture, GLint level, GLint layer); +typedef void *(GL_APIENTRYP PFNGLMAPBUFFERRANGEPROC) (GLenum target, GLintptr offset, GLsizeiptr length, GLbitfield access); +typedef void (GL_APIENTRYP PFNGLFLUSHMAPPEDBUFFERRANGEPROC) (GLenum target, GLintptr offset, GLsizeiptr length); +typedef void (GL_APIENTRYP PFNGLBINDVERTEXARRAYPROC) (GLuint array); +typedef void (GL_APIENTRYP PFNGLDELETEVERTEXARRAYSPROC) (GLsizei n, const GLuint *arrays); +typedef void (GL_APIENTRYP PFNGLGENVERTEXARRAYSPROC) (GLsizei n, GLuint *arrays); +typedef GLboolean (GL_APIENTRYP PFNGLISVERTEXARRAYPROC) (GLuint array); +typedef void (GL_APIENTRYP PFNGLGETINTEGERI_VPROC) (GLenum target, GLuint index, GLint *data); +typedef void (GL_APIENTRYP PFNGLBEGINTRANSFORMFEEDBACKPROC) (GLenum primitiveMode); +typedef void (GL_APIENTRYP PFNGLENDTRANSFORMFEEDBACKPROC) (void); +typedef void (GL_APIENTRYP PFNGLBINDBUFFERRANGEPROC) (GLenum target, GLuint index, GLuint buffer, GLintptr offset, GLsizeiptr size); +typedef void (GL_APIENTRYP PFNGLBINDBUFFERBASEPROC) (GLenum target, GLuint index, GLuint buffer); +typedef void (GL_APIENTRYP PFNGLTRANSFORMFEEDBACKVARYINGSPROC) (GLuint program, GLsizei count, const GLchar *const*varyings, GLenum bufferMode); +typedef void (GL_APIENTRYP PFNGLGETTRANSFORMFEEDBACKVARYINGPROC) (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLsizei *size, GLenum *type, GLchar *name); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBIPOINTERPROC) (GLuint index, GLint size, GLenum type, GLsizei 
stride, const void *pointer); +typedef void (GL_APIENTRYP PFNGLGETVERTEXATTRIBIIVPROC) (GLuint index, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETVERTEXATTRIBIUIVPROC) (GLuint index, GLenum pname, GLuint *params); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBI4IPROC) (GLuint index, GLint x, GLint y, GLint z, GLint w); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBI4UIPROC) (GLuint index, GLuint x, GLuint y, GLuint z, GLuint w); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBI4IVPROC) (GLuint index, const GLint *v); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBI4UIVPROC) (GLuint index, const GLuint *v); +typedef void (GL_APIENTRYP PFNGLGETUNIFORMUIVPROC) (GLuint program, GLint location, GLuint *params); +typedef GLint (GL_APIENTRYP PFNGLGETFRAGDATALOCATIONPROC) (GLuint program, const GLchar *name); +typedef void (GL_APIENTRYP PFNGLUNIFORM1UIPROC) (GLint location, GLuint v0); +typedef void (GL_APIENTRYP PFNGLUNIFORM2UIPROC) (GLint location, GLuint v0, GLuint v1); +typedef void (GL_APIENTRYP PFNGLUNIFORM3UIPROC) (GLint location, GLuint v0, GLuint v1, GLuint v2); +typedef void (GL_APIENTRYP PFNGLUNIFORM4UIPROC) (GLint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3); +typedef void (GL_APIENTRYP PFNGLUNIFORM1UIVPROC) (GLint location, GLsizei count, const GLuint *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM2UIVPROC) (GLint location, GLsizei count, const GLuint *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM3UIVPROC) (GLint location, GLsizei count, const GLuint *value); +typedef void (GL_APIENTRYP PFNGLUNIFORM4UIVPROC) (GLint location, GLsizei count, const GLuint *value); +typedef void (GL_APIENTRYP PFNGLCLEARBUFFERIVPROC) (GLenum buffer, GLint drawbuffer, const GLint *value); +typedef void (GL_APIENTRYP PFNGLCLEARBUFFERUIVPROC) (GLenum buffer, GLint drawbuffer, const GLuint *value); +typedef void (GL_APIENTRYP PFNGLCLEARBUFFERFVPROC) (GLenum buffer, GLint drawbuffer, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLCLEARBUFFERFIPROC) (GLenum buffer, GLint drawbuffer, GLfloat depth, GLint stencil); +typedef const GLubyte *(GL_APIENTRYP PFNGLGETSTRINGIPROC) (GLenum name, GLuint index); +typedef void (GL_APIENTRYP PFNGLCOPYBUFFERSUBDATAPROC) (GLenum readTarget, GLenum writeTarget, GLintptr readOffset, GLintptr writeOffset, GLsizeiptr size); +typedef void (GL_APIENTRYP PFNGLGETUNIFORMINDICESPROC) (GLuint program, GLsizei uniformCount, const GLchar *const*uniformNames, GLuint *uniformIndices); +typedef void (GL_APIENTRYP PFNGLGETACTIVEUNIFORMSIVPROC) (GLuint program, GLsizei uniformCount, const GLuint *uniformIndices, GLenum pname, GLint *params); +typedef GLuint (GL_APIENTRYP PFNGLGETUNIFORMBLOCKINDEXPROC) (GLuint program, const GLchar *uniformBlockName); +typedef void (GL_APIENTRYP PFNGLGETACTIVEUNIFORMBLOCKIVPROC) (GLuint program, GLuint uniformBlockIndex, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETACTIVEUNIFORMBLOCKNAMEPROC) (GLuint program, GLuint uniformBlockIndex, GLsizei bufSize, GLsizei *length, GLchar *uniformBlockName); +typedef void (GL_APIENTRYP PFNGLUNIFORMBLOCKBINDINGPROC) (GLuint program, GLuint uniformBlockIndex, GLuint uniformBlockBinding); +typedef void (GL_APIENTRYP PFNGLDRAWARRAYSINSTANCEDPROC) (GLenum mode, GLint first, GLsizei count, GLsizei instancecount); +typedef void (GL_APIENTRYP PFNGLDRAWELEMENTSINSTANCEDPROC) (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount); +typedef GLsync (GL_APIENTRYP PFNGLFENCESYNCPROC) (GLenum condition, GLbitfield flags); +typedef GLboolean 
(GL_APIENTRYP PFNGLISSYNCPROC) (GLsync sync); +typedef void (GL_APIENTRYP PFNGLDELETESYNCPROC) (GLsync sync); +typedef GLenum (GL_APIENTRYP PFNGLCLIENTWAITSYNCPROC) (GLsync sync, GLbitfield flags, GLuint64 timeout); +typedef void (GL_APIENTRYP PFNGLWAITSYNCPROC) (GLsync sync, GLbitfield flags, GLuint64 timeout); +typedef void (GL_APIENTRYP PFNGLGETINTEGER64VPROC) (GLenum pname, GLint64 *data); +typedef void (GL_APIENTRYP PFNGLGETSYNCIVPROC) (GLsync sync, GLenum pname, GLsizei bufSize, GLsizei *length, GLint *values); +typedef void (GL_APIENTRYP PFNGLGETINTEGER64I_VPROC) (GLenum target, GLuint index, GLint64 *data); +typedef void (GL_APIENTRYP PFNGLGETBUFFERPARAMETERI64VPROC) (GLenum target, GLenum pname, GLint64 *params); +typedef void (GL_APIENTRYP PFNGLGENSAMPLERSPROC) (GLsizei count, GLuint *samplers); +typedef void (GL_APIENTRYP PFNGLDELETESAMPLERSPROC) (GLsizei count, const GLuint *samplers); +typedef GLboolean (GL_APIENTRYP PFNGLISSAMPLERPROC) (GLuint sampler); +typedef void (GL_APIENTRYP PFNGLBINDSAMPLERPROC) (GLuint unit, GLuint sampler); +typedef void (GL_APIENTRYP PFNGLSAMPLERPARAMETERIPROC) (GLuint sampler, GLenum pname, GLint param); +typedef void (GL_APIENTRYP PFNGLSAMPLERPARAMETERIVPROC) (GLuint sampler, GLenum pname, const GLint *param); +typedef void (GL_APIENTRYP PFNGLSAMPLERPARAMETERFPROC) (GLuint sampler, GLenum pname, GLfloat param); +typedef void (GL_APIENTRYP PFNGLSAMPLERPARAMETERFVPROC) (GLuint sampler, GLenum pname, const GLfloat *param); +typedef void (GL_APIENTRYP PFNGLGETSAMPLERPARAMETERIVPROC) (GLuint sampler, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETSAMPLERPARAMETERFVPROC) (GLuint sampler, GLenum pname, GLfloat *params); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBDIVISORPROC) (GLuint index, GLuint divisor); +typedef void (GL_APIENTRYP PFNGLBINDTRANSFORMFEEDBACKPROC) (GLenum target, GLuint id); +typedef void (GL_APIENTRYP PFNGLDELETETRANSFORMFEEDBACKSPROC) (GLsizei n, const GLuint *ids); +typedef void (GL_APIENTRYP PFNGLGENTRANSFORMFEEDBACKSPROC) (GLsizei n, GLuint *ids); +typedef GLboolean (GL_APIENTRYP PFNGLISTRANSFORMFEEDBACKPROC) (GLuint id); +typedef void (GL_APIENTRYP PFNGLPAUSETRANSFORMFEEDBACKPROC) (void); +typedef void (GL_APIENTRYP PFNGLRESUMETRANSFORMFEEDBACKPROC) (void); +typedef void (GL_APIENTRYP PFNGLGETPROGRAMBINARYPROC) (GLuint program, GLsizei bufSize, GLsizei *length, GLenum *binaryFormat, void *binary); +typedef void (GL_APIENTRYP PFNGLPROGRAMBINARYPROC) (GLuint program, GLenum binaryFormat, const void *binary, GLsizei length); +typedef void (GL_APIENTRYP PFNGLPROGRAMPARAMETERIPROC) (GLuint program, GLenum pname, GLint value); +typedef void (GL_APIENTRYP PFNGLINVALIDATEFRAMEBUFFERPROC) (GLenum target, GLsizei numAttachments, const GLenum *attachments); +typedef void (GL_APIENTRYP PFNGLINVALIDATESUBFRAMEBUFFERPROC) (GLenum target, GLsizei numAttachments, const GLenum *attachments, GLint x, GLint y, GLsizei width, GLsizei height); +typedef void (GL_APIENTRYP PFNGLTEXSTORAGE2DPROC) (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height); +typedef void (GL_APIENTRYP PFNGLTEXSTORAGE3DPROC) (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth); +typedef void (GL_APIENTRYP PFNGLGETINTERNALFORMATIVPROC) (GLenum target, GLenum internalformat, GLenum pname, GLsizei bufSize, GLint *params); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glReadBuffer (GLenum src); +GL_APICALL void GL_APIENTRY glDrawRangeElements (GLenum mode, GLuint start, 
GLuint end, GLsizei count, GLenum type, const void *indices); +GL_APICALL void GL_APIENTRY glTexImage3D (GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const void *pixels); +GL_APICALL void GL_APIENTRY glTexSubImage3D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const void *pixels); +GL_APICALL void GL_APIENTRY glCopyTexSubImage3D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLint x, GLint y, GLsizei width, GLsizei height); +GL_APICALL void GL_APIENTRY glCompressedTexImage3D (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLsizei imageSize, const void *data); +GL_APICALL void GL_APIENTRY glCompressedTexSubImage3D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLsizei imageSize, const void *data); +GL_APICALL void GL_APIENTRY glGenQueries (GLsizei n, GLuint *ids); +GL_APICALL void GL_APIENTRY glDeleteQueries (GLsizei n, const GLuint *ids); +GL_APICALL GLboolean GL_APIENTRY glIsQuery (GLuint id); +GL_APICALL void GL_APIENTRY glBeginQuery (GLenum target, GLuint id); +GL_APICALL void GL_APIENTRY glEndQuery (GLenum target); +GL_APICALL void GL_APIENTRY glGetQueryiv (GLenum target, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetQueryObjectuiv (GLuint id, GLenum pname, GLuint *params); +GL_APICALL GLboolean GL_APIENTRY glUnmapBuffer (GLenum target); +GL_APICALL void GL_APIENTRY glGetBufferPointerv (GLenum target, GLenum pname, void **params); +GL_APICALL void GL_APIENTRY glDrawBuffers (GLsizei n, const GLenum *bufs); +GL_APICALL void GL_APIENTRY glUniformMatrix2x3fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniformMatrix3x2fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniformMatrix2x4fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniformMatrix4x2fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniformMatrix3x4fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glUniformMatrix4x3fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glBlitFramebuffer (GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter); +GL_APICALL void GL_APIENTRY glRenderbufferStorageMultisample (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height); +GL_APICALL void GL_APIENTRY glFramebufferTextureLayer (GLenum target, GLenum attachment, GLuint texture, GLint level, GLint layer); +GL_APICALL void *GL_APIENTRY glMapBufferRange (GLenum target, GLintptr offset, GLsizeiptr length, GLbitfield access); +GL_APICALL void GL_APIENTRY glFlushMappedBufferRange (GLenum target, GLintptr offset, GLsizeiptr length); +GL_APICALL void GL_APIENTRY glBindVertexArray (GLuint array); +GL_APICALL void GL_APIENTRY glDeleteVertexArrays (GLsizei n, const GLuint *arrays); +GL_APICALL void GL_APIENTRY glGenVertexArrays (GLsizei n, GLuint *arrays); +GL_APICALL GLboolean GL_APIENTRY glIsVertexArray 
(GLuint array); +GL_APICALL void GL_APIENTRY glGetIntegeri_v (GLenum target, GLuint index, GLint *data); +GL_APICALL void GL_APIENTRY glBeginTransformFeedback (GLenum primitiveMode); +GL_APICALL void GL_APIENTRY glEndTransformFeedback (void); +GL_APICALL void GL_APIENTRY glBindBufferRange (GLenum target, GLuint index, GLuint buffer, GLintptr offset, GLsizeiptr size); +GL_APICALL void GL_APIENTRY glBindBufferBase (GLenum target, GLuint index, GLuint buffer); +GL_APICALL void GL_APIENTRY glTransformFeedbackVaryings (GLuint program, GLsizei count, const GLchar *const*varyings, GLenum bufferMode); +GL_APICALL void GL_APIENTRY glGetTransformFeedbackVarying (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLsizei *size, GLenum *type, GLchar *name); +GL_APICALL void GL_APIENTRY glVertexAttribIPointer (GLuint index, GLint size, GLenum type, GLsizei stride, const void *pointer); +GL_APICALL void GL_APIENTRY glGetVertexAttribIiv (GLuint index, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetVertexAttribIuiv (GLuint index, GLenum pname, GLuint *params); +GL_APICALL void GL_APIENTRY glVertexAttribI4i (GLuint index, GLint x, GLint y, GLint z, GLint w); +GL_APICALL void GL_APIENTRY glVertexAttribI4ui (GLuint index, GLuint x, GLuint y, GLuint z, GLuint w); +GL_APICALL void GL_APIENTRY glVertexAttribI4iv (GLuint index, const GLint *v); +GL_APICALL void GL_APIENTRY glVertexAttribI4uiv (GLuint index, const GLuint *v); +GL_APICALL void GL_APIENTRY glGetUniformuiv (GLuint program, GLint location, GLuint *params); +GL_APICALL GLint GL_APIENTRY glGetFragDataLocation (GLuint program, const GLchar *name); +GL_APICALL void GL_APIENTRY glUniform1ui (GLint location, GLuint v0); +GL_APICALL void GL_APIENTRY glUniform2ui (GLint location, GLuint v0, GLuint v1); +GL_APICALL void GL_APIENTRY glUniform3ui (GLint location, GLuint v0, GLuint v1, GLuint v2); +GL_APICALL void GL_APIENTRY glUniform4ui (GLint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3); +GL_APICALL void GL_APIENTRY glUniform1uiv (GLint location, GLsizei count, const GLuint *value); +GL_APICALL void GL_APIENTRY glUniform2uiv (GLint location, GLsizei count, const GLuint *value); +GL_APICALL void GL_APIENTRY glUniform3uiv (GLint location, GLsizei count, const GLuint *value); +GL_APICALL void GL_APIENTRY glUniform4uiv (GLint location, GLsizei count, const GLuint *value); +GL_APICALL void GL_APIENTRY glClearBufferiv (GLenum buffer, GLint drawbuffer, const GLint *value); +GL_APICALL void GL_APIENTRY glClearBufferuiv (GLenum buffer, GLint drawbuffer, const GLuint *value); +GL_APICALL void GL_APIENTRY glClearBufferfv (GLenum buffer, GLint drawbuffer, const GLfloat *value); +GL_APICALL void GL_APIENTRY glClearBufferfi (GLenum buffer, GLint drawbuffer, GLfloat depth, GLint stencil); +GL_APICALL const GLubyte *GL_APIENTRY glGetStringi (GLenum name, GLuint index); +GL_APICALL void GL_APIENTRY glCopyBufferSubData (GLenum readTarget, GLenum writeTarget, GLintptr readOffset, GLintptr writeOffset, GLsizeiptr size); +GL_APICALL void GL_APIENTRY glGetUniformIndices (GLuint program, GLsizei uniformCount, const GLchar *const*uniformNames, GLuint *uniformIndices); +GL_APICALL void GL_APIENTRY glGetActiveUniformsiv (GLuint program, GLsizei uniformCount, const GLuint *uniformIndices, GLenum pname, GLint *params); +GL_APICALL GLuint GL_APIENTRY glGetUniformBlockIndex (GLuint program, const GLchar *uniformBlockName); +GL_APICALL void GL_APIENTRY glGetActiveUniformBlockiv (GLuint program, GLuint uniformBlockIndex, GLenum pname, GLint *params); 
+GL_APICALL void GL_APIENTRY glGetActiveUniformBlockName (GLuint program, GLuint uniformBlockIndex, GLsizei bufSize, GLsizei *length, GLchar *uniformBlockName); +GL_APICALL void GL_APIENTRY glUniformBlockBinding (GLuint program, GLuint uniformBlockIndex, GLuint uniformBlockBinding); +GL_APICALL void GL_APIENTRY glDrawArraysInstanced (GLenum mode, GLint first, GLsizei count, GLsizei instancecount); +GL_APICALL void GL_APIENTRY glDrawElementsInstanced (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount); +GL_APICALL GLsync GL_APIENTRY glFenceSync (GLenum condition, GLbitfield flags); +GL_APICALL GLboolean GL_APIENTRY glIsSync (GLsync sync); +GL_APICALL void GL_APIENTRY glDeleteSync (GLsync sync); +GL_APICALL GLenum GL_APIENTRY glClientWaitSync (GLsync sync, GLbitfield flags, GLuint64 timeout); +GL_APICALL void GL_APIENTRY glWaitSync (GLsync sync, GLbitfield flags, GLuint64 timeout); +GL_APICALL void GL_APIENTRY glGetInteger64v (GLenum pname, GLint64 *data); +GL_APICALL void GL_APIENTRY glGetSynciv (GLsync sync, GLenum pname, GLsizei bufSize, GLsizei *length, GLint *values); +GL_APICALL void GL_APIENTRY glGetInteger64i_v (GLenum target, GLuint index, GLint64 *data); +GL_APICALL void GL_APIENTRY glGetBufferParameteri64v (GLenum target, GLenum pname, GLint64 *params); +GL_APICALL void GL_APIENTRY glGenSamplers (GLsizei count, GLuint *samplers); +GL_APICALL void GL_APIENTRY glDeleteSamplers (GLsizei count, const GLuint *samplers); +GL_APICALL GLboolean GL_APIENTRY glIsSampler (GLuint sampler); +GL_APICALL void GL_APIENTRY glBindSampler (GLuint unit, GLuint sampler); +GL_APICALL void GL_APIENTRY glSamplerParameteri (GLuint sampler, GLenum pname, GLint param); +GL_APICALL void GL_APIENTRY glSamplerParameteriv (GLuint sampler, GLenum pname, const GLint *param); +GL_APICALL void GL_APIENTRY glSamplerParameterf (GLuint sampler, GLenum pname, GLfloat param); +GL_APICALL void GL_APIENTRY glSamplerParameterfv (GLuint sampler, GLenum pname, const GLfloat *param); +GL_APICALL void GL_APIENTRY glGetSamplerParameteriv (GLuint sampler, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetSamplerParameterfv (GLuint sampler, GLenum pname, GLfloat *params); +GL_APICALL void GL_APIENTRY glVertexAttribDivisor (GLuint index, GLuint divisor); +GL_APICALL void GL_APIENTRY glBindTransformFeedback (GLenum target, GLuint id); +GL_APICALL void GL_APIENTRY glDeleteTransformFeedbacks (GLsizei n, const GLuint *ids); +GL_APICALL void GL_APIENTRY glGenTransformFeedbacks (GLsizei n, GLuint *ids); +GL_APICALL GLboolean GL_APIENTRY glIsTransformFeedback (GLuint id); +GL_APICALL void GL_APIENTRY glPauseTransformFeedback (void); +GL_APICALL void GL_APIENTRY glResumeTransformFeedback (void); +GL_APICALL void GL_APIENTRY glGetProgramBinary (GLuint program, GLsizei bufSize, GLsizei *length, GLenum *binaryFormat, void *binary); +GL_APICALL void GL_APIENTRY glProgramBinary (GLuint program, GLenum binaryFormat, const void *binary, GLsizei length); +GL_APICALL void GL_APIENTRY glProgramParameteri (GLuint program, GLenum pname, GLint value); +GL_APICALL void GL_APIENTRY glInvalidateFramebuffer (GLenum target, GLsizei numAttachments, const GLenum *attachments); +GL_APICALL void GL_APIENTRY glInvalidateSubFramebuffer (GLenum target, GLsizei numAttachments, const GLenum *attachments, GLint x, GLint y, GLsizei width, GLsizei height); +GL_APICALL void GL_APIENTRY glTexStorage2D (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height); +GL_APICALL void GL_APIENTRY 
glTexStorage3D (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth); +GL_APICALL void GL_APIENTRY glGetInternalformativ (GLenum target, GLenum internalformat, GLenum pname, GLsizei bufSize, GLint *params); +#endif +#endif /* GL_ES_VERSION_3_0 */ + +#ifndef GL_ES_VERSION_3_1 +#define GL_ES_VERSION_3_1 1 +#define GL_COMPUTE_SHADER 0x91B9 +#define GL_MAX_COMPUTE_UNIFORM_BLOCKS 0x91BB +#define GL_MAX_COMPUTE_TEXTURE_IMAGE_UNITS 0x91BC +#define GL_MAX_COMPUTE_IMAGE_UNIFORMS 0x91BD +#define GL_MAX_COMPUTE_SHARED_MEMORY_SIZE 0x8262 +#define GL_MAX_COMPUTE_UNIFORM_COMPONENTS 0x8263 +#define GL_MAX_COMPUTE_ATOMIC_COUNTER_BUFFERS 0x8264 +#define GL_MAX_COMPUTE_ATOMIC_COUNTERS 0x8265 +#define GL_MAX_COMBINED_COMPUTE_UNIFORM_COMPONENTS 0x8266 +#define GL_MAX_COMPUTE_WORK_GROUP_INVOCATIONS 0x90EB +#define GL_MAX_COMPUTE_WORK_GROUP_COUNT 0x91BE +#define GL_MAX_COMPUTE_WORK_GROUP_SIZE 0x91BF +#define GL_COMPUTE_WORK_GROUP_SIZE 0x8267 +#define GL_DISPATCH_INDIRECT_BUFFER 0x90EE +#define GL_DISPATCH_INDIRECT_BUFFER_BINDING 0x90EF +#define GL_COMPUTE_SHADER_BIT 0x00000020 +#define GL_DRAW_INDIRECT_BUFFER 0x8F3F +#define GL_DRAW_INDIRECT_BUFFER_BINDING 0x8F43 +#define GL_MAX_UNIFORM_LOCATIONS 0x826E +#define GL_FRAMEBUFFER_DEFAULT_WIDTH 0x9310 +#define GL_FRAMEBUFFER_DEFAULT_HEIGHT 0x9311 +#define GL_FRAMEBUFFER_DEFAULT_SAMPLES 0x9313 +#define GL_FRAMEBUFFER_DEFAULT_FIXED_SAMPLE_LOCATIONS 0x9314 +#define GL_MAX_FRAMEBUFFER_WIDTH 0x9315 +#define GL_MAX_FRAMEBUFFER_HEIGHT 0x9316 +#define GL_MAX_FRAMEBUFFER_SAMPLES 0x9318 +#define GL_UNIFORM 0x92E1 +#define GL_UNIFORM_BLOCK 0x92E2 +#define GL_PROGRAM_INPUT 0x92E3 +#define GL_PROGRAM_OUTPUT 0x92E4 +#define GL_BUFFER_VARIABLE 0x92E5 +#define GL_SHADER_STORAGE_BLOCK 0x92E6 +#define GL_ATOMIC_COUNTER_BUFFER 0x92C0 +#define GL_TRANSFORM_FEEDBACK_VARYING 0x92F4 +#define GL_ACTIVE_RESOURCES 0x92F5 +#define GL_MAX_NAME_LENGTH 0x92F6 +#define GL_MAX_NUM_ACTIVE_VARIABLES 0x92F7 +#define GL_NAME_LENGTH 0x92F9 +#define GL_TYPE 0x92FA +#define GL_ARRAY_SIZE 0x92FB +#define GL_OFFSET 0x92FC +#define GL_BLOCK_INDEX 0x92FD +#define GL_ARRAY_STRIDE 0x92FE +#define GL_MATRIX_STRIDE 0x92FF +#define GL_IS_ROW_MAJOR 0x9300 +#define GL_ATOMIC_COUNTER_BUFFER_INDEX 0x9301 +#define GL_BUFFER_BINDING 0x9302 +#define GL_BUFFER_DATA_SIZE 0x9303 +#define GL_NUM_ACTIVE_VARIABLES 0x9304 +#define GL_ACTIVE_VARIABLES 0x9305 +#define GL_REFERENCED_BY_VERTEX_SHADER 0x9306 +#define GL_REFERENCED_BY_FRAGMENT_SHADER 0x930A +#define GL_REFERENCED_BY_COMPUTE_SHADER 0x930B +#define GL_TOP_LEVEL_ARRAY_SIZE 0x930C +#define GL_TOP_LEVEL_ARRAY_STRIDE 0x930D +#define GL_LOCATION 0x930E +#define GL_VERTEX_SHADER_BIT 0x00000001 +#define GL_FRAGMENT_SHADER_BIT 0x00000002 +#define GL_ALL_SHADER_BITS 0xFFFFFFFF +#define GL_PROGRAM_SEPARABLE 0x8258 +#define GL_ACTIVE_PROGRAM 0x8259 +#define GL_PROGRAM_PIPELINE_BINDING 0x825A +#define GL_ATOMIC_COUNTER_BUFFER_BINDING 0x92C1 +#define GL_ATOMIC_COUNTER_BUFFER_START 0x92C2 +#define GL_ATOMIC_COUNTER_BUFFER_SIZE 0x92C3 +#define GL_MAX_VERTEX_ATOMIC_COUNTER_BUFFERS 0x92CC +#define GL_MAX_FRAGMENT_ATOMIC_COUNTER_BUFFERS 0x92D0 +#define GL_MAX_COMBINED_ATOMIC_COUNTER_BUFFERS 0x92D1 +#define GL_MAX_VERTEX_ATOMIC_COUNTERS 0x92D2 +#define GL_MAX_FRAGMENT_ATOMIC_COUNTERS 0x92D6 +#define GL_MAX_COMBINED_ATOMIC_COUNTERS 0x92D7 +#define GL_MAX_ATOMIC_COUNTER_BUFFER_SIZE 0x92D8 +#define GL_MAX_ATOMIC_COUNTER_BUFFER_BINDINGS 0x92DC +#define GL_ACTIVE_ATOMIC_COUNTER_BUFFERS 0x92D9 +#define GL_UNSIGNED_INT_ATOMIC_COUNTER 0x92DB 
+#define GL_MAX_IMAGE_UNITS 0x8F38 +#define GL_MAX_VERTEX_IMAGE_UNIFORMS 0x90CA +#define GL_MAX_FRAGMENT_IMAGE_UNIFORMS 0x90CE +#define GL_MAX_COMBINED_IMAGE_UNIFORMS 0x90CF +#define GL_IMAGE_BINDING_NAME 0x8F3A +#define GL_IMAGE_BINDING_LEVEL 0x8F3B +#define GL_IMAGE_BINDING_LAYERED 0x8F3C +#define GL_IMAGE_BINDING_LAYER 0x8F3D +#define GL_IMAGE_BINDING_ACCESS 0x8F3E +#define GL_IMAGE_BINDING_FORMAT 0x906E +#define GL_VERTEX_ATTRIB_ARRAY_BARRIER_BIT 0x00000001 +#define GL_ELEMENT_ARRAY_BARRIER_BIT 0x00000002 +#define GL_UNIFORM_BARRIER_BIT 0x00000004 +#define GL_TEXTURE_FETCH_BARRIER_BIT 0x00000008 +#define GL_SHADER_IMAGE_ACCESS_BARRIER_BIT 0x00000020 +#define GL_COMMAND_BARRIER_BIT 0x00000040 +#define GL_PIXEL_BUFFER_BARRIER_BIT 0x00000080 +#define GL_TEXTURE_UPDATE_BARRIER_BIT 0x00000100 +#define GL_BUFFER_UPDATE_BARRIER_BIT 0x00000200 +#define GL_FRAMEBUFFER_BARRIER_BIT 0x00000400 +#define GL_TRANSFORM_FEEDBACK_BARRIER_BIT 0x00000800 +#define GL_ATOMIC_COUNTER_BARRIER_BIT 0x00001000 +#define GL_ALL_BARRIER_BITS 0xFFFFFFFF +#define GL_IMAGE_2D 0x904D +#define GL_IMAGE_3D 0x904E +#define GL_IMAGE_CUBE 0x9050 +#define GL_IMAGE_2D_ARRAY 0x9053 +#define GL_INT_IMAGE_2D 0x9058 +#define GL_INT_IMAGE_3D 0x9059 +#define GL_INT_IMAGE_CUBE 0x905B +#define GL_INT_IMAGE_2D_ARRAY 0x905E +#define GL_UNSIGNED_INT_IMAGE_2D 0x9063 +#define GL_UNSIGNED_INT_IMAGE_3D 0x9064 +#define GL_UNSIGNED_INT_IMAGE_CUBE 0x9066 +#define GL_UNSIGNED_INT_IMAGE_2D_ARRAY 0x9069 +#define GL_IMAGE_FORMAT_COMPATIBILITY_TYPE 0x90C7 +#define GL_IMAGE_FORMAT_COMPATIBILITY_BY_SIZE 0x90C8 +#define GL_IMAGE_FORMAT_COMPATIBILITY_BY_CLASS 0x90C9 +#define GL_READ_ONLY 0x88B8 +#define GL_WRITE_ONLY 0x88B9 +#define GL_READ_WRITE 0x88BA +#define GL_SHADER_STORAGE_BUFFER 0x90D2 +#define GL_SHADER_STORAGE_BUFFER_BINDING 0x90D3 +#define GL_SHADER_STORAGE_BUFFER_START 0x90D4 +#define GL_SHADER_STORAGE_BUFFER_SIZE 0x90D5 +#define GL_MAX_VERTEX_SHADER_STORAGE_BLOCKS 0x90D6 +#define GL_MAX_FRAGMENT_SHADER_STORAGE_BLOCKS 0x90DA +#define GL_MAX_COMPUTE_SHADER_STORAGE_BLOCKS 0x90DB +#define GL_MAX_COMBINED_SHADER_STORAGE_BLOCKS 0x90DC +#define GL_MAX_SHADER_STORAGE_BUFFER_BINDINGS 0x90DD +#define GL_MAX_SHADER_STORAGE_BLOCK_SIZE 0x90DE +#define GL_SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT 0x90DF +#define GL_SHADER_STORAGE_BARRIER_BIT 0x00002000 +#define GL_MAX_COMBINED_SHADER_OUTPUT_RESOURCES 0x8F39 +#define GL_DEPTH_STENCIL_TEXTURE_MODE 0x90EA +#define GL_STENCIL_INDEX 0x1901 +#define GL_MIN_PROGRAM_TEXTURE_GATHER_OFFSET 0x8E5E +#define GL_MAX_PROGRAM_TEXTURE_GATHER_OFFSET 0x8E5F +#define GL_SAMPLE_POSITION 0x8E50 +#define GL_SAMPLE_MASK 0x8E51 +#define GL_SAMPLE_MASK_VALUE 0x8E52 +#define GL_TEXTURE_2D_MULTISAMPLE 0x9100 +#define GL_MAX_SAMPLE_MASK_WORDS 0x8E59 +#define GL_MAX_COLOR_TEXTURE_SAMPLES 0x910E +#define GL_MAX_DEPTH_TEXTURE_SAMPLES 0x910F +#define GL_MAX_INTEGER_SAMPLES 0x9110 +#define GL_TEXTURE_BINDING_2D_MULTISAMPLE 0x9104 +#define GL_TEXTURE_SAMPLES 0x9106 +#define GL_TEXTURE_FIXED_SAMPLE_LOCATIONS 0x9107 +#define GL_TEXTURE_WIDTH 0x1000 +#define GL_TEXTURE_HEIGHT 0x1001 +#define GL_TEXTURE_DEPTH 0x8071 +#define GL_TEXTURE_INTERNAL_FORMAT 0x1003 +#define GL_TEXTURE_RED_SIZE 0x805C +#define GL_TEXTURE_GREEN_SIZE 0x805D +#define GL_TEXTURE_BLUE_SIZE 0x805E +#define GL_TEXTURE_ALPHA_SIZE 0x805F +#define GL_TEXTURE_DEPTH_SIZE 0x884A +#define GL_TEXTURE_STENCIL_SIZE 0x88F1 +#define GL_TEXTURE_SHARED_SIZE 0x8C3F +#define GL_TEXTURE_RED_TYPE 0x8C10 +#define GL_TEXTURE_GREEN_TYPE 0x8C11 +#define GL_TEXTURE_BLUE_TYPE 0x8C12 +#define 
GL_TEXTURE_ALPHA_TYPE 0x8C13 +#define GL_TEXTURE_DEPTH_TYPE 0x8C16 +#define GL_TEXTURE_COMPRESSED 0x86A1 +#define GL_SAMPLER_2D_MULTISAMPLE 0x9108 +#define GL_INT_SAMPLER_2D_MULTISAMPLE 0x9109 +#define GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE 0x910A +#define GL_VERTEX_ATTRIB_BINDING 0x82D4 +#define GL_VERTEX_ATTRIB_RELATIVE_OFFSET 0x82D5 +#define GL_VERTEX_BINDING_DIVISOR 0x82D6 +#define GL_VERTEX_BINDING_OFFSET 0x82D7 +#define GL_VERTEX_BINDING_STRIDE 0x82D8 +#define GL_VERTEX_BINDING_BUFFER 0x8F4F +#define GL_MAX_VERTEX_ATTRIB_RELATIVE_OFFSET 0x82D9 +#define GL_MAX_VERTEX_ATTRIB_BINDINGS 0x82DA +#define GL_MAX_VERTEX_ATTRIB_STRIDE 0x82E5 +typedef void (GL_APIENTRYP PFNGLDISPATCHCOMPUTEPROC) (GLuint num_groups_x, GLuint num_groups_y, GLuint num_groups_z); +typedef void (GL_APIENTRYP PFNGLDISPATCHCOMPUTEINDIRECTPROC) (GLintptr indirect); +typedef void (GL_APIENTRYP PFNGLDRAWARRAYSINDIRECTPROC) (GLenum mode, const void *indirect); +typedef void (GL_APIENTRYP PFNGLDRAWELEMENTSINDIRECTPROC) (GLenum mode, GLenum type, const void *indirect); +typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERPARAMETERIPROC) (GLenum target, GLenum pname, GLint param); +typedef void (GL_APIENTRYP PFNGLGETFRAMEBUFFERPARAMETERIVPROC) (GLenum target, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETPROGRAMINTERFACEIVPROC) (GLuint program, GLenum programInterface, GLenum pname, GLint *params); +typedef GLuint (GL_APIENTRYP PFNGLGETPROGRAMRESOURCEINDEXPROC) (GLuint program, GLenum programInterface, const GLchar *name); +typedef void (GL_APIENTRYP PFNGLGETPROGRAMRESOURCENAMEPROC) (GLuint program, GLenum programInterface, GLuint index, GLsizei bufSize, GLsizei *length, GLchar *name); +typedef void (GL_APIENTRYP PFNGLGETPROGRAMRESOURCEIVPROC) (GLuint program, GLenum programInterface, GLuint index, GLsizei propCount, const GLenum *props, GLsizei bufSize, GLsizei *length, GLint *params); +typedef GLint (GL_APIENTRYP PFNGLGETPROGRAMRESOURCELOCATIONPROC) (GLuint program, GLenum programInterface, const GLchar *name); +typedef void (GL_APIENTRYP PFNGLUSEPROGRAMSTAGESPROC) (GLuint pipeline, GLbitfield stages, GLuint program); +typedef void (GL_APIENTRYP PFNGLACTIVESHADERPROGRAMPROC) (GLuint pipeline, GLuint program); +typedef GLuint (GL_APIENTRYP PFNGLCREATESHADERPROGRAMVPROC) (GLenum type, GLsizei count, const GLchar *const*strings); +typedef void (GL_APIENTRYP PFNGLBINDPROGRAMPIPELINEPROC) (GLuint pipeline); +typedef void (GL_APIENTRYP PFNGLDELETEPROGRAMPIPELINESPROC) (GLsizei n, const GLuint *pipelines); +typedef void (GL_APIENTRYP PFNGLGENPROGRAMPIPELINESPROC) (GLsizei n, GLuint *pipelines); +typedef GLboolean (GL_APIENTRYP PFNGLISPROGRAMPIPELINEPROC) (GLuint pipeline); +typedef void (GL_APIENTRYP PFNGLGETPROGRAMPIPELINEIVPROC) (GLuint pipeline, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM1IPROC) (GLuint program, GLint location, GLint v0); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM2IPROC) (GLuint program, GLint location, GLint v0, GLint v1); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM3IPROC) (GLuint program, GLint location, GLint v0, GLint v1, GLint v2); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM4IPROC) (GLuint program, GLint location, GLint v0, GLint v1, GLint v2, GLint v3); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM1UIPROC) (GLuint program, GLint location, GLuint v0); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM2UIPROC) (GLuint program, GLint location, GLuint v0, GLuint v1); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM3UIPROC) (GLuint program, GLint 
location, GLuint v0, GLuint v1, GLuint v2); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM4UIPROC) (GLuint program, GLint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM1FPROC) (GLuint program, GLint location, GLfloat v0); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM2FPROC) (GLuint program, GLint location, GLfloat v0, GLfloat v1); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM3FPROC) (GLuint program, GLint location, GLfloat v0, GLfloat v1, GLfloat v2); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM4FPROC) (GLuint program, GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM1IVPROC) (GLuint program, GLint location, GLsizei count, const GLint *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM2IVPROC) (GLuint program, GLint location, GLsizei count, const GLint *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM3IVPROC) (GLuint program, GLint location, GLsizei count, const GLint *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM4IVPROC) (GLuint program, GLint location, GLsizei count, const GLint *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM1UIVPROC) (GLuint program, GLint location, GLsizei count, const GLuint *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM2UIVPROC) (GLuint program, GLint location, GLsizei count, const GLuint *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM3UIVPROC) (GLuint program, GLint location, GLsizei count, const GLuint *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM4UIVPROC) (GLuint program, GLint location, GLsizei count, const GLuint *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM1FVPROC) (GLuint program, GLint location, GLsizei count, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM2FVPROC) (GLuint program, GLint location, GLsizei count, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM3FVPROC) (GLuint program, GLint location, GLsizei count, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORM4FVPROC) (GLuint program, GLint location, GLsizei count, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX2FVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX3FVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX4FVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX2X3FVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX3X2FVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX2X4FVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX4X2FVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX3X4FVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +typedef void (GL_APIENTRYP PFNGLPROGRAMUNIFORMMATRIX4X3FVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const 
GLfloat *value); +typedef void (GL_APIENTRYP PFNGLVALIDATEPROGRAMPIPELINEPROC) (GLuint pipeline); +typedef void (GL_APIENTRYP PFNGLGETPROGRAMPIPELINEINFOLOGPROC) (GLuint pipeline, GLsizei bufSize, GLsizei *length, GLchar *infoLog); +typedef void (GL_APIENTRYP PFNGLBINDIMAGETEXTUREPROC) (GLuint unit, GLuint texture, GLint level, GLboolean layered, GLint layer, GLenum access, GLenum format); +typedef void (GL_APIENTRYP PFNGLGETBOOLEANI_VPROC) (GLenum target, GLuint index, GLboolean *data); +typedef void (GL_APIENTRYP PFNGLMEMORYBARRIERPROC) (GLbitfield barriers); +typedef void (GL_APIENTRYP PFNGLMEMORYBARRIERBYREGIONPROC) (GLbitfield barriers); +typedef void (GL_APIENTRYP PFNGLTEXSTORAGE2DMULTISAMPLEPROC) (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLboolean fixedsamplelocations); +typedef void (GL_APIENTRYP PFNGLGETMULTISAMPLEFVPROC) (GLenum pname, GLuint index, GLfloat *val); +typedef void (GL_APIENTRYP PFNGLSAMPLEMASKIPROC) (GLuint maskNumber, GLbitfield mask); +typedef void (GL_APIENTRYP PFNGLGETTEXLEVELPARAMETERIVPROC) (GLenum target, GLint level, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETTEXLEVELPARAMETERFVPROC) (GLenum target, GLint level, GLenum pname, GLfloat *params); +typedef void (GL_APIENTRYP PFNGLBINDVERTEXBUFFERPROC) (GLuint bindingindex, GLuint buffer, GLintptr offset, GLsizei stride); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBFORMATPROC) (GLuint attribindex, GLint size, GLenum type, GLboolean normalized, GLuint relativeoffset); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBIFORMATPROC) (GLuint attribindex, GLint size, GLenum type, GLuint relativeoffset); +typedef void (GL_APIENTRYP PFNGLVERTEXATTRIBBINDINGPROC) (GLuint attribindex, GLuint bindingindex); +typedef void (GL_APIENTRYP PFNGLVERTEXBINDINGDIVISORPROC) (GLuint bindingindex, GLuint divisor); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glDispatchCompute (GLuint num_groups_x, GLuint num_groups_y, GLuint num_groups_z); +GL_APICALL void GL_APIENTRY glDispatchComputeIndirect (GLintptr indirect); +GL_APICALL void GL_APIENTRY glDrawArraysIndirect (GLenum mode, const void *indirect); +GL_APICALL void GL_APIENTRY glDrawElementsIndirect (GLenum mode, GLenum type, const void *indirect); +GL_APICALL void GL_APIENTRY glFramebufferParameteri (GLenum target, GLenum pname, GLint param); +GL_APICALL void GL_APIENTRY glGetFramebufferParameteriv (GLenum target, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetProgramInterfaceiv (GLuint program, GLenum programInterface, GLenum pname, GLint *params); +GL_APICALL GLuint GL_APIENTRY glGetProgramResourceIndex (GLuint program, GLenum programInterface, const GLchar *name); +GL_APICALL void GL_APIENTRY glGetProgramResourceName (GLuint program, GLenum programInterface, GLuint index, GLsizei bufSize, GLsizei *length, GLchar *name); +GL_APICALL void GL_APIENTRY glGetProgramResourceiv (GLuint program, GLenum programInterface, GLuint index, GLsizei propCount, const GLenum *props, GLsizei bufSize, GLsizei *length, GLint *params); +GL_APICALL GLint GL_APIENTRY glGetProgramResourceLocation (GLuint program, GLenum programInterface, const GLchar *name); +GL_APICALL void GL_APIENTRY glUseProgramStages (GLuint pipeline, GLbitfield stages, GLuint program); +GL_APICALL void GL_APIENTRY glActiveShaderProgram (GLuint pipeline, GLuint program); +GL_APICALL GLuint GL_APIENTRY glCreateShaderProgramv (GLenum type, GLsizei count, const GLchar *const*strings); +GL_APICALL void GL_APIENTRY glBindProgramPipeline (GLuint 
pipeline); +GL_APICALL void GL_APIENTRY glDeleteProgramPipelines (GLsizei n, const GLuint *pipelines); +GL_APICALL void GL_APIENTRY glGenProgramPipelines (GLsizei n, GLuint *pipelines); +GL_APICALL GLboolean GL_APIENTRY glIsProgramPipeline (GLuint pipeline); +GL_APICALL void GL_APIENTRY glGetProgramPipelineiv (GLuint pipeline, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glProgramUniform1i (GLuint program, GLint location, GLint v0); +GL_APICALL void GL_APIENTRY glProgramUniform2i (GLuint program, GLint location, GLint v0, GLint v1); +GL_APICALL void GL_APIENTRY glProgramUniform3i (GLuint program, GLint location, GLint v0, GLint v1, GLint v2); +GL_APICALL void GL_APIENTRY glProgramUniform4i (GLuint program, GLint location, GLint v0, GLint v1, GLint v2, GLint v3); +GL_APICALL void GL_APIENTRY glProgramUniform1ui (GLuint program, GLint location, GLuint v0); +GL_APICALL void GL_APIENTRY glProgramUniform2ui (GLuint program, GLint location, GLuint v0, GLuint v1); +GL_APICALL void GL_APIENTRY glProgramUniform3ui (GLuint program, GLint location, GLuint v0, GLuint v1, GLuint v2); +GL_APICALL void GL_APIENTRY glProgramUniform4ui (GLuint program, GLint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3); +GL_APICALL void GL_APIENTRY glProgramUniform1f (GLuint program, GLint location, GLfloat v0); +GL_APICALL void GL_APIENTRY glProgramUniform2f (GLuint program, GLint location, GLfloat v0, GLfloat v1); +GL_APICALL void GL_APIENTRY glProgramUniform3f (GLuint program, GLint location, GLfloat v0, GLfloat v1, GLfloat v2); +GL_APICALL void GL_APIENTRY glProgramUniform4f (GLuint program, GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3); +GL_APICALL void GL_APIENTRY glProgramUniform1iv (GLuint program, GLint location, GLsizei count, const GLint *value); +GL_APICALL void GL_APIENTRY glProgramUniform2iv (GLuint program, GLint location, GLsizei count, const GLint *value); +GL_APICALL void GL_APIENTRY glProgramUniform3iv (GLuint program, GLint location, GLsizei count, const GLint *value); +GL_APICALL void GL_APIENTRY glProgramUniform4iv (GLuint program, GLint location, GLsizei count, const GLint *value); +GL_APICALL void GL_APIENTRY glProgramUniform1uiv (GLuint program, GLint location, GLsizei count, const GLuint *value); +GL_APICALL void GL_APIENTRY glProgramUniform2uiv (GLuint program, GLint location, GLsizei count, const GLuint *value); +GL_APICALL void GL_APIENTRY glProgramUniform3uiv (GLuint program, GLint location, GLsizei count, const GLuint *value); +GL_APICALL void GL_APIENTRY glProgramUniform4uiv (GLuint program, GLint location, GLsizei count, const GLuint *value); +GL_APICALL void GL_APIENTRY glProgramUniform1fv (GLuint program, GLint location, GLsizei count, const GLfloat *value); +GL_APICALL void GL_APIENTRY glProgramUniform2fv (GLuint program, GLint location, GLsizei count, const GLfloat *value); +GL_APICALL void GL_APIENTRY glProgramUniform3fv (GLuint program, GLint location, GLsizei count, const GLfloat *value); +GL_APICALL void GL_APIENTRY glProgramUniform4fv (GLuint program, GLint location, GLsizei count, const GLfloat *value); +GL_APICALL void GL_APIENTRY glProgramUniformMatrix2fv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glProgramUniformMatrix3fv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glProgramUniformMatrix4fv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); 
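+/*
+ * Editorial note, not part of the Khronos header: the prototypes above and
+ * below belong to the ES 3.1 separable-program API. A minimal usage sketch,
+ * assuming a current ES 3.1 context and a hypothetical fragment shader
+ * source string fsSource:
+ *
+ *   GLuint prog = glCreateShaderProgramv(GL_FRAGMENT_SHADER, 1, &fsSource);
+ *   GLuint pipe;
+ *   glGenProgramPipelines(1, &pipe);
+ *   glUseProgramStages(pipe, GL_FRAGMENT_SHADER_BIT, prog);
+ *   glBindProgramPipeline(pipe);
+ *   glProgramUniform4f(prog, glGetUniformLocation(prog, "uColor"),
+ *                      1.0f, 0.0f, 0.0f, 1.0f);
+ *
+ * (glProgramUniform* updates prog directly; no glUseProgram call is needed.)
+ */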
+GL_APICALL void GL_APIENTRY glProgramUniformMatrix2x3fv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glProgramUniformMatrix3x2fv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glProgramUniformMatrix2x4fv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glProgramUniformMatrix4x2fv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glProgramUniformMatrix3x4fv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glProgramUniformMatrix4x3fv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value); +GL_APICALL void GL_APIENTRY glValidateProgramPipeline (GLuint pipeline); +GL_APICALL void GL_APIENTRY glGetProgramPipelineInfoLog (GLuint pipeline, GLsizei bufSize, GLsizei *length, GLchar *infoLog); +GL_APICALL void GL_APIENTRY glBindImageTexture (GLuint unit, GLuint texture, GLint level, GLboolean layered, GLint layer, GLenum access, GLenum format); +GL_APICALL void GL_APIENTRY glGetBooleani_v (GLenum target, GLuint index, GLboolean *data); +GL_APICALL void GL_APIENTRY glMemoryBarrier (GLbitfield barriers); +GL_APICALL void GL_APIENTRY glMemoryBarrierByRegion (GLbitfield barriers); +GL_APICALL void GL_APIENTRY glTexStorage2DMultisample (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLboolean fixedsamplelocations); +GL_APICALL void GL_APIENTRY glGetMultisamplefv (GLenum pname, GLuint index, GLfloat *val); +GL_APICALL void GL_APIENTRY glSampleMaski (GLuint maskNumber, GLbitfield mask); +GL_APICALL void GL_APIENTRY glGetTexLevelParameteriv (GLenum target, GLint level, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetTexLevelParameterfv (GLenum target, GLint level, GLenum pname, GLfloat *params); +GL_APICALL void GL_APIENTRY glBindVertexBuffer (GLuint bindingindex, GLuint buffer, GLintptr offset, GLsizei stride); +GL_APICALL void GL_APIENTRY glVertexAttribFormat (GLuint attribindex, GLint size, GLenum type, GLboolean normalized, GLuint relativeoffset); +GL_APICALL void GL_APIENTRY glVertexAttribIFormat (GLuint attribindex, GLint size, GLenum type, GLuint relativeoffset); +GL_APICALL void GL_APIENTRY glVertexAttribBinding (GLuint attribindex, GLuint bindingindex); +GL_APICALL void GL_APIENTRY glVertexBindingDivisor (GLuint bindingindex, GLuint divisor); +#endif +#endif /* GL_ES_VERSION_3_1 */ + +#ifndef GL_ES_VERSION_3_2 +#define GL_ES_VERSION_3_2 1 +typedef void (GL_APIENTRY *GLDEBUGPROC)(GLenum source,GLenum type,GLuint id,GLenum severity,GLsizei length,const GLchar *message,const void *userParam); +#define GL_MULTISAMPLE_LINE_WIDTH_RANGE 0x9381 +#define GL_MULTISAMPLE_LINE_WIDTH_GRANULARITY 0x9382 +#define GL_MULTIPLY 0x9294 +#define GL_SCREEN 0x9295 +#define GL_OVERLAY 0x9296 +#define GL_DARKEN 0x9297 +#define GL_LIGHTEN 0x9298 +#define GL_COLORDODGE 0x9299 +#define GL_COLORBURN 0x929A +#define GL_HARDLIGHT 0x929B +#define GL_SOFTLIGHT 0x929C +#define GL_DIFFERENCE 0x929E +#define GL_EXCLUSION 0x92A0 +#define GL_HSL_HUE 0x92AD +#define GL_HSL_SATURATION 0x92AE +#define GL_HSL_COLOR 0x92AF +#define GL_HSL_LUMINOSITY 0x92B0 +#define GL_DEBUG_OUTPUT_SYNCHRONOUS 0x8242 +#define GL_DEBUG_NEXT_LOGGED_MESSAGE_LENGTH 0x8243 +#define GL_DEBUG_CALLBACK_FUNCTION 
0x8244 +#define GL_DEBUG_CALLBACK_USER_PARAM 0x8245 +#define GL_DEBUG_SOURCE_API 0x8246 +#define GL_DEBUG_SOURCE_WINDOW_SYSTEM 0x8247 +#define GL_DEBUG_SOURCE_SHADER_COMPILER 0x8248 +#define GL_DEBUG_SOURCE_THIRD_PARTY 0x8249 +#define GL_DEBUG_SOURCE_APPLICATION 0x824A +#define GL_DEBUG_SOURCE_OTHER 0x824B +#define GL_DEBUG_TYPE_ERROR 0x824C +#define GL_DEBUG_TYPE_DEPRECATED_BEHAVIOR 0x824D +#define GL_DEBUG_TYPE_UNDEFINED_BEHAVIOR 0x824E +#define GL_DEBUG_TYPE_PORTABILITY 0x824F +#define GL_DEBUG_TYPE_PERFORMANCE 0x8250 +#define GL_DEBUG_TYPE_OTHER 0x8251 +#define GL_DEBUG_TYPE_MARKER 0x8268 +#define GL_DEBUG_TYPE_PUSH_GROUP 0x8269 +#define GL_DEBUG_TYPE_POP_GROUP 0x826A +#define GL_DEBUG_SEVERITY_NOTIFICATION 0x826B +#define GL_MAX_DEBUG_GROUP_STACK_DEPTH 0x826C +#define GL_DEBUG_GROUP_STACK_DEPTH 0x826D +#define GL_BUFFER 0x82E0 +#define GL_SHADER 0x82E1 +#define GL_PROGRAM 0x82E2 +#define GL_VERTEX_ARRAY 0x8074 +#define GL_QUERY 0x82E3 +#define GL_PROGRAM_PIPELINE 0x82E4 +#define GL_SAMPLER 0x82E6 +#define GL_MAX_LABEL_LENGTH 0x82E8 +#define GL_MAX_DEBUG_MESSAGE_LENGTH 0x9143 +#define GL_MAX_DEBUG_LOGGED_MESSAGES 0x9144 +#define GL_DEBUG_LOGGED_MESSAGES 0x9145 +#define GL_DEBUG_SEVERITY_HIGH 0x9146 +#define GL_DEBUG_SEVERITY_MEDIUM 0x9147 +#define GL_DEBUG_SEVERITY_LOW 0x9148 +#define GL_DEBUG_OUTPUT 0x92E0 +#define GL_CONTEXT_FLAG_DEBUG_BIT 0x00000002 +#define GL_STACK_OVERFLOW 0x0503 +#define GL_STACK_UNDERFLOW 0x0504 +#define GL_GEOMETRY_SHADER 0x8DD9 +#define GL_GEOMETRY_SHADER_BIT 0x00000004 +#define GL_GEOMETRY_VERTICES_OUT 0x8916 +#define GL_GEOMETRY_INPUT_TYPE 0x8917 +#define GL_GEOMETRY_OUTPUT_TYPE 0x8918 +#define GL_GEOMETRY_SHADER_INVOCATIONS 0x887F +#define GL_LAYER_PROVOKING_VERTEX 0x825E +#define GL_LINES_ADJACENCY 0x000A +#define GL_LINE_STRIP_ADJACENCY 0x000B +#define GL_TRIANGLES_ADJACENCY 0x000C +#define GL_TRIANGLE_STRIP_ADJACENCY 0x000D +#define GL_MAX_GEOMETRY_UNIFORM_COMPONENTS 0x8DDF +#define GL_MAX_GEOMETRY_UNIFORM_BLOCKS 0x8A2C +#define GL_MAX_COMBINED_GEOMETRY_UNIFORM_COMPONENTS 0x8A32 +#define GL_MAX_GEOMETRY_INPUT_COMPONENTS 0x9123 +#define GL_MAX_GEOMETRY_OUTPUT_COMPONENTS 0x9124 +#define GL_MAX_GEOMETRY_OUTPUT_VERTICES 0x8DE0 +#define GL_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS 0x8DE1 +#define GL_MAX_GEOMETRY_SHADER_INVOCATIONS 0x8E5A +#define GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS 0x8C29 +#define GL_MAX_GEOMETRY_ATOMIC_COUNTER_BUFFERS 0x92CF +#define GL_MAX_GEOMETRY_ATOMIC_COUNTERS 0x92D5 +#define GL_MAX_GEOMETRY_IMAGE_UNIFORMS 0x90CD +#define GL_MAX_GEOMETRY_SHADER_STORAGE_BLOCKS 0x90D7 +#define GL_FIRST_VERTEX_CONVENTION 0x8E4D +#define GL_LAST_VERTEX_CONVENTION 0x8E4E +#define GL_UNDEFINED_VERTEX 0x8260 +#define GL_PRIMITIVES_GENERATED 0x8C87 +#define GL_FRAMEBUFFER_DEFAULT_LAYERS 0x9312 +#define GL_MAX_FRAMEBUFFER_LAYERS 0x9317 +#define GL_FRAMEBUFFER_INCOMPLETE_LAYER_TARGETS 0x8DA8 +#define GL_FRAMEBUFFER_ATTACHMENT_LAYERED 0x8DA7 +#define GL_REFERENCED_BY_GEOMETRY_SHADER 0x9309 +#define GL_PRIMITIVE_BOUNDING_BOX 0x92BE +#define GL_CONTEXT_FLAG_ROBUST_ACCESS_BIT 0x00000004 +#define GL_CONTEXT_FLAGS 0x821E +#define GL_LOSE_CONTEXT_ON_RESET 0x8252 +#define GL_GUILTY_CONTEXT_RESET 0x8253 +#define GL_INNOCENT_CONTEXT_RESET 0x8254 +#define GL_UNKNOWN_CONTEXT_RESET 0x8255 +#define GL_RESET_NOTIFICATION_STRATEGY 0x8256 +#define GL_NO_RESET_NOTIFICATION 0x8261 +#define GL_CONTEXT_LOST 0x0507 +#define GL_SAMPLE_SHADING 0x8C36 +#define GL_MIN_SAMPLE_SHADING_VALUE 0x8C37 +#define GL_MIN_FRAGMENT_INTERPOLATION_OFFSET 0x8E5B +#define GL_MAX_FRAGMENT_INTERPOLATION_OFFSET 
0x8E5C +#define GL_FRAGMENT_INTERPOLATION_OFFSET_BITS 0x8E5D +#define GL_PATCHES 0x000E +#define GL_PATCH_VERTICES 0x8E72 +#define GL_TESS_CONTROL_OUTPUT_VERTICES 0x8E75 +#define GL_TESS_GEN_MODE 0x8E76 +#define GL_TESS_GEN_SPACING 0x8E77 +#define GL_TESS_GEN_VERTEX_ORDER 0x8E78 +#define GL_TESS_GEN_POINT_MODE 0x8E79 +#define GL_ISOLINES 0x8E7A +#define GL_QUADS 0x0007 +#define GL_FRACTIONAL_ODD 0x8E7B +#define GL_FRACTIONAL_EVEN 0x8E7C +#define GL_MAX_PATCH_VERTICES 0x8E7D +#define GL_MAX_TESS_GEN_LEVEL 0x8E7E +#define GL_MAX_TESS_CONTROL_UNIFORM_COMPONENTS 0x8E7F +#define GL_MAX_TESS_EVALUATION_UNIFORM_COMPONENTS 0x8E80 +#define GL_MAX_TESS_CONTROL_TEXTURE_IMAGE_UNITS 0x8E81 +#define GL_MAX_TESS_EVALUATION_TEXTURE_IMAGE_UNITS 0x8E82 +#define GL_MAX_TESS_CONTROL_OUTPUT_COMPONENTS 0x8E83 +#define GL_MAX_TESS_PATCH_COMPONENTS 0x8E84 +#define GL_MAX_TESS_CONTROL_TOTAL_OUTPUT_COMPONENTS 0x8E85 +#define GL_MAX_TESS_EVALUATION_OUTPUT_COMPONENTS 0x8E86 +#define GL_MAX_TESS_CONTROL_UNIFORM_BLOCKS 0x8E89 +#define GL_MAX_TESS_EVALUATION_UNIFORM_BLOCKS 0x8E8A +#define GL_MAX_TESS_CONTROL_INPUT_COMPONENTS 0x886C +#define GL_MAX_TESS_EVALUATION_INPUT_COMPONENTS 0x886D +#define GL_MAX_COMBINED_TESS_CONTROL_UNIFORM_COMPONENTS 0x8E1E +#define GL_MAX_COMBINED_TESS_EVALUATION_UNIFORM_COMPONENTS 0x8E1F +#define GL_MAX_TESS_CONTROL_ATOMIC_COUNTER_BUFFERS 0x92CD +#define GL_MAX_TESS_EVALUATION_ATOMIC_COUNTER_BUFFERS 0x92CE +#define GL_MAX_TESS_CONTROL_ATOMIC_COUNTERS 0x92D3 +#define GL_MAX_TESS_EVALUATION_ATOMIC_COUNTERS 0x92D4 +#define GL_MAX_TESS_CONTROL_IMAGE_UNIFORMS 0x90CB +#define GL_MAX_TESS_EVALUATION_IMAGE_UNIFORMS 0x90CC +#define GL_MAX_TESS_CONTROL_SHADER_STORAGE_BLOCKS 0x90D8 +#define GL_MAX_TESS_EVALUATION_SHADER_STORAGE_BLOCKS 0x90D9 +#define GL_PRIMITIVE_RESTART_FOR_PATCHES_SUPPORTED 0x8221 +#define GL_IS_PER_PATCH 0x92E7 +#define GL_REFERENCED_BY_TESS_CONTROL_SHADER 0x9307 +#define GL_REFERENCED_BY_TESS_EVALUATION_SHADER 0x9308 +#define GL_TESS_CONTROL_SHADER 0x8E88 +#define GL_TESS_EVALUATION_SHADER 0x8E87 +#define GL_TESS_CONTROL_SHADER_BIT 0x00000008 +#define GL_TESS_EVALUATION_SHADER_BIT 0x00000010 +#define GL_TEXTURE_BORDER_COLOR 0x1004 +#define GL_CLAMP_TO_BORDER 0x812D +#define GL_TEXTURE_BUFFER 0x8C2A +#define GL_TEXTURE_BUFFER_BINDING 0x8C2A +#define GL_MAX_TEXTURE_BUFFER_SIZE 0x8C2B +#define GL_TEXTURE_BINDING_BUFFER 0x8C2C +#define GL_TEXTURE_BUFFER_DATA_STORE_BINDING 0x8C2D +#define GL_TEXTURE_BUFFER_OFFSET_ALIGNMENT 0x919F +#define GL_SAMPLER_BUFFER 0x8DC2 +#define GL_INT_SAMPLER_BUFFER 0x8DD0 +#define GL_UNSIGNED_INT_SAMPLER_BUFFER 0x8DD8 +#define GL_IMAGE_BUFFER 0x9051 +#define GL_INT_IMAGE_BUFFER 0x905C +#define GL_UNSIGNED_INT_IMAGE_BUFFER 0x9067 +#define GL_TEXTURE_BUFFER_OFFSET 0x919D +#define GL_TEXTURE_BUFFER_SIZE 0x919E +#define GL_COMPRESSED_RGBA_ASTC_4x4 0x93B0 +#define GL_COMPRESSED_RGBA_ASTC_5x4 0x93B1 +#define GL_COMPRESSED_RGBA_ASTC_5x5 0x93B2 +#define GL_COMPRESSED_RGBA_ASTC_6x5 0x93B3 +#define GL_COMPRESSED_RGBA_ASTC_6x6 0x93B4 +#define GL_COMPRESSED_RGBA_ASTC_8x5 0x93B5 +#define GL_COMPRESSED_RGBA_ASTC_8x6 0x93B6 +#define GL_COMPRESSED_RGBA_ASTC_8x8 0x93B7 +#define GL_COMPRESSED_RGBA_ASTC_10x5 0x93B8 +#define GL_COMPRESSED_RGBA_ASTC_10x6 0x93B9 +#define GL_COMPRESSED_RGBA_ASTC_10x8 0x93BA +#define GL_COMPRESSED_RGBA_ASTC_10x10 0x93BB +#define GL_COMPRESSED_RGBA_ASTC_12x10 0x93BC +#define GL_COMPRESSED_RGBA_ASTC_12x12 0x93BD +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4 0x93D0 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x4 0x93D1 +#define 
GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5 0x93D2 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x5 0x93D3 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6 0x93D4 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x5 0x93D5 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x6 0x93D6 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x8 0x93D7 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x5 0x93D8 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x6 0x93D9 +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x8 0x93DA +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x10 0x93DB +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x10 0x93DC +#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x12 0x93DD +#define GL_TEXTURE_CUBE_MAP_ARRAY 0x9009 +#define GL_TEXTURE_BINDING_CUBE_MAP_ARRAY 0x900A +#define GL_SAMPLER_CUBE_MAP_ARRAY 0x900C +#define GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW 0x900D +#define GL_INT_SAMPLER_CUBE_MAP_ARRAY 0x900E +#define GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY 0x900F +#define GL_IMAGE_CUBE_MAP_ARRAY 0x9054 +#define GL_INT_IMAGE_CUBE_MAP_ARRAY 0x905F +#define GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY 0x906A +#define GL_TEXTURE_2D_MULTISAMPLE_ARRAY 0x9102 +#define GL_TEXTURE_BINDING_2D_MULTISAMPLE_ARRAY 0x9105 +#define GL_SAMPLER_2D_MULTISAMPLE_ARRAY 0x910B +#define GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY 0x910C +#define GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY 0x910D +typedef void (GL_APIENTRYP PFNGLBLENDBARRIERPROC) (void); +typedef void (GL_APIENTRYP PFNGLCOPYIMAGESUBDATAPROC) (GLuint srcName, GLenum srcTarget, GLint srcLevel, GLint srcX, GLint srcY, GLint srcZ, GLuint dstName, GLenum dstTarget, GLint dstLevel, GLint dstX, GLint dstY, GLint dstZ, GLsizei srcWidth, GLsizei srcHeight, GLsizei srcDepth); +typedef void (GL_APIENTRYP PFNGLDEBUGMESSAGECONTROLPROC) (GLenum source, GLenum type, GLenum severity, GLsizei count, const GLuint *ids, GLboolean enabled); +typedef void (GL_APIENTRYP PFNGLDEBUGMESSAGEINSERTPROC) (GLenum source, GLenum type, GLuint id, GLenum severity, GLsizei length, const GLchar *buf); +typedef void (GL_APIENTRYP PFNGLDEBUGMESSAGECALLBACKPROC) (GLDEBUGPROC callback, const void *userParam); +typedef GLuint (GL_APIENTRYP PFNGLGETDEBUGMESSAGELOGPROC) (GLuint count, GLsizei bufSize, GLenum *sources, GLenum *types, GLuint *ids, GLenum *severities, GLsizei *lengths, GLchar *messageLog); +typedef void (GL_APIENTRYP PFNGLPUSHDEBUGGROUPPROC) (GLenum source, GLuint id, GLsizei length, const GLchar *message); +typedef void (GL_APIENTRYP PFNGLPOPDEBUGGROUPPROC) (void); +typedef void (GL_APIENTRYP PFNGLOBJECTLABELPROC) (GLenum identifier, GLuint name, GLsizei length, const GLchar *label); +typedef void (GL_APIENTRYP PFNGLGETOBJECTLABELPROC) (GLenum identifier, GLuint name, GLsizei bufSize, GLsizei *length, GLchar *label); +typedef void (GL_APIENTRYP PFNGLOBJECTPTRLABELPROC) (const void *ptr, GLsizei length, const GLchar *label); +typedef void (GL_APIENTRYP PFNGLGETOBJECTPTRLABELPROC) (const void *ptr, GLsizei bufSize, GLsizei *length, GLchar *label); +typedef void (GL_APIENTRYP PFNGLGETPOINTERVPROC) (GLenum pname, void **params); +typedef void (GL_APIENTRYP PFNGLENABLEIPROC) (GLenum target, GLuint index); +typedef void (GL_APIENTRYP PFNGLDISABLEIPROC) (GLenum target, GLuint index); +typedef void (GL_APIENTRYP PFNGLBLENDEQUATIONIPROC) (GLuint buf, GLenum mode); +typedef void (GL_APIENTRYP PFNGLBLENDEQUATIONSEPARATEIPROC) (GLuint buf, GLenum modeRGB, GLenum modeAlpha); +typedef void (GL_APIENTRYP PFNGLBLENDFUNCIPROC) (GLuint buf, GLenum src, GLenum dst); +typedef void (GL_APIENTRYP PFNGLBLENDFUNCSEPARATEIPROC) (GLuint buf, GLenum srcRGB, GLenum dstRGB, GLenum 
srcAlpha, GLenum dstAlpha); +typedef void (GL_APIENTRYP PFNGLCOLORMASKIPROC) (GLuint index, GLboolean r, GLboolean g, GLboolean b, GLboolean a); +typedef GLboolean (GL_APIENTRYP PFNGLISENABLEDIPROC) (GLenum target, GLuint index); +typedef void (GL_APIENTRYP PFNGLDRAWELEMENTSBASEVERTEXPROC) (GLenum mode, GLsizei count, GLenum type, const void *indices, GLint basevertex); +typedef void (GL_APIENTRYP PFNGLDRAWRANGEELEMENTSBASEVERTEXPROC) (GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum type, const void *indices, GLint basevertex); +typedef void (GL_APIENTRYP PFNGLDRAWELEMENTSINSTANCEDBASEVERTEXPROC) (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount, GLint basevertex); +typedef void (GL_APIENTRYP PFNGLFRAMEBUFFERTEXTUREPROC) (GLenum target, GLenum attachment, GLuint texture, GLint level); +typedef void (GL_APIENTRYP PFNGLPRIMITIVEBOUNDINGBOXPROC) (GLfloat minX, GLfloat minY, GLfloat minZ, GLfloat minW, GLfloat maxX, GLfloat maxY, GLfloat maxZ, GLfloat maxW); +typedef GLenum (GL_APIENTRYP PFNGLGETGRAPHICSRESETSTATUSPROC) (void); +typedef void (GL_APIENTRYP PFNGLREADNPIXELSPROC) (GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, GLsizei bufSize, void *data); +typedef void (GL_APIENTRYP PFNGLGETNUNIFORMFVPROC) (GLuint program, GLint location, GLsizei bufSize, GLfloat *params); +typedef void (GL_APIENTRYP PFNGLGETNUNIFORMIVPROC) (GLuint program, GLint location, GLsizei bufSize, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETNUNIFORMUIVPROC) (GLuint program, GLint location, GLsizei bufSize, GLuint *params); +typedef void (GL_APIENTRYP PFNGLMINSAMPLESHADINGPROC) (GLfloat value); +typedef void (GL_APIENTRYP PFNGLPATCHPARAMETERIPROC) (GLenum pname, GLint value); +typedef void (GL_APIENTRYP PFNGLTEXPARAMETERIIVPROC) (GLenum target, GLenum pname, const GLint *params); +typedef void (GL_APIENTRYP PFNGLTEXPARAMETERIUIVPROC) (GLenum target, GLenum pname, const GLuint *params); +typedef void (GL_APIENTRYP PFNGLGETTEXPARAMETERIIVPROC) (GLenum target, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETTEXPARAMETERIUIVPROC) (GLenum target, GLenum pname, GLuint *params); +typedef void (GL_APIENTRYP PFNGLSAMPLERPARAMETERIIVPROC) (GLuint sampler, GLenum pname, const GLint *param); +typedef void (GL_APIENTRYP PFNGLSAMPLERPARAMETERIUIVPROC) (GLuint sampler, GLenum pname, const GLuint *param); +typedef void (GL_APIENTRYP PFNGLGETSAMPLERPARAMETERIIVPROC) (GLuint sampler, GLenum pname, GLint *params); +typedef void (GL_APIENTRYP PFNGLGETSAMPLERPARAMETERIUIVPROC) (GLuint sampler, GLenum pname, GLuint *params); +typedef void (GL_APIENTRYP PFNGLTEXBUFFERPROC) (GLenum target, GLenum internalformat, GLuint buffer); +typedef void (GL_APIENTRYP PFNGLTEXBUFFERRANGEPROC) (GLenum target, GLenum internalformat, GLuint buffer, GLintptr offset, GLsizeiptr size); +typedef void (GL_APIENTRYP PFNGLTEXSTORAGE3DMULTISAMPLEPROC) (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLboolean fixedsamplelocations); +#ifdef GL_GLEXT_PROTOTYPES +GL_APICALL void GL_APIENTRY glBlendBarrier (void); +GL_APICALL void GL_APIENTRY glCopyImageSubData (GLuint srcName, GLenum srcTarget, GLint srcLevel, GLint srcX, GLint srcY, GLint srcZ, GLuint dstName, GLenum dstTarget, GLint dstLevel, GLint dstX, GLint dstY, GLint dstZ, GLsizei srcWidth, GLsizei srcHeight, GLsizei srcDepth); +GL_APICALL void GL_APIENTRY glDebugMessageControl (GLenum source, GLenum type, GLenum severity, GLsizei count, const GLuint *ids, 
GLboolean enabled); +GL_APICALL void GL_APIENTRY glDebugMessageInsert (GLenum source, GLenum type, GLuint id, GLenum severity, GLsizei length, const GLchar *buf); +GL_APICALL void GL_APIENTRY glDebugMessageCallback (GLDEBUGPROC callback, const void *userParam); +GL_APICALL GLuint GL_APIENTRY glGetDebugMessageLog (GLuint count, GLsizei bufSize, GLenum *sources, GLenum *types, GLuint *ids, GLenum *severities, GLsizei *lengths, GLchar *messageLog); +GL_APICALL void GL_APIENTRY glPushDebugGroup (GLenum source, GLuint id, GLsizei length, const GLchar *message); +GL_APICALL void GL_APIENTRY glPopDebugGroup (void); +GL_APICALL void GL_APIENTRY glObjectLabel (GLenum identifier, GLuint name, GLsizei length, const GLchar *label); +GL_APICALL void GL_APIENTRY glGetObjectLabel (GLenum identifier, GLuint name, GLsizei bufSize, GLsizei *length, GLchar *label); +GL_APICALL void GL_APIENTRY glObjectPtrLabel (const void *ptr, GLsizei length, const GLchar *label); +GL_APICALL void GL_APIENTRY glGetObjectPtrLabel (const void *ptr, GLsizei bufSize, GLsizei *length, GLchar *label); +GL_APICALL void GL_APIENTRY glGetPointerv (GLenum pname, void **params); +GL_APICALL void GL_APIENTRY glEnablei (GLenum target, GLuint index); +GL_APICALL void GL_APIENTRY glDisablei (GLenum target, GLuint index); +GL_APICALL void GL_APIENTRY glBlendEquationi (GLuint buf, GLenum mode); +GL_APICALL void GL_APIENTRY glBlendEquationSeparatei (GLuint buf, GLenum modeRGB, GLenum modeAlpha); +GL_APICALL void GL_APIENTRY glBlendFunci (GLuint buf, GLenum src, GLenum dst); +GL_APICALL void GL_APIENTRY glBlendFuncSeparatei (GLuint buf, GLenum srcRGB, GLenum dstRGB, GLenum srcAlpha, GLenum dstAlpha); +GL_APICALL void GL_APIENTRY glColorMaski (GLuint index, GLboolean r, GLboolean g, GLboolean b, GLboolean a); +GL_APICALL GLboolean GL_APIENTRY glIsEnabledi (GLenum target, GLuint index); +GL_APICALL void GL_APIENTRY glDrawElementsBaseVertex (GLenum mode, GLsizei count, GLenum type, const void *indices, GLint basevertex); +GL_APICALL void GL_APIENTRY glDrawRangeElementsBaseVertex (GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum type, const void *indices, GLint basevertex); +GL_APICALL void GL_APIENTRY glDrawElementsInstancedBaseVertex (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount, GLint basevertex); +GL_APICALL void GL_APIENTRY glFramebufferTexture (GLenum target, GLenum attachment, GLuint texture, GLint level); +GL_APICALL void GL_APIENTRY glPrimitiveBoundingBox (GLfloat minX, GLfloat minY, GLfloat minZ, GLfloat minW, GLfloat maxX, GLfloat maxY, GLfloat maxZ, GLfloat maxW); +GL_APICALL GLenum GL_APIENTRY glGetGraphicsResetStatus (void); +GL_APICALL void GL_APIENTRY glReadnPixels (GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, GLsizei bufSize, void *data); +GL_APICALL void GL_APIENTRY glGetnUniformfv (GLuint program, GLint location, GLsizei bufSize, GLfloat *params); +GL_APICALL void GL_APIENTRY glGetnUniformiv (GLuint program, GLint location, GLsizei bufSize, GLint *params); +GL_APICALL void GL_APIENTRY glGetnUniformuiv (GLuint program, GLint location, GLsizei bufSize, GLuint *params); +GL_APICALL void GL_APIENTRY glMinSampleShading (GLfloat value); +GL_APICALL void GL_APIENTRY glPatchParameteri (GLenum pname, GLint value); +GL_APICALL void GL_APIENTRY glTexParameterIiv (GLenum target, GLenum pname, const GLint *params); +GL_APICALL void GL_APIENTRY glTexParameterIuiv (GLenum target, GLenum pname, const GLuint *params); +GL_APICALL void GL_APIENTRY 
glGetTexParameterIiv (GLenum target, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetTexParameterIuiv (GLenum target, GLenum pname, GLuint *params); +GL_APICALL void GL_APIENTRY glSamplerParameterIiv (GLuint sampler, GLenum pname, const GLint *param); +GL_APICALL void GL_APIENTRY glSamplerParameterIuiv (GLuint sampler, GLenum pname, const GLuint *param); +GL_APICALL void GL_APIENTRY glGetSamplerParameterIiv (GLuint sampler, GLenum pname, GLint *params); +GL_APICALL void GL_APIENTRY glGetSamplerParameterIuiv (GLuint sampler, GLenum pname, GLuint *params); +GL_APICALL void GL_APIENTRY glTexBuffer (GLenum target, GLenum internalformat, GLuint buffer); +GL_APICALL void GL_APIENTRY glTexBufferRange (GLenum target, GLenum internalformat, GLuint buffer, GLintptr offset, GLsizeiptr size); +GL_APICALL void GL_APIENTRY glTexStorage3DMultisample (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLboolean fixedsamplelocations); +#endif +#endif /* GL_ES_VERSION_3_2 */ + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/sources/pvrsdk/Include/GLES3/gl3platform.h b/sources/pvrsdk/Include/GLES3/gl3platform.h new file mode 100755 index 00000000..372f93e6 --- /dev/null +++ b/sources/pvrsdk/Include/GLES3/gl3platform.h @@ -0,0 +1,38 @@ +#ifndef __gl3platform_h_ +#define __gl3platform_h_ + +/* +** Copyright (c) 2017 The Khronos Group Inc. +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +/* Platform-specific types and definitions for OpenGL ES 3.X gl3.h + * + * Adopters may modify khrplatform.h and this file to suit their platform. + * Please contribute modifications back to Khronos as pull requests on the + * public github repository: + * https://github.com/KhronosGroup/OpenGL-Registry + */ + +#include "KHR/khrplatform.h" + +#ifndef GL_APICALL +#define GL_APICALL KHRONOS_APICALL +#endif + +#ifndef GL_APIENTRY +#define GL_APIENTRY KHRONOS_APIENTRY +#endif + +#endif /* __gl3platform_h_ */ diff --git a/sources/pvrsdk/Include/KHR/khrplatform.h b/sources/pvrsdk/Include/KHR/khrplatform.h new file mode 100755 index 00000000..619eedaa --- /dev/null +++ b/sources/pvrsdk/Include/KHR/khrplatform.h @@ -0,0 +1,289 @@ +#ifndef __khrplatform_h_ +#define __khrplatform_h_ + +/* +** Copyright (c) 2008-2009 The Khronos Group Inc. +** +** Permission is hereby granted, free of charge, to any person obtaining a +** copy of this software and/or associated documentation files (the +** "Materials"), to deal in the Materials without restriction, including +** without limitation the rights to use, copy, modify, merge, publish, +** distribute, sublicense, and/or sell copies of the Materials, and to +** permit persons to whom the Materials are furnished to do so, subject to +** the following conditions: +** +** The above copyright notice and this permission notice shall be included +** in all copies or substantial portions of the Materials. 
+**
+** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+** MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+*/
+
+/* Khronos platform-specific types and definitions.
+ *
+ * $Revision: 32517 $ on $Date: 2016-03-11 02:41:19 -0800 (Fri, 11 Mar 2016) $
+ *
+ * Adopters may modify this file to suit their platform. Adopters are
+ * encouraged to submit platform specific modifications to the Khronos
+ * group so that they can be included in future versions of this file.
+ * Please submit changes by sending them to the public Khronos Bugzilla
+ * (http://khronos.org/bugzilla) by filing a bug against product
+ * "Khronos (general)" component "Registry".
+ *
+ * A predefined template which fills in some of the bug fields can be
+ * reached using http://tinyurl.com/khrplatform-h-bugreport, but you
+ * must create a Bugzilla login first.
+ *
+ *
+ * See the Implementer's Guidelines for information about where this file
+ * should be located on your system and for more details of its use:
+ * http://www.khronos.org/registry/implementers_guide.pdf
+ *
+ * This file should be included as
+ *        #include <KHR/khrplatform.h>
+ * by Khronos client API header files that use its types and defines.
+ *
+ * The types in khrplatform.h should only be used to define API-specific types.
+ *
+ * Types defined in khrplatform.h:
+ * khronos_int8_t signed 8 bit
+ * khronos_uint8_t unsigned 8 bit
+ * khronos_int16_t signed 16 bit
+ * khronos_uint16_t unsigned 16 bit
+ * khronos_int32_t signed 32 bit
+ * khronos_uint32_t unsigned 32 bit
+ * khronos_int64_t signed 64 bit
+ * khronos_uint64_t unsigned 64 bit
+ * khronos_intptr_t signed same number of bits as a pointer
+ * khronos_uintptr_t unsigned same number of bits as a pointer
+ * khronos_ssize_t signed size
+ * khronos_usize_t unsigned size
+ * khronos_float_t signed 32 bit floating point
+ * khronos_time_ns_t unsigned 64 bit time in nanoseconds
+ * khronos_utime_nanoseconds_t unsigned time interval or absolute time in
+ * nanoseconds
+ * khronos_stime_nanoseconds_t signed time interval in nanoseconds
+ * khronos_boolean_enum_t enumerated boolean type. This should
+ * only be used as a base type when a client API's boolean type is
+ * an enum. Client APIs which use an integer or other type for
+ * booleans cannot use this as the base type for their boolean.
+ *
+ * Tokens defined in khrplatform.h:
+ *
+ * KHRONOS_FALSE, KHRONOS_TRUE Enumerated boolean false/true values.
+ *
+ * KHRONOS_SUPPORT_INT64 is 1 if 64 bit integers are supported; otherwise 0.
+ * KHRONOS_SUPPORT_FLOAT is 1 if floats are supported; otherwise 0.
+ *
+ * Calling convention macros defined in this file:
+ * KHRONOS_APICALL
+ * KHRONOS_APIENTRY
+ * KHRONOS_APIATTRIBUTES
+ *
+ * These may be used in function prototypes as:
+ *
+ * KHRONOS_APICALL void KHRONOS_APIENTRY funcname(
+ * int arg1,
+ * int arg2) KHRONOS_APIATTRIBUTES;
+ */
+
+/*-------------------------------------------------------------------------
+ * Definition of KHRONOS_APICALL
+ *-------------------------------------------------------------------------
+ * This precedes the return type of the function in the function prototype.
+ */
+#if defined(_WIN32) && !defined(__SCITECH_SNAP__)
+# if defined (_DLL_EXPORTS)
+# define KHRONOS_APICALL __declspec(dllexport)
+# else
+# define KHRONOS_APICALL __declspec(dllimport)
+# endif
+#elif defined (__SYMBIAN32__)
+# define KHRONOS_APICALL IMPORT_C
+#elif defined(__ANDROID__)
+# include <sys/cdefs.h>
+# define KHRONOS_APICALL __attribute__((visibility("default")))
+#else
+# define KHRONOS_APICALL
+#endif
+
+/*-------------------------------------------------------------------------
+ * Definition of KHRONOS_APIENTRY
+ *-------------------------------------------------------------------------
+ * This follows the return type of the function and precedes the function
+ * name in the function prototype.
+ */
+#if defined(_WIN32) && !defined(_WIN32_WCE) && !defined(__SCITECH_SNAP__)
+ /* Win32 but not WinCE */
+# define KHRONOS_APIENTRY __stdcall
+#else
+# define KHRONOS_APIENTRY
+#endif
+
+/*-------------------------------------------------------------------------
+ * Definition of KHRONOS_APIATTRIBUTES
+ *-------------------------------------------------------------------------
+ * This follows the closing parenthesis of the function prototype arguments.
+ */
+#if defined (__ARMCC_2__)
+#define KHRONOS_APIATTRIBUTES __softfp
+#else
+#define KHRONOS_APIATTRIBUTES
+#endif
+
+/*-------------------------------------------------------------------------
+ * basic type definitions
+ *-----------------------------------------------------------------------*/
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || defined(__GNUC__) || defined(__SCO__) || defined(__USLC__)
+
+
+/*
+ * Using <stdint.h>
+ */
+#include <stdint.h>
+typedef int32_t khronos_int32_t;
+typedef uint32_t khronos_uint32_t;
+typedef int64_t khronos_int64_t;
+typedef uint64_t khronos_uint64_t;
+#define KHRONOS_SUPPORT_INT64 1
+#define KHRONOS_SUPPORT_FLOAT 1
+
+#elif defined(__VMS ) || defined(__sgi)
+
+/*
+ * Using <inttypes.h>
+ */
+#include <inttypes.h>
+typedef int32_t khronos_int32_t;
+typedef uint32_t khronos_uint32_t;
+typedef int64_t khronos_int64_t;
+typedef uint64_t khronos_uint64_t;
+#define KHRONOS_SUPPORT_INT64 1
+#define KHRONOS_SUPPORT_FLOAT 1
+
+#elif defined(_WIN32) && !defined(__SCITECH_SNAP__)
+
+/*
+ * Win32
+ */
+typedef __int32 khronos_int32_t;
+typedef unsigned __int32 khronos_uint32_t;
+typedef __int64 khronos_int64_t;
+typedef unsigned __int64 khronos_uint64_t;
+#define KHRONOS_SUPPORT_INT64 1
+#define KHRONOS_SUPPORT_FLOAT 1
+
+#elif defined(__sun__) || defined(__digital__)
+
+/*
+ * Sun or Digital
+ */
+typedef int khronos_int32_t;
+typedef unsigned int khronos_uint32_t;
+#if defined(__arch64__) || defined(_LP64)
+typedef long int khronos_int64_t;
+typedef unsigned long int khronos_uint64_t;
+#else
+typedef long long int khronos_int64_t;
+typedef unsigned long long int khronos_uint64_t;
+#endif /* __arch64__ */
+#define KHRONOS_SUPPORT_INT64 1
+#define KHRONOS_SUPPORT_FLOAT 1
+
+#elif 0
+
+/*
+ * Hypothetical platform with no float or int64 support
+ */
+typedef int khronos_int32_t;
+typedef unsigned int khronos_uint32_t;
+#define KHRONOS_SUPPORT_INT64 0
+#define KHRONOS_SUPPORT_FLOAT 0
+
+#else
+
+/*
+ * Generic fallback
+ */
+#include <stdint.h>
+typedef int32_t khronos_int32_t;
+typedef uint32_t khronos_uint32_t;
+typedef int64_t khronos_int64_t;
+typedef uint64_t khronos_uint64_t;
+#define KHRONOS_SUPPORT_INT64 1
+#define KHRONOS_SUPPORT_FLOAT 1
+
+#endif
+
+
+/*
+ * Types that are (so far) the same on all platforms
+ */
+typedef signed char khronos_int8_t;
+typedef unsigned char khronos_uint8_t;
+typedef signed short int khronos_int16_t;
+typedef
unsigned short int khronos_uint16_t; + +/* + * Types that differ between LLP64 and LP64 architectures - in LLP64, + * pointers are 64 bits, but 'long' is still 32 bits. Win64 appears + * to be the only LLP64 architecture in current use. + */ +#ifdef _WIN64 +typedef signed long long int khronos_intptr_t; +typedef unsigned long long int khronos_uintptr_t; +typedef signed long long int khronos_ssize_t; +typedef unsigned long long int khronos_usize_t; +#else +typedef signed long int khronos_intptr_t; +typedef unsigned long int khronos_uintptr_t; +typedef signed long int khronos_ssize_t; +typedef unsigned long int khronos_usize_t; +#endif + +#if KHRONOS_SUPPORT_FLOAT +/* + * Float type + */ +typedef float khronos_float_t; +#endif + +#if KHRONOS_SUPPORT_INT64 +/* Time types + * + * These types can be used to represent a time interval in nanoseconds or + * an absolute Unadjusted System Time. Unadjusted System Time is the number + * of nanoseconds since some arbitrary system event (e.g. since the last + * time the system booted). The Unadjusted System Time is an unsigned + * 64 bit value that wraps back to 0 every 584 years. Time intervals + * may be either signed or unsigned. + */ +typedef khronos_uint64_t khronos_utime_nanoseconds_t; +typedef khronos_int64_t khronos_stime_nanoseconds_t; +#endif + +/* + * Dummy value used to pad enum types to 32 bits. + */ +#ifndef KHRONOS_MAX_ENUM +#define KHRONOS_MAX_ENUM 0x7FFFFFFF +#endif + +/* + * Enumerated boolean type + * + * Values other than zero should be considered to be true. Therefore + * comparisons should not be made against KHRONOS_TRUE. + */ +typedef enum { + KHRONOS_FALSE = 0, + KHRONOS_TRUE = 1, + KHRONOS_BOOLEAN_ENUM_FORCE_SIZE = KHRONOS_MAX_ENUM +} khronos_boolean_enum_t; + +#endif /* __khrplatform_h_ */ diff --git a/sources/pvrsdk/Include/PVRScopeComms.h b/sources/pvrsdk/Include/PVRScopeComms.h new file mode 100755 index 00000000..1567efb2 --- /dev/null +++ b/sources/pvrsdk/Include/PVRScopeComms.h @@ -0,0 +1,297 @@ +/*!*********************************************************************** + + @file PVRScopeComms.h + @copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved. + @brief PVRScopeComms header file. @copybrief ScopeComms + +**************************************************************************/ + +#ifndef _PVRSCOPECOMMS_H_ +#define _PVRSCOPECOMMS_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +/* + PVRPerfServer and PVRTune communications +*/ + +/*! + @addtogroup ScopeComms PVRScopeComms + @brief The PVRScopeComms functionality of PVRScope allows an application to send user defined information to + PVRTune via PVRPerfServer, both as counters and marks, or as editable data that can be + passed back to the application. + @details PVRScopeComms has the following limitations: + \li PVRPerfServer must be running on the host device if a @ref ScopeComms enabled + application wishes to send custom counters or marks to PVRTune. If the application in + question also wishes to communicate with PVRScopeServices without experiencing any + undesired behaviour PVRPerfServer should be run with the '--disable-hwperf' flag. + \li The following types may be sent: Boolean, Enumerator, Float, Integer, String. 
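+
+        As an illustration (an editorial sketch, not part of the original
+        header), a minimal session that sends a single marker to PVRTune
+        might look like this, assuming PVRPerfServer is running on the
+        device:
+
+        @code
+        struct SSPSCommsData *psComms = pplInitialise("MyApp", 5);
+        if (psComms)
+        {
+            pplSendMark(psComms, "level loaded", 12);
+            pplSendFlush(psComms);
+            pplShutdown(psComms);
+        }
+        @endcode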
+ @{ +*/ + +/**************************************************************************** +** Enums +****************************************************************************/ + +/*!************************************************************************** + @enum ESPSCommsLibType + @brief Each editable library item has a data type associated with it +****************************************************************************/ +/// +enum ESPSCommsLibType +{ + eSPSCommsLibTypeString, ///< data is string (NOT NULL-terminated, use length parameter) + eSPSCommsLibTypeFloat, ///< data is SSPSCommsLibraryTypeFloat + eSPSCommsLibTypeInt, ///< data is SSPSCommsLibraryTypeInt + eSPSCommsLibTypeEnum, ///< data is string (NOT NULL-terminated, use length parameter). First line is selection number, subsequent lines are available options. + eSPSCommsLibTypeBool ///< data is SSPSCommsLibraryTypeBool +}; + +/**************************************************************************** +** Structures +****************************************************************************/ + +// Internal implementation data +struct SSPSCommsData; + +/*!************************************************************************** + @struct SSPSCommsLibraryItem + @brief Definition of one editable library item +****************************************************************************/ +struct SSPSCommsLibraryItem +{ + + const char *pszName; ///< Item name. If dots are used, PVRTune could show these as a foldable tree view. + unsigned int nNameLength; ///< Item name length + + enum ESPSCommsLibType eType; ///< Item type + + const char *pData; ///< Item data + unsigned int nDataLength; ///< Item data length +}; + +/*!************************************************************************** + @struct SSPSCommsLibraryTypeFloat + @brief Current, minimum and maximum values for an editable library item of type float +****************************************************************************/ +struct SSPSCommsLibraryTypeFloat +{ + float fCurrent; ///< Current value + float fMin; ///< Minimal value + float fMax; ///< Maximum value +}; + +/*!************************************************************************** + @struct SSPSCommsLibraryTypeInt + @brief Current, minimum and maximum values for an editable library item of type int +****************************************************************************/ +struct SSPSCommsLibraryTypeInt +{ + int nCurrent; ///< Current value + int nMin; ///< Minimal value + int nMax; ///< Maximum value +}; + +/*!************************************************************************** + @struct SSPSCommsLibraryTypeBool + @brief Current value for an editable library item of type bool +****************************************************************************/ +struct SSPSCommsLibraryTypeBool +{ + int nBoolValue; ///< Boolean value (zero = false) +}; + +/*!************************************************************************** + @struct SSPSCommsCounterDef + @brief Definition of one custom counter +****************************************************************************/ +struct SSPSCommsCounterDef +{ + const char *pszName; ///< Custom counter name + unsigned int nNameLength; ///< Custom counter name length +}; + +/**************************************************************************** +** Declarations +****************************************************************************/ + + +/*!************************************************************************** + @brief 
Initialise @ref ScopeComms
+ @return @ref ScopeComms data.
+****************************************************************************/
+struct SSPSCommsData *pplInitialise(
+ const char * const psName, ///< String to describe the application
+ const unsigned int nNameLen ///< String length
+);
+
+/*!**************************************************************************
+ @brief Shut down or de-initialise the remote control section of PVRScope.
+****************************************************************************/
+void pplShutdown(
+ struct SSPSCommsData *psData ///< Context data
+);
+
+/*!**************************************************************************
+ @brief Optional function. Sleeps until there is a connection to
+ PVRPerfServer, or time-out. Normally, each thread will wait for
+ its own connection, and each time-out will naturally happen in
+ parallel. But if a thread happens to have multiple connections,
+ N, then waiting for them all [in serial] with time-out M would
+ take N*M ms if they were all to time-out (e.g. PVRPerfServer is
+ not running); therefore this function is designed to allow an
+ entire array of connections to be waited upon simultaneously.
+****************************************************************************/
+void pplWaitForConnection(
+ struct SSPSCommsData * const psData, ///< Array of context data pointers
+ int * const pnBoolResults, ///< Array of results - false (0) if timeout
+ const unsigned int nCount, ///< Array length
+ const unsigned int nTimeOutMS ///< Time-out length in milliseconds
+);
+
+/*!**************************************************************************
+ @brief Query for the time. Units are microseconds; resolution is undefined.
+****************************************************************************/
+unsigned int pplGetTimeUS(
+ struct SSPSCommsData * const psData ///< Context data
+);
+
+/*!**************************************************************************
+ @brief Send a time-stamped string marker to be displayed in PVRTune.
+ @details Examples might be:
+ \li switching to outdoor renderer
+ \li starting benchmark test N
+****************************************************************************/
+int pplSendMark(
+ struct SSPSCommsData * const psData, ///< Context data
+ const char * const psString, ///< String to send
+ const unsigned int nLen ///< String length
+);
+
+/*!**************************************************************************
+ @brief Send a time-stamped begin marker to PVRTune.
+ @details Every begin must at some point be followed by an end; begin/end
+ pairs can be nested. PVRTune will show these as an activity
+ timeline, using a "flame graph" style when there is nesting.
+ See also the CPPLProcessingScoped helper class.
+****************************************************************************/
+int pplSendProcessingBegin(
+ struct SSPSCommsData * const psData, ///< Context data
+ const char * const psString, ///< Name of the processing block
+ const unsigned int nLen, ///< String length
+ const unsigned int nFrame ///< Iteration (or frame) number, by which processes can be grouped.
+);
+
+/*!**************************************************************************
+ @brief Send a time-stamped end marker to PVRTune.
+ @details Every begin must at some point be followed by an end; begin/end
+ pairs can be nested. PVRTune will show these as an activity
+ timeline, using a "flame graph" style when there is nesting.
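+ As an editorial illustration (not part of the original header), one
+ frame's update work might be bracketed as follows, where psComms,
+ nFrame and UpdateParticles() are hypothetical application names:
+
+ @code
+ pplSendProcessingBegin(psComms, "particles", 9, nFrame);
+ UpdateParticles();
+ pplSendProcessingEnd(psComms);
+ @endcode
+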
+ See also the CPPLProcessingScoped helper class.
+****************************************************************************/
+int pplSendProcessingEnd(
+ struct SSPSCommsData * const psData ///< Context data
+);
+
+/*!**************************************************************************
+ @brief Create a library of remotely editable items
+****************************************************************************/
+int pplLibraryCreate(
+ struct SSPSCommsData * const psData, ///< Context data
+ const struct SSPSCommsLibraryItem * const pItems, ///< Editable items
+ const unsigned int nItemCount ///< Number of items
+);
+
+/*!**************************************************************************
+ @brief Query to see whether a library item has been edited, and also
+ retrieve the new data.
+****************************************************************************/
+int pplLibraryDirtyGetFirst(
+ struct SSPSCommsData * const psData, ///< Context data
+ unsigned int * const pnItem, ///< Item number
+ unsigned int * const pnNewDataLen, ///< New data length
+ const char **ppData ///< New data
+);
+
+/*!**************************************************************************
+ @brief Specify the number of custom counters and their definitions
+****************************************************************************/
+int pplCountersCreate(
+ struct SSPSCommsData * const psData, ///< Context data
+ const struct SSPSCommsCounterDef * const psCounterDefs, ///< Counter definitions
+ const unsigned int nCount ///< Number of counters
+);
+
+/*!**************************************************************************
+ @brief Send an update for all the custom counters. The
+ psCounterReadings array must be nCount long.
+****************************************************************************/
+int pplCountersUpdate(
+ struct SSPSCommsData * const psData, ///< Context data
+ const unsigned int * const psCounterReadings ///< Counter readings array
+);
+
+/*!**************************************************************************
+ @brief Force a cache flush.
+ @details Some implementations store outgoing data in a cache. If the data
+ rate is low, the actual transmission of the data can be significantly
+ delayed.
+ If it is necessary to flush the cache, the best results are
+ likely to be achieved by calling this function with a frequency
+ between once per frame and once per second. If data is sent
+ extremely infrequently, this function could be called once at
+ the end of each burst of data sending.
+****************************************************************************/
+int pplSendFlush(
+ struct SSPSCommsData * const psData ///< Context data
+);
+
+/*! @} */
+
+#ifdef __cplusplus
+}
+
+/*!**************************************************************************
+@class CPPLProcessingScoped
+@brief Helper class which will send a processing begin/end pair around
+ its scope. You would typically instantiate these at the top of
+ a function or after the opening curly-brace of a new scope
+ within a function.
+****************************************************************************/
+class CPPLProcessingScoped
+{
+protected:
+ SSPSCommsData * const m_psData; ///< Context data
+
+public:
+ CPPLProcessingScoped(
+ SSPSCommsData * const psData, ///< Context data
+ const char * const psString, ///< Name of the processing block
+ const unsigned int nLen, ///< String length
+ const unsigned int nFrame=0 ///< Iteration (or frame) number, by which processes can be grouped.
+ )
+ : m_psData(psData)
+ {
+ if (m_psData)
+ pplSendProcessingBegin(m_psData, psString, nLen, nFrame);
+ }
+
+ ~CPPLProcessingScoped()
+ {
+ if (m_psData)
+ pplSendProcessingEnd(m_psData);
+ }
+
+private:
+ CPPLProcessingScoped(const CPPLProcessingScoped&); // Prevent copy-construction
+ CPPLProcessingScoped& operator=(const CPPLProcessingScoped&); // Prevent assignment
+};
+#endif
+
+#endif /* _PVRSCOPECOMMS_H_ */
+
+/*****************************************************************************
+ End of file (PVRScopeComms.h)
+*****************************************************************************/
diff --git a/sources/pvrsdk/Include/PVRScopeStats.h b/sources/pvrsdk/Include/PVRScopeStats.h
new file mode 100755
index 00000000..a40a2c2b
--- /dev/null
+++ b/sources/pvrsdk/Include/PVRScopeStats.h
@@ -0,0 +1,282 @@
+/*!***********************************************************************
+
+ @file PVRScopeStats.h
+ @copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved.
+ @brief PVRScopeStats header file. @copybrief ScopeStats
+
+**************************************************************************/
+
+/*! @mainpage PVRScope
+
+ @section overview Library Overview
+*****************************
+PVRScope is a utility library which has two functionalities:
+ \li @ref ScopeStats is used to access the hardware performance counters in
+ PowerVR hardware via a driver library called PVRScopeServices.
+ \li @ref ScopeComms allows an application to send user defined information to
+ PVRTune via PVRPerfServer, both as counters and marks, or as editable data that can be
+ passed back to the application.
+
+PVRScope is supplied in the PVRScope.h header file. Your application also needs to link to the PVRScope
+library file, either a .lib, .so, or .dylib file, depending on your platform.
+
+For more information on PVRScope, see the PVRScope User Manual.
+
+ @subsection limitStats PVRScopeStats Limitations
+*****************************
+@copydetails ScopeStats
+
+ @subsection limitComms PVRScopeComms Limitations
+*****************************
+@copydetails ScopeComms
+*/
+
+#ifndef _PVRSCOPESTATS_H_
+#define _PVRSCOPESTATS_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*!
+ @addtogroup ScopeStats PVRScopeStats
+ @brief The PVRScopeStats functionality of PVRScope is used to access the hardware performance counters in
+ PowerVR hardware via a driver library called PVRScopeServices.
+ @details PVRScopeStats has the following limitations:
+ \li Only one instance of @ref ScopeStats may communicate with PVRScopeServices at any
+ given time. If a PVRScope enabled application attempts to communicate with
+ PVRScopeServices at the same time as another such application, or at the same time as
+ PVRPerfServer, conflicts can occur that may make performance data unreliable.
+ \li Performance counters can only be read on devices whose drivers have been built with
+ hardware profiling enabled. This configuration is the default in most production drivers due to negligible overhead.
+ \li Performance counters contain the average value of that counter since the last time the counter was interrogated.
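+
+ As an editorial illustration (not part of the original header), a
+ typical setup and teardown sequence is sketched below; error handling
+ and the periodic counter-sampling calls declared later in this header
+ are omitted:
+
+ @code
+ struct SPVRScopeImplData *psData;
+ if (PVRScopeInitialise(&psData) == ePVRScopeInitCodeOk)
+ {
+     unsigned int nCount;
+     struct SPVRScopeCounterDef *psCounters;
+     struct SPVRScopeCounterReading sReading;
+     PVRScopeGetCounters(psData, &nCount, &psCounters, &sReading);
+     // ...sample and read counter values each frame...
+     PVRScopeDeInitialise(&psData, &psCounters, &sReading);
+ }
+ @endcode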
+ @{ +*/ + +/**************************************************************************** +** Includes +****************************************************************************/ + +/**************************************************************************** +** Enums +****************************************************************************/ + +/*!************************************************************************** + @enum EPVRScopeInitCode + @brief PVRScope initialisation return codes. +****************************************************************************/ +enum EPVRScopeInitCode +{ + ePVRScopeInitCodeOk, ///< Initialisation OK + ePVRScopeInitCodeOutOfMem, ///< Out of memory + ePVRScopeInitCodeDriverSupportNotFound, ///< Driver support not found + ePVRScopeInitCodeDriverSupportInsufficient, ///< Driver support insufficient + ePVRScopeInitCodeDriverSupportInitFailed, ///< Driver support initialisation failed + ePVRScopeInitCodeDriverSupportQueryInfoFailed ///< Driver support information query failed +}; + +/*!************************************************************************** + @enum EPVRScopeStandardCounter + @brief Set of "standard" counters, just a few of the total list of + counters. +****************************************************************************/ +enum EPVRScopeStandardCounter +{ + ePVRScopeStandardCounter_FPS, ///< Total device FPS + ePVRScopeStandardCounter_Load_2D, ///< 2D core load + ePVRScopeStandardCounter_Load_Renderer, ///< Renderer core load + ePVRScopeStandardCounter_Load_Tiler, ///< Tiler core load + ePVRScopeStandardCounter_Load_Compute, ///< Compute core load + ePVRScopeStandardCounter_Load_Shader_Pixel, ///< Shader core load due to pixels + ePVRScopeStandardCounter_Load_Shader_Vertex, ///< Shader core load due to vertices + ePVRScopeStandardCounter_Load_Shader_Compute, ///< Shader core load due to compute +}; + +/*!************************************************************************** + @enum EPVRScopeEvent + @brief Set of PVRScope event types. +****************************************************************************/ +enum EPVRScopeEvent +{ + ePVRScopeEventComputeBegin, ///< Compute begin + ePVRScopeEventComputeEnd, ///< Compute end + ePVRScopeEventTABegin, ///< TA begin + ePVRScopeEventTAEnd, ///< TA end + ePVRScopeEvent3DBegin, ///< 3D begin + ePVRScopeEvent3DEnd, ///< 3D end + ePVRScopeEvent2DBegin, ///< 2D begin + ePVRScopeEvent2DEnd, ///< 2D end + ePVRScopeEventRTUBegin, ///< RTU begin + ePVRScopeEventRTUEnd, ///< RTU end + ePVRScopeEventSHGBegin, ///< SHG begin + ePVRScopeEventSHGEnd, ///< SHG end +}; + +/**************************************************************************** +** Structures +****************************************************************************/ + +// Internal implementation data +struct SPVRScopeImplData; + +/*!************************************************************************** + @struct SPVRScopeCounterDef + @brief Definition of a counter that PVRScope calculates. +****************************************************************************/ +struct SPVRScopeCounterDef +{ + const char *pszName; ///< Counter name, null terminated + int nBoolPercentage; ///< true if the counter is a percentage + unsigned int nGroup; ///< The counter group that the counter is in. +}; + +/*!************************************************************************** + @struct SPVRScopeCounterReading + @brief A set of return values resulting from querying the counter values. 
+****************************************************************************/
+struct SPVRScopeCounterReading
+{
+	float		*pfValueBuf;		///< Array of returned values
+	unsigned int	nValueCnt;		///< Number of values set in the above array
+	unsigned int	nReadingActiveGroup;	///< Group that was active when counters were sampled
+};
+
+/*!**************************************************************************
+ @struct        SPVRScopeGetInfo
+ @brief         A set of return values holding miscellaneous PVRScope information.
+****************************************************************************/
+struct SPVRScopeGetInfo
+{
+	unsigned int	nGroupMax;	///< Highest group number of any counter
+};
+
+/*!**************************************************************************
+@struct SPVRScopeTimingPacket
+@brief A start or end time.
+****************************************************************************/
+struct SPVRScopeTimingPacket
+{
+	enum EPVRScopeEvent	eEventType;	///< Event type
+	double			dTime;		///< Event time (seconds)
+	unsigned int		nPID;		///< Event PID
+};
+
+/****************************************************************************
+** Declarations
+****************************************************************************/
+
+const char *PVRScopeGetDescription();	///< Query the PVRScope library description
+
+
+/*!**************************************************************************
+ @brief         Initialise @ref ScopeStats, to access the HW performance
+                counters in PowerVR.
+ @return        EPVRScopeInitCodeOk on success.
+****************************************************************************/
+enum EPVRScopeInitCode PVRScopeInitialise(
+	struct SPVRScopeImplData **ppsData	///< Context data
+);
+
+/*!**************************************************************************
+ @brief         Shutdown or de-initialise @ref ScopeStats and free the
+                allocated memory.
+***************************************************************************/
+void PVRScopeDeInitialise(
+	struct SPVRScopeImplData	**ppsData,		///< Context data
+	struct SPVRScopeCounterDef	**ppsCounters,		///< Array of counters
+	struct SPVRScopeCounterReading	* const psReading	///< Results memory area
+);
+
+/*!**************************************************************************
+ @brief         Query for @ref ScopeStats information. This function should
+                only be called during initialisation.
+****************************************************************************/
+void PVRScopeGetInfo(
+	struct SPVRScopeImplData	* const psData,	///< Context data
+	struct SPVRScopeGetInfo		* const psInfo	///< Returned information
+);
+
+/*!**************************************************************************
+ @brief         Query for the list of @ref ScopeStats HW performance counters,
+                and allocate memory in which the counter values will be
+                received. This function should only be called during
+                initialisation.
+****************************************************************************/
+int PVRScopeGetCounters(
+	struct SPVRScopeImplData	* const psData,		///< Context data
+	unsigned int			* const pnCount,	///< Returned number of counters
+	struct SPVRScopeCounterDef	**ppsCounters,		///< Returned counter array
+	struct SPVRScopeCounterReading	* const psReading	///< Pass a pointer to the structure to be initialised
+);
+
+/*!**************************************************************************
+ @brief         Helper function to query for the counter index of one of a set
+                of "standard" counters. The index will be into the results
+                array (from PVRScopeReadCounters), not into the counter array
+                (from PVRScopeGetCounters).
+****************************************************************************/
+unsigned int PVRScopeFindStandardCounter(
+	const unsigned int			nCount,		///< Returned number of counters, from PVRScopeGetCounters
+	const struct SPVRScopeCounterDef	* const psCounters,	///< Returned counter array, from PVRScopeGetCounters
+	const unsigned int			nGroup,		///< Group that will be active
+	enum EPVRScopeStandardCounter		eCounter	///< Counter to be found
+);
+
+/*!**************************************************************************
+ @brief         Call regularly to allow PVRScope to track the latest hardware
+                performance data. If psReading is not NULL, PVRScope will also
+                calculate and return counter values to the application.
+ @details       Returns 0 if no data is currently available; psReading will
+                not be filled with valid data. Try again later.
+                This function should be called "regularly"; two use cases are
+                considered:
+                1) A 3D application rendering a performance HUD (e.g. the on-
+                screen graphs in PVRScopeExample). Such an application should
+                call this function at least once per frame in order to gather
+                new counter values. If slower HUD updates are desired,
+                psReading may be NULL until a new reading is required, in
+                order to smooth out values across longer time periods, perhaps
+                a number of frames.
+                2) A standalone performance monitor (e.g. PVRMonitor) or
+                logging application. Such an application should idle and
+                regularly wake up to call this function; suggested rates are
+                100Hz (10ms delays) or 200Hz (5ms delays). If counter updates
+                are required at a lower rate, set psReading to NULL on all
+                calls except when new counter values are desired.
+****************************************************************************/
+int PVRScopeReadCounters(
+	struct SPVRScopeImplData	* const psData,		///< Context data
+	struct SPVRScopeCounterReading	* const psReading	///< Returned data will be filled into the pointed-to structure
+);
+
+/*!**************************************************************************
+ @brief         Request a new HW counter group.
+ @details       Changing the active HW group: the API is designed to allow the
+                HW group to be changed immediately after gathering a reading.
+****************************************************************************/
+void PVRScopeSetGroup(
+	struct SPVRScopeImplData	* const psData,	///< Context data
+	const unsigned int		nGroup		///< New group
+);
+
+/*!**************************************************************************
+ @brief         Retrieve the timing data packets.
+ @details       This function can be called periodically if you wish to access
+                the start and end times of tasks running on the GPU.
+                The first call to this function enables the feature; from then
+                on, data will be stored. If you wish to call this function
+                once only at the end of a test run, call it once also prior to
+                the test run.
+****************************************************************************/
+const struct SPVRScopeTimingPacket *PVRScopeReadTimingData(
+	struct SPVRScopeImplData	* const psData,	///< Context data
+	unsigned int			* const pnCount	///< Returned number of packets
+);
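+
+/*!**************************************************************************
+ Example: a minimal @ref ScopeStats polling loop. This is an illustrative
+ sketch only; real code should check every return value and honour the
+ "only during initialisation" notes above. appIsRunning is a placeholder
+ loop condition supplied by the application.
+ @code
+	struct SPVRScopeImplData	*psData = NULL;
+	struct SPVRScopeCounterDef	*psCounters = NULL;
+	struct SPVRScopeCounterReading	sReading;
+	unsigned int			nCounterCount = 0;
+
+	if (PVRScopeInitialise(&psData) == ePVRScopeInitCodeOk)
+	{
+		PVRScopeGetCounters(psData, &nCounterCount, &psCounters, &sReading);
+
+		while (appIsRunning)	// e.g. once per frame, or ~100Hz in a monitor
+		{
+			if (PVRScopeReadCounters(psData, &sReading))
+			{
+				// sReading.pfValueBuf[0 .. sReading.nValueCnt - 1]
+				// now holds the latest counter values.
+			}
+		}
+
+		PVRScopeDeInitialise(&psData, &psCounters, &sReading);
+	}
+ @endcode
+****************************************************************************/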
+/*! @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _PVRSCOPESTATS_H_ */
+
+/*****************************************************************************
+ End of file (PVRScopeStats.h)
+*****************************************************************************/
diff --git a/sources/pvrsdk/Include/pvr_openlib.h b/sources/pvrsdk/Include/pvr_openlib.h
new file mode 100755
index 00000000..2cc08c9f
--- /dev/null
+++ b/sources/pvrsdk/Include/pvr_openlib.h
@@ -0,0 +1,413 @@
+#pragma once
+#include <string>
+#include <string.h>
+#include <stdio.h>
+
+#if defined(__linux__) || defined(__ANDROID__) || defined(__QNXNTO__) || defined(__APPLE__)
+#include <dlfcn.h>
+#include <unistd.h>
+#endif
+
+#if defined(_WIN32)
+#define WIN32_LEAN_AND_MEAN
+#define NOMINMAX
+#include <windows.h>
+#include <stdio.h>
+#include <stdarg.h>
+#endif
+
+#if defined(_PVR_LOG_H)
+#define Log_Info(...) ((void)Log(LogLevel::Information, __VA_ARGS__))
+#define Log_Warning(...) ((void)Log(LogLevel::Warning, __VA_ARGS__))
+#define Log_Error(...) ((void)Log(LogLevel::Error, __VA_ARGS__))
+#else
+#if defined(__ANDROID__)
+#define _ANDROID 1
+#include <android/log.h>
+#define Log_Info(...) ((void)__android_log_print(ANDROID_LOG_INFO, "com.imgtec.vk", __VA_ARGS__))
+#define Log_Warning(...) ((void)__android_log_print(ANDROID_LOG_WARN, "com.imgtec.vk", __VA_ARGS__))
+#define Log_Error(...) ((void)__android_log_print(ANDROID_LOG_ERROR, "com.imgtec.vk", __VA_ARGS__))
+#elif defined(_WIN32)
+static const char* procAddressMessageTypes[] = {
+	"INFORMATION: ",
+	"WARNING: ",
+	"ERROR: ",
+};
+
+inline void logOutput(const int logLevel, const char* const formatString, va_list argumentList)
+{
+	static char buffer[4096];
+	va_list tempList;
+	memset(buffer, 0, sizeof(buffer));
+
+#if (defined _MSC_VER) // Pre VS2013
+	tempList = argumentList;
+#else
+	va_copy(tempList, argumentList);
+#endif
+
+	vsnprintf(buffer, 4095, formatString, argumentList);
+
+#if defined(_WIN32) && !defined(_CONSOLE)
+	if (IsDebuggerPresent())
+	{
+		OutputDebugString(procAddressMessageTypes[logLevel]);
+		OutputDebugString(buffer);
+		OutputDebugString("\n");
+	}
+#else
+	printf("%s", procAddressMessageTypes[logLevel]);
+	vprintf(formatString, tempList);
+	printf("\n");
+#endif
+}
+
+inline void Log_Info(const char* const formatString, ...)
+{
+	va_list argumentList;
+	va_start(argumentList, formatString);
+	logOutput(0, formatString, argumentList);
+	va_end(argumentList);
+}
+inline void Log_Warning(const char* const formatString, ...)
+{
+	va_list argumentList;
+	va_start(argumentList, formatString);
+	logOutput(1, formatString, argumentList);
+	va_end(argumentList);
+}
+inline void Log_Error(const char* const formatString, ...)
+{
+	va_list argumentList;
+	va_start(argumentList, formatString);
+	logOutput(2, formatString, argumentList);
+	va_end(argumentList);
+}
+#else
+#define Log_Info(...) ((void)printf(__VA_ARGS__))
+#define Log_Warning(...) ((void)fprintf(stderr, __VA_ARGS__))
+#define Log_Error(...) ((void)fprintf(stderr, __VA_ARGS__))
+#endif
+#endif
+
+/** ABSTRACT THE PLATFORM SPECIFIC LIBRARY LOADING FUNCTIONS **/
+#if defined(_WIN32)
+
+namespace pvr {
+namespace lib {
+typedef HINSTANCE LIBTYPE;
+}
+} // namespace pvr
+
+namespace pvr {
+namespace internal {
+inline pvr::lib::LIBTYPE OpenLibrary(const char* pszPath)
+{
+#if defined(_UNICODE) // UNDER_CE
+	if (!pszPath)
+	{
+		Log_Error("Path must be valid");
+		return nullptr;
+	}
+
+	// Get full path of executable
+	wchar_t pszPathW[_MAX_PATH];
+
+	// Convert char to wchar
+	DWORD i = 0;
+
+	for (i = 0; i <= strlen(pszPath); ++i)
+	{
+		pszPathW[i] = static_cast<wchar_t>(pszPath[i]);
+	}
+
+	pszPathW[i] = '\0';
+	pvr::lib::LIBTYPE hostLib = LoadLibraryW(pszPathW);
+#else
+	pvr::lib::LIBTYPE hostLib = LoadLibraryA(pszPath);
+#endif
+	if (!hostLib)
+	{
+		Log_Error("Could not load host library '%s'", pszPath);
+	}
+	else
+	{
+		Log_Info("Host library '%s' loaded", pszPath);
+	}
+	return hostLib;
+}
+
+inline void CloseLibrary(pvr::lib::LIBTYPE hostLib)
+{
+	FreeLibrary(hostLib);
+}
+
+inline void* getLibraryFunction(pvr::lib::LIBTYPE hostLib, const char* pszName)
+{
+	if (hostLib)
+	{
+#if defined(UNDER_CE)
+		return win32::GetProcAddressA(hostLib, pszName);
+#else
+		return GetProcAddress(hostLib, pszName);
+#endif
+	}
+	return nullptr;
+}
+} // namespace internal
+} // namespace pvr
+#elif defined(__linux__) || defined(__QNXNTO__) || defined(__APPLE__)
+#if defined(__APPLE__)
+#if !TARGET_OS_IPHONE
+#include "CoreFoundation/CoreFoundation.h"
+static const char* g_pszEnvVar = "PVRTRACE_LIB_PATH";
+inline void* OpenFramework(const char* pszPath)
+{
+	CFBundleRef mainBundle = CFBundleGetMainBundle();
+	CFURLRef resourceURL = CFBundleCopyPrivateFrameworksURL(mainBundle);
+	char path[PATH_MAX];
+	if (!CFURLGetFileSystemRepresentation(resourceURL, TRUE, (UInt8*)path, PATH_MAX))
+	{
+		return 0;
+	}
+	CFRelease(resourceURL);
+
+	{
+		void* lib = NULL;
+
+		// --- Set a global environment variable to point to this path (for VFrame usage)
+		const char* slash = strrchr(pszPath, '/');
+		if (slash)
+		{
+			char szPath[FILENAME_MAX];
+			memset(szPath, 0, sizeof(szPath));
+			strncpy(szPath, pszPath, slash - pszPath);
+			setenv(g_pszEnvVar, szPath, 1);
+		}
+		else
+		{
+			// Use the current bundle path
+			std::string framework = std::string(path) + "/../Frameworks/";
+			setenv(g_pszEnvVar, framework.c_str(), 1);
+		}
+
+		// --- Make a temp symlink
+		char szTempFile[FILENAME_MAX];
+		memset(szTempFile, 0, sizeof(szTempFile));
+
+		char tmpdir[PATH_MAX];
+		size_t n = confstr(_CS_DARWIN_USER_TEMP_DIR, tmpdir, sizeof(tmpdir));
+		if ((n <= 0) || (n >= sizeof(tmpdir)))
+		{
+			strlcpy(tmpdir, getenv("TMPDIR"), sizeof(tmpdir));
+		}
+
+		strcat(szTempFile, tmpdir);
+		strcat(szTempFile, "tmp.XXXXXX");
+
+		if (mkstemp(szTempFile))
+		{
+			if (symlink(pszPath, szTempFile) == 0)
+			{
+				lib = dlopen(szTempFile, RTLD_LAZY | RTLD_GLOBAL);
+				remove(szTempFile);
+			}
+		}
+
+		// --- Can't find the lib? Check the application framework folder instead.
+		if (!lib)
+		{
+			std::string framework = std::string(path) + std::string("/") + pszPath;
+			lib = dlopen(framework.c_str(), RTLD_LAZY | RTLD_GLOBAL);
+
+			if (!lib)
+			{
+				const char* err = dlerror();
+				if (err)
+				{
+					// NSLog(@"dlopen failed with error: %s => %@", err, framework);
+				}
+			}
+		}
+
+		return lib;
+	}
+}
+#endif
+#endif
+
+namespace pvr {
+namespace lib {
+typedef void* LIBTYPE;
+}
+} // namespace pvr
+
+namespace pvr {
+namespace internal {
+#if defined(__APPLE__) && !TARGET_OS_IPHONE
+inline pvr::lib::LIBTYPE OpenLibrary(const char* pszPath)
+{
+	// An Objective-C function that uses dlopen
+	pvr::lib::LIBTYPE hostLib = OpenFramework(pszPath);
+	if (!hostLib)
+	{
+		Log_Error("Could not load host library '%s'", pszPath);
+	}
+	else
+	{
+		Log_Info("Host library '%s' loaded", pszPath);
+	}
+	return hostLib;
+}
+#else
+namespace {
+inline pvr::lib::LIBTYPE OpenLibrary_Helper(const char* pszPath)
+{
+	pvr::lib::LIBTYPE hostLib = dlopen(pszPath, RTLD_LAZY | RTLD_GLOBAL);
+	if (!hostLib)
+	{
+		char pathMod[256];
+		strcpy(pathMod, "./");
+		strcat(pathMod, pszPath);
+
+		hostLib = dlopen(pathMod, RTLD_LAZY | RTLD_GLOBAL);
+	}
+	return hostLib;
+}
+} // namespace
+
+inline pvr::lib::LIBTYPE OpenLibrary(const char* pszPath)
+{
+	size_t start = 0;
+	std::string tmp;
+	std::string LibPath(pszPath);
+	pvr::lib::LIBTYPE hostLib = nullptr;
+
+	while (!hostLib)
+	{
+		size_t end = LibPath.find_first_of(';', start);
+
+		if (end == std::string::npos)
+		{
+			tmp = LibPath.substr(start, LibPath.length() - start);
+		}
+		else
+		{
+			tmp = LibPath.substr(start, end - start);
+		}
+
+		if (!tmp.empty())
+		{
+			hostLib = OpenLibrary_Helper(tmp.c_str());
+
+			if (!hostLib)
+			{
+				// Remove the last character in case a new line character snuck in
+				tmp = tmp.substr(0, tmp.size() - 1);
+				hostLib = OpenLibrary_Helper(tmp.c_str());
+			}
+		}
+
+		if (end == std::string::npos)
+		{
+			break;
+		}
+
+		start = end + 1;
+	}
+	if (!hostLib)
+	{
+		const char* err = dlerror();
+		if (err)
+		{
+			Log_Error("Could not load host library '%s'", pszPath);
+			Log_Error("dlopen failed with error '%s'", err);
+		}
+	}
+	else
+	{
+		Log_Info("Host library '%s' loaded", pszPath);
+	}
+
+	return hostLib;
+}
+
+#endif
+
+inline void CloseLibrary(pvr::lib::LIBTYPE hostLib)
+{
+	dlclose(hostLib);
+}
+
+inline void* getLibraryFunction(pvr::lib::LIBTYPE hostLib, const char* pszName)
+{
+	if (hostLib)
+	{
+		void* func = dlsym(hostLib, pszName);
+		return func;
+	}
+	return nullptr;
+}
+} // namespace internal
+} // namespace pvr
+#elif defined(ANDROID)
+
+namespace pvr {
+namespace internal {
+inline pvr::lib::LIBTYPE OpenLibrary(const char* pszPath)
+{
+	pvr::lib::LIBTYPE hostLib = dlopen(pszPath, RTLD_LAZY | RTLD_GLOBAL);
+	if (!hostLib)
+	{
+		const char* err = dlerror();
+		if (err)
+		{
+			Log_Error("Could not load host library '%s'", pszPath);
+			Log_Error("dlopen failed with error '%s'", err);
+		}
+	}
+	else
+	{
+		Log_Info("Host library '%s' loaded", pszPath);
+	}
+	return hostLib;
+}
+
+inline void CloseLibrary(pvr::lib::LIBTYPE hostLib)
+{
+	dlclose(hostLib);
+}
+
+inline void* getLibraryFunction(pvr::lib::LIBTYPE hostLib, const char* pszName)
+{
+	void* fnct = dlsym(hostLib, pszName);
+	return fnct;
+}
+} // namespace internal
+} // namespace pvr
+#else
+#error Unsupported platform
+#endif
+
+namespace pvr {
+namespace lib {
+static pvr::lib::LIBTYPE hostLib = nullptr;
+static std::string libraryName;
+
+static inline pvr::lib::LIBTYPE openlib(const std::string& libName)
+{
+	libraryName = libName;
+	hostLib = pvr::internal::OpenLibrary(libName.c_str());
+	return hostLib;
+}
+
+static inline void closelib(pvr::lib::LIBTYPE lib)
+{
+	pvr::internal::CloseLibrary(lib);
+}
+
+template<typename PtrType_>
+inline PtrType_ getLibFunction(pvr::lib::LIBTYPE hostLib, const std::string& functionName)
+{
+	return reinterpret_cast<PtrType_>(pvr::internal::getLibraryFunction(hostLib, functionName.c_str()));
+}
+
+template<typename PtrType_>
+inline PtrType_ getLibFunctionChecked(pvr::lib::LIBTYPE hostLib, const std::string& functionName)
+{
+	PtrType_ func = getLibFunction<PtrType_>(hostLib, functionName);
+	if (!func)
+	{
+		Log_Error("Failed to load function [%s] from library '%s'.\n", functionName.c_str(), libraryName.c_str());
+	}
+	return func;
+}
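+
+// Example usage (illustrative sketch only; "libfoo.so" and the PFN_fooCreate
+// typedef below are hypothetical placeholders, not part of this header):
+//
+//   typedef int (*PFN_fooCreate)(void);
+//   pvr::lib::LIBTYPE lib = pvr::lib::openlib("libfoo.so");
+//   if (lib)
+//   {
+//       PFN_fooCreate fooCreate = pvr::lib::getLibFunctionChecked<PFN_fooCreate>(lib, "fooCreate");
+//       if (fooCreate) { fooCreate(); }
+//       pvr::lib::closelib(lib);
+//   }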
+} // namespace lib
+} // namespace pvr
diff --git a/sources/pvrsdk/Include/sdkver.h b/sources/pvrsdk/Include/sdkver.h
new file mode 100755
index 00000000..92e0ae46
--- /dev/null
+++ b/sources/pvrsdk/Include/sdkver.h
@@ -0,0 +1,40 @@
+/*!***************************************************************************
+ @File			sdkver.h
+
+ @Title			Version numbers and strings.
+
+ @Date			08/11/2011
+
+ @Copyright		Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+
+ @Platform		Independent
+
+ @Description	Version numbers and strings for SDK components.
+
+*****************************************************************************/
+
+#ifndef __SDKVER_H_
+#define __SDKVER_H_
+
+/*!***************************************************************************
+	Defines
+*****************************************************************************/
+
+#define PVRSDK_VERSION			"5.1"
+#define PVRSDK_BUILD			"18.1@5086772"
+#define PVRVERSION_MAJ			"18"
+#define PVRVERSION_MIN			"1"
+#define PVRVERSION_BRANCH		"181"
+#define PVRVERSION_BRANCH_DEC		"18.1"
+#define PVRVERSION_BRANCH_NAME		"REL/18.1"
+#define PVRVERSION_BUILD		"5086772"
+#define PVRVERSION_BUILD_HI		"508"
+#define PVRVERSION_BUILD_LO		"6772"
+
+#define PVRSDK_COPYRIGHT_TXT		"Copyright (c) Imagination Technologies Ltd. All Rights Reserved."
+
+#endif /* __SDKVER_H_ */
+
+/*****************************************************************************
+ End of file (sdkver.h)
+*****************************************************************************/
diff --git a/sources/pvrsdk/Include/vk_bindings.h b/sources/pvrsdk/Include/vk_bindings.h
new file mode 100755
index 00000000..f9f42dad
--- /dev/null
+++ b/sources/pvrsdk/Include/vk_bindings.h
@@ -0,0 +1,515 @@
+// *** THIS FILE IS GENERATED - DO NOT EDIT ***
+// See vk_bindings_generator.py for modifications
+
+/*
+\brief Vulkan function pointers.
+\file vk_bindings.h
+\author PowerVR by Imagination, Developer Technology Team
+\copyright Copyright (c) Imagination Technologies Limited.
+*/
+
+#pragma once
+#ifndef VK_PROTOTYPES
+#define VK_NO_PROTOTYPES 1
+#endif
+#include "vulkan/vulkan.h"
+
+typedef PFN_vkVoidFunction (VKAPI_PTR *PFN_GetPhysicalDeviceProcAddr)(VkInstance instance, const char* pName);
+
+// Vulkan function pointer table
+typedef struct VkBindings_ {
+
+	// ---- Before using Vulkan, an application must initialize it by loading the Vulkan commands and creating a VkInstance object.
+
+	// ---- This function table provides the functions necessary for achieving this.
+ PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr; + PFN_vkEnumerateInstanceExtensionProperties vkEnumerateInstanceExtensionProperties; + PFN_vkEnumerateInstanceLayerProperties vkEnumerateInstanceLayerProperties; + PFN_vkCreateInstance vkCreateInstance; + PFN_vkEnumerateInstanceVersion vkEnumerateInstanceVersion; +} VkBindings; + +// Instance function pointers +typedef struct VkInstanceBindings_ { + // Manually add in vkGetPhysicalDeviceProcAddr entry + PFN_GetPhysicalDeviceProcAddr vkGetPhysicalDeviceProcAddr; + + // ---- Core 1_0 commands + PFN_vkCreateInstance vkCreateInstance; + PFN_vkDestroyInstance vkDestroyInstance; + PFN_vkEnumeratePhysicalDevices vkEnumeratePhysicalDevices; + PFN_vkGetPhysicalDeviceFeatures vkGetPhysicalDeviceFeatures; + PFN_vkGetPhysicalDeviceFormatProperties vkGetPhysicalDeviceFormatProperties; + PFN_vkGetPhysicalDeviceImageFormatProperties vkGetPhysicalDeviceImageFormatProperties; + PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties; + PFN_vkGetPhysicalDeviceQueueFamilyProperties vkGetPhysicalDeviceQueueFamilyProperties; + PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties; + PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr; + PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr; + PFN_vkCreateDevice vkCreateDevice; + PFN_vkEnumerateInstanceExtensionProperties vkEnumerateInstanceExtensionProperties; + PFN_vkEnumerateDeviceExtensionProperties vkEnumerateDeviceExtensionProperties; + PFN_vkEnumerateInstanceLayerProperties vkEnumerateInstanceLayerProperties; + PFN_vkEnumerateDeviceLayerProperties vkEnumerateDeviceLayerProperties; + PFN_vkGetPhysicalDeviceSparseImageFormatProperties vkGetPhysicalDeviceSparseImageFormatProperties; + + // ---- Core 1_1 commands + PFN_vkEnumerateInstanceVersion vkEnumerateInstanceVersion; + PFN_vkEnumeratePhysicalDeviceGroups vkEnumeratePhysicalDeviceGroups; + PFN_vkGetPhysicalDeviceFeatures2 vkGetPhysicalDeviceFeatures2; + PFN_vkGetPhysicalDeviceProperties2 vkGetPhysicalDeviceProperties2; + PFN_vkGetPhysicalDeviceFormatProperties2 vkGetPhysicalDeviceFormatProperties2; + PFN_vkGetPhysicalDeviceImageFormatProperties2 vkGetPhysicalDeviceImageFormatProperties2; + PFN_vkGetPhysicalDeviceQueueFamilyProperties2 vkGetPhysicalDeviceQueueFamilyProperties2; + PFN_vkGetPhysicalDeviceMemoryProperties2 vkGetPhysicalDeviceMemoryProperties2; + PFN_vkGetPhysicalDeviceSparseImageFormatProperties2 vkGetPhysicalDeviceSparseImageFormatProperties2; + PFN_vkGetPhysicalDeviceExternalBufferProperties vkGetPhysicalDeviceExternalBufferProperties; + PFN_vkGetPhysicalDeviceExternalFenceProperties vkGetPhysicalDeviceExternalFenceProperties; + PFN_vkGetPhysicalDeviceExternalSemaphoreProperties vkGetPhysicalDeviceExternalSemaphoreProperties; + + // ---- VK_KHR_surface extension commands + PFN_vkDestroySurfaceKHR vkDestroySurfaceKHR; + PFN_vkGetPhysicalDeviceSurfaceSupportKHR vkGetPhysicalDeviceSurfaceSupportKHR; + PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR vkGetPhysicalDeviceSurfaceCapabilitiesKHR; + PFN_vkGetPhysicalDeviceSurfaceFormatsKHR vkGetPhysicalDeviceSurfaceFormatsKHR; + PFN_vkGetPhysicalDeviceSurfacePresentModesKHR vkGetPhysicalDeviceSurfacePresentModesKHR; + + // ---- VK_KHR_swapchain extension commands + PFN_vkGetPhysicalDevicePresentRectanglesKHR vkGetPhysicalDevicePresentRectanglesKHR; + + // ---- VK_KHR_display extension commands + PFN_vkGetPhysicalDeviceDisplayPropertiesKHR vkGetPhysicalDeviceDisplayPropertiesKHR; + PFN_vkGetPhysicalDeviceDisplayPlanePropertiesKHR vkGetPhysicalDeviceDisplayPlanePropertiesKHR; + 
PFN_vkGetDisplayPlaneSupportedDisplaysKHR vkGetDisplayPlaneSupportedDisplaysKHR; + PFN_vkGetDisplayModePropertiesKHR vkGetDisplayModePropertiesKHR; + PFN_vkCreateDisplayModeKHR vkCreateDisplayModeKHR; + PFN_vkGetDisplayPlaneCapabilitiesKHR vkGetDisplayPlaneCapabilitiesKHR; + PFN_vkCreateDisplayPlaneSurfaceKHR vkCreateDisplayPlaneSurfaceKHR; + + // ---- VK_KHR_xlib_surface extension commands +#ifdef VK_USE_PLATFORM_XLIB_KHR + PFN_vkCreateXlibSurfaceKHR vkCreateXlibSurfaceKHR; +#endif // VK_USE_PLATFORM_XLIB_KHR +#ifdef VK_USE_PLATFORM_XLIB_KHR + PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR vkGetPhysicalDeviceXlibPresentationSupportKHR; +#endif // VK_USE_PLATFORM_XLIB_KHR + + // ---- VK_KHR_xcb_surface extension commands +#ifdef VK_USE_PLATFORM_XCB_KHR + PFN_vkCreateXcbSurfaceKHR vkCreateXcbSurfaceKHR; +#endif // VK_USE_PLATFORM_XCB_KHR +#ifdef VK_USE_PLATFORM_XCB_KHR + PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR vkGetPhysicalDeviceXcbPresentationSupportKHR; +#endif // VK_USE_PLATFORM_XCB_KHR + + // ---- VK_KHR_wayland_surface extension commands +#ifdef VK_USE_PLATFORM_WAYLAND_KHR + PFN_vkCreateWaylandSurfaceKHR vkCreateWaylandSurfaceKHR; +#endif // VK_USE_PLATFORM_WAYLAND_KHR +#ifdef VK_USE_PLATFORM_WAYLAND_KHR + PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR vkGetPhysicalDeviceWaylandPresentationSupportKHR; +#endif // VK_USE_PLATFORM_WAYLAND_KHR + + // ---- VK_KHR_mir_surface extension commands +#ifdef VK_USE_PLATFORM_MIR_KHR + PFN_vkCreateMirSurfaceKHR vkCreateMirSurfaceKHR; +#endif // VK_USE_PLATFORM_MIR_KHR +#ifdef VK_USE_PLATFORM_MIR_KHR + PFN_vkGetPhysicalDeviceMirPresentationSupportKHR vkGetPhysicalDeviceMirPresentationSupportKHR; +#endif // VK_USE_PLATFORM_MIR_KHR + + // ---- VK_KHR_android_surface extension commands +#ifdef VK_USE_PLATFORM_ANDROID_KHR + PFN_vkCreateAndroidSurfaceKHR vkCreateAndroidSurfaceKHR; +#endif // VK_USE_PLATFORM_ANDROID_KHR + + // ---- VK_KHR_win32_surface extension commands +#ifdef VK_USE_PLATFORM_WIN32_KHR + PFN_vkCreateWin32SurfaceKHR vkCreateWin32SurfaceKHR; +#endif // VK_USE_PLATFORM_WIN32_KHR +#ifdef VK_USE_PLATFORM_WIN32_KHR + PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR vkGetPhysicalDeviceWin32PresentationSupportKHR; +#endif // VK_USE_PLATFORM_WIN32_KHR + + // ---- VK_KHR_get_physical_device_properties2 extension commands + PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR; + PFN_vkGetPhysicalDeviceProperties2KHR vkGetPhysicalDeviceProperties2KHR; + PFN_vkGetPhysicalDeviceFormatProperties2KHR vkGetPhysicalDeviceFormatProperties2KHR; + PFN_vkGetPhysicalDeviceImageFormatProperties2KHR vkGetPhysicalDeviceImageFormatProperties2KHR; + PFN_vkGetPhysicalDeviceQueueFamilyProperties2KHR vkGetPhysicalDeviceQueueFamilyProperties2KHR; + PFN_vkGetPhysicalDeviceMemoryProperties2KHR vkGetPhysicalDeviceMemoryProperties2KHR; + PFN_vkGetPhysicalDeviceSparseImageFormatProperties2KHR vkGetPhysicalDeviceSparseImageFormatProperties2KHR; + + // ---- VK_KHR_device_group_creation extension commands + PFN_vkEnumeratePhysicalDeviceGroupsKHR vkEnumeratePhysicalDeviceGroupsKHR; + + // ---- VK_KHR_external_memory_capabilities extension commands + PFN_vkGetPhysicalDeviceExternalBufferPropertiesKHR vkGetPhysicalDeviceExternalBufferPropertiesKHR; + + // ---- VK_KHR_external_semaphore_capabilities extension commands + PFN_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR vkGetPhysicalDeviceExternalSemaphorePropertiesKHR; + + // ---- VK_KHR_external_fence_capabilities extension commands + PFN_vkGetPhysicalDeviceExternalFencePropertiesKHR 
vkGetPhysicalDeviceExternalFencePropertiesKHR; + + // ---- VK_KHR_get_surface_capabilities2 extension commands + PFN_vkGetPhysicalDeviceSurfaceCapabilities2KHR vkGetPhysicalDeviceSurfaceCapabilities2KHR; + PFN_vkGetPhysicalDeviceSurfaceFormats2KHR vkGetPhysicalDeviceSurfaceFormats2KHR; + + // ---- VK_EXT_debug_report extension commands + PFN_vkCreateDebugReportCallbackEXT vkCreateDebugReportCallbackEXT; + PFN_vkDestroyDebugReportCallbackEXT vkDestroyDebugReportCallbackEXT; + PFN_vkDebugReportMessageEXT vkDebugReportMessageEXT; + + // ---- VK_NV_external_memory_capabilities extension commands + PFN_vkGetPhysicalDeviceExternalImageFormatPropertiesNV vkGetPhysicalDeviceExternalImageFormatPropertiesNV; + + // ---- VK_NN_vi_surface extension commands +#ifdef VK_USE_PLATFORM_VI_NN + PFN_vkCreateViSurfaceNN vkCreateViSurfaceNN; +#endif // VK_USE_PLATFORM_VI_NN + + // ---- VK_NVX_device_generated_commands extension commands + PFN_vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX; + + // ---- VK_EXT_direct_mode_display extension commands + PFN_vkReleaseDisplayEXT vkReleaseDisplayEXT; + + // ---- VK_EXT_acquire_xlib_display extension commands +#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT + PFN_vkAcquireXlibDisplayEXT vkAcquireXlibDisplayEXT; +#endif // VK_USE_PLATFORM_XLIB_XRANDR_EXT +#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT + PFN_vkGetRandROutputDisplayEXT vkGetRandROutputDisplayEXT; +#endif // VK_USE_PLATFORM_XLIB_XRANDR_EXT + + // ---- VK_EXT_display_surface_counter extension commands + PFN_vkGetPhysicalDeviceSurfaceCapabilities2EXT vkGetPhysicalDeviceSurfaceCapabilities2EXT; + + // ---- VK_MVK_ios_surface extension commands +#ifdef VK_USE_PLATFORM_IOS_MVK + PFN_vkCreateIOSSurfaceMVK vkCreateIOSSurfaceMVK; +#endif // VK_USE_PLATFORM_IOS_MVK + + // ---- VK_MVK_macos_surface extension commands +#ifdef VK_USE_PLATFORM_MACOS_MVK + PFN_vkCreateMacOSSurfaceMVK vkCreateMacOSSurfaceMVK; +#endif // VK_USE_PLATFORM_MACOS_MVK + + // ---- VK_EXT_debug_utils extension commands + PFN_vkSetDebugUtilsObjectNameEXT vkSetDebugUtilsObjectNameEXT; + PFN_vkSetDebugUtilsObjectTagEXT vkSetDebugUtilsObjectTagEXT; + PFN_vkQueueBeginDebugUtilsLabelEXT vkQueueBeginDebugUtilsLabelEXT; + PFN_vkQueueEndDebugUtilsLabelEXT vkQueueEndDebugUtilsLabelEXT; + PFN_vkQueueInsertDebugUtilsLabelEXT vkQueueInsertDebugUtilsLabelEXT; + PFN_vkCmdBeginDebugUtilsLabelEXT vkCmdBeginDebugUtilsLabelEXT; + PFN_vkCmdEndDebugUtilsLabelEXT vkCmdEndDebugUtilsLabelEXT; + PFN_vkCmdInsertDebugUtilsLabelEXT vkCmdInsertDebugUtilsLabelEXT; + PFN_vkCreateDebugUtilsMessengerEXT vkCreateDebugUtilsMessengerEXT; + PFN_vkDestroyDebugUtilsMessengerEXT vkDestroyDebugUtilsMessengerEXT; + PFN_vkSubmitDebugUtilsMessageEXT vkSubmitDebugUtilsMessageEXT; + + // ---- VK_EXT_sample_locations extension commands + PFN_vkGetPhysicalDeviceMultisamplePropertiesEXT vkGetPhysicalDeviceMultisamplePropertiesEXT; +} VkInstanceBindings; +// Device function pointers +typedef struct VkDeviceBindings_ { + + // ---- Core 1_0 commands + PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr; + PFN_vkDestroyDevice vkDestroyDevice; + PFN_vkGetDeviceQueue vkGetDeviceQueue; + PFN_vkQueueSubmit vkQueueSubmit; + PFN_vkQueueWaitIdle vkQueueWaitIdle; + PFN_vkDeviceWaitIdle vkDeviceWaitIdle; + PFN_vkAllocateMemory vkAllocateMemory; + PFN_vkFreeMemory vkFreeMemory; + PFN_vkMapMemory vkMapMemory; + PFN_vkUnmapMemory vkUnmapMemory; + PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges; + PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges; + 
PFN_vkGetDeviceMemoryCommitment vkGetDeviceMemoryCommitment; + PFN_vkBindBufferMemory vkBindBufferMemory; + PFN_vkBindImageMemory vkBindImageMemory; + PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements; + PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements; + PFN_vkGetImageSparseMemoryRequirements vkGetImageSparseMemoryRequirements; + PFN_vkQueueBindSparse vkQueueBindSparse; + PFN_vkCreateFence vkCreateFence; + PFN_vkDestroyFence vkDestroyFence; + PFN_vkResetFences vkResetFences; + PFN_vkGetFenceStatus vkGetFenceStatus; + PFN_vkWaitForFences vkWaitForFences; + PFN_vkCreateSemaphore vkCreateSemaphore; + PFN_vkDestroySemaphore vkDestroySemaphore; + PFN_vkCreateEvent vkCreateEvent; + PFN_vkDestroyEvent vkDestroyEvent; + PFN_vkGetEventStatus vkGetEventStatus; + PFN_vkSetEvent vkSetEvent; + PFN_vkResetEvent vkResetEvent; + PFN_vkCreateQueryPool vkCreateQueryPool; + PFN_vkDestroyQueryPool vkDestroyQueryPool; + PFN_vkGetQueryPoolResults vkGetQueryPoolResults; + PFN_vkCreateBuffer vkCreateBuffer; + PFN_vkDestroyBuffer vkDestroyBuffer; + PFN_vkCreateBufferView vkCreateBufferView; + PFN_vkDestroyBufferView vkDestroyBufferView; + PFN_vkCreateImage vkCreateImage; + PFN_vkDestroyImage vkDestroyImage; + PFN_vkGetImageSubresourceLayout vkGetImageSubresourceLayout; + PFN_vkCreateImageView vkCreateImageView; + PFN_vkDestroyImageView vkDestroyImageView; + PFN_vkCreateShaderModule vkCreateShaderModule; + PFN_vkDestroyShaderModule vkDestroyShaderModule; + PFN_vkCreatePipelineCache vkCreatePipelineCache; + PFN_vkDestroyPipelineCache vkDestroyPipelineCache; + PFN_vkGetPipelineCacheData vkGetPipelineCacheData; + PFN_vkMergePipelineCaches vkMergePipelineCaches; + PFN_vkCreateGraphicsPipelines vkCreateGraphicsPipelines; + PFN_vkCreateComputePipelines vkCreateComputePipelines; + PFN_vkDestroyPipeline vkDestroyPipeline; + PFN_vkCreatePipelineLayout vkCreatePipelineLayout; + PFN_vkDestroyPipelineLayout vkDestroyPipelineLayout; + PFN_vkCreateSampler vkCreateSampler; + PFN_vkDestroySampler vkDestroySampler; + PFN_vkCreateDescriptorSetLayout vkCreateDescriptorSetLayout; + PFN_vkDestroyDescriptorSetLayout vkDestroyDescriptorSetLayout; + PFN_vkCreateDescriptorPool vkCreateDescriptorPool; + PFN_vkDestroyDescriptorPool vkDestroyDescriptorPool; + PFN_vkResetDescriptorPool vkResetDescriptorPool; + PFN_vkAllocateDescriptorSets vkAllocateDescriptorSets; + PFN_vkFreeDescriptorSets vkFreeDescriptorSets; + PFN_vkUpdateDescriptorSets vkUpdateDescriptorSets; + PFN_vkCreateFramebuffer vkCreateFramebuffer; + PFN_vkDestroyFramebuffer vkDestroyFramebuffer; + PFN_vkCreateRenderPass vkCreateRenderPass; + PFN_vkDestroyRenderPass vkDestroyRenderPass; + PFN_vkGetRenderAreaGranularity vkGetRenderAreaGranularity; + PFN_vkCreateCommandPool vkCreateCommandPool; + PFN_vkDestroyCommandPool vkDestroyCommandPool; + PFN_vkResetCommandPool vkResetCommandPool; + PFN_vkAllocateCommandBuffers vkAllocateCommandBuffers; + PFN_vkFreeCommandBuffers vkFreeCommandBuffers; + PFN_vkBeginCommandBuffer vkBeginCommandBuffer; + PFN_vkEndCommandBuffer vkEndCommandBuffer; + PFN_vkResetCommandBuffer vkResetCommandBuffer; + PFN_vkCmdBindPipeline vkCmdBindPipeline; + PFN_vkCmdSetViewport vkCmdSetViewport; + PFN_vkCmdSetScissor vkCmdSetScissor; + PFN_vkCmdSetLineWidth vkCmdSetLineWidth; + PFN_vkCmdSetDepthBias vkCmdSetDepthBias; + PFN_vkCmdSetBlendConstants vkCmdSetBlendConstants; + PFN_vkCmdSetDepthBounds vkCmdSetDepthBounds; + PFN_vkCmdSetStencilCompareMask vkCmdSetStencilCompareMask; + PFN_vkCmdSetStencilWriteMask vkCmdSetStencilWriteMask; + 
PFN_vkCmdSetStencilReference vkCmdSetStencilReference; + PFN_vkCmdBindDescriptorSets vkCmdBindDescriptorSets; + PFN_vkCmdBindIndexBuffer vkCmdBindIndexBuffer; + PFN_vkCmdBindVertexBuffers vkCmdBindVertexBuffers; + PFN_vkCmdDraw vkCmdDraw; + PFN_vkCmdDrawIndexed vkCmdDrawIndexed; + PFN_vkCmdDrawIndirect vkCmdDrawIndirect; + PFN_vkCmdDrawIndexedIndirect vkCmdDrawIndexedIndirect; + PFN_vkCmdDispatch vkCmdDispatch; + PFN_vkCmdDispatchIndirect vkCmdDispatchIndirect; + PFN_vkCmdCopyBuffer vkCmdCopyBuffer; + PFN_vkCmdCopyImage vkCmdCopyImage; + PFN_vkCmdBlitImage vkCmdBlitImage; + PFN_vkCmdCopyBufferToImage vkCmdCopyBufferToImage; + PFN_vkCmdCopyImageToBuffer vkCmdCopyImageToBuffer; + PFN_vkCmdUpdateBuffer vkCmdUpdateBuffer; + PFN_vkCmdFillBuffer vkCmdFillBuffer; + PFN_vkCmdClearColorImage vkCmdClearColorImage; + PFN_vkCmdClearDepthStencilImage vkCmdClearDepthStencilImage; + PFN_vkCmdClearAttachments vkCmdClearAttachments; + PFN_vkCmdResolveImage vkCmdResolveImage; + PFN_vkCmdSetEvent vkCmdSetEvent; + PFN_vkCmdResetEvent vkCmdResetEvent; + PFN_vkCmdWaitEvents vkCmdWaitEvents; + PFN_vkCmdPipelineBarrier vkCmdPipelineBarrier; + PFN_vkCmdBeginQuery vkCmdBeginQuery; + PFN_vkCmdEndQuery vkCmdEndQuery; + PFN_vkCmdResetQueryPool vkCmdResetQueryPool; + PFN_vkCmdWriteTimestamp vkCmdWriteTimestamp; + PFN_vkCmdCopyQueryPoolResults vkCmdCopyQueryPoolResults; + PFN_vkCmdPushConstants vkCmdPushConstants; + PFN_vkCmdBeginRenderPass vkCmdBeginRenderPass; + PFN_vkCmdNextSubpass vkCmdNextSubpass; + PFN_vkCmdEndRenderPass vkCmdEndRenderPass; + PFN_vkCmdExecuteCommands vkCmdExecuteCommands; + + // ---- Core 1_1 commands + PFN_vkBindBufferMemory2 vkBindBufferMemory2; + PFN_vkBindImageMemory2 vkBindImageMemory2; + PFN_vkGetDeviceGroupPeerMemoryFeatures vkGetDeviceGroupPeerMemoryFeatures; + PFN_vkCmdSetDeviceMask vkCmdSetDeviceMask; + PFN_vkCmdDispatchBase vkCmdDispatchBase; + PFN_vkGetImageMemoryRequirements2 vkGetImageMemoryRequirements2; + PFN_vkGetBufferMemoryRequirements2 vkGetBufferMemoryRequirements2; + PFN_vkGetImageSparseMemoryRequirements2 vkGetImageSparseMemoryRequirements2; + PFN_vkTrimCommandPool vkTrimCommandPool; + PFN_vkGetDeviceQueue2 vkGetDeviceQueue2; + PFN_vkCreateSamplerYcbcrConversion vkCreateSamplerYcbcrConversion; + PFN_vkDestroySamplerYcbcrConversion vkDestroySamplerYcbcrConversion; + PFN_vkCreateDescriptorUpdateTemplate vkCreateDescriptorUpdateTemplate; + PFN_vkDestroyDescriptorUpdateTemplate vkDestroyDescriptorUpdateTemplate; + PFN_vkUpdateDescriptorSetWithTemplate vkUpdateDescriptorSetWithTemplate; + PFN_vkGetDescriptorSetLayoutSupport vkGetDescriptorSetLayoutSupport; + + // ---- VK_KHR_swapchain extension commands + PFN_vkCreateSwapchainKHR vkCreateSwapchainKHR; + PFN_vkDestroySwapchainKHR vkDestroySwapchainKHR; + PFN_vkGetSwapchainImagesKHR vkGetSwapchainImagesKHR; + PFN_vkAcquireNextImageKHR vkAcquireNextImageKHR; + PFN_vkQueuePresentKHR vkQueuePresentKHR; + PFN_vkGetDeviceGroupPresentCapabilitiesKHR vkGetDeviceGroupPresentCapabilitiesKHR; + PFN_vkGetDeviceGroupSurfacePresentModesKHR vkGetDeviceGroupSurfacePresentModesKHR; + PFN_vkAcquireNextImage2KHR vkAcquireNextImage2KHR; + + // ---- VK_KHR_display_swapchain extension commands + PFN_vkCreateSharedSwapchainsKHR vkCreateSharedSwapchainsKHR; + + // ---- VK_KHR_device_group extension commands + PFN_vkGetDeviceGroupPeerMemoryFeaturesKHR vkGetDeviceGroupPeerMemoryFeaturesKHR; + PFN_vkCmdSetDeviceMaskKHR vkCmdSetDeviceMaskKHR; + PFN_vkCmdDispatchBaseKHR vkCmdDispatchBaseKHR; + + // ---- VK_KHR_maintenance1 extension commands + 
PFN_vkTrimCommandPoolKHR vkTrimCommandPoolKHR; + + // ---- VK_KHR_external_memory_win32 extension commands +#ifdef VK_USE_PLATFORM_WIN32_KHR + PFN_vkGetMemoryWin32HandleKHR vkGetMemoryWin32HandleKHR; +#endif // VK_USE_PLATFORM_WIN32_KHR +#ifdef VK_USE_PLATFORM_WIN32_KHR + PFN_vkGetMemoryWin32HandlePropertiesKHR vkGetMemoryWin32HandlePropertiesKHR; +#endif // VK_USE_PLATFORM_WIN32_KHR + + // ---- VK_KHR_external_memory_fd extension commands + PFN_vkGetMemoryFdKHR vkGetMemoryFdKHR; + PFN_vkGetMemoryFdPropertiesKHR vkGetMemoryFdPropertiesKHR; + + // ---- VK_KHR_external_semaphore_win32 extension commands +#ifdef VK_USE_PLATFORM_WIN32_KHR + PFN_vkImportSemaphoreWin32HandleKHR vkImportSemaphoreWin32HandleKHR; +#endif // VK_USE_PLATFORM_WIN32_KHR +#ifdef VK_USE_PLATFORM_WIN32_KHR + PFN_vkGetSemaphoreWin32HandleKHR vkGetSemaphoreWin32HandleKHR; +#endif // VK_USE_PLATFORM_WIN32_KHR + + // ---- VK_KHR_external_semaphore_fd extension commands + PFN_vkImportSemaphoreFdKHR vkImportSemaphoreFdKHR; + PFN_vkGetSemaphoreFdKHR vkGetSemaphoreFdKHR; + + // ---- VK_KHR_push_descriptor extension commands + PFN_vkCmdPushDescriptorSetKHR vkCmdPushDescriptorSetKHR; + PFN_vkCmdPushDescriptorSetWithTemplateKHR vkCmdPushDescriptorSetWithTemplateKHR; + + // ---- VK_KHR_descriptor_update_template extension commands + PFN_vkCreateDescriptorUpdateTemplateKHR vkCreateDescriptorUpdateTemplateKHR; + PFN_vkDestroyDescriptorUpdateTemplateKHR vkDestroyDescriptorUpdateTemplateKHR; + PFN_vkUpdateDescriptorSetWithTemplateKHR vkUpdateDescriptorSetWithTemplateKHR; + + // ---- VK_KHR_shared_presentable_image extension commands + PFN_vkGetSwapchainStatusKHR vkGetSwapchainStatusKHR; + + // ---- VK_KHR_external_fence_win32 extension commands +#ifdef VK_USE_PLATFORM_WIN32_KHR + PFN_vkImportFenceWin32HandleKHR vkImportFenceWin32HandleKHR; +#endif // VK_USE_PLATFORM_WIN32_KHR +#ifdef VK_USE_PLATFORM_WIN32_KHR + PFN_vkGetFenceWin32HandleKHR vkGetFenceWin32HandleKHR; +#endif // VK_USE_PLATFORM_WIN32_KHR + + // ---- VK_KHR_external_fence_fd extension commands + PFN_vkImportFenceFdKHR vkImportFenceFdKHR; + PFN_vkGetFenceFdKHR vkGetFenceFdKHR; + + // ---- VK_KHR_get_memory_requirements2 extension commands + PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR; + PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR; + PFN_vkGetImageSparseMemoryRequirements2KHR vkGetImageSparseMemoryRequirements2KHR; + + // ---- VK_KHR_sampler_ycbcr_conversion extension commands + PFN_vkCreateSamplerYcbcrConversionKHR vkCreateSamplerYcbcrConversionKHR; + PFN_vkDestroySamplerYcbcrConversionKHR vkDestroySamplerYcbcrConversionKHR; + + // ---- VK_KHR_bind_memory2 extension commands + PFN_vkBindBufferMemory2KHR vkBindBufferMemory2KHR; + PFN_vkBindImageMemory2KHR vkBindImageMemory2KHR; + + // ---- VK_KHR_maintenance3 extension commands + PFN_vkGetDescriptorSetLayoutSupportKHR vkGetDescriptorSetLayoutSupportKHR; + + // ---- VK_EXT_debug_marker extension commands + PFN_vkDebugMarkerSetObjectTagEXT vkDebugMarkerSetObjectTagEXT; + PFN_vkDebugMarkerSetObjectNameEXT vkDebugMarkerSetObjectNameEXT; + PFN_vkCmdDebugMarkerBeginEXT vkCmdDebugMarkerBeginEXT; + PFN_vkCmdDebugMarkerEndEXT vkCmdDebugMarkerEndEXT; + PFN_vkCmdDebugMarkerInsertEXT vkCmdDebugMarkerInsertEXT; + + // ---- VK_AMD_draw_indirect_count extension commands + PFN_vkCmdDrawIndirectCountAMD vkCmdDrawIndirectCountAMD; + PFN_vkCmdDrawIndexedIndirectCountAMD vkCmdDrawIndexedIndirectCountAMD; + + // ---- VK_AMD_shader_info extension commands + PFN_vkGetShaderInfoAMD 
vkGetShaderInfoAMD; + + // ---- VK_NV_external_memory_win32 extension commands +#ifdef VK_USE_PLATFORM_WIN32_KHR + PFN_vkGetMemoryWin32HandleNV vkGetMemoryWin32HandleNV; +#endif // VK_USE_PLATFORM_WIN32_KHR + + // ---- VK_NVX_device_generated_commands extension commands + PFN_vkCmdProcessCommandsNVX vkCmdProcessCommandsNVX; + PFN_vkCmdReserveSpaceForCommandsNVX vkCmdReserveSpaceForCommandsNVX; + PFN_vkCreateIndirectCommandsLayoutNVX vkCreateIndirectCommandsLayoutNVX; + PFN_vkDestroyIndirectCommandsLayoutNVX vkDestroyIndirectCommandsLayoutNVX; + PFN_vkCreateObjectTableNVX vkCreateObjectTableNVX; + PFN_vkDestroyObjectTableNVX vkDestroyObjectTableNVX; + PFN_vkRegisterObjectsNVX vkRegisterObjectsNVX; + PFN_vkUnregisterObjectsNVX vkUnregisterObjectsNVX; + + // ---- VK_NV_clip_space_w_scaling extension commands + PFN_vkCmdSetViewportWScalingNV vkCmdSetViewportWScalingNV; + + // ---- VK_EXT_display_control extension commands + PFN_vkDisplayPowerControlEXT vkDisplayPowerControlEXT; + PFN_vkRegisterDeviceEventEXT vkRegisterDeviceEventEXT; + PFN_vkRegisterDisplayEventEXT vkRegisterDisplayEventEXT; + PFN_vkGetSwapchainCounterEXT vkGetSwapchainCounterEXT; + + // ---- VK_GOOGLE_display_timing extension commands + PFN_vkGetRefreshCycleDurationGOOGLE vkGetRefreshCycleDurationGOOGLE; + PFN_vkGetPastPresentationTimingGOOGLE vkGetPastPresentationTimingGOOGLE; + + // ---- VK_EXT_discard_rectangles extension commands + PFN_vkCmdSetDiscardRectangleEXT vkCmdSetDiscardRectangleEXT; + + // ---- VK_EXT_hdr_metadata extension commands + PFN_vkSetHdrMetadataEXT vkSetHdrMetadataEXT; + + // ---- VK_ANDROID_external_memory_android_hardware_buffer extension commands +#ifdef VK_USE_PLATFORM_ANDROID_KHR + PFN_vkGetAndroidHardwareBufferPropertiesANDROID vkGetAndroidHardwareBufferPropertiesANDROID; +#endif // VK_USE_PLATFORM_ANDROID_KHR +#ifdef VK_USE_PLATFORM_ANDROID_KHR + PFN_vkGetMemoryAndroidHardwareBufferANDROID vkGetMemoryAndroidHardwareBufferANDROID; +#endif // VK_USE_PLATFORM_ANDROID_KHR + + // ---- VK_EXT_sample_locations extension commands + PFN_vkCmdSetSampleLocationsEXT vkCmdSetSampleLocationsEXT; + + // ---- VK_EXT_validation_cache extension commands + PFN_vkCreateValidationCacheEXT vkCreateValidationCacheEXT; + PFN_vkDestroyValidationCacheEXT vkDestroyValidationCacheEXT; + PFN_vkMergeValidationCachesEXT vkMergeValidationCachesEXT; + PFN_vkGetValidationCacheDataEXT vkGetValidationCacheDataEXT; + + // ---- VK_EXT_external_memory_host extension commands + PFN_vkGetMemoryHostPointerPropertiesEXT vkGetMemoryHostPointerPropertiesEXT; + + // ---- VK_AMD_buffer_marker extension commands + PFN_vkCmdWriteBufferMarkerAMD vkCmdWriteBufferMarkerAMD; +} VkDeviceBindings; + diff --git a/sources/pvrsdk/Include/vk_bindings_helper.h b/sources/pvrsdk/Include/vk_bindings_helper.h new file mode 100755 index 00000000..4df4e1fa --- /dev/null +++ b/sources/pvrsdk/Include/vk_bindings_helper.h @@ -0,0 +1,410 @@ +// *** THIS FILE IS GENERATED - DO NOT EDIT *** +// See vk_bindings_helper_generator.py for modifications + +/* +\brief Helper functions for filling vk bindings function pointer tables. +\file vk_bindings_helper.h +\author PowerVR by Imagination, Developer Technology Team +\copyright Copyright (c) Imagination Technologies Limited. 
+*/
+
+#pragma once
+#include <string>
+#include <string.h>
+#include "pvr_openlib.h"
+#include "vk_bindings.h"
+
+namespace vk {
+namespace internal {
+/** DEFINE THE PLATFORM SPECIFIC LIBRARY NAME **/
+#ifdef _WIN32
+static const char* libName = "vulkan-1.dll";
+#elif defined(TARGET_OS_MAC)
+static const char* libName = "libvulkan.dylib";
+#else
+static const char* libName = "libvulkan.so.1;libvulkan.so";
+#endif
+} // namespace internal
+} // namespace vk
+
+static inline bool initVkBindings(VkBindings *bindings)
+{
+	pvr::lib::LIBTYPE lib = pvr::lib::openlib(vk::internal::libName);
+
+	memset(bindings, 0, sizeof(*bindings));
+
+	// Load the function pointer dynamically for vkGetInstanceProcAddr
+	bindings->vkGetInstanceProcAddr = pvr::lib::getLibFunctionChecked<PFN_vkGetInstanceProcAddr>(lib, "vkGetInstanceProcAddr");
+	if (!bindings->vkGetInstanceProcAddr)
+	{
+		// Without the loader entry point there is nothing to bind
+		return false;
+	}
+
+	// Use vkGetInstanceProcAddr with a NULL instance to retrieve the function pointers
+	bindings->vkEnumerateInstanceExtensionProperties = (PFN_vkEnumerateInstanceExtensionProperties)bindings->vkGetInstanceProcAddr(NULL, "vkEnumerateInstanceExtensionProperties");
+	bindings->vkEnumerateInstanceLayerProperties = (PFN_vkEnumerateInstanceLayerProperties)bindings->vkGetInstanceProcAddr(NULL, "vkEnumerateInstanceLayerProperties");
+	bindings->vkCreateInstance = (PFN_vkCreateInstance)bindings->vkGetInstanceProcAddr(NULL, "vkCreateInstance");
+	bindings->vkEnumerateInstanceVersion = (PFN_vkEnumerateInstanceVersion)bindings->vkGetInstanceProcAddr(NULL, "vkEnumerateInstanceVersion");
+
+	// validate that we have all necessary function pointers to continue
+	if (!bindings->vkEnumerateInstanceExtensionProperties || !bindings->vkEnumerateInstanceLayerProperties || !bindings->vkCreateInstance)
+	{
+		return false;
+	}
+	return true;
+}
+
+
+static inline void initVkDeviceBindings(VkDevice device, VkDeviceBindings *bindings, PFN_vkGetDeviceProcAddr getDeviceProcAddress) {
+	memset(bindings, 0, sizeof(*bindings));
+	// Device function pointers
+	bindings->vkGetDeviceProcAddr = getDeviceProcAddress;
+	bindings->vkDestroyDevice = (PFN_vkDestroyDevice) getDeviceProcAddress(device, "vkDestroyDevice");
+	bindings->vkGetDeviceQueue = (PFN_vkGetDeviceQueue) getDeviceProcAddress(device, "vkGetDeviceQueue");
+	bindings->vkQueueSubmit = (PFN_vkQueueSubmit) getDeviceProcAddress(device, "vkQueueSubmit");
+	bindings->vkQueueWaitIdle = (PFN_vkQueueWaitIdle) getDeviceProcAddress(device, "vkQueueWaitIdle");
+	bindings->vkDeviceWaitIdle = (PFN_vkDeviceWaitIdle) getDeviceProcAddress(device, "vkDeviceWaitIdle");
+	bindings->vkAllocateMemory = (PFN_vkAllocateMemory) getDeviceProcAddress(device, "vkAllocateMemory");
+	bindings->vkFreeMemory = (PFN_vkFreeMemory) getDeviceProcAddress(device, "vkFreeMemory");
+	bindings->vkMapMemory = (PFN_vkMapMemory) getDeviceProcAddress(device, "vkMapMemory");
+	bindings->vkUnmapMemory = (PFN_vkUnmapMemory) getDeviceProcAddress(device, "vkUnmapMemory");
+	bindings->vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges) getDeviceProcAddress(device, "vkFlushMappedMemoryRanges");
+	bindings->vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges) getDeviceProcAddress(device, "vkInvalidateMappedMemoryRanges");
+	bindings->vkGetDeviceMemoryCommitment = (PFN_vkGetDeviceMemoryCommitment) getDeviceProcAddress(device, "vkGetDeviceMemoryCommitment");
+	bindings->vkBindBufferMemory = (PFN_vkBindBufferMemory) getDeviceProcAddress(device, "vkBindBufferMemory");
+	bindings->vkBindImageMemory = (PFN_vkBindImageMemory) getDeviceProcAddress(device, "vkBindImageMemory");
+	bindings->vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)
getDeviceProcAddress(device, "vkGetBufferMemoryRequirements"); + bindings->vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements) getDeviceProcAddress(device, "vkGetImageMemoryRequirements"); + bindings->vkGetImageSparseMemoryRequirements = (PFN_vkGetImageSparseMemoryRequirements) getDeviceProcAddress(device, "vkGetImageSparseMemoryRequirements"); + bindings->vkQueueBindSparse = (PFN_vkQueueBindSparse) getDeviceProcAddress(device, "vkQueueBindSparse"); + bindings->vkCreateFence = (PFN_vkCreateFence) getDeviceProcAddress(device, "vkCreateFence"); + bindings->vkDestroyFence = (PFN_vkDestroyFence) getDeviceProcAddress(device, "vkDestroyFence"); + bindings->vkResetFences = (PFN_vkResetFences) getDeviceProcAddress(device, "vkResetFences"); + bindings->vkGetFenceStatus = (PFN_vkGetFenceStatus) getDeviceProcAddress(device, "vkGetFenceStatus"); + bindings->vkWaitForFences = (PFN_vkWaitForFences) getDeviceProcAddress(device, "vkWaitForFences"); + bindings->vkCreateSemaphore = (PFN_vkCreateSemaphore) getDeviceProcAddress(device, "vkCreateSemaphore"); + bindings->vkDestroySemaphore = (PFN_vkDestroySemaphore) getDeviceProcAddress(device, "vkDestroySemaphore"); + bindings->vkCreateEvent = (PFN_vkCreateEvent) getDeviceProcAddress(device, "vkCreateEvent"); + bindings->vkDestroyEvent = (PFN_vkDestroyEvent) getDeviceProcAddress(device, "vkDestroyEvent"); + bindings->vkGetEventStatus = (PFN_vkGetEventStatus) getDeviceProcAddress(device, "vkGetEventStatus"); + bindings->vkSetEvent = (PFN_vkSetEvent) getDeviceProcAddress(device, "vkSetEvent"); + bindings->vkResetEvent = (PFN_vkResetEvent) getDeviceProcAddress(device, "vkResetEvent"); + bindings->vkCreateQueryPool = (PFN_vkCreateQueryPool) getDeviceProcAddress(device, "vkCreateQueryPool"); + bindings->vkDestroyQueryPool = (PFN_vkDestroyQueryPool) getDeviceProcAddress(device, "vkDestroyQueryPool"); + bindings->vkGetQueryPoolResults = (PFN_vkGetQueryPoolResults) getDeviceProcAddress(device, "vkGetQueryPoolResults"); + bindings->vkCreateBuffer = (PFN_vkCreateBuffer) getDeviceProcAddress(device, "vkCreateBuffer"); + bindings->vkDestroyBuffer = (PFN_vkDestroyBuffer) getDeviceProcAddress(device, "vkDestroyBuffer"); + bindings->vkCreateBufferView = (PFN_vkCreateBufferView) getDeviceProcAddress(device, "vkCreateBufferView"); + bindings->vkDestroyBufferView = (PFN_vkDestroyBufferView) getDeviceProcAddress(device, "vkDestroyBufferView"); + bindings->vkCreateImage = (PFN_vkCreateImage) getDeviceProcAddress(device, "vkCreateImage"); + bindings->vkDestroyImage = (PFN_vkDestroyImage) getDeviceProcAddress(device, "vkDestroyImage"); + bindings->vkGetImageSubresourceLayout = (PFN_vkGetImageSubresourceLayout) getDeviceProcAddress(device, "vkGetImageSubresourceLayout"); + bindings->vkCreateImageView = (PFN_vkCreateImageView) getDeviceProcAddress(device, "vkCreateImageView"); + bindings->vkDestroyImageView = (PFN_vkDestroyImageView) getDeviceProcAddress(device, "vkDestroyImageView"); + bindings->vkCreateShaderModule = (PFN_vkCreateShaderModule) getDeviceProcAddress(device, "vkCreateShaderModule"); + bindings->vkDestroyShaderModule = (PFN_vkDestroyShaderModule) getDeviceProcAddress(device, "vkDestroyShaderModule"); + bindings->vkCreatePipelineCache = (PFN_vkCreatePipelineCache) getDeviceProcAddress(device, "vkCreatePipelineCache"); + bindings->vkDestroyPipelineCache = (PFN_vkDestroyPipelineCache) getDeviceProcAddress(device, "vkDestroyPipelineCache"); + bindings->vkGetPipelineCacheData = (PFN_vkGetPipelineCacheData) getDeviceProcAddress(device, 
"vkGetPipelineCacheData"); + bindings->vkMergePipelineCaches = (PFN_vkMergePipelineCaches) getDeviceProcAddress(device, "vkMergePipelineCaches"); + bindings->vkCreateGraphicsPipelines = (PFN_vkCreateGraphicsPipelines) getDeviceProcAddress(device, "vkCreateGraphicsPipelines"); + bindings->vkCreateComputePipelines = (PFN_vkCreateComputePipelines) getDeviceProcAddress(device, "vkCreateComputePipelines"); + bindings->vkDestroyPipeline = (PFN_vkDestroyPipeline) getDeviceProcAddress(device, "vkDestroyPipeline"); + bindings->vkCreatePipelineLayout = (PFN_vkCreatePipelineLayout) getDeviceProcAddress(device, "vkCreatePipelineLayout"); + bindings->vkDestroyPipelineLayout = (PFN_vkDestroyPipelineLayout) getDeviceProcAddress(device, "vkDestroyPipelineLayout"); + bindings->vkCreateSampler = (PFN_vkCreateSampler) getDeviceProcAddress(device, "vkCreateSampler"); + bindings->vkDestroySampler = (PFN_vkDestroySampler) getDeviceProcAddress(device, "vkDestroySampler"); + bindings->vkCreateDescriptorSetLayout = (PFN_vkCreateDescriptorSetLayout) getDeviceProcAddress(device, "vkCreateDescriptorSetLayout"); + bindings->vkDestroyDescriptorSetLayout = (PFN_vkDestroyDescriptorSetLayout) getDeviceProcAddress(device, "vkDestroyDescriptorSetLayout"); + bindings->vkCreateDescriptorPool = (PFN_vkCreateDescriptorPool) getDeviceProcAddress(device, "vkCreateDescriptorPool"); + bindings->vkDestroyDescriptorPool = (PFN_vkDestroyDescriptorPool) getDeviceProcAddress(device, "vkDestroyDescriptorPool"); + bindings->vkResetDescriptorPool = (PFN_vkResetDescriptorPool) getDeviceProcAddress(device, "vkResetDescriptorPool"); + bindings->vkAllocateDescriptorSets = (PFN_vkAllocateDescriptorSets) getDeviceProcAddress(device, "vkAllocateDescriptorSets"); + bindings->vkFreeDescriptorSets = (PFN_vkFreeDescriptorSets) getDeviceProcAddress(device, "vkFreeDescriptorSets"); + bindings->vkUpdateDescriptorSets = (PFN_vkUpdateDescriptorSets) getDeviceProcAddress(device, "vkUpdateDescriptorSets"); + bindings->vkCreateFramebuffer = (PFN_vkCreateFramebuffer) getDeviceProcAddress(device, "vkCreateFramebuffer"); + bindings->vkDestroyFramebuffer = (PFN_vkDestroyFramebuffer) getDeviceProcAddress(device, "vkDestroyFramebuffer"); + bindings->vkCreateRenderPass = (PFN_vkCreateRenderPass) getDeviceProcAddress(device, "vkCreateRenderPass"); + bindings->vkDestroyRenderPass = (PFN_vkDestroyRenderPass) getDeviceProcAddress(device, "vkDestroyRenderPass"); + bindings->vkGetRenderAreaGranularity = (PFN_vkGetRenderAreaGranularity) getDeviceProcAddress(device, "vkGetRenderAreaGranularity"); + bindings->vkCreateCommandPool = (PFN_vkCreateCommandPool) getDeviceProcAddress(device, "vkCreateCommandPool"); + bindings->vkDestroyCommandPool = (PFN_vkDestroyCommandPool) getDeviceProcAddress(device, "vkDestroyCommandPool"); + bindings->vkResetCommandPool = (PFN_vkResetCommandPool) getDeviceProcAddress(device, "vkResetCommandPool"); + bindings->vkAllocateCommandBuffers = (PFN_vkAllocateCommandBuffers) getDeviceProcAddress(device, "vkAllocateCommandBuffers"); + bindings->vkFreeCommandBuffers = (PFN_vkFreeCommandBuffers) getDeviceProcAddress(device, "vkFreeCommandBuffers"); + bindings->vkBeginCommandBuffer = (PFN_vkBeginCommandBuffer) getDeviceProcAddress(device, "vkBeginCommandBuffer"); + bindings->vkEndCommandBuffer = (PFN_vkEndCommandBuffer) getDeviceProcAddress(device, "vkEndCommandBuffer"); + bindings->vkResetCommandBuffer = (PFN_vkResetCommandBuffer) getDeviceProcAddress(device, "vkResetCommandBuffer"); + bindings->vkCmdBindPipeline = (PFN_vkCmdBindPipeline) 
getDeviceProcAddress(device, "vkCmdBindPipeline"); + bindings->vkCmdSetViewport = (PFN_vkCmdSetViewport) getDeviceProcAddress(device, "vkCmdSetViewport"); + bindings->vkCmdSetScissor = (PFN_vkCmdSetScissor) getDeviceProcAddress(device, "vkCmdSetScissor"); + bindings->vkCmdSetLineWidth = (PFN_vkCmdSetLineWidth) getDeviceProcAddress(device, "vkCmdSetLineWidth"); + bindings->vkCmdSetDepthBias = (PFN_vkCmdSetDepthBias) getDeviceProcAddress(device, "vkCmdSetDepthBias"); + bindings->vkCmdSetBlendConstants = (PFN_vkCmdSetBlendConstants) getDeviceProcAddress(device, "vkCmdSetBlendConstants"); + bindings->vkCmdSetDepthBounds = (PFN_vkCmdSetDepthBounds) getDeviceProcAddress(device, "vkCmdSetDepthBounds"); + bindings->vkCmdSetStencilCompareMask = (PFN_vkCmdSetStencilCompareMask) getDeviceProcAddress(device, "vkCmdSetStencilCompareMask"); + bindings->vkCmdSetStencilWriteMask = (PFN_vkCmdSetStencilWriteMask) getDeviceProcAddress(device, "vkCmdSetStencilWriteMask"); + bindings->vkCmdSetStencilReference = (PFN_vkCmdSetStencilReference) getDeviceProcAddress(device, "vkCmdSetStencilReference"); + bindings->vkCmdBindDescriptorSets = (PFN_vkCmdBindDescriptorSets) getDeviceProcAddress(device, "vkCmdBindDescriptorSets"); + bindings->vkCmdBindIndexBuffer = (PFN_vkCmdBindIndexBuffer) getDeviceProcAddress(device, "vkCmdBindIndexBuffer"); + bindings->vkCmdBindVertexBuffers = (PFN_vkCmdBindVertexBuffers) getDeviceProcAddress(device, "vkCmdBindVertexBuffers"); + bindings->vkCmdDraw = (PFN_vkCmdDraw) getDeviceProcAddress(device, "vkCmdDraw"); + bindings->vkCmdDrawIndexed = (PFN_vkCmdDrawIndexed) getDeviceProcAddress(device, "vkCmdDrawIndexed"); + bindings->vkCmdDrawIndirect = (PFN_vkCmdDrawIndirect) getDeviceProcAddress(device, "vkCmdDrawIndirect"); + bindings->vkCmdDrawIndexedIndirect = (PFN_vkCmdDrawIndexedIndirect) getDeviceProcAddress(device, "vkCmdDrawIndexedIndirect"); + bindings->vkCmdDispatch = (PFN_vkCmdDispatch) getDeviceProcAddress(device, "vkCmdDispatch"); + bindings->vkCmdDispatchIndirect = (PFN_vkCmdDispatchIndirect) getDeviceProcAddress(device, "vkCmdDispatchIndirect"); + bindings->vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer) getDeviceProcAddress(device, "vkCmdCopyBuffer"); + bindings->vkCmdCopyImage = (PFN_vkCmdCopyImage) getDeviceProcAddress(device, "vkCmdCopyImage"); + bindings->vkCmdBlitImage = (PFN_vkCmdBlitImage) getDeviceProcAddress(device, "vkCmdBlitImage"); + bindings->vkCmdCopyBufferToImage = (PFN_vkCmdCopyBufferToImage) getDeviceProcAddress(device, "vkCmdCopyBufferToImage"); + bindings->vkCmdCopyImageToBuffer = (PFN_vkCmdCopyImageToBuffer) getDeviceProcAddress(device, "vkCmdCopyImageToBuffer"); + bindings->vkCmdUpdateBuffer = (PFN_vkCmdUpdateBuffer) getDeviceProcAddress(device, "vkCmdUpdateBuffer"); + bindings->vkCmdFillBuffer = (PFN_vkCmdFillBuffer) getDeviceProcAddress(device, "vkCmdFillBuffer"); + bindings->vkCmdClearColorImage = (PFN_vkCmdClearColorImage) getDeviceProcAddress(device, "vkCmdClearColorImage"); + bindings->vkCmdClearDepthStencilImage = (PFN_vkCmdClearDepthStencilImage) getDeviceProcAddress(device, "vkCmdClearDepthStencilImage"); + bindings->vkCmdClearAttachments = (PFN_vkCmdClearAttachments) getDeviceProcAddress(device, "vkCmdClearAttachments"); + bindings->vkCmdResolveImage = (PFN_vkCmdResolveImage) getDeviceProcAddress(device, "vkCmdResolveImage"); + bindings->vkCmdSetEvent = (PFN_vkCmdSetEvent) getDeviceProcAddress(device, "vkCmdSetEvent"); + bindings->vkCmdResetEvent = (PFN_vkCmdResetEvent) getDeviceProcAddress(device, "vkCmdResetEvent"); + bindings->vkCmdWaitEvents = 
(PFN_vkCmdWaitEvents) getDeviceProcAddress(device, "vkCmdWaitEvents"); + bindings->vkCmdPipelineBarrier = (PFN_vkCmdPipelineBarrier) getDeviceProcAddress(device, "vkCmdPipelineBarrier"); + bindings->vkCmdBeginQuery = (PFN_vkCmdBeginQuery) getDeviceProcAddress(device, "vkCmdBeginQuery"); + bindings->vkCmdEndQuery = (PFN_vkCmdEndQuery) getDeviceProcAddress(device, "vkCmdEndQuery"); + bindings->vkCmdResetQueryPool = (PFN_vkCmdResetQueryPool) getDeviceProcAddress(device, "vkCmdResetQueryPool"); + bindings->vkCmdWriteTimestamp = (PFN_vkCmdWriteTimestamp) getDeviceProcAddress(device, "vkCmdWriteTimestamp"); + bindings->vkCmdCopyQueryPoolResults = (PFN_vkCmdCopyQueryPoolResults) getDeviceProcAddress(device, "vkCmdCopyQueryPoolResults"); + bindings->vkCmdPushConstants = (PFN_vkCmdPushConstants) getDeviceProcAddress(device, "vkCmdPushConstants"); + bindings->vkCmdBeginRenderPass = (PFN_vkCmdBeginRenderPass) getDeviceProcAddress(device, "vkCmdBeginRenderPass"); + bindings->vkCmdNextSubpass = (PFN_vkCmdNextSubpass) getDeviceProcAddress(device, "vkCmdNextSubpass"); + bindings->vkCmdEndRenderPass = (PFN_vkCmdEndRenderPass) getDeviceProcAddress(device, "vkCmdEndRenderPass"); + bindings->vkCmdExecuteCommands = (PFN_vkCmdExecuteCommands) getDeviceProcAddress(device, "vkCmdExecuteCommands"); + bindings->vkBindBufferMemory2 = (PFN_vkBindBufferMemory2) getDeviceProcAddress(device, "vkBindBufferMemory2"); + bindings->vkBindImageMemory2 = (PFN_vkBindImageMemory2) getDeviceProcAddress(device, "vkBindImageMemory2"); + bindings->vkGetDeviceGroupPeerMemoryFeatures = (PFN_vkGetDeviceGroupPeerMemoryFeatures) getDeviceProcAddress(device, "vkGetDeviceGroupPeerMemoryFeatures"); + bindings->vkCmdSetDeviceMask = (PFN_vkCmdSetDeviceMask) getDeviceProcAddress(device, "vkCmdSetDeviceMask"); + bindings->vkCmdDispatchBase = (PFN_vkCmdDispatchBase) getDeviceProcAddress(device, "vkCmdDispatchBase"); + bindings->vkGetImageMemoryRequirements2 = (PFN_vkGetImageMemoryRequirements2) getDeviceProcAddress(device, "vkGetImageMemoryRequirements2"); + bindings->vkGetBufferMemoryRequirements2 = (PFN_vkGetBufferMemoryRequirements2) getDeviceProcAddress(device, "vkGetBufferMemoryRequirements2"); + bindings->vkGetImageSparseMemoryRequirements2 = (PFN_vkGetImageSparseMemoryRequirements2) getDeviceProcAddress(device, "vkGetImageSparseMemoryRequirements2"); + bindings->vkTrimCommandPool = (PFN_vkTrimCommandPool) getDeviceProcAddress(device, "vkTrimCommandPool"); + bindings->vkGetDeviceQueue2 = (PFN_vkGetDeviceQueue2) getDeviceProcAddress(device, "vkGetDeviceQueue2"); + bindings->vkCreateSamplerYcbcrConversion = (PFN_vkCreateSamplerYcbcrConversion) getDeviceProcAddress(device, "vkCreateSamplerYcbcrConversion"); + bindings->vkDestroySamplerYcbcrConversion = (PFN_vkDestroySamplerYcbcrConversion) getDeviceProcAddress(device, "vkDestroySamplerYcbcrConversion"); + bindings->vkCreateDescriptorUpdateTemplate = (PFN_vkCreateDescriptorUpdateTemplate) getDeviceProcAddress(device, "vkCreateDescriptorUpdateTemplate"); + bindings->vkDestroyDescriptorUpdateTemplate = (PFN_vkDestroyDescriptorUpdateTemplate) getDeviceProcAddress(device, "vkDestroyDescriptorUpdateTemplate"); + bindings->vkUpdateDescriptorSetWithTemplate = (PFN_vkUpdateDescriptorSetWithTemplate) getDeviceProcAddress(device, "vkUpdateDescriptorSetWithTemplate"); + bindings->vkGetDescriptorSetLayoutSupport = (PFN_vkGetDescriptorSetLayoutSupport) getDeviceProcAddress(device, "vkGetDescriptorSetLayoutSupport"); + bindings->vkCreateSwapchainKHR = (PFN_vkCreateSwapchainKHR) getDeviceProcAddress(device, 
"vkCreateSwapchainKHR"); + bindings->vkDestroySwapchainKHR = (PFN_vkDestroySwapchainKHR) getDeviceProcAddress(device, "vkDestroySwapchainKHR"); + bindings->vkGetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR) getDeviceProcAddress(device, "vkGetSwapchainImagesKHR"); + bindings->vkAcquireNextImageKHR = (PFN_vkAcquireNextImageKHR) getDeviceProcAddress(device, "vkAcquireNextImageKHR"); + bindings->vkQueuePresentKHR = (PFN_vkQueuePresentKHR) getDeviceProcAddress(device, "vkQueuePresentKHR"); + bindings->vkGetDeviceGroupPresentCapabilitiesKHR = (PFN_vkGetDeviceGroupPresentCapabilitiesKHR) getDeviceProcAddress(device, "vkGetDeviceGroupPresentCapabilitiesKHR"); + bindings->vkGetDeviceGroupSurfacePresentModesKHR = (PFN_vkGetDeviceGroupSurfacePresentModesKHR) getDeviceProcAddress(device, "vkGetDeviceGroupSurfacePresentModesKHR"); + bindings->vkAcquireNextImage2KHR = (PFN_vkAcquireNextImage2KHR) getDeviceProcAddress(device, "vkAcquireNextImage2KHR"); + bindings->vkCreateSharedSwapchainsKHR = (PFN_vkCreateSharedSwapchainsKHR) getDeviceProcAddress(device, "vkCreateSharedSwapchainsKHR"); + bindings->vkGetDeviceGroupPeerMemoryFeaturesKHR = (PFN_vkGetDeviceGroupPeerMemoryFeaturesKHR) getDeviceProcAddress(device, "vkGetDeviceGroupPeerMemoryFeaturesKHR"); + bindings->vkCmdSetDeviceMaskKHR = (PFN_vkCmdSetDeviceMaskKHR) getDeviceProcAddress(device, "vkCmdSetDeviceMaskKHR"); + bindings->vkCmdDispatchBaseKHR = (PFN_vkCmdDispatchBaseKHR) getDeviceProcAddress(device, "vkCmdDispatchBaseKHR"); + bindings->vkTrimCommandPoolKHR = (PFN_vkTrimCommandPoolKHR) getDeviceProcAddress(device, "vkTrimCommandPoolKHR"); +#ifdef VK_USE_PLATFORM_WIN32_KHR + bindings->vkGetMemoryWin32HandleKHR = (PFN_vkGetMemoryWin32HandleKHR) getDeviceProcAddress(device, "vkGetMemoryWin32HandleKHR"); +#endif // VK_USE_PLATFORM_WIN32_KHR +#ifdef VK_USE_PLATFORM_WIN32_KHR + bindings->vkGetMemoryWin32HandlePropertiesKHR = (PFN_vkGetMemoryWin32HandlePropertiesKHR) getDeviceProcAddress(device, "vkGetMemoryWin32HandlePropertiesKHR"); +#endif // VK_USE_PLATFORM_WIN32_KHR + bindings->vkGetMemoryFdKHR = (PFN_vkGetMemoryFdKHR) getDeviceProcAddress(device, "vkGetMemoryFdKHR"); + bindings->vkGetMemoryFdPropertiesKHR = (PFN_vkGetMemoryFdPropertiesKHR) getDeviceProcAddress(device, "vkGetMemoryFdPropertiesKHR"); +#ifdef VK_USE_PLATFORM_WIN32_KHR + bindings->vkImportSemaphoreWin32HandleKHR = (PFN_vkImportSemaphoreWin32HandleKHR) getDeviceProcAddress(device, "vkImportSemaphoreWin32HandleKHR"); +#endif // VK_USE_PLATFORM_WIN32_KHR +#ifdef VK_USE_PLATFORM_WIN32_KHR + bindings->vkGetSemaphoreWin32HandleKHR = (PFN_vkGetSemaphoreWin32HandleKHR) getDeviceProcAddress(device, "vkGetSemaphoreWin32HandleKHR"); +#endif // VK_USE_PLATFORM_WIN32_KHR + bindings->vkImportSemaphoreFdKHR = (PFN_vkImportSemaphoreFdKHR) getDeviceProcAddress(device, "vkImportSemaphoreFdKHR"); + bindings->vkGetSemaphoreFdKHR = (PFN_vkGetSemaphoreFdKHR) getDeviceProcAddress(device, "vkGetSemaphoreFdKHR"); + bindings->vkCmdPushDescriptorSetKHR = (PFN_vkCmdPushDescriptorSetKHR) getDeviceProcAddress(device, "vkCmdPushDescriptorSetKHR"); + bindings->vkCmdPushDescriptorSetWithTemplateKHR = (PFN_vkCmdPushDescriptorSetWithTemplateKHR) getDeviceProcAddress(device, "vkCmdPushDescriptorSetWithTemplateKHR"); + bindings->vkCreateDescriptorUpdateTemplateKHR = (PFN_vkCreateDescriptorUpdateTemplateKHR) getDeviceProcAddress(device, "vkCreateDescriptorUpdateTemplateKHR"); + bindings->vkDestroyDescriptorUpdateTemplateKHR = (PFN_vkDestroyDescriptorUpdateTemplateKHR) getDeviceProcAddress(device, 
"vkDestroyDescriptorUpdateTemplateKHR"); + bindings->vkUpdateDescriptorSetWithTemplateKHR = (PFN_vkUpdateDescriptorSetWithTemplateKHR) getDeviceProcAddress(device, "vkUpdateDescriptorSetWithTemplateKHR"); + bindings->vkGetSwapchainStatusKHR = (PFN_vkGetSwapchainStatusKHR) getDeviceProcAddress(device, "vkGetSwapchainStatusKHR"); +#ifdef VK_USE_PLATFORM_WIN32_KHR + bindings->vkImportFenceWin32HandleKHR = (PFN_vkImportFenceWin32HandleKHR) getDeviceProcAddress(device, "vkImportFenceWin32HandleKHR"); +#endif // VK_USE_PLATFORM_WIN32_KHR +#ifdef VK_USE_PLATFORM_WIN32_KHR + bindings->vkGetFenceWin32HandleKHR = (PFN_vkGetFenceWin32HandleKHR) getDeviceProcAddress(device, "vkGetFenceWin32HandleKHR"); +#endif // VK_USE_PLATFORM_WIN32_KHR + bindings->vkImportFenceFdKHR = (PFN_vkImportFenceFdKHR) getDeviceProcAddress(device, "vkImportFenceFdKHR"); + bindings->vkGetFenceFdKHR = (PFN_vkGetFenceFdKHR) getDeviceProcAddress(device, "vkGetFenceFdKHR"); + bindings->vkGetImageMemoryRequirements2KHR = (PFN_vkGetImageMemoryRequirements2KHR) getDeviceProcAddress(device, "vkGetImageMemoryRequirements2KHR"); + bindings->vkGetBufferMemoryRequirements2KHR = (PFN_vkGetBufferMemoryRequirements2KHR) getDeviceProcAddress(device, "vkGetBufferMemoryRequirements2KHR"); + bindings->vkGetImageSparseMemoryRequirements2KHR = (PFN_vkGetImageSparseMemoryRequirements2KHR) getDeviceProcAddress(device, "vkGetImageSparseMemoryRequirements2KHR"); + bindings->vkCreateSamplerYcbcrConversionKHR = (PFN_vkCreateSamplerYcbcrConversionKHR) getDeviceProcAddress(device, "vkCreateSamplerYcbcrConversionKHR"); + bindings->vkDestroySamplerYcbcrConversionKHR = (PFN_vkDestroySamplerYcbcrConversionKHR) getDeviceProcAddress(device, "vkDestroySamplerYcbcrConversionKHR"); + bindings->vkBindBufferMemory2KHR = (PFN_vkBindBufferMemory2KHR) getDeviceProcAddress(device, "vkBindBufferMemory2KHR"); + bindings->vkBindImageMemory2KHR = (PFN_vkBindImageMemory2KHR) getDeviceProcAddress(device, "vkBindImageMemory2KHR"); + bindings->vkGetDescriptorSetLayoutSupportKHR = (PFN_vkGetDescriptorSetLayoutSupportKHR) getDeviceProcAddress(device, "vkGetDescriptorSetLayoutSupportKHR"); + bindings->vkDebugMarkerSetObjectTagEXT = (PFN_vkDebugMarkerSetObjectTagEXT) getDeviceProcAddress(device, "vkDebugMarkerSetObjectTagEXT"); + bindings->vkDebugMarkerSetObjectNameEXT = (PFN_vkDebugMarkerSetObjectNameEXT) getDeviceProcAddress(device, "vkDebugMarkerSetObjectNameEXT"); + bindings->vkCmdDebugMarkerBeginEXT = (PFN_vkCmdDebugMarkerBeginEXT) getDeviceProcAddress(device, "vkCmdDebugMarkerBeginEXT"); + bindings->vkCmdDebugMarkerEndEXT = (PFN_vkCmdDebugMarkerEndEXT) getDeviceProcAddress(device, "vkCmdDebugMarkerEndEXT"); + bindings->vkCmdDebugMarkerInsertEXT = (PFN_vkCmdDebugMarkerInsertEXT) getDeviceProcAddress(device, "vkCmdDebugMarkerInsertEXT"); + bindings->vkCmdDrawIndirectCountAMD = (PFN_vkCmdDrawIndirectCountAMD) getDeviceProcAddress(device, "vkCmdDrawIndirectCountAMD"); + bindings->vkCmdDrawIndexedIndirectCountAMD = (PFN_vkCmdDrawIndexedIndirectCountAMD) getDeviceProcAddress(device, "vkCmdDrawIndexedIndirectCountAMD"); + bindings->vkGetShaderInfoAMD = (PFN_vkGetShaderInfoAMD) getDeviceProcAddress(device, "vkGetShaderInfoAMD"); +#ifdef VK_USE_PLATFORM_WIN32_KHR + bindings->vkGetMemoryWin32HandleNV = (PFN_vkGetMemoryWin32HandleNV) getDeviceProcAddress(device, "vkGetMemoryWin32HandleNV"); +#endif // VK_USE_PLATFORM_WIN32_KHR + bindings->vkCmdProcessCommandsNVX = (PFN_vkCmdProcessCommandsNVX) getDeviceProcAddress(device, "vkCmdProcessCommandsNVX"); + 
bindings->vkCmdReserveSpaceForCommandsNVX = (PFN_vkCmdReserveSpaceForCommandsNVX) getDeviceProcAddress(device, "vkCmdReserveSpaceForCommandsNVX"); + bindings->vkCreateIndirectCommandsLayoutNVX = (PFN_vkCreateIndirectCommandsLayoutNVX) getDeviceProcAddress(device, "vkCreateIndirectCommandsLayoutNVX"); + bindings->vkDestroyIndirectCommandsLayoutNVX = (PFN_vkDestroyIndirectCommandsLayoutNVX) getDeviceProcAddress(device, "vkDestroyIndirectCommandsLayoutNVX"); + bindings->vkCreateObjectTableNVX = (PFN_vkCreateObjectTableNVX) getDeviceProcAddress(device, "vkCreateObjectTableNVX"); + bindings->vkDestroyObjectTableNVX = (PFN_vkDestroyObjectTableNVX) getDeviceProcAddress(device, "vkDestroyObjectTableNVX"); + bindings->vkRegisterObjectsNVX = (PFN_vkRegisterObjectsNVX) getDeviceProcAddress(device, "vkRegisterObjectsNVX"); + bindings->vkUnregisterObjectsNVX = (PFN_vkUnregisterObjectsNVX) getDeviceProcAddress(device, "vkUnregisterObjectsNVX"); + bindings->vkCmdSetViewportWScalingNV = (PFN_vkCmdSetViewportWScalingNV) getDeviceProcAddress(device, "vkCmdSetViewportWScalingNV"); + bindings->vkDisplayPowerControlEXT = (PFN_vkDisplayPowerControlEXT) getDeviceProcAddress(device, "vkDisplayPowerControlEXT"); + bindings->vkRegisterDeviceEventEXT = (PFN_vkRegisterDeviceEventEXT) getDeviceProcAddress(device, "vkRegisterDeviceEventEXT"); + bindings->vkRegisterDisplayEventEXT = (PFN_vkRegisterDisplayEventEXT) getDeviceProcAddress(device, "vkRegisterDisplayEventEXT"); + bindings->vkGetSwapchainCounterEXT = (PFN_vkGetSwapchainCounterEXT) getDeviceProcAddress(device, "vkGetSwapchainCounterEXT"); + bindings->vkGetRefreshCycleDurationGOOGLE = (PFN_vkGetRefreshCycleDurationGOOGLE) getDeviceProcAddress(device, "vkGetRefreshCycleDurationGOOGLE"); + bindings->vkGetPastPresentationTimingGOOGLE = (PFN_vkGetPastPresentationTimingGOOGLE) getDeviceProcAddress(device, "vkGetPastPresentationTimingGOOGLE"); + bindings->vkCmdSetDiscardRectangleEXT = (PFN_vkCmdSetDiscardRectangleEXT) getDeviceProcAddress(device, "vkCmdSetDiscardRectangleEXT"); + bindings->vkSetHdrMetadataEXT = (PFN_vkSetHdrMetadataEXT) getDeviceProcAddress(device, "vkSetHdrMetadataEXT"); +#ifdef VK_USE_PLATFORM_ANDROID_KHR + bindings->vkGetAndroidHardwareBufferPropertiesANDROID = (PFN_vkGetAndroidHardwareBufferPropertiesANDROID) getDeviceProcAddress(device, "vkGetAndroidHardwareBufferPropertiesANDROID"); +#endif // VK_USE_PLATFORM_ANDROID_KHR +#ifdef VK_USE_PLATFORM_ANDROID_KHR + bindings->vkGetMemoryAndroidHardwareBufferANDROID = (PFN_vkGetMemoryAndroidHardwareBufferANDROID) getDeviceProcAddress(device, "vkGetMemoryAndroidHardwareBufferANDROID"); +#endif // VK_USE_PLATFORM_ANDROID_KHR + bindings->vkCmdSetSampleLocationsEXT = (PFN_vkCmdSetSampleLocationsEXT) getDeviceProcAddress(device, "vkCmdSetSampleLocationsEXT"); + bindings->vkCreateValidationCacheEXT = (PFN_vkCreateValidationCacheEXT) getDeviceProcAddress(device, "vkCreateValidationCacheEXT"); + bindings->vkDestroyValidationCacheEXT = (PFN_vkDestroyValidationCacheEXT) getDeviceProcAddress(device, "vkDestroyValidationCacheEXT"); + bindings->vkMergeValidationCachesEXT = (PFN_vkMergeValidationCachesEXT) getDeviceProcAddress(device, "vkMergeValidationCachesEXT"); + bindings->vkGetValidationCacheDataEXT = (PFN_vkGetValidationCacheDataEXT) getDeviceProcAddress(device, "vkGetValidationCacheDataEXT"); + bindings->vkGetMemoryHostPointerPropertiesEXT = (PFN_vkGetMemoryHostPointerPropertiesEXT) getDeviceProcAddress(device, "vkGetMemoryHostPointerPropertiesEXT"); + bindings->vkCmdWriteBufferMarkerAMD = 
(PFN_vkCmdWriteBufferMarkerAMD) getDeviceProcAddress(device, "vkCmdWriteBufferMarkerAMD"); +} + + +static inline void initVkInstanceBindings(VkInstance instance, VkInstanceBindings *bindings, PFN_vkGetInstanceProcAddr getinstanceProcAddress) { + memset(bindings, 0, sizeof(*bindings)); + // Instance function pointers + bindings->vkDestroyInstance = (PFN_vkDestroyInstance) getinstanceProcAddress(instance, "vkDestroyInstance"); + bindings->vkEnumeratePhysicalDevices = (PFN_vkEnumeratePhysicalDevices) getinstanceProcAddress(instance, "vkEnumeratePhysicalDevices"); + bindings->vkGetPhysicalDeviceFeatures = (PFN_vkGetPhysicalDeviceFeatures) getinstanceProcAddress(instance, "vkGetPhysicalDeviceFeatures"); + bindings->vkGetPhysicalDeviceFormatProperties = (PFN_vkGetPhysicalDeviceFormatProperties) getinstanceProcAddress(instance, "vkGetPhysicalDeviceFormatProperties"); + bindings->vkGetPhysicalDeviceImageFormatProperties = (PFN_vkGetPhysicalDeviceImageFormatProperties) getinstanceProcAddress(instance, "vkGetPhysicalDeviceImageFormatProperties"); + bindings->vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties) getinstanceProcAddress(instance, "vkGetPhysicalDeviceProperties"); + bindings->vkGetPhysicalDeviceQueueFamilyProperties = (PFN_vkGetPhysicalDeviceQueueFamilyProperties) getinstanceProcAddress(instance, "vkGetPhysicalDeviceQueueFamilyProperties"); + bindings->vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties) getinstanceProcAddress(instance, "vkGetPhysicalDeviceMemoryProperties"); + bindings->vkGetInstanceProcAddr = getinstanceProcAddress; + bindings->vkGetDeviceProcAddr = (PFN_vkGetDeviceProcAddr) getinstanceProcAddress(instance, "vkGetDeviceProcAddr"); + bindings->vkCreateDevice = (PFN_vkCreateDevice) getinstanceProcAddress(instance, "vkCreateDevice"); + bindings->vkEnumerateDeviceExtensionProperties = (PFN_vkEnumerateDeviceExtensionProperties) getinstanceProcAddress(instance, "vkEnumerateDeviceExtensionProperties"); + bindings->vkEnumerateDeviceLayerProperties = (PFN_vkEnumerateDeviceLayerProperties) getinstanceProcAddress(instance, "vkEnumerateDeviceLayerProperties"); + bindings->vkGetPhysicalDeviceSparseImageFormatProperties = (PFN_vkGetPhysicalDeviceSparseImageFormatProperties) getinstanceProcAddress(instance, "vkGetPhysicalDeviceSparseImageFormatProperties"); + bindings->vkEnumeratePhysicalDeviceGroups = (PFN_vkEnumeratePhysicalDeviceGroups) getinstanceProcAddress(instance, "vkEnumeratePhysicalDeviceGroups"); + bindings->vkGetPhysicalDeviceFeatures2 = (PFN_vkGetPhysicalDeviceFeatures2) getinstanceProcAddress(instance, "vkGetPhysicalDeviceFeatures2"); + bindings->vkGetPhysicalDeviceProperties2 = (PFN_vkGetPhysicalDeviceProperties2) getinstanceProcAddress(instance, "vkGetPhysicalDeviceProperties2"); + bindings->vkGetPhysicalDeviceFormatProperties2 = (PFN_vkGetPhysicalDeviceFormatProperties2) getinstanceProcAddress(instance, "vkGetPhysicalDeviceFormatProperties2"); + bindings->vkGetPhysicalDeviceImageFormatProperties2 = (PFN_vkGetPhysicalDeviceImageFormatProperties2) getinstanceProcAddress(instance, "vkGetPhysicalDeviceImageFormatProperties2"); + bindings->vkGetPhysicalDeviceQueueFamilyProperties2 = (PFN_vkGetPhysicalDeviceQueueFamilyProperties2) getinstanceProcAddress(instance, "vkGetPhysicalDeviceQueueFamilyProperties2"); + bindings->vkGetPhysicalDeviceMemoryProperties2 = (PFN_vkGetPhysicalDeviceMemoryProperties2) getinstanceProcAddress(instance, "vkGetPhysicalDeviceMemoryProperties2"); + bindings->vkGetPhysicalDeviceSparseImageFormatProperties2 
= (PFN_vkGetPhysicalDeviceSparseImageFormatProperties2) getinstanceProcAddress(instance, "vkGetPhysicalDeviceSparseImageFormatProperties2"); + bindings->vkGetPhysicalDeviceExternalBufferProperties = (PFN_vkGetPhysicalDeviceExternalBufferProperties) getinstanceProcAddress(instance, "vkGetPhysicalDeviceExternalBufferProperties"); + bindings->vkGetPhysicalDeviceExternalFenceProperties = (PFN_vkGetPhysicalDeviceExternalFenceProperties) getinstanceProcAddress(instance, "vkGetPhysicalDeviceExternalFenceProperties"); + bindings->vkGetPhysicalDeviceExternalSemaphoreProperties = (PFN_vkGetPhysicalDeviceExternalSemaphoreProperties) getinstanceProcAddress(instance, "vkGetPhysicalDeviceExternalSemaphoreProperties"); + bindings->vkDestroySurfaceKHR = (PFN_vkDestroySurfaceKHR) getinstanceProcAddress(instance, "vkDestroySurfaceKHR"); + bindings->vkGetPhysicalDeviceSurfaceSupportKHR = (PFN_vkGetPhysicalDeviceSurfaceSupportKHR) getinstanceProcAddress(instance, "vkGetPhysicalDeviceSurfaceSupportKHR"); + bindings->vkGetPhysicalDeviceSurfaceCapabilitiesKHR = (PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR) getinstanceProcAddress(instance, "vkGetPhysicalDeviceSurfaceCapabilitiesKHR"); + bindings->vkGetPhysicalDeviceSurfaceFormatsKHR = (PFN_vkGetPhysicalDeviceSurfaceFormatsKHR) getinstanceProcAddress(instance, "vkGetPhysicalDeviceSurfaceFormatsKHR"); + bindings->vkGetPhysicalDeviceSurfacePresentModesKHR = (PFN_vkGetPhysicalDeviceSurfacePresentModesKHR) getinstanceProcAddress(instance, "vkGetPhysicalDeviceSurfacePresentModesKHR"); + bindings->vkGetPhysicalDevicePresentRectanglesKHR = (PFN_vkGetPhysicalDevicePresentRectanglesKHR) getinstanceProcAddress(instance, "vkGetPhysicalDevicePresentRectanglesKHR"); + bindings->vkGetPhysicalDeviceDisplayPropertiesKHR = (PFN_vkGetPhysicalDeviceDisplayPropertiesKHR) getinstanceProcAddress(instance, "vkGetPhysicalDeviceDisplayPropertiesKHR"); + bindings->vkGetPhysicalDeviceDisplayPlanePropertiesKHR = (PFN_vkGetPhysicalDeviceDisplayPlanePropertiesKHR) getinstanceProcAddress(instance, "vkGetPhysicalDeviceDisplayPlanePropertiesKHR"); + bindings->vkGetDisplayPlaneSupportedDisplaysKHR = (PFN_vkGetDisplayPlaneSupportedDisplaysKHR) getinstanceProcAddress(instance, "vkGetDisplayPlaneSupportedDisplaysKHR"); + bindings->vkGetDisplayModePropertiesKHR = (PFN_vkGetDisplayModePropertiesKHR) getinstanceProcAddress(instance, "vkGetDisplayModePropertiesKHR"); + bindings->vkCreateDisplayModeKHR = (PFN_vkCreateDisplayModeKHR) getinstanceProcAddress(instance, "vkCreateDisplayModeKHR"); + bindings->vkGetDisplayPlaneCapabilitiesKHR = (PFN_vkGetDisplayPlaneCapabilitiesKHR) getinstanceProcAddress(instance, "vkGetDisplayPlaneCapabilitiesKHR"); + bindings->vkCreateDisplayPlaneSurfaceKHR = (PFN_vkCreateDisplayPlaneSurfaceKHR) getinstanceProcAddress(instance, "vkCreateDisplayPlaneSurfaceKHR"); +#ifdef VK_USE_PLATFORM_XLIB_KHR + bindings->vkCreateXlibSurfaceKHR = (PFN_vkCreateXlibSurfaceKHR) getinstanceProcAddress(instance, "vkCreateXlibSurfaceKHR"); +#endif // VK_USE_PLATFORM_XLIB_KHR +#ifdef VK_USE_PLATFORM_XLIB_KHR + bindings->vkGetPhysicalDeviceXlibPresentationSupportKHR = (PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR) getinstanceProcAddress(instance, "vkGetPhysicalDeviceXlibPresentationSupportKHR"); +#endif // VK_USE_PLATFORM_XLIB_KHR +#ifdef VK_USE_PLATFORM_XCB_KHR + bindings->vkCreateXcbSurfaceKHR = (PFN_vkCreateXcbSurfaceKHR) getinstanceProcAddress(instance, "vkCreateXcbSurfaceKHR"); +#endif // VK_USE_PLATFORM_XCB_KHR +#ifdef VK_USE_PLATFORM_XCB_KHR + 
bindings->vkGetPhysicalDeviceXcbPresentationSupportKHR = (PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR) getinstanceProcAddress(instance, "vkGetPhysicalDeviceXcbPresentationSupportKHR"); +#endif // VK_USE_PLATFORM_XCB_KHR +#ifdef VK_USE_PLATFORM_WAYLAND_KHR + bindings->vkCreateWaylandSurfaceKHR = (PFN_vkCreateWaylandSurfaceKHR) getinstanceProcAddress(instance, "vkCreateWaylandSurfaceKHR"); +#endif // VK_USE_PLATFORM_WAYLAND_KHR +#ifdef VK_USE_PLATFORM_WAYLAND_KHR + bindings->vkGetPhysicalDeviceWaylandPresentationSupportKHR = (PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR) getinstanceProcAddress(instance, "vkGetPhysicalDeviceWaylandPresentationSupportKHR"); +#endif // VK_USE_PLATFORM_WAYLAND_KHR +#ifdef VK_USE_PLATFORM_MIR_KHR + bindings->vkCreateMirSurfaceKHR = (PFN_vkCreateMirSurfaceKHR) getinstanceProcAddress(instance, "vkCreateMirSurfaceKHR"); +#endif // VK_USE_PLATFORM_MIR_KHR +#ifdef VK_USE_PLATFORM_MIR_KHR + bindings->vkGetPhysicalDeviceMirPresentationSupportKHR = (PFN_vkGetPhysicalDeviceMirPresentationSupportKHR) getinstanceProcAddress(instance, "vkGetPhysicalDeviceMirPresentationSupportKHR"); +#endif // VK_USE_PLATFORM_MIR_KHR +#ifdef VK_USE_PLATFORM_ANDROID_KHR + bindings->vkCreateAndroidSurfaceKHR = (PFN_vkCreateAndroidSurfaceKHR) getinstanceProcAddress(instance, "vkCreateAndroidSurfaceKHR"); +#endif // VK_USE_PLATFORM_ANDROID_KHR +#ifdef VK_USE_PLATFORM_WIN32_KHR + bindings->vkCreateWin32SurfaceKHR = (PFN_vkCreateWin32SurfaceKHR) getinstanceProcAddress(instance, "vkCreateWin32SurfaceKHR"); +#endif // VK_USE_PLATFORM_WIN32_KHR +#ifdef VK_USE_PLATFORM_WIN32_KHR + bindings->vkGetPhysicalDeviceWin32PresentationSupportKHR = (PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR) getinstanceProcAddress(instance, "vkGetPhysicalDeviceWin32PresentationSupportKHR"); +#endif // VK_USE_PLATFORM_WIN32_KHR + bindings->vkGetPhysicalDeviceFeatures2KHR = (PFN_vkGetPhysicalDeviceFeatures2KHR) getinstanceProcAddress(instance, "vkGetPhysicalDeviceFeatures2KHR"); + bindings->vkGetPhysicalDeviceProperties2KHR = (PFN_vkGetPhysicalDeviceProperties2KHR) getinstanceProcAddress(instance, "vkGetPhysicalDeviceProperties2KHR"); + bindings->vkGetPhysicalDeviceFormatProperties2KHR = (PFN_vkGetPhysicalDeviceFormatProperties2KHR) getinstanceProcAddress(instance, "vkGetPhysicalDeviceFormatProperties2KHR"); + bindings->vkGetPhysicalDeviceImageFormatProperties2KHR = (PFN_vkGetPhysicalDeviceImageFormatProperties2KHR) getinstanceProcAddress(instance, "vkGetPhysicalDeviceImageFormatProperties2KHR"); + bindings->vkGetPhysicalDeviceQueueFamilyProperties2KHR = (PFN_vkGetPhysicalDeviceQueueFamilyProperties2KHR) getinstanceProcAddress(instance, "vkGetPhysicalDeviceQueueFamilyProperties2KHR"); + bindings->vkGetPhysicalDeviceMemoryProperties2KHR = (PFN_vkGetPhysicalDeviceMemoryProperties2KHR) getinstanceProcAddress(instance, "vkGetPhysicalDeviceMemoryProperties2KHR"); + bindings->vkGetPhysicalDeviceSparseImageFormatProperties2KHR = (PFN_vkGetPhysicalDeviceSparseImageFormatProperties2KHR) getinstanceProcAddress(instance, "vkGetPhysicalDeviceSparseImageFormatProperties2KHR"); + bindings->vkEnumeratePhysicalDeviceGroupsKHR = (PFN_vkEnumeratePhysicalDeviceGroupsKHR) getinstanceProcAddress(instance, "vkEnumeratePhysicalDeviceGroupsKHR"); + bindings->vkGetPhysicalDeviceExternalBufferPropertiesKHR = (PFN_vkGetPhysicalDeviceExternalBufferPropertiesKHR) getinstanceProcAddress(instance, "vkGetPhysicalDeviceExternalBufferPropertiesKHR"); + bindings->vkGetPhysicalDeviceExternalSemaphorePropertiesKHR = 
(PFN_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR) getinstanceProcAddress(instance, "vkGetPhysicalDeviceExternalSemaphorePropertiesKHR"); + bindings->vkGetPhysicalDeviceExternalFencePropertiesKHR = (PFN_vkGetPhysicalDeviceExternalFencePropertiesKHR) getinstanceProcAddress(instance, "vkGetPhysicalDeviceExternalFencePropertiesKHR"); + bindings->vkGetPhysicalDeviceSurfaceCapabilities2KHR = (PFN_vkGetPhysicalDeviceSurfaceCapabilities2KHR) getinstanceProcAddress(instance, "vkGetPhysicalDeviceSurfaceCapabilities2KHR"); + bindings->vkGetPhysicalDeviceSurfaceFormats2KHR = (PFN_vkGetPhysicalDeviceSurfaceFormats2KHR) getinstanceProcAddress(instance, "vkGetPhysicalDeviceSurfaceFormats2KHR"); + bindings->vkCreateDebugReportCallbackEXT = (PFN_vkCreateDebugReportCallbackEXT) getinstanceProcAddress(instance, "vkCreateDebugReportCallbackEXT"); + bindings->vkDestroyDebugReportCallbackEXT = (PFN_vkDestroyDebugReportCallbackEXT) getinstanceProcAddress(instance, "vkDestroyDebugReportCallbackEXT"); + bindings->vkDebugReportMessageEXT = (PFN_vkDebugReportMessageEXT) getinstanceProcAddress(instance, "vkDebugReportMessageEXT"); + bindings->vkGetPhysicalDeviceExternalImageFormatPropertiesNV = (PFN_vkGetPhysicalDeviceExternalImageFormatPropertiesNV) getinstanceProcAddress(instance, "vkGetPhysicalDeviceExternalImageFormatPropertiesNV"); +#ifdef VK_USE_PLATFORM_VI_NN + bindings->vkCreateViSurfaceNN = (PFN_vkCreateViSurfaceNN) getinstanceProcAddress(instance, "vkCreateViSurfaceNN"); +#endif // VK_USE_PLATFORM_VI_NN + bindings->vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX = (PFN_vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX) getinstanceProcAddress(instance, "vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX"); + bindings->vkReleaseDisplayEXT = (PFN_vkReleaseDisplayEXT) getinstanceProcAddress(instance, "vkReleaseDisplayEXT"); +#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT + bindings->vkAcquireXlibDisplayEXT = (PFN_vkAcquireXlibDisplayEXT) getinstanceProcAddress(instance, "vkAcquireXlibDisplayEXT"); +#endif // VK_USE_PLATFORM_XLIB_XRANDR_EXT +#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT + bindings->vkGetRandROutputDisplayEXT = (PFN_vkGetRandROutputDisplayEXT) getinstanceProcAddress(instance, "vkGetRandROutputDisplayEXT"); +#endif // VK_USE_PLATFORM_XLIB_XRANDR_EXT + bindings->vkGetPhysicalDeviceSurfaceCapabilities2EXT = (PFN_vkGetPhysicalDeviceSurfaceCapabilities2EXT) getinstanceProcAddress(instance, "vkGetPhysicalDeviceSurfaceCapabilities2EXT"); +#ifdef VK_USE_PLATFORM_IOS_MVK + bindings->vkCreateIOSSurfaceMVK = (PFN_vkCreateIOSSurfaceMVK) getinstanceProcAddress(instance, "vkCreateIOSSurfaceMVK"); +#endif // VK_USE_PLATFORM_IOS_MVK +#ifdef VK_USE_PLATFORM_MACOS_MVK + bindings->vkCreateMacOSSurfaceMVK = (PFN_vkCreateMacOSSurfaceMVK) getinstanceProcAddress(instance, "vkCreateMacOSSurfaceMVK"); +#endif // VK_USE_PLATFORM_MACOS_MVK + bindings->vkSetDebugUtilsObjectNameEXT = (PFN_vkSetDebugUtilsObjectNameEXT) getinstanceProcAddress(instance, "vkSetDebugUtilsObjectNameEXT"); + bindings->vkSetDebugUtilsObjectTagEXT = (PFN_vkSetDebugUtilsObjectTagEXT) getinstanceProcAddress(instance, "vkSetDebugUtilsObjectTagEXT"); + bindings->vkQueueBeginDebugUtilsLabelEXT = (PFN_vkQueueBeginDebugUtilsLabelEXT) getinstanceProcAddress(instance, "vkQueueBeginDebugUtilsLabelEXT"); + bindings->vkQueueEndDebugUtilsLabelEXT = (PFN_vkQueueEndDebugUtilsLabelEXT) getinstanceProcAddress(instance, "vkQueueEndDebugUtilsLabelEXT"); + bindings->vkQueueInsertDebugUtilsLabelEXT = (PFN_vkQueueInsertDebugUtilsLabelEXT) getinstanceProcAddress(instance, 
"vkQueueInsertDebugUtilsLabelEXT"); + bindings->vkCmdBeginDebugUtilsLabelEXT = (PFN_vkCmdBeginDebugUtilsLabelEXT) getinstanceProcAddress(instance, "vkCmdBeginDebugUtilsLabelEXT"); + bindings->vkCmdEndDebugUtilsLabelEXT = (PFN_vkCmdEndDebugUtilsLabelEXT) getinstanceProcAddress(instance, "vkCmdEndDebugUtilsLabelEXT"); + bindings->vkCmdInsertDebugUtilsLabelEXT = (PFN_vkCmdInsertDebugUtilsLabelEXT) getinstanceProcAddress(instance, "vkCmdInsertDebugUtilsLabelEXT"); + bindings->vkCreateDebugUtilsMessengerEXT = (PFN_vkCreateDebugUtilsMessengerEXT) getinstanceProcAddress(instance, "vkCreateDebugUtilsMessengerEXT"); + bindings->vkDestroyDebugUtilsMessengerEXT = (PFN_vkDestroyDebugUtilsMessengerEXT) getinstanceProcAddress(instance, "vkDestroyDebugUtilsMessengerEXT"); + bindings->vkSubmitDebugUtilsMessageEXT = (PFN_vkSubmitDebugUtilsMessageEXT) getinstanceProcAddress(instance, "vkSubmitDebugUtilsMessageEXT"); + bindings->vkGetPhysicalDeviceMultisamplePropertiesEXT = (PFN_vkGetPhysicalDeviceMultisamplePropertiesEXT) getinstanceProcAddress(instance, "vkGetPhysicalDeviceMultisamplePropertiesEXT"); +} diff --git a/sources/pvrsdk/Include/vulkan/vk_icd.h b/sources/pvrsdk/Include/vulkan/vk_icd.h new file mode 100755 index 00000000..35956a34 --- /dev/null +++ b/sources/pvrsdk/Include/vulkan/vk_icd.h @@ -0,0 +1,170 @@ +// +// File: vk_icd.h +// +/* + * Copyright (c) 2015-2016 The Khronos Group Inc. + * Copyright (c) 2015-2016 Valve Corporation + * Copyright (c) 2015-2016 LunarG, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef VKICD_H +#define VKICD_H + +#include "vulkan.h" +#include + +// Loader-ICD version negotiation API. Versions add the following features: +// Version 0 - Initial. Doesn't support vk_icdGetInstanceProcAddr +// or vk_icdNegotiateLoaderICDInterfaceVersion. +// Version 1 - Add support for vk_icdGetInstanceProcAddr. +// Version 2 - Add Loader/ICD Interface version negotiation +// via vk_icdNegotiateLoaderICDInterfaceVersion. +// Version 3 - Add ICD creation/destruction of KHR_surface objects. +// Version 4 - Add unknown physical device extension qyering via +// vk_icdGetPhysicalDeviceProcAddr. +// Version 5 - Tells ICDs that the loader is now paying attention to the +// application version of Vulkan passed into the ApplicationInfo +// structure during vkCreateInstance. This will tell the ICD +// that if the loader is older, it should automatically fail a +// call for any API version > 1.0. Otherwise, the loader will +// manually determine if it can support the expected version. +#define CURRENT_LOADER_ICD_INTERFACE_VERSION 5 +#define MIN_SUPPORTED_LOADER_ICD_INTERFACE_VERSION 0 +#define MIN_PHYS_DEV_EXTENSION_ICD_INTERFACE_VERSION 4 +typedef VkResult(VKAPI_PTR *PFN_vkNegotiateLoaderICDInterfaceVersion)(uint32_t *pVersion); + +// This is defined in vk_layer.h which will be found by the loader, but if an ICD is building against this +// file directly, it won't be found. 
+ +// This is defined in vk_layer.h which will be found by the loader, but if an ICD is building against this +// file directly, it won't be found. +#ifndef PFN_GetPhysicalDeviceProcAddr +typedef PFN_vkVoidFunction(VKAPI_PTR *PFN_GetPhysicalDeviceProcAddr)(VkInstance instance, const char *pName); +#endif + +/* + * The ICD must reserve space for a pointer for the loader's dispatch + * table, at the start of <each object>. + * The ICD must initialize this variable using the SET_LOADER_MAGIC_VALUE macro. + */ + +#define ICD_LOADER_MAGIC 0x01CDC0DE + +typedef union { + uintptr_t loaderMagic; + void *loaderData; +} VK_LOADER_DATA; + +static inline void set_loader_magic_value(void *pNewObject) { + VK_LOADER_DATA *loader_info = (VK_LOADER_DATA *)pNewObject; + loader_info->loaderMagic = ICD_LOADER_MAGIC; +} + +static inline bool valid_loader_magic_value(void *pNewObject) { + const VK_LOADER_DATA *loader_info = (VK_LOADER_DATA *)pNewObject; + return (loader_info->loaderMagic & 0xffffffff) == ICD_LOADER_MAGIC; +}
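+
+// Illustrative sketch (not part of the upstream header): an ICD places
+// VK_LOADER_DATA at offset zero of each dispatchable object it creates and
+// stamps the magic value before handing the object to the loader, e.g.
+// (icd_device and icd_alloc_device are hypothetical ICD-side names):
+//
+//     struct icd_device {
+//         VK_LOADER_DATA loader_data;  // must be the first member
+//         /* ...driver state... */
+//     };
+//
+//     struct icd_device *dev = icd_alloc_device();
+//     set_loader_magic_value(dev);  // loader verifies via valid_loader_magic_value()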
+ +/* + * Windows and Linux ICDs will treat VkSurfaceKHR as a pointer to a struct that + * contains the platform-specific connection and surface information. + */ +typedef enum { + VK_ICD_WSI_PLATFORM_MIR, + VK_ICD_WSI_PLATFORM_WAYLAND, + VK_ICD_WSI_PLATFORM_WIN32, + VK_ICD_WSI_PLATFORM_XCB, + VK_ICD_WSI_PLATFORM_XLIB, + VK_ICD_WSI_PLATFORM_ANDROID, + VK_ICD_WSI_PLATFORM_MACOS, + VK_ICD_WSI_PLATFORM_IOS, + VK_ICD_WSI_PLATFORM_DISPLAY +} VkIcdWsiPlatform; + +typedef struct { + VkIcdWsiPlatform platform; +} VkIcdSurfaceBase; + +#ifdef VK_USE_PLATFORM_MIR_KHR +typedef struct { + VkIcdSurfaceBase base; + MirConnection *connection; + MirSurface *mirSurface; +} VkIcdSurfaceMir; +#endif // VK_USE_PLATFORM_MIR_KHR + +#ifdef VK_USE_PLATFORM_WAYLAND_KHR +typedef struct { + VkIcdSurfaceBase base; + struct wl_display *display; + struct wl_surface *surface; +} VkIcdSurfaceWayland; +#endif // VK_USE_PLATFORM_WAYLAND_KHR + +#ifdef VK_USE_PLATFORM_WIN32_KHR +typedef struct { + VkIcdSurfaceBase base; + HINSTANCE hinstance; + HWND hwnd; +} VkIcdSurfaceWin32; +#endif // VK_USE_PLATFORM_WIN32_KHR + +#ifdef VK_USE_PLATFORM_XCB_KHR +typedef struct { + VkIcdSurfaceBase base; + xcb_connection_t *connection; + xcb_window_t window; +} VkIcdSurfaceXcb; +#endif // VK_USE_PLATFORM_XCB_KHR + +#ifdef VK_USE_PLATFORM_XLIB_KHR +typedef struct { + VkIcdSurfaceBase base; + Display *dpy; + Window window; +} VkIcdSurfaceXlib; +#endif // VK_USE_PLATFORM_XLIB_KHR + +#ifdef VK_USE_PLATFORM_ANDROID_KHR +typedef struct { + VkIcdSurfaceBase base; + ANativeWindow *window; +} VkIcdSurfaceAndroid; +#endif // VK_USE_PLATFORM_ANDROID_KHR + +#ifdef VK_USE_PLATFORM_MACOS_MVK +typedef struct { + VkIcdSurfaceBase base; + const void *pView; +} VkIcdSurfaceMacOS; +#endif // VK_USE_PLATFORM_MACOS_MVK + +#ifdef VK_USE_PLATFORM_IOS_MVK +typedef struct { + VkIcdSurfaceBase base; + const void *pView; +} VkIcdSurfaceIOS; +#endif // VK_USE_PLATFORM_IOS_MVK + +typedef struct { + VkIcdSurfaceBase base; + VkDisplayModeKHR displayMode; + uint32_t planeIndex; + uint32_t planeStackIndex; + VkSurfaceTransformFlagBitsKHR transform; + float globalAlpha; + VkDisplayPlaneAlphaFlagBitsKHR alphaMode; + VkExtent2D imageExtent; +} VkIcdSurfaceDisplay; + +#endif // VKICD_H diff --git a/sources/pvrsdk/Include/vulkan/vk_layer.h b/sources/pvrsdk/Include/vulkan/vk_layer.h new file mode 100755 index 00000000..e8300c37 --- /dev/null +++ b/sources/pvrsdk/Include/vulkan/vk_layer.h @@ -0,0 +1,195 @@ +// +// File: vk_layer.h +// +/* + * Copyright (c) 2015-2017 The Khronos Group Inc. + * Copyright (c) 2015-2017 Valve Corporation + * Copyright (c) 2015-2017 LunarG, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +/* Need to define dispatch table + * Core struct can then have ptr to dispatch table at the top + * Along with object ptrs for current and next OBJ + */ +#pragma once + +#include "vulkan.h" +#if defined(__GNUC__) && __GNUC__ >= 4 +#define VK_LAYER_EXPORT __attribute__((visibility("default"))) +#elif defined(__SUNPRO_C) && (__SUNPRO_C >= 0x590) +#define VK_LAYER_EXPORT __attribute__((visibility("default"))) +#else +#define VK_LAYER_EXPORT +#endif + +// Definition for VkLayerDispatchTable and VkLayerInstanceDispatchTable now appear in externally generated header +#include "vk_layer_dispatch_table.h" + +#define MAX_NUM_UNKNOWN_EXTS 250 + + // Loader-Layer version negotiation API. Versions add the following features: + // Versions 0/1 - Initial. Doesn't support vk_layerGetPhysicalDeviceProcAddr + // or vk_icdNegotiateLoaderLayerInterfaceVersion. + // Version 2 - Add support for vk_layerGetPhysicalDeviceProcAddr and + // vk_icdNegotiateLoaderLayerInterfaceVersion. +#define CURRENT_LOADER_LAYER_INTERFACE_VERSION 2 +#define MIN_SUPPORTED_LOADER_LAYER_INTERFACE_VERSION 1 + +#define VK_CURRENT_CHAIN_VERSION 1 + +// Version negotiation values +typedef enum VkNegotiateLayerStructType { + LAYER_NEGOTIATE_UNINTIALIZED = 0, + LAYER_NEGOTIATE_INTERFACE_STRUCT = 1, +} VkNegotiateLayerStructType; + +// Version negotiation structures +typedef struct VkNegotiateLayerInterface { + VkNegotiateLayerStructType sType; + void *pNext; + uint32_t loaderLayerInterfaceVersion; + PFN_vkGetInstanceProcAddr pfnGetInstanceProcAddr; + PFN_vkGetDeviceProcAddr pfnGetDeviceProcAddr; + PFN_GetPhysicalDeviceProcAddr pfnGetPhysicalDeviceProcAddr; +} VkNegotiateLayerInterface; + +// Version negotiation functions +typedef VkResult (VKAPI_PTR *PFN_vkNegotiateLoaderLayerInterfaceVersion)(VkNegotiateLayerInterface *pVersionStruct); + +// Function prototype for unknown physical device extension command +typedef VkResult(VKAPI_PTR *PFN_PhysDevExt)(VkPhysicalDevice phys_device); + +// ------------------------------------------------------------------------------------------------ +// CreateInstance and CreateDevice support structures + +/* Sub type of structure for instance and device loader ext of CreateInfo. + * When sType == VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO + * or sType == VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO + * then VkLayerFunction indicates struct type pointed to by pNext + */ +typedef enum VkLayerFunction_ { + VK_LAYER_LINK_INFO = 0, + VK_LOADER_DATA_CALLBACK = 1 +} VkLayerFunction; + +typedef struct VkLayerInstanceLink_ { + struct VkLayerInstanceLink_ *pNext; + PFN_vkGetInstanceProcAddr pfnNextGetInstanceProcAddr; + PFN_GetPhysicalDeviceProcAddr pfnNextGetPhysicalDeviceProcAddr; +} VkLayerInstanceLink; + +/* + * When creating the device chain the loader needs to pass + * down information about its device structure needed at + * the end of the chain.
Passing the data via the + * VkLayerDeviceInfo avoids issues with finding the + * exact instance being used. + */ +typedef struct VkLayerDeviceInfo_ { + void *device_info; + PFN_vkGetInstanceProcAddr pfnNextGetInstanceProcAddr; +} VkLayerDeviceInfo; + +typedef VkResult (VKAPI_PTR *PFN_vkSetInstanceLoaderData)(VkInstance instance, + void *object); +typedef VkResult (VKAPI_PTR *PFN_vkSetDeviceLoaderData)(VkDevice device, + void *object); + +typedef struct { + VkStructureType sType; // VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO + const void *pNext; + VkLayerFunction function; + union { + VkLayerInstanceLink *pLayerInfo; + PFN_vkSetInstanceLoaderData pfnSetInstanceLoaderData; + } u; +} VkLayerInstanceCreateInfo; + +typedef struct VkLayerDeviceLink_ { + struct VkLayerDeviceLink_ *pNext; + PFN_vkGetInstanceProcAddr pfnNextGetInstanceProcAddr; + PFN_vkGetDeviceProcAddr pfnNextGetDeviceProcAddr; +} VkLayerDeviceLink; + +typedef struct { + VkStructureType sType; // VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO + const void *pNext; + VkLayerFunction function; + union { + VkLayerDeviceLink *pLayerInfo; + PFN_vkSetDeviceLoaderData pfnSetDeviceLoaderData; + } u; +} VkLayerDeviceCreateInfo; + +#ifdef __cplusplus +extern "C" { +#endif + +VKAPI_ATTR VkResult VKAPI_CALL vkNegotiateLoaderLayerInterfaceVersion(VkNegotiateLayerInterface *pVersionStruct); + +typedef enum VkChainType { + VK_CHAIN_TYPE_UNKNOWN = 0, + VK_CHAIN_TYPE_ENUMERATE_INSTANCE_EXTENSION_PROPERTIES = 1, + VK_CHAIN_TYPE_ENUMERATE_INSTANCE_LAYER_PROPERTIES = 2, + VK_CHAIN_TYPE_ENUMERATE_INSTANCE_VERSION = 3, +} VkChainType; + +typedef struct VkChainHeader { + VkChainType type; + uint32_t version; + uint32_t size; +} VkChainHeader; + +typedef struct VkEnumerateInstanceExtensionPropertiesChain { + VkChainHeader header; + VkResult(VKAPI_PTR *pfnNextLayer)(const struct VkEnumerateInstanceExtensionPropertiesChain *, const char *, uint32_t *, + VkExtensionProperties *); + const struct VkEnumerateInstanceExtensionPropertiesChain *pNextLink; + +#if defined(__cplusplus) + inline VkResult CallDown(const char *pLayerName, uint32_t *pPropertyCount, VkExtensionProperties *pProperties) const { + return pfnNextLayer(pNextLink, pLayerName, pPropertyCount, pProperties); + } +#endif +} VkEnumerateInstanceExtensionPropertiesChain; + +typedef struct VkEnumerateInstanceLayerPropertiesChain { + VkChainHeader header; + VkResult(VKAPI_PTR *pfnNextLayer)(const struct VkEnumerateInstanceLayerPropertiesChain *, uint32_t *, VkLayerProperties *); + const struct VkEnumerateInstanceLayerPropertiesChain *pNextLink; + +#if defined(__cplusplus) + inline VkResult CallDown(uint32_t *pPropertyCount, VkLayerProperties *pProperties) const { + return pfnNextLayer(pNextLink, pPropertyCount, pProperties); + } +#endif +} VkEnumerateInstanceLayerPropertiesChain; + +typedef struct VkEnumerateInstanceVersionChain { + VkChainHeader header; + VkResult(VKAPI_PTR *pfnNextLayer)(const struct VkEnumerateInstanceVersionChain *, uint32_t *); + const struct VkEnumerateInstanceVersionChain *pNextLink; + +#if defined(__cplusplus) + inline VkResult CallDown(uint32_t *pApiVersion) const { + return pfnNextLayer(pNextLink, pApiVersion); + } +#endif +} VkEnumerateInstanceVersionChain; + +#ifdef __cplusplus +} +#endif diff --git a/sources/pvrsdk/Include/vulkan/vk_platform.h b/sources/pvrsdk/Include/vulkan/vk_platform.h new file mode 100755 index 00000000..72892992 --- /dev/null +++ b/sources/pvrsdk/Include/vulkan/vk_platform.h @@ -0,0 +1,92 @@ +// +// File: vk_platform.h +// +/* +** Copyright (c) 
2014-2017 The Khronos Group Inc. +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + + +#ifndef VK_PLATFORM_H_ +#define VK_PLATFORM_H_ + +#ifdef __cplusplus +extern "C" +{ +#endif // __cplusplus + +/* +*************************************************************************************************** +* Platform-specific directives and type declarations +*************************************************************************************************** +*/ + +/* Platform-specific calling convention macros. + * + * Platforms should define these so that Vulkan clients call Vulkan commands + * with the same calling conventions that the Vulkan implementation expects. + * + * VKAPI_ATTR - Placed before the return type in function declarations. + * Useful for C++11 and GCC/Clang-style function attribute syntax. + * VKAPI_CALL - Placed after the return type in function declarations. + * Useful for MSVC-style calling convention syntax. + * VKAPI_PTR - Placed between the '(' and '*' in function pointer types. + * + * Function declaration: VKAPI_ATTR void VKAPI_CALL vkCommand(void); + * Function pointer type: typedef void (VKAPI_PTR *PFN_vkCommand)(void); + */ +#if defined(_WIN32) + // On Windows, Vulkan commands use the stdcall convention + #define VKAPI_ATTR + #define VKAPI_CALL __stdcall + #define VKAPI_PTR VKAPI_CALL +#elif defined(__ANDROID__) && defined(__ARM_ARCH) && __ARM_ARCH < 7 + #error "Vulkan isn't supported for the 'armeabi' NDK ABI" +#elif defined(__ANDROID__) && defined(__ARM_ARCH) && __ARM_ARCH >= 7 && defined(__ARM_32BIT_STATE) + // On Android 32-bit ARM targets, Vulkan functions use the "hardfloat" + // calling convention, i.e. float parameters are passed in registers. This + // is true even if the rest of the application passes floats on the stack, + // as it does by default when compiling for the armeabi-v7a NDK ABI. + #define VKAPI_ATTR __attribute__((pcs("aapcs-vfp"))) + #define VKAPI_CALL + #define VKAPI_PTR VKAPI_ATTR +#else + // On other platforms, use the default calling convention + #define VKAPI_ATTR + #define VKAPI_CALL + #define VKAPI_PTR +#endif + +#include <stddef.h> + +#if !defined(VK_NO_STDINT_H) + #if defined(_MSC_VER) && (_MSC_VER < 1600) + typedef signed __int8 int8_t; + typedef unsigned __int8 uint8_t; + typedef signed __int16 int16_t; + typedef unsigned __int16 uint16_t; + typedef signed __int32 int32_t; + typedef unsigned __int32 uint32_t; + typedef signed __int64 int64_t; + typedef unsigned __int64 uint64_t; + #else + #include <stdint.h> + #endif +#endif // !defined(VK_NO_STDINT_H) + +#ifdef __cplusplus +} // extern "C" +#endif // __cplusplus + +#endif diff --git a/sources/pvrsdk/Include/vulkan/vulkan.h b/sources/pvrsdk/Include/vulkan/vulkan.h new file mode 100755 index 00000000..d05c8490 --- /dev/null +++ b/sources/pvrsdk/Include/vulkan/vulkan.h @@ -0,0 +1,79 @@ +#ifndef VULKAN_H_ +#define VULKAN_H_ 1 + +/* +** Copyright (c) 2015-2018 The Khronos Group Inc. 
+** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +#include "vk_platform.h" +#include "vulkan_core.h" + +#ifdef VK_USE_PLATFORM_ANDROID_KHR +#include "vulkan_android.h" +#endif + + +#ifdef VK_USE_PLATFORM_IOS_MVK +#include "vulkan_ios.h" +#endif + + +#ifdef VK_USE_PLATFORM_MACOS_MVK +#include "vulkan_macos.h" +#endif + + +#ifdef VK_USE_PLATFORM_MIR_KHR +#include <mir_toolkit/client_types.h> +#include "vulkan_mir.h" +#endif + + +#ifdef VK_USE_PLATFORM_VI_NN +#include "vulkan_vi.h" +#endif + + +#ifdef VK_USE_PLATFORM_WAYLAND_KHR +#include <wayland-client.h> +#include "vulkan_wayland.h" +#endif + + +#ifdef VK_USE_PLATFORM_WIN32_KHR +#include <windows.h> +#include "vulkan_win32.h" +#endif + + +#ifdef VK_USE_PLATFORM_XCB_KHR +#include <xcb/xcb.h> +#include "vulkan_xcb.h" +#endif + +
#ifdef VK_USE_PLATFORM_XLIB_KHR +#include <X11/Xlib.h> +#include "vulkan_xlib.h" +#endif + + +#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT +#include <X11/Xlib.h> +#include <X11/extensions/Xrandr.h> +#include "vulkan_xlib_xrandr.h" +#endif + +#endif // VULKAN_H_ diff --git a/sources/pvrsdk/Include/vulkan/vulkan_android.h b/sources/pvrsdk/Include/vulkan/vulkan_android.h new file mode 100755 index 00000000..07aaeda2 --- /dev/null +++ b/sources/pvrsdk/Include/vulkan/vulkan_android.h @@ -0,0 +1,126 @@ +#ifndef VULKAN_ANDROID_H_ +#define VULKAN_ANDROID_H_ 1 + +#ifdef __cplusplus +extern "C" { +#endif + +/* +** Copyright (c) 2015-2018 The Khronos Group Inc. +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. 
+** +*/ + + +#define VK_KHR_android_surface 1 +struct ANativeWindow; + +#define VK_KHR_ANDROID_SURFACE_SPEC_VERSION 6 +#define VK_KHR_ANDROID_SURFACE_EXTENSION_NAME "VK_KHR_android_surface" + +typedef VkFlags VkAndroidSurfaceCreateFlagsKHR; + +typedef struct VkAndroidSurfaceCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkAndroidSurfaceCreateFlagsKHR flags; + struct ANativeWindow* window; +} VkAndroidSurfaceCreateInfoKHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkCreateAndroidSurfaceKHR)(VkInstance instance, const VkAndroidSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateAndroidSurfaceKHR( + VkInstance instance, + const VkAndroidSurfaceCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); +#endif + +#define VK_ANDROID_external_memory_android_hardware_buffer 1 +struct AHardwareBuffer; + +#define VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_SPEC_VERSION 3 +#define VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME "VK_ANDROID_external_memory_android_hardware_buffer" + +typedef struct VkAndroidHardwareBufferUsageANDROID { + VkStructureType sType; + void* pNext; + uint64_t androidHardwareBufferUsage; +} VkAndroidHardwareBufferUsageANDROID; + +typedef struct VkAndroidHardwareBufferPropertiesANDROID { + VkStructureType sType; + void* pNext; + VkDeviceSize allocationSize; + uint32_t memoryTypeBits; +} VkAndroidHardwareBufferPropertiesANDROID; + +typedef struct VkAndroidHardwareBufferFormatPropertiesANDROID { + VkStructureType sType; + void* pNext; + VkFormat format; + uint64_t externalFormat; + VkFormatFeatureFlags formatFeatures; + VkComponentMapping samplerYcbcrConversionComponents; + VkSamplerYcbcrModelConversion suggestedYcbcrModel; + VkSamplerYcbcrRange suggestedYcbcrRange; + VkChromaLocation suggestedXChromaOffset; + VkChromaLocation suggestedYChromaOffset; +} VkAndroidHardwareBufferFormatPropertiesANDROID; + +typedef struct VkImportAndroidHardwareBufferInfoANDROID { + VkStructureType sType; + const void* pNext; + struct AHardwareBuffer* buffer; +} VkImportAndroidHardwareBufferInfoANDROID; + +typedef struct VkMemoryGetAndroidHardwareBufferInfoANDROID { + VkStructureType sType; + const void* pNext; + VkDeviceMemory memory; +} VkMemoryGetAndroidHardwareBufferInfoANDROID; + +typedef struct VkExternalFormatANDROID { + VkStructureType sType; + void* pNext; + uint64_t externalFormat; +} VkExternalFormatANDROID; + + +typedef VkResult (VKAPI_PTR *PFN_vkGetAndroidHardwareBufferPropertiesANDROID)(VkDevice device, const struct AHardwareBuffer* buffer, VkAndroidHardwareBufferPropertiesANDROID* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryAndroidHardwareBufferANDROID)(VkDevice device, const VkMemoryGetAndroidHardwareBufferInfoANDROID* pInfo, struct AHardwareBuffer** pBuffer); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetAndroidHardwareBufferPropertiesANDROID( + VkDevice device, + const struct AHardwareBuffer* buffer, + VkAndroidHardwareBufferPropertiesANDROID* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryAndroidHardwareBufferANDROID( + VkDevice device, + const VkMemoryGetAndroidHardwareBufferInfoANDROID* pInfo, + struct AHardwareBuffer** pBuffer); +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/sources/pvrsdk/Include/vulkan/vulkan_core.h b/sources/pvrsdk/Include/vulkan/vulkan_core.h new file mode 100755 index 00000000..2cafcddf --- /dev/null 
+++ b/sources/pvrsdk/Include/vulkan/vulkan_core.h @@ -0,0 +1,7470 @@ +#ifndef VULKAN_CORE_H_ +#define VULKAN_CORE_H_ 1 + +#ifdef __cplusplus +extern "C" { +#endif + +/* +** Copyright (c) 2015-2018 The Khronos Group Inc. +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. +** +*/ + + +#define VK_VERSION_1_0 1 +#include "vk_platform.h" + +#define VK_MAKE_VERSION(major, minor, patch) \ + (((major) << 22) | ((minor) << 12) | (patch)) + +// DEPRECATED: This define has been removed. Specific version defines (e.g. VK_API_VERSION_1_0), or the VK_MAKE_VERSION macro, should be used instead. +//#define VK_API_VERSION VK_MAKE_VERSION(1, 0, 0) // Patch version should always be set to 0 + +// Vulkan 1.0 version number +#define VK_API_VERSION_1_0 VK_MAKE_VERSION(1, 0, 0)// Patch version should always be set to 0 + +#define VK_VERSION_MAJOR(version) ((uint32_t)(version) >> 22) +#define VK_VERSION_MINOR(version) (((uint32_t)(version) >> 12) & 0x3ff) +#define VK_VERSION_PATCH(version) ((uint32_t)(version) & 0xfff) +// Version of this file +#define VK_HEADER_VERSION 73 + + +#define VK_NULL_HANDLE 0 + + + +#define VK_DEFINE_HANDLE(object) typedef struct object##_T* object; + + +#if !defined(VK_DEFINE_NON_DISPATCHABLE_HANDLE) +#if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__) + #define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) typedef struct object##_T *object; +#else + #define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) typedef uint64_t object; +#endif +#endif + + + +typedef uint32_t VkFlags; +typedef uint32_t VkBool32; +typedef uint64_t VkDeviceSize; +typedef uint32_t VkSampleMask; + +VK_DEFINE_HANDLE(VkInstance) +VK_DEFINE_HANDLE(VkPhysicalDevice) +VK_DEFINE_HANDLE(VkDevice) +VK_DEFINE_HANDLE(VkQueue) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSemaphore) +VK_DEFINE_HANDLE(VkCommandBuffer) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkFence) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDeviceMemory) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkBuffer) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkImage) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkEvent) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkQueryPool) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkBufferView) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkImageView) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkShaderModule) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPipelineCache) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPipelineLayout) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkRenderPass) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPipeline) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorSetLayout) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSampler) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorPool) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorSet) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkFramebuffer) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkCommandPool) + +#define VK_LOD_CLAMP_NONE 1000.0f +#define VK_REMAINING_MIP_LEVELS (~0U) 
+#define VK_REMAINING_ARRAY_LAYERS (~0U) +#define VK_WHOLE_SIZE (~0ULL) +#define VK_ATTACHMENT_UNUSED (~0U) +#define VK_TRUE 1 +#define VK_FALSE 0 +#define VK_QUEUE_FAMILY_IGNORED (~0U) +#define VK_SUBPASS_EXTERNAL (~0U) +#define VK_MAX_PHYSICAL_DEVICE_NAME_SIZE 256 +#define VK_UUID_SIZE 16 +#define VK_MAX_MEMORY_TYPES 32 +#define VK_MAX_MEMORY_HEAPS 16 +#define VK_MAX_EXTENSION_NAME_SIZE 256 +#define VK_MAX_DESCRIPTION_SIZE 256 + + +typedef enum VkPipelineCacheHeaderVersion { + VK_PIPELINE_CACHE_HEADER_VERSION_ONE = 1, + VK_PIPELINE_CACHE_HEADER_VERSION_BEGIN_RANGE = VK_PIPELINE_CACHE_HEADER_VERSION_ONE, + VK_PIPELINE_CACHE_HEADER_VERSION_END_RANGE = VK_PIPELINE_CACHE_HEADER_VERSION_ONE, + VK_PIPELINE_CACHE_HEADER_VERSION_RANGE_SIZE = (VK_PIPELINE_CACHE_HEADER_VERSION_ONE - VK_PIPELINE_CACHE_HEADER_VERSION_ONE + 1), + VK_PIPELINE_CACHE_HEADER_VERSION_MAX_ENUM = 0x7FFFFFFF +} VkPipelineCacheHeaderVersion; + +typedef enum VkResult { + VK_SUCCESS = 0, + VK_NOT_READY = 1, + VK_TIMEOUT = 2, + VK_EVENT_SET = 3, + VK_EVENT_RESET = 4, + VK_INCOMPLETE = 5, + VK_ERROR_OUT_OF_HOST_MEMORY = -1, + VK_ERROR_OUT_OF_DEVICE_MEMORY = -2, + VK_ERROR_INITIALIZATION_FAILED = -3, + VK_ERROR_DEVICE_LOST = -4, + VK_ERROR_MEMORY_MAP_FAILED = -5, + VK_ERROR_LAYER_NOT_PRESENT = -6, + VK_ERROR_EXTENSION_NOT_PRESENT = -7, + VK_ERROR_FEATURE_NOT_PRESENT = -8, + VK_ERROR_INCOMPATIBLE_DRIVER = -9, + VK_ERROR_TOO_MANY_OBJECTS = -10, + VK_ERROR_FORMAT_NOT_SUPPORTED = -11, + VK_ERROR_FRAGMENTED_POOL = -12, + VK_ERROR_OUT_OF_POOL_MEMORY = -1000069000, + VK_ERROR_INVALID_EXTERNAL_HANDLE = -1000072003, + VK_ERROR_SURFACE_LOST_KHR = -1000000000, + VK_ERROR_NATIVE_WINDOW_IN_USE_KHR = -1000000001, + VK_SUBOPTIMAL_KHR = 1000001003, + VK_ERROR_OUT_OF_DATE_KHR = -1000001004, + VK_ERROR_INCOMPATIBLE_DISPLAY_KHR = -1000003001, + VK_ERROR_VALIDATION_FAILED_EXT = -1000011001, + VK_ERROR_INVALID_SHADER_NV = -1000012000, + VK_ERROR_FRAGMENTATION_EXT = -1000161000, + VK_ERROR_NOT_PERMITTED_EXT = -1000174001, + VK_ERROR_OUT_OF_POOL_MEMORY_KHR = VK_ERROR_OUT_OF_POOL_MEMORY, + VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR = VK_ERROR_INVALID_EXTERNAL_HANDLE, + VK_RESULT_BEGIN_RANGE = VK_ERROR_FRAGMENTED_POOL, + VK_RESULT_END_RANGE = VK_INCOMPLETE, + VK_RESULT_RANGE_SIZE = (VK_INCOMPLETE - VK_ERROR_FRAGMENTED_POOL + 1), + VK_RESULT_MAX_ENUM = 0x7FFFFFFF +} VkResult; + +typedef enum VkStructureType { + VK_STRUCTURE_TYPE_APPLICATION_INFO = 0, + VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO = 1, + VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO = 2, + VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO = 3, + VK_STRUCTURE_TYPE_SUBMIT_INFO = 4, + VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO = 5, + VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE = 6, + VK_STRUCTURE_TYPE_BIND_SPARSE_INFO = 7, + VK_STRUCTURE_TYPE_FENCE_CREATE_INFO = 8, + VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO = 9, + VK_STRUCTURE_TYPE_EVENT_CREATE_INFO = 10, + VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO = 11, + VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO = 12, + VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO = 13, + VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO = 14, + VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO = 15, + VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO = 16, + VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO = 17, + VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO = 18, + VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO = 19, + VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO = 20, + VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO = 21, + VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO = 22, + 
VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO = 23, + VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO = 24, + VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO = 25, + VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO = 26, + VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO = 27, + VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO = 28, + VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO = 29, + VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO = 30, + VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO = 31, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO = 32, + VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO = 33, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO = 34, + VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET = 35, + VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET = 36, + VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO = 37, + VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO = 38, + VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO = 39, + VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO = 40, + VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO = 41, + VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO = 42, + VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO = 43, + VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER = 44, + VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER = 45, + VK_STRUCTURE_TYPE_MEMORY_BARRIER = 46, + VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO = 47, + VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO = 48, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES = 1000094000, + VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO = 1000157000, + VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO = 1000157001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES = 1000083000, + VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS = 1000127000, + VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO = 1000127001, + VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO = 1000060000, + VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO = 1000060003, + VK_STRUCTURE_TYPE_DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO = 1000060004, + VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO = 1000060005, + VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO = 1000060006, + VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO = 1000060013, + VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO = 1000060014, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES = 1000070000, + VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO = 1000070001, + VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2 = 1000146000, + VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2 = 1000146001, + VK_STRUCTURE_TYPE_IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2 = 1000146002, + VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2 = 1000146003, + VK_STRUCTURE_TYPE_SPARSE_IMAGE_MEMORY_REQUIREMENTS_2 = 1000146004, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2 = 1000059000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2 = 1000059001, + VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2 = 1000059002, + VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2 = 1000059003, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2 = 1000059004, + VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2 = 1000059005, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2 = 1000059006, + VK_STRUCTURE_TYPE_SPARSE_IMAGE_FORMAT_PROPERTIES_2 = 1000059007, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2 = 1000059008, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES = 1000117000, + VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO = 1000117001, + VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO = 1000117002, + 
VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO = 1000117003, + VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO = 1000053000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES = 1000053001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES = 1000053002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES = 1000120000, + VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO = 1000145000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES = 1000145001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_PROPERTIES = 1000145002, + VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2 = 1000145003, + VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO = 1000156000, + VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO = 1000156001, + VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO = 1000156002, + VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO = 1000156003, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES = 1000156004, + VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES = 1000156005, + VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO = 1000085000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO = 1000071000, + VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES = 1000071001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO = 1000071002, + VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES = 1000071003, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES = 1000071004, + VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO = 1000072000, + VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO = 1000072001, + VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO = 1000072002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO = 1000112000, + VK_STRUCTURE_TYPE_EXTERNAL_FENCE_PROPERTIES = 1000112001, + VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO = 1000113000, + VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO = 1000077000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO = 1000076000, + VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES = 1000076001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES = 1000168000, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT = 1000168001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES = 1000063000, + VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR = 1000001000, + VK_STRUCTURE_TYPE_PRESENT_INFO_KHR = 1000001001, + VK_STRUCTURE_TYPE_DEVICE_GROUP_PRESENT_CAPABILITIES_KHR = 1000060007, + VK_STRUCTURE_TYPE_IMAGE_SWAPCHAIN_CREATE_INFO_KHR = 1000060008, + VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_SWAPCHAIN_INFO_KHR = 1000060009, + VK_STRUCTURE_TYPE_ACQUIRE_NEXT_IMAGE_INFO_KHR = 1000060010, + VK_STRUCTURE_TYPE_DEVICE_GROUP_PRESENT_INFO_KHR = 1000060011, + VK_STRUCTURE_TYPE_DEVICE_GROUP_SWAPCHAIN_CREATE_INFO_KHR = 1000060012, + VK_STRUCTURE_TYPE_DISPLAY_MODE_CREATE_INFO_KHR = 1000002000, + VK_STRUCTURE_TYPE_DISPLAY_SURFACE_CREATE_INFO_KHR = 1000002001, + VK_STRUCTURE_TYPE_DISPLAY_PRESENT_INFO_KHR = 1000003000, + VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR = 1000004000, + VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR = 1000005000, + VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR = 1000006000, + VK_STRUCTURE_TYPE_MIR_SURFACE_CREATE_INFO_KHR = 1000007000, + VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR = 1000008000, + VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR = 1000009000, + VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT = 1000011000, + VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_RASTERIZATION_ORDER_AMD = 1000018000, + 
VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_NAME_INFO_EXT = 1000022000, + VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_TAG_INFO_EXT = 1000022001, + VK_STRUCTURE_TYPE_DEBUG_MARKER_MARKER_INFO_EXT = 1000022002, + VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_IMAGE_CREATE_INFO_NV = 1000026000, + VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_BUFFER_CREATE_INFO_NV = 1000026001, + VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_MEMORY_ALLOCATE_INFO_NV = 1000026002, + VK_STRUCTURE_TYPE_TEXTURE_LOD_GATHER_FORMAT_PROPERTIES_AMD = 1000041000, + VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_NV = 1000056000, + VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_NV = 1000056001, + VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_NV = 1000057000, + VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_NV = 1000057001, + VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_NV = 1000058000, + VK_STRUCTURE_TYPE_VALIDATION_FLAGS_EXT = 1000061000, + VK_STRUCTURE_TYPE_VI_SURFACE_CREATE_INFO_NN = 1000062000, + VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_KHR = 1000073000, + VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_KHR = 1000073001, + VK_STRUCTURE_TYPE_MEMORY_WIN32_HANDLE_PROPERTIES_KHR = 1000073002, + VK_STRUCTURE_TYPE_MEMORY_GET_WIN32_HANDLE_INFO_KHR = 1000073003, + VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR = 1000074000, + VK_STRUCTURE_TYPE_MEMORY_FD_PROPERTIES_KHR = 1000074001, + VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR = 1000074002, + VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_KHR = 1000075000, + VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR = 1000078000, + VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR = 1000078001, + VK_STRUCTURE_TYPE_D3D12_FENCE_SUBMIT_INFO_KHR = 1000078002, + VK_STRUCTURE_TYPE_SEMAPHORE_GET_WIN32_HANDLE_INFO_KHR = 1000078003, + VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR = 1000079000, + VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR = 1000079001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR = 1000080000, + VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR = 1000084000, + VK_STRUCTURE_TYPE_OBJECT_TABLE_CREATE_INFO_NVX = 1000086000, + VK_STRUCTURE_TYPE_INDIRECT_COMMANDS_LAYOUT_CREATE_INFO_NVX = 1000086001, + VK_STRUCTURE_TYPE_CMD_PROCESS_COMMANDS_INFO_NVX = 1000086002, + VK_STRUCTURE_TYPE_CMD_RESERVE_SPACE_FOR_COMMANDS_INFO_NVX = 1000086003, + VK_STRUCTURE_TYPE_DEVICE_GENERATED_COMMANDS_LIMITS_NVX = 1000086004, + VK_STRUCTURE_TYPE_DEVICE_GENERATED_COMMANDS_FEATURES_NVX = 1000086005, + VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_W_SCALING_STATE_CREATE_INFO_NV = 1000087000, + VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_EXT = 1000090000, + VK_STRUCTURE_TYPE_DISPLAY_POWER_INFO_EXT = 1000091000, + VK_STRUCTURE_TYPE_DEVICE_EVENT_INFO_EXT = 1000091001, + VK_STRUCTURE_TYPE_DISPLAY_EVENT_INFO_EXT = 1000091002, + VK_STRUCTURE_TYPE_SWAPCHAIN_COUNTER_CREATE_INFO_EXT = 1000091003, + VK_STRUCTURE_TYPE_PRESENT_TIMES_INFO_GOOGLE = 1000092000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PER_VIEW_ATTRIBUTES_PROPERTIES_NVX = 1000097000, + VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SWIZZLE_STATE_CREATE_INFO_NV = 1000098000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DISCARD_RECTANGLE_PROPERTIES_EXT = 1000099000, + VK_STRUCTURE_TYPE_PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT = 1000099001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONSERVATIVE_RASTERIZATION_PROPERTIES_EXT = 1000101000, + VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_CONSERVATIVE_STATE_CREATE_INFO_EXT = 1000101001, + VK_STRUCTURE_TYPE_HDR_METADATA_EXT = 1000105000, + VK_STRUCTURE_TYPE_SHARED_PRESENT_SURFACE_CAPABILITIES_KHR = 1000111000, + 
VK_STRUCTURE_TYPE_IMPORT_FENCE_WIN32_HANDLE_INFO_KHR = 1000114000, + VK_STRUCTURE_TYPE_EXPORT_FENCE_WIN32_HANDLE_INFO_KHR = 1000114001, + VK_STRUCTURE_TYPE_FENCE_GET_WIN32_HANDLE_INFO_KHR = 1000114002, + VK_STRUCTURE_TYPE_IMPORT_FENCE_FD_INFO_KHR = 1000115000, + VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR = 1000115001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SURFACE_INFO_2_KHR = 1000119000, + VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR = 1000119001, + VK_STRUCTURE_TYPE_SURFACE_FORMAT_2_KHR = 1000119002, + VK_STRUCTURE_TYPE_IOS_SURFACE_CREATE_INFO_MVK = 1000122000, + VK_STRUCTURE_TYPE_MACOS_SURFACE_CREATE_INFO_MVK = 1000123000, + VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT = 1000128000, + VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_TAG_INFO_EXT = 1000128001, + VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT = 1000128002, + VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CALLBACK_DATA_EXT = 1000128003, + VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT = 1000128004, + VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_USAGE_ANDROID = 1000129000, + VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID = 1000129001, + VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID = 1000129002, + VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID = 1000129003, + VK_STRUCTURE_TYPE_MEMORY_GET_ANDROID_HARDWARE_BUFFER_INFO_ANDROID = 1000129004, + VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID = 1000129005, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES_EXT = 1000130000, + VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO_EXT = 1000130001, + VK_STRUCTURE_TYPE_SAMPLE_LOCATIONS_INFO_EXT = 1000143000, + VK_STRUCTURE_TYPE_RENDER_PASS_SAMPLE_LOCATIONS_BEGIN_INFO_EXT = 1000143001, + VK_STRUCTURE_TYPE_PIPELINE_SAMPLE_LOCATIONS_STATE_CREATE_INFO_EXT = 1000143002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLE_LOCATIONS_PROPERTIES_EXT = 1000143003, + VK_STRUCTURE_TYPE_MULTISAMPLE_PROPERTIES_EXT = 1000143004, + VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO_KHR = 1000147000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT = 1000148000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_PROPERTIES_EXT = 1000148001, + VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_ADVANCED_STATE_CREATE_INFO_EXT = 1000148002, + VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_TO_COLOR_STATE_CREATE_INFO_NV = 1000149000, + VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_MODULATION_STATE_CREATE_INFO_NV = 1000152000, + VK_STRUCTURE_TYPE_VALIDATION_CACHE_CREATE_INFO_EXT = 1000160000, + VK_STRUCTURE_TYPE_SHADER_MODULE_VALIDATION_CACHE_CREATE_INFO_EXT = 1000160001, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT = 1000161000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT = 1000161001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES_EXT = 1000161002, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO_EXT = 1000161003, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT_EXT = 1000161004, + VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT = 1000174000, + VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT = 1000178000, + VK_STRUCTURE_TYPE_MEMORY_HOST_POINTER_PROPERTIES_EXT = 1000178001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_HOST_PROPERTIES_EXT = 1000178002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_AMD = 1000185000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT = 1000190000, + 
VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT = 1000190001, + VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2, + VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2, + VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2, + VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2, + VK_STRUCTURE_TYPE_SPARSE_IMAGE_FORMAT_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_SPARSE_IMAGE_FORMAT_PROPERTIES_2, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2, + VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO, + VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO, + VK_STRUCTURE_TYPE_DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO, + VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO, + VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO, + VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO_KHR = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO, + VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO_KHR = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES, + VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO, + VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES_KHR = VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO, + VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES_KHR = VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES, + VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO, + VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO, + VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO, + VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES_KHR = VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES, + 
VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES, + VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO, + VK_STRUCTURE_TYPE_EXTERNAL_FENCE_PROPERTIES_KHR = VK_STRUCTURE_TYPE_EXTERNAL_FENCE_PROPERTIES, + VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES, + VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO, + VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO, + VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES, + VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS, + VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO, + VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR = VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2, + VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR = VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2, + VK_STRUCTURE_TYPE_IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2_KHR = VK_STRUCTURE_TYPE_IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2, + VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2, + VK_STRUCTURE_TYPE_SPARSE_IMAGE_MEMORY_REQUIREMENTS_2_KHR = VK_STRUCTURE_TYPE_SPARSE_IMAGE_MEMORY_REQUIREMENTS_2, + VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO, + VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO_KHR = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO, + VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO_KHR = VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO, + VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO_KHR = VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES, + VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES_KHR = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES, + VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO, + VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT_KHR = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT, + VK_STRUCTURE_TYPE_BEGIN_RANGE = VK_STRUCTURE_TYPE_APPLICATION_INFO, + VK_STRUCTURE_TYPE_END_RANGE = VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO, + VK_STRUCTURE_TYPE_RANGE_SIZE = (VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO - VK_STRUCTURE_TYPE_APPLICATION_INFO + 1), + VK_STRUCTURE_TYPE_MAX_ENUM = 0x7FFFFFFF +} 
VkStructureType; + +typedef enum VkSystemAllocationScope { + VK_SYSTEM_ALLOCATION_SCOPE_COMMAND = 0, + VK_SYSTEM_ALLOCATION_SCOPE_OBJECT = 1, + VK_SYSTEM_ALLOCATION_SCOPE_CACHE = 2, + VK_SYSTEM_ALLOCATION_SCOPE_DEVICE = 3, + VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE = 4, + VK_SYSTEM_ALLOCATION_SCOPE_BEGIN_RANGE = VK_SYSTEM_ALLOCATION_SCOPE_COMMAND, + VK_SYSTEM_ALLOCATION_SCOPE_END_RANGE = VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE, + VK_SYSTEM_ALLOCATION_SCOPE_RANGE_SIZE = (VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE - VK_SYSTEM_ALLOCATION_SCOPE_COMMAND + 1), + VK_SYSTEM_ALLOCATION_SCOPE_MAX_ENUM = 0x7FFFFFFF +} VkSystemAllocationScope; + +typedef enum VkInternalAllocationType { + VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE = 0, + VK_INTERNAL_ALLOCATION_TYPE_BEGIN_RANGE = VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE, + VK_INTERNAL_ALLOCATION_TYPE_END_RANGE = VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE, + VK_INTERNAL_ALLOCATION_TYPE_RANGE_SIZE = (VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE - VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE + 1), + VK_INTERNAL_ALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkInternalAllocationType; + +typedef enum VkFormat { + VK_FORMAT_UNDEFINED = 0, + VK_FORMAT_R4G4_UNORM_PACK8 = 1, + VK_FORMAT_R4G4B4A4_UNORM_PACK16 = 2, + VK_FORMAT_B4G4R4A4_UNORM_PACK16 = 3, + VK_FORMAT_R5G6B5_UNORM_PACK16 = 4, + VK_FORMAT_B5G6R5_UNORM_PACK16 = 5, + VK_FORMAT_R5G5B5A1_UNORM_PACK16 = 6, + VK_FORMAT_B5G5R5A1_UNORM_PACK16 = 7, + VK_FORMAT_A1R5G5B5_UNORM_PACK16 = 8, + VK_FORMAT_R8_UNORM = 9, + VK_FORMAT_R8_SNORM = 10, + VK_FORMAT_R8_USCALED = 11, + VK_FORMAT_R8_SSCALED = 12, + VK_FORMAT_R8_UINT = 13, + VK_FORMAT_R8_SINT = 14, + VK_FORMAT_R8_SRGB = 15, + VK_FORMAT_R8G8_UNORM = 16, + VK_FORMAT_R8G8_SNORM = 17, + VK_FORMAT_R8G8_USCALED = 18, + VK_FORMAT_R8G8_SSCALED = 19, + VK_FORMAT_R8G8_UINT = 20, + VK_FORMAT_R8G8_SINT = 21, + VK_FORMAT_R8G8_SRGB = 22, + VK_FORMAT_R8G8B8_UNORM = 23, + VK_FORMAT_R8G8B8_SNORM = 24, + VK_FORMAT_R8G8B8_USCALED = 25, + VK_FORMAT_R8G8B8_SSCALED = 26, + VK_FORMAT_R8G8B8_UINT = 27, + VK_FORMAT_R8G8B8_SINT = 28, + VK_FORMAT_R8G8B8_SRGB = 29, + VK_FORMAT_B8G8R8_UNORM = 30, + VK_FORMAT_B8G8R8_SNORM = 31, + VK_FORMAT_B8G8R8_USCALED = 32, + VK_FORMAT_B8G8R8_SSCALED = 33, + VK_FORMAT_B8G8R8_UINT = 34, + VK_FORMAT_B8G8R8_SINT = 35, + VK_FORMAT_B8G8R8_SRGB = 36, + VK_FORMAT_R8G8B8A8_UNORM = 37, + VK_FORMAT_R8G8B8A8_SNORM = 38, + VK_FORMAT_R8G8B8A8_USCALED = 39, + VK_FORMAT_R8G8B8A8_SSCALED = 40, + VK_FORMAT_R8G8B8A8_UINT = 41, + VK_FORMAT_R8G8B8A8_SINT = 42, + VK_FORMAT_R8G8B8A8_SRGB = 43, + VK_FORMAT_B8G8R8A8_UNORM = 44, + VK_FORMAT_B8G8R8A8_SNORM = 45, + VK_FORMAT_B8G8R8A8_USCALED = 46, + VK_FORMAT_B8G8R8A8_SSCALED = 47, + VK_FORMAT_B8G8R8A8_UINT = 48, + VK_FORMAT_B8G8R8A8_SINT = 49, + VK_FORMAT_B8G8R8A8_SRGB = 50, + VK_FORMAT_A8B8G8R8_UNORM_PACK32 = 51, + VK_FORMAT_A8B8G8R8_SNORM_PACK32 = 52, + VK_FORMAT_A8B8G8R8_USCALED_PACK32 = 53, + VK_FORMAT_A8B8G8R8_SSCALED_PACK32 = 54, + VK_FORMAT_A8B8G8R8_UINT_PACK32 = 55, + VK_FORMAT_A8B8G8R8_SINT_PACK32 = 56, + VK_FORMAT_A8B8G8R8_SRGB_PACK32 = 57, + VK_FORMAT_A2R10G10B10_UNORM_PACK32 = 58, + VK_FORMAT_A2R10G10B10_SNORM_PACK32 = 59, + VK_FORMAT_A2R10G10B10_USCALED_PACK32 = 60, + VK_FORMAT_A2R10G10B10_SSCALED_PACK32 = 61, + VK_FORMAT_A2R10G10B10_UINT_PACK32 = 62, + VK_FORMAT_A2R10G10B10_SINT_PACK32 = 63, + VK_FORMAT_A2B10G10R10_UNORM_PACK32 = 64, + VK_FORMAT_A2B10G10R10_SNORM_PACK32 = 65, + VK_FORMAT_A2B10G10R10_USCALED_PACK32 = 66, + VK_FORMAT_A2B10G10R10_SSCALED_PACK32 = 67, + VK_FORMAT_A2B10G10R10_UINT_PACK32 = 68, + VK_FORMAT_A2B10G10R10_SINT_PACK32 = 69, + 
VK_FORMAT_R16_UNORM = 70, + VK_FORMAT_R16_SNORM = 71, + VK_FORMAT_R16_USCALED = 72, + VK_FORMAT_R16_SSCALED = 73, + VK_FORMAT_R16_UINT = 74, + VK_FORMAT_R16_SINT = 75, + VK_FORMAT_R16_SFLOAT = 76, + VK_FORMAT_R16G16_UNORM = 77, + VK_FORMAT_R16G16_SNORM = 78, + VK_FORMAT_R16G16_USCALED = 79, + VK_FORMAT_R16G16_SSCALED = 80, + VK_FORMAT_R16G16_UINT = 81, + VK_FORMAT_R16G16_SINT = 82, + VK_FORMAT_R16G16_SFLOAT = 83, + VK_FORMAT_R16G16B16_UNORM = 84, + VK_FORMAT_R16G16B16_SNORM = 85, + VK_FORMAT_R16G16B16_USCALED = 86, + VK_FORMAT_R16G16B16_SSCALED = 87, + VK_FORMAT_R16G16B16_UINT = 88, + VK_FORMAT_R16G16B16_SINT = 89, + VK_FORMAT_R16G16B16_SFLOAT = 90, + VK_FORMAT_R16G16B16A16_UNORM = 91, + VK_FORMAT_R16G16B16A16_SNORM = 92, + VK_FORMAT_R16G16B16A16_USCALED = 93, + VK_FORMAT_R16G16B16A16_SSCALED = 94, + VK_FORMAT_R16G16B16A16_UINT = 95, + VK_FORMAT_R16G16B16A16_SINT = 96, + VK_FORMAT_R16G16B16A16_SFLOAT = 97, + VK_FORMAT_R32_UINT = 98, + VK_FORMAT_R32_SINT = 99, + VK_FORMAT_R32_SFLOAT = 100, + VK_FORMAT_R32G32_UINT = 101, + VK_FORMAT_R32G32_SINT = 102, + VK_FORMAT_R32G32_SFLOAT = 103, + VK_FORMAT_R32G32B32_UINT = 104, + VK_FORMAT_R32G32B32_SINT = 105, + VK_FORMAT_R32G32B32_SFLOAT = 106, + VK_FORMAT_R32G32B32A32_UINT = 107, + VK_FORMAT_R32G32B32A32_SINT = 108, + VK_FORMAT_R32G32B32A32_SFLOAT = 109, + VK_FORMAT_R64_UINT = 110, + VK_FORMAT_R64_SINT = 111, + VK_FORMAT_R64_SFLOAT = 112, + VK_FORMAT_R64G64_UINT = 113, + VK_FORMAT_R64G64_SINT = 114, + VK_FORMAT_R64G64_SFLOAT = 115, + VK_FORMAT_R64G64B64_UINT = 116, + VK_FORMAT_R64G64B64_SINT = 117, + VK_FORMAT_R64G64B64_SFLOAT = 118, + VK_FORMAT_R64G64B64A64_UINT = 119, + VK_FORMAT_R64G64B64A64_SINT = 120, + VK_FORMAT_R64G64B64A64_SFLOAT = 121, + VK_FORMAT_B10G11R11_UFLOAT_PACK32 = 122, + VK_FORMAT_E5B9G9R9_UFLOAT_PACK32 = 123, + VK_FORMAT_D16_UNORM = 124, + VK_FORMAT_X8_D24_UNORM_PACK32 = 125, + VK_FORMAT_D32_SFLOAT = 126, + VK_FORMAT_S8_UINT = 127, + VK_FORMAT_D16_UNORM_S8_UINT = 128, + VK_FORMAT_D24_UNORM_S8_UINT = 129, + VK_FORMAT_D32_SFLOAT_S8_UINT = 130, + VK_FORMAT_BC1_RGB_UNORM_BLOCK = 131, + VK_FORMAT_BC1_RGB_SRGB_BLOCK = 132, + VK_FORMAT_BC1_RGBA_UNORM_BLOCK = 133, + VK_FORMAT_BC1_RGBA_SRGB_BLOCK = 134, + VK_FORMAT_BC2_UNORM_BLOCK = 135, + VK_FORMAT_BC2_SRGB_BLOCK = 136, + VK_FORMAT_BC3_UNORM_BLOCK = 137, + VK_FORMAT_BC3_SRGB_BLOCK = 138, + VK_FORMAT_BC4_UNORM_BLOCK = 139, + VK_FORMAT_BC4_SNORM_BLOCK = 140, + VK_FORMAT_BC5_UNORM_BLOCK = 141, + VK_FORMAT_BC5_SNORM_BLOCK = 142, + VK_FORMAT_BC6H_UFLOAT_BLOCK = 143, + VK_FORMAT_BC6H_SFLOAT_BLOCK = 144, + VK_FORMAT_BC7_UNORM_BLOCK = 145, + VK_FORMAT_BC7_SRGB_BLOCK = 146, + VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK = 147, + VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK = 148, + VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK = 149, + VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK = 150, + VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK = 151, + VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK = 152, + VK_FORMAT_EAC_R11_UNORM_BLOCK = 153, + VK_FORMAT_EAC_R11_SNORM_BLOCK = 154, + VK_FORMAT_EAC_R11G11_UNORM_BLOCK = 155, + VK_FORMAT_EAC_R11G11_SNORM_BLOCK = 156, + VK_FORMAT_ASTC_4x4_UNORM_BLOCK = 157, + VK_FORMAT_ASTC_4x4_SRGB_BLOCK = 158, + VK_FORMAT_ASTC_5x4_UNORM_BLOCK = 159, + VK_FORMAT_ASTC_5x4_SRGB_BLOCK = 160, + VK_FORMAT_ASTC_5x5_UNORM_BLOCK = 161, + VK_FORMAT_ASTC_5x5_SRGB_BLOCK = 162, + VK_FORMAT_ASTC_6x5_UNORM_BLOCK = 163, + VK_FORMAT_ASTC_6x5_SRGB_BLOCK = 164, + VK_FORMAT_ASTC_6x6_UNORM_BLOCK = 165, + VK_FORMAT_ASTC_6x6_SRGB_BLOCK = 166, + VK_FORMAT_ASTC_8x5_UNORM_BLOCK = 167, + VK_FORMAT_ASTC_8x5_SRGB_BLOCK = 168, + VK_FORMAT_ASTC_8x6_UNORM_BLOCK = 169, + 
VK_FORMAT_ASTC_8x6_SRGB_BLOCK = 170, + VK_FORMAT_ASTC_8x8_UNORM_BLOCK = 171, + VK_FORMAT_ASTC_8x8_SRGB_BLOCK = 172, + VK_FORMAT_ASTC_10x5_UNORM_BLOCK = 173, + VK_FORMAT_ASTC_10x5_SRGB_BLOCK = 174, + VK_FORMAT_ASTC_10x6_UNORM_BLOCK = 175, + VK_FORMAT_ASTC_10x6_SRGB_BLOCK = 176, + VK_FORMAT_ASTC_10x8_UNORM_BLOCK = 177, + VK_FORMAT_ASTC_10x8_SRGB_BLOCK = 178, + VK_FORMAT_ASTC_10x10_UNORM_BLOCK = 179, + VK_FORMAT_ASTC_10x10_SRGB_BLOCK = 180, + VK_FORMAT_ASTC_12x10_UNORM_BLOCK = 181, + VK_FORMAT_ASTC_12x10_SRGB_BLOCK = 182, + VK_FORMAT_ASTC_12x12_UNORM_BLOCK = 183, + VK_FORMAT_ASTC_12x12_SRGB_BLOCK = 184, + VK_FORMAT_G8B8G8R8_422_UNORM = 1000156000, + VK_FORMAT_B8G8R8G8_422_UNORM = 1000156001, + VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM = 1000156002, + VK_FORMAT_G8_B8R8_2PLANE_420_UNORM = 1000156003, + VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM = 1000156004, + VK_FORMAT_G8_B8R8_2PLANE_422_UNORM = 1000156005, + VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM = 1000156006, + VK_FORMAT_R10X6_UNORM_PACK16 = 1000156007, + VK_FORMAT_R10X6G10X6_UNORM_2PACK16 = 1000156008, + VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16 = 1000156009, + VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16 = 1000156010, + VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16 = 1000156011, + VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16 = 1000156012, + VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16 = 1000156013, + VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16 = 1000156014, + VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16 = 1000156015, + VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16 = 1000156016, + VK_FORMAT_R12X4_UNORM_PACK16 = 1000156017, + VK_FORMAT_R12X4G12X4_UNORM_2PACK16 = 1000156018, + VK_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16 = 1000156019, + VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16 = 1000156020, + VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16 = 1000156021, + VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16 = 1000156022, + VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16 = 1000156023, + VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16 = 1000156024, + VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16 = 1000156025, + VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16 = 1000156026, + VK_FORMAT_G16B16G16R16_422_UNORM = 1000156027, + VK_FORMAT_B16G16R16G16_422_UNORM = 1000156028, + VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM = 1000156029, + VK_FORMAT_G16_B16R16_2PLANE_420_UNORM = 1000156030, + VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM = 1000156031, + VK_FORMAT_G16_B16R16_2PLANE_422_UNORM = 1000156032, + VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM = 1000156033, + VK_FORMAT_PVRTC1_2BPP_UNORM_BLOCK_IMG = 1000054000, + VK_FORMAT_PVRTC1_4BPP_UNORM_BLOCK_IMG = 1000054001, + VK_FORMAT_PVRTC2_2BPP_UNORM_BLOCK_IMG = 1000054002, + VK_FORMAT_PVRTC2_4BPP_UNORM_BLOCK_IMG = 1000054003, + VK_FORMAT_PVRTC1_2BPP_SRGB_BLOCK_IMG = 1000054004, + VK_FORMAT_PVRTC1_4BPP_SRGB_BLOCK_IMG = 1000054005, + VK_FORMAT_PVRTC2_2BPP_SRGB_BLOCK_IMG = 1000054006, + VK_FORMAT_PVRTC2_4BPP_SRGB_BLOCK_IMG = 1000054007, + VK_FORMAT_G8B8G8R8_422_UNORM_KHR = VK_FORMAT_G8B8G8R8_422_UNORM, + VK_FORMAT_B8G8R8G8_422_UNORM_KHR = VK_FORMAT_B8G8R8G8_422_UNORM, + VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM_KHR = VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM, + VK_FORMAT_G8_B8R8_2PLANE_420_UNORM_KHR = VK_FORMAT_G8_B8R8_2PLANE_420_UNORM, + VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM_KHR = VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM, + VK_FORMAT_G8_B8R8_2PLANE_422_UNORM_KHR = VK_FORMAT_G8_B8R8_2PLANE_422_UNORM, + VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM_KHR = VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM, + 
VK_FORMAT_R10X6_UNORM_PACK16_KHR = VK_FORMAT_R10X6_UNORM_PACK16, + VK_FORMAT_R10X6G10X6_UNORM_2PACK16_KHR = VK_FORMAT_R10X6G10X6_UNORM_2PACK16, + VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16_KHR = VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16, + VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16_KHR = VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16, + VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16_KHR = VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16, + VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16_KHR = VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16, + VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16_KHR = VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16, + VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16_KHR = VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16, + VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16_KHR = VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16, + VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16_KHR = VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16, + VK_FORMAT_R12X4_UNORM_PACK16_KHR = VK_FORMAT_R12X4_UNORM_PACK16, + VK_FORMAT_R12X4G12X4_UNORM_2PACK16_KHR = VK_FORMAT_R12X4G12X4_UNORM_2PACK16, + VK_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16_KHR = VK_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16, + VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16_KHR = VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16, + VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16_KHR = VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16, + VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16_KHR = VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16, + VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16_KHR = VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16, + VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16_KHR = VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16, + VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16_KHR = VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16, + VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16_KHR = VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16, + VK_FORMAT_G16B16G16R16_422_UNORM_KHR = VK_FORMAT_G16B16G16R16_422_UNORM, + VK_FORMAT_B16G16R16G16_422_UNORM_KHR = VK_FORMAT_B16G16R16G16_422_UNORM, + VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM_KHR = VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM, + VK_FORMAT_G16_B16R16_2PLANE_420_UNORM_KHR = VK_FORMAT_G16_B16R16_2PLANE_420_UNORM, + VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM_KHR = VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM, + VK_FORMAT_G16_B16R16_2PLANE_422_UNORM_KHR = VK_FORMAT_G16_B16R16_2PLANE_422_UNORM, + VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM_KHR = VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM, + VK_FORMAT_BEGIN_RANGE = VK_FORMAT_UNDEFINED, + VK_FORMAT_END_RANGE = VK_FORMAT_ASTC_12x12_SRGB_BLOCK, + VK_FORMAT_RANGE_SIZE = (VK_FORMAT_ASTC_12x12_SRGB_BLOCK - VK_FORMAT_UNDEFINED + 1), + VK_FORMAT_MAX_ENUM = 0x7FFFFFFF +} VkFormat; + +typedef enum VkImageType { + VK_IMAGE_TYPE_1D = 0, + VK_IMAGE_TYPE_2D = 1, + VK_IMAGE_TYPE_3D = 2, + VK_IMAGE_TYPE_BEGIN_RANGE = VK_IMAGE_TYPE_1D, + VK_IMAGE_TYPE_END_RANGE = VK_IMAGE_TYPE_3D, + VK_IMAGE_TYPE_RANGE_SIZE = (VK_IMAGE_TYPE_3D - VK_IMAGE_TYPE_1D + 1), + VK_IMAGE_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkImageType; + +typedef enum VkImageTiling { + VK_IMAGE_TILING_OPTIMAL = 0, + VK_IMAGE_TILING_LINEAR = 1, + VK_IMAGE_TILING_BEGIN_RANGE = VK_IMAGE_TILING_OPTIMAL, + VK_IMAGE_TILING_END_RANGE = VK_IMAGE_TILING_LINEAR, + VK_IMAGE_TILING_RANGE_SIZE = (VK_IMAGE_TILING_LINEAR - VK_IMAGE_TILING_OPTIMAL + 1), + VK_IMAGE_TILING_MAX_ENUM = 0x7FFFFFFF +} VkImageTiling; + 
+typedef enum VkPhysicalDeviceType { + VK_PHYSICAL_DEVICE_TYPE_OTHER = 0, + VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU = 1, + VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU = 2, + VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU = 3, + VK_PHYSICAL_DEVICE_TYPE_CPU = 4, + VK_PHYSICAL_DEVICE_TYPE_BEGIN_RANGE = VK_PHYSICAL_DEVICE_TYPE_OTHER, + VK_PHYSICAL_DEVICE_TYPE_END_RANGE = VK_PHYSICAL_DEVICE_TYPE_CPU, + VK_PHYSICAL_DEVICE_TYPE_RANGE_SIZE = (VK_PHYSICAL_DEVICE_TYPE_CPU - VK_PHYSICAL_DEVICE_TYPE_OTHER + 1), + VK_PHYSICAL_DEVICE_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkPhysicalDeviceType; + +typedef enum VkQueryType { + VK_QUERY_TYPE_OCCLUSION = 0, + VK_QUERY_TYPE_PIPELINE_STATISTICS = 1, + VK_QUERY_TYPE_TIMESTAMP = 2, + VK_QUERY_TYPE_BEGIN_RANGE = VK_QUERY_TYPE_OCCLUSION, + VK_QUERY_TYPE_END_RANGE = VK_QUERY_TYPE_TIMESTAMP, + VK_QUERY_TYPE_RANGE_SIZE = (VK_QUERY_TYPE_TIMESTAMP - VK_QUERY_TYPE_OCCLUSION + 1), + VK_QUERY_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkQueryType; + +typedef enum VkSharingMode { + VK_SHARING_MODE_EXCLUSIVE = 0, + VK_SHARING_MODE_CONCURRENT = 1, + VK_SHARING_MODE_BEGIN_RANGE = VK_SHARING_MODE_EXCLUSIVE, + VK_SHARING_MODE_END_RANGE = VK_SHARING_MODE_CONCURRENT, + VK_SHARING_MODE_RANGE_SIZE = (VK_SHARING_MODE_CONCURRENT - VK_SHARING_MODE_EXCLUSIVE + 1), + VK_SHARING_MODE_MAX_ENUM = 0x7FFFFFFF +} VkSharingMode; + +typedef enum VkImageLayout { + VK_IMAGE_LAYOUT_UNDEFINED = 0, + VK_IMAGE_LAYOUT_GENERAL = 1, + VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL = 2, + VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL = 3, + VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL = 4, + VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL = 5, + VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL = 6, + VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL = 7, + VK_IMAGE_LAYOUT_PREINITIALIZED = 8, + VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL = 1000117000, + VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL = 1000117001, + VK_IMAGE_LAYOUT_PRESENT_SRC_KHR = 1000001002, + VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR = 1000111000, + VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL_KHR = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL, + VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL_KHR = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL, + VK_IMAGE_LAYOUT_BEGIN_RANGE = VK_IMAGE_LAYOUT_UNDEFINED, + VK_IMAGE_LAYOUT_END_RANGE = VK_IMAGE_LAYOUT_PREINITIALIZED, + VK_IMAGE_LAYOUT_RANGE_SIZE = (VK_IMAGE_LAYOUT_PREINITIALIZED - VK_IMAGE_LAYOUT_UNDEFINED + 1), + VK_IMAGE_LAYOUT_MAX_ENUM = 0x7FFFFFFF +} VkImageLayout; + +typedef enum VkImageViewType { + VK_IMAGE_VIEW_TYPE_1D = 0, + VK_IMAGE_VIEW_TYPE_2D = 1, + VK_IMAGE_VIEW_TYPE_3D = 2, + VK_IMAGE_VIEW_TYPE_CUBE = 3, + VK_IMAGE_VIEW_TYPE_1D_ARRAY = 4, + VK_IMAGE_VIEW_TYPE_2D_ARRAY = 5, + VK_IMAGE_VIEW_TYPE_CUBE_ARRAY = 6, + VK_IMAGE_VIEW_TYPE_BEGIN_RANGE = VK_IMAGE_VIEW_TYPE_1D, + VK_IMAGE_VIEW_TYPE_END_RANGE = VK_IMAGE_VIEW_TYPE_CUBE_ARRAY, + VK_IMAGE_VIEW_TYPE_RANGE_SIZE = (VK_IMAGE_VIEW_TYPE_CUBE_ARRAY - VK_IMAGE_VIEW_TYPE_1D + 1), + VK_IMAGE_VIEW_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkImageViewType; + +typedef enum VkComponentSwizzle { + VK_COMPONENT_SWIZZLE_IDENTITY = 0, + VK_COMPONENT_SWIZZLE_ZERO = 1, + VK_COMPONENT_SWIZZLE_ONE = 2, + VK_COMPONENT_SWIZZLE_R = 3, + VK_COMPONENT_SWIZZLE_G = 4, + VK_COMPONENT_SWIZZLE_B = 5, + VK_COMPONENT_SWIZZLE_A = 6, + VK_COMPONENT_SWIZZLE_BEGIN_RANGE = VK_COMPONENT_SWIZZLE_IDENTITY, + VK_COMPONENT_SWIZZLE_END_RANGE = VK_COMPONENT_SWIZZLE_A, + VK_COMPONENT_SWIZZLE_RANGE_SIZE = (VK_COMPONENT_SWIZZLE_A - VK_COMPONENT_SWIZZLE_IDENTITY + 1), + 
VK_COMPONENT_SWIZZLE_MAX_ENUM = 0x7FFFFFFF +} VkComponentSwizzle; + +typedef enum VkVertexInputRate { + VK_VERTEX_INPUT_RATE_VERTEX = 0, + VK_VERTEX_INPUT_RATE_INSTANCE = 1, + VK_VERTEX_INPUT_RATE_BEGIN_RANGE = VK_VERTEX_INPUT_RATE_VERTEX, + VK_VERTEX_INPUT_RATE_END_RANGE = VK_VERTEX_INPUT_RATE_INSTANCE, + VK_VERTEX_INPUT_RATE_RANGE_SIZE = (VK_VERTEX_INPUT_RATE_INSTANCE - VK_VERTEX_INPUT_RATE_VERTEX + 1), + VK_VERTEX_INPUT_RATE_MAX_ENUM = 0x7FFFFFFF +} VkVertexInputRate; + +typedef enum VkPrimitiveTopology { + VK_PRIMITIVE_TOPOLOGY_POINT_LIST = 0, + VK_PRIMITIVE_TOPOLOGY_LINE_LIST = 1, + VK_PRIMITIVE_TOPOLOGY_LINE_STRIP = 2, + VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST = 3, + VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP = 4, + VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN = 5, + VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY = 6, + VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY = 7, + VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY = 8, + VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY = 9, + VK_PRIMITIVE_TOPOLOGY_PATCH_LIST = 10, + VK_PRIMITIVE_TOPOLOGY_BEGIN_RANGE = VK_PRIMITIVE_TOPOLOGY_POINT_LIST, + VK_PRIMITIVE_TOPOLOGY_END_RANGE = VK_PRIMITIVE_TOPOLOGY_PATCH_LIST, + VK_PRIMITIVE_TOPOLOGY_RANGE_SIZE = (VK_PRIMITIVE_TOPOLOGY_PATCH_LIST - VK_PRIMITIVE_TOPOLOGY_POINT_LIST + 1), + VK_PRIMITIVE_TOPOLOGY_MAX_ENUM = 0x7FFFFFFF +} VkPrimitiveTopology; + +typedef enum VkPolygonMode { + VK_POLYGON_MODE_FILL = 0, + VK_POLYGON_MODE_LINE = 1, + VK_POLYGON_MODE_POINT = 2, + VK_POLYGON_MODE_FILL_RECTANGLE_NV = 1000153000, + VK_POLYGON_MODE_BEGIN_RANGE = VK_POLYGON_MODE_FILL, + VK_POLYGON_MODE_END_RANGE = VK_POLYGON_MODE_POINT, + VK_POLYGON_MODE_RANGE_SIZE = (VK_POLYGON_MODE_POINT - VK_POLYGON_MODE_FILL + 1), + VK_POLYGON_MODE_MAX_ENUM = 0x7FFFFFFF +} VkPolygonMode; + +typedef enum VkFrontFace { + VK_FRONT_FACE_COUNTER_CLOCKWISE = 0, + VK_FRONT_FACE_CLOCKWISE = 1, + VK_FRONT_FACE_BEGIN_RANGE = VK_FRONT_FACE_COUNTER_CLOCKWISE, + VK_FRONT_FACE_END_RANGE = VK_FRONT_FACE_CLOCKWISE, + VK_FRONT_FACE_RANGE_SIZE = (VK_FRONT_FACE_CLOCKWISE - VK_FRONT_FACE_COUNTER_CLOCKWISE + 1), + VK_FRONT_FACE_MAX_ENUM = 0x7FFFFFFF +} VkFrontFace; + +typedef enum VkCompareOp { + VK_COMPARE_OP_NEVER = 0, + VK_COMPARE_OP_LESS = 1, + VK_COMPARE_OP_EQUAL = 2, + VK_COMPARE_OP_LESS_OR_EQUAL = 3, + VK_COMPARE_OP_GREATER = 4, + VK_COMPARE_OP_NOT_EQUAL = 5, + VK_COMPARE_OP_GREATER_OR_EQUAL = 6, + VK_COMPARE_OP_ALWAYS = 7, + VK_COMPARE_OP_BEGIN_RANGE = VK_COMPARE_OP_NEVER, + VK_COMPARE_OP_END_RANGE = VK_COMPARE_OP_ALWAYS, + VK_COMPARE_OP_RANGE_SIZE = (VK_COMPARE_OP_ALWAYS - VK_COMPARE_OP_NEVER + 1), + VK_COMPARE_OP_MAX_ENUM = 0x7FFFFFFF +} VkCompareOp; + +typedef enum VkStencilOp { + VK_STENCIL_OP_KEEP = 0, + VK_STENCIL_OP_ZERO = 1, + VK_STENCIL_OP_REPLACE = 2, + VK_STENCIL_OP_INCREMENT_AND_CLAMP = 3, + VK_STENCIL_OP_DECREMENT_AND_CLAMP = 4, + VK_STENCIL_OP_INVERT = 5, + VK_STENCIL_OP_INCREMENT_AND_WRAP = 6, + VK_STENCIL_OP_DECREMENT_AND_WRAP = 7, + VK_STENCIL_OP_BEGIN_RANGE = VK_STENCIL_OP_KEEP, + VK_STENCIL_OP_END_RANGE = VK_STENCIL_OP_DECREMENT_AND_WRAP, + VK_STENCIL_OP_RANGE_SIZE = (VK_STENCIL_OP_DECREMENT_AND_WRAP - VK_STENCIL_OP_KEEP + 1), + VK_STENCIL_OP_MAX_ENUM = 0x7FFFFFFF +} VkStencilOp; + +typedef enum VkLogicOp { + VK_LOGIC_OP_CLEAR = 0, + VK_LOGIC_OP_AND = 1, + VK_LOGIC_OP_AND_REVERSE = 2, + VK_LOGIC_OP_COPY = 3, + VK_LOGIC_OP_AND_INVERTED = 4, + VK_LOGIC_OP_NO_OP = 5, + VK_LOGIC_OP_XOR = 6, + VK_LOGIC_OP_OR = 7, + VK_LOGIC_OP_NOR = 8, + VK_LOGIC_OP_EQUIVALENT = 9, + VK_LOGIC_OP_INVERT = 10, + VK_LOGIC_OP_OR_REVERSE = 11, + 
VK_LOGIC_OP_COPY_INVERTED = 12, + VK_LOGIC_OP_OR_INVERTED = 13, + VK_LOGIC_OP_NAND = 14, + VK_LOGIC_OP_SET = 15, + VK_LOGIC_OP_BEGIN_RANGE = VK_LOGIC_OP_CLEAR, + VK_LOGIC_OP_END_RANGE = VK_LOGIC_OP_SET, + VK_LOGIC_OP_RANGE_SIZE = (VK_LOGIC_OP_SET - VK_LOGIC_OP_CLEAR + 1), + VK_LOGIC_OP_MAX_ENUM = 0x7FFFFFFF +} VkLogicOp; + +typedef enum VkBlendFactor { + VK_BLEND_FACTOR_ZERO = 0, + VK_BLEND_FACTOR_ONE = 1, + VK_BLEND_FACTOR_SRC_COLOR = 2, + VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR = 3, + VK_BLEND_FACTOR_DST_COLOR = 4, + VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR = 5, + VK_BLEND_FACTOR_SRC_ALPHA = 6, + VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA = 7, + VK_BLEND_FACTOR_DST_ALPHA = 8, + VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA = 9, + VK_BLEND_FACTOR_CONSTANT_COLOR = 10, + VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR = 11, + VK_BLEND_FACTOR_CONSTANT_ALPHA = 12, + VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA = 13, + VK_BLEND_FACTOR_SRC_ALPHA_SATURATE = 14, + VK_BLEND_FACTOR_SRC1_COLOR = 15, + VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR = 16, + VK_BLEND_FACTOR_SRC1_ALPHA = 17, + VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA = 18, + VK_BLEND_FACTOR_BEGIN_RANGE = VK_BLEND_FACTOR_ZERO, + VK_BLEND_FACTOR_END_RANGE = VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA, + VK_BLEND_FACTOR_RANGE_SIZE = (VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA - VK_BLEND_FACTOR_ZERO + 1), + VK_BLEND_FACTOR_MAX_ENUM = 0x7FFFFFFF +} VkBlendFactor; + +typedef enum VkBlendOp { + VK_BLEND_OP_ADD = 0, + VK_BLEND_OP_SUBTRACT = 1, + VK_BLEND_OP_REVERSE_SUBTRACT = 2, + VK_BLEND_OP_MIN = 3, + VK_BLEND_OP_MAX = 4, + VK_BLEND_OP_ZERO_EXT = 1000148000, + VK_BLEND_OP_SRC_EXT = 1000148001, + VK_BLEND_OP_DST_EXT = 1000148002, + VK_BLEND_OP_SRC_OVER_EXT = 1000148003, + VK_BLEND_OP_DST_OVER_EXT = 1000148004, + VK_BLEND_OP_SRC_IN_EXT = 1000148005, + VK_BLEND_OP_DST_IN_EXT = 1000148006, + VK_BLEND_OP_SRC_OUT_EXT = 1000148007, + VK_BLEND_OP_DST_OUT_EXT = 1000148008, + VK_BLEND_OP_SRC_ATOP_EXT = 1000148009, + VK_BLEND_OP_DST_ATOP_EXT = 1000148010, + VK_BLEND_OP_XOR_EXT = 1000148011, + VK_BLEND_OP_MULTIPLY_EXT = 1000148012, + VK_BLEND_OP_SCREEN_EXT = 1000148013, + VK_BLEND_OP_OVERLAY_EXT = 1000148014, + VK_BLEND_OP_DARKEN_EXT = 1000148015, + VK_BLEND_OP_LIGHTEN_EXT = 1000148016, + VK_BLEND_OP_COLORDODGE_EXT = 1000148017, + VK_BLEND_OP_COLORBURN_EXT = 1000148018, + VK_BLEND_OP_HARDLIGHT_EXT = 1000148019, + VK_BLEND_OP_SOFTLIGHT_EXT = 1000148020, + VK_BLEND_OP_DIFFERENCE_EXT = 1000148021, + VK_BLEND_OP_EXCLUSION_EXT = 1000148022, + VK_BLEND_OP_INVERT_EXT = 1000148023, + VK_BLEND_OP_INVERT_RGB_EXT = 1000148024, + VK_BLEND_OP_LINEARDODGE_EXT = 1000148025, + VK_BLEND_OP_LINEARBURN_EXT = 1000148026, + VK_BLEND_OP_VIVIDLIGHT_EXT = 1000148027, + VK_BLEND_OP_LINEARLIGHT_EXT = 1000148028, + VK_BLEND_OP_PINLIGHT_EXT = 1000148029, + VK_BLEND_OP_HARDMIX_EXT = 1000148030, + VK_BLEND_OP_HSL_HUE_EXT = 1000148031, + VK_BLEND_OP_HSL_SATURATION_EXT = 1000148032, + VK_BLEND_OP_HSL_COLOR_EXT = 1000148033, + VK_BLEND_OP_HSL_LUMINOSITY_EXT = 1000148034, + VK_BLEND_OP_PLUS_EXT = 1000148035, + VK_BLEND_OP_PLUS_CLAMPED_EXT = 1000148036, + VK_BLEND_OP_PLUS_CLAMPED_ALPHA_EXT = 1000148037, + VK_BLEND_OP_PLUS_DARKER_EXT = 1000148038, + VK_BLEND_OP_MINUS_EXT = 1000148039, + VK_BLEND_OP_MINUS_CLAMPED_EXT = 1000148040, + VK_BLEND_OP_CONTRAST_EXT = 1000148041, + VK_BLEND_OP_INVERT_OVG_EXT = 1000148042, + VK_BLEND_OP_RED_EXT = 1000148043, + VK_BLEND_OP_GREEN_EXT = 1000148044, + VK_BLEND_OP_BLUE_EXT = 1000148045, + VK_BLEND_OP_BEGIN_RANGE = VK_BLEND_OP_ADD, + VK_BLEND_OP_END_RANGE = VK_BLEND_OP_MAX, + VK_BLEND_OP_RANGE_SIZE = (VK_BLEND_OP_MAX - 
VK_BLEND_OP_ADD + 1), + VK_BLEND_OP_MAX_ENUM = 0x7FFFFFFF +} VkBlendOp; + +typedef enum VkDynamicState { + VK_DYNAMIC_STATE_VIEWPORT = 0, + VK_DYNAMIC_STATE_SCISSOR = 1, + VK_DYNAMIC_STATE_LINE_WIDTH = 2, + VK_DYNAMIC_STATE_DEPTH_BIAS = 3, + VK_DYNAMIC_STATE_BLEND_CONSTANTS = 4, + VK_DYNAMIC_STATE_DEPTH_BOUNDS = 5, + VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK = 6, + VK_DYNAMIC_STATE_STENCIL_WRITE_MASK = 7, + VK_DYNAMIC_STATE_STENCIL_REFERENCE = 8, + VK_DYNAMIC_STATE_VIEWPORT_W_SCALING_NV = 1000087000, + VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT = 1000099000, + VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT = 1000143000, + VK_DYNAMIC_STATE_BEGIN_RANGE = VK_DYNAMIC_STATE_VIEWPORT, + VK_DYNAMIC_STATE_END_RANGE = VK_DYNAMIC_STATE_STENCIL_REFERENCE, + VK_DYNAMIC_STATE_RANGE_SIZE = (VK_DYNAMIC_STATE_STENCIL_REFERENCE - VK_DYNAMIC_STATE_VIEWPORT + 1), + VK_DYNAMIC_STATE_MAX_ENUM = 0x7FFFFFFF +} VkDynamicState; + +typedef enum VkFilter { + VK_FILTER_NEAREST = 0, + VK_FILTER_LINEAR = 1, + VK_FILTER_CUBIC_IMG = 1000015000, + VK_FILTER_BEGIN_RANGE = VK_FILTER_NEAREST, + VK_FILTER_END_RANGE = VK_FILTER_LINEAR, + VK_FILTER_RANGE_SIZE = (VK_FILTER_LINEAR - VK_FILTER_NEAREST + 1), + VK_FILTER_MAX_ENUM = 0x7FFFFFFF +} VkFilter; + +typedef enum VkSamplerMipmapMode { + VK_SAMPLER_MIPMAP_MODE_NEAREST = 0, + VK_SAMPLER_MIPMAP_MODE_LINEAR = 1, + VK_SAMPLER_MIPMAP_MODE_BEGIN_RANGE = VK_SAMPLER_MIPMAP_MODE_NEAREST, + VK_SAMPLER_MIPMAP_MODE_END_RANGE = VK_SAMPLER_MIPMAP_MODE_LINEAR, + VK_SAMPLER_MIPMAP_MODE_RANGE_SIZE = (VK_SAMPLER_MIPMAP_MODE_LINEAR - VK_SAMPLER_MIPMAP_MODE_NEAREST + 1), + VK_SAMPLER_MIPMAP_MODE_MAX_ENUM = 0x7FFFFFFF +} VkSamplerMipmapMode; + +typedef enum VkSamplerAddressMode { + VK_SAMPLER_ADDRESS_MODE_REPEAT = 0, + VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT = 1, + VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE = 2, + VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER = 3, + VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE = 4, + VK_SAMPLER_ADDRESS_MODE_BEGIN_RANGE = VK_SAMPLER_ADDRESS_MODE_REPEAT, + VK_SAMPLER_ADDRESS_MODE_END_RANGE = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER, + VK_SAMPLER_ADDRESS_MODE_RANGE_SIZE = (VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER - VK_SAMPLER_ADDRESS_MODE_REPEAT + 1), + VK_SAMPLER_ADDRESS_MODE_MAX_ENUM = 0x7FFFFFFF +} VkSamplerAddressMode; + +typedef enum VkBorderColor { + VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK = 0, + VK_BORDER_COLOR_INT_TRANSPARENT_BLACK = 1, + VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK = 2, + VK_BORDER_COLOR_INT_OPAQUE_BLACK = 3, + VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE = 4, + VK_BORDER_COLOR_INT_OPAQUE_WHITE = 5, + VK_BORDER_COLOR_BEGIN_RANGE = VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK, + VK_BORDER_COLOR_END_RANGE = VK_BORDER_COLOR_INT_OPAQUE_WHITE, + VK_BORDER_COLOR_RANGE_SIZE = (VK_BORDER_COLOR_INT_OPAQUE_WHITE - VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK + 1), + VK_BORDER_COLOR_MAX_ENUM = 0x7FFFFFFF +} VkBorderColor; + +typedef enum VkDescriptorType { + VK_DESCRIPTOR_TYPE_SAMPLER = 0, + VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER = 1, + VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE = 2, + VK_DESCRIPTOR_TYPE_STORAGE_IMAGE = 3, + VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER = 4, + VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER = 5, + VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER = 6, + VK_DESCRIPTOR_TYPE_STORAGE_BUFFER = 7, + VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC = 8, + VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC = 9, + VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT = 10, + VK_DESCRIPTOR_TYPE_BEGIN_RANGE = VK_DESCRIPTOR_TYPE_SAMPLER, + VK_DESCRIPTOR_TYPE_END_RANGE = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, + VK_DESCRIPTOR_TYPE_RANGE_SIZE = 
(VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT - VK_DESCRIPTOR_TYPE_SAMPLER + 1), + VK_DESCRIPTOR_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkDescriptorType; + +typedef enum VkAttachmentLoadOp { + VK_ATTACHMENT_LOAD_OP_LOAD = 0, + VK_ATTACHMENT_LOAD_OP_CLEAR = 1, + VK_ATTACHMENT_LOAD_OP_DONT_CARE = 2, + VK_ATTACHMENT_LOAD_OP_BEGIN_RANGE = VK_ATTACHMENT_LOAD_OP_LOAD, + VK_ATTACHMENT_LOAD_OP_END_RANGE = VK_ATTACHMENT_LOAD_OP_DONT_CARE, + VK_ATTACHMENT_LOAD_OP_RANGE_SIZE = (VK_ATTACHMENT_LOAD_OP_DONT_CARE - VK_ATTACHMENT_LOAD_OP_LOAD + 1), + VK_ATTACHMENT_LOAD_OP_MAX_ENUM = 0x7FFFFFFF +} VkAttachmentLoadOp; + +typedef enum VkAttachmentStoreOp { + VK_ATTACHMENT_STORE_OP_STORE = 0, + VK_ATTACHMENT_STORE_OP_DONT_CARE = 1, + VK_ATTACHMENT_STORE_OP_BEGIN_RANGE = VK_ATTACHMENT_STORE_OP_STORE, + VK_ATTACHMENT_STORE_OP_END_RANGE = VK_ATTACHMENT_STORE_OP_DONT_CARE, + VK_ATTACHMENT_STORE_OP_RANGE_SIZE = (VK_ATTACHMENT_STORE_OP_DONT_CARE - VK_ATTACHMENT_STORE_OP_STORE + 1), + VK_ATTACHMENT_STORE_OP_MAX_ENUM = 0x7FFFFFFF +} VkAttachmentStoreOp; + +typedef enum VkPipelineBindPoint { + VK_PIPELINE_BIND_POINT_GRAPHICS = 0, + VK_PIPELINE_BIND_POINT_COMPUTE = 1, + VK_PIPELINE_BIND_POINT_BEGIN_RANGE = VK_PIPELINE_BIND_POINT_GRAPHICS, + VK_PIPELINE_BIND_POINT_END_RANGE = VK_PIPELINE_BIND_POINT_COMPUTE, + VK_PIPELINE_BIND_POINT_RANGE_SIZE = (VK_PIPELINE_BIND_POINT_COMPUTE - VK_PIPELINE_BIND_POINT_GRAPHICS + 1), + VK_PIPELINE_BIND_POINT_MAX_ENUM = 0x7FFFFFFF +} VkPipelineBindPoint; + +typedef enum VkCommandBufferLevel { + VK_COMMAND_BUFFER_LEVEL_PRIMARY = 0, + VK_COMMAND_BUFFER_LEVEL_SECONDARY = 1, + VK_COMMAND_BUFFER_LEVEL_BEGIN_RANGE = VK_COMMAND_BUFFER_LEVEL_PRIMARY, + VK_COMMAND_BUFFER_LEVEL_END_RANGE = VK_COMMAND_BUFFER_LEVEL_SECONDARY, + VK_COMMAND_BUFFER_LEVEL_RANGE_SIZE = (VK_COMMAND_BUFFER_LEVEL_SECONDARY - VK_COMMAND_BUFFER_LEVEL_PRIMARY + 1), + VK_COMMAND_BUFFER_LEVEL_MAX_ENUM = 0x7FFFFFFF +} VkCommandBufferLevel; + +typedef enum VkIndexType { + VK_INDEX_TYPE_UINT16 = 0, + VK_INDEX_TYPE_UINT32 = 1, + VK_INDEX_TYPE_BEGIN_RANGE = VK_INDEX_TYPE_UINT16, + VK_INDEX_TYPE_END_RANGE = VK_INDEX_TYPE_UINT32, + VK_INDEX_TYPE_RANGE_SIZE = (VK_INDEX_TYPE_UINT32 - VK_INDEX_TYPE_UINT16 + 1), + VK_INDEX_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkIndexType; + +typedef enum VkSubpassContents { + VK_SUBPASS_CONTENTS_INLINE = 0, + VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS = 1, + VK_SUBPASS_CONTENTS_BEGIN_RANGE = VK_SUBPASS_CONTENTS_INLINE, + VK_SUBPASS_CONTENTS_END_RANGE = VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS, + VK_SUBPASS_CONTENTS_RANGE_SIZE = (VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS - VK_SUBPASS_CONTENTS_INLINE + 1), + VK_SUBPASS_CONTENTS_MAX_ENUM = 0x7FFFFFFF +} VkSubpassContents; + +typedef enum VkObjectType { + VK_OBJECT_TYPE_UNKNOWN = 0, + VK_OBJECT_TYPE_INSTANCE = 1, + VK_OBJECT_TYPE_PHYSICAL_DEVICE = 2, + VK_OBJECT_TYPE_DEVICE = 3, + VK_OBJECT_TYPE_QUEUE = 4, + VK_OBJECT_TYPE_SEMAPHORE = 5, + VK_OBJECT_TYPE_COMMAND_BUFFER = 6, + VK_OBJECT_TYPE_FENCE = 7, + VK_OBJECT_TYPE_DEVICE_MEMORY = 8, + VK_OBJECT_TYPE_BUFFER = 9, + VK_OBJECT_TYPE_IMAGE = 10, + VK_OBJECT_TYPE_EVENT = 11, + VK_OBJECT_TYPE_QUERY_POOL = 12, + VK_OBJECT_TYPE_BUFFER_VIEW = 13, + VK_OBJECT_TYPE_IMAGE_VIEW = 14, + VK_OBJECT_TYPE_SHADER_MODULE = 15, + VK_OBJECT_TYPE_PIPELINE_CACHE = 16, + VK_OBJECT_TYPE_PIPELINE_LAYOUT = 17, + VK_OBJECT_TYPE_RENDER_PASS = 18, + VK_OBJECT_TYPE_PIPELINE = 19, + VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT = 20, + VK_OBJECT_TYPE_SAMPLER = 21, + VK_OBJECT_TYPE_DESCRIPTOR_POOL = 22, + VK_OBJECT_TYPE_DESCRIPTOR_SET = 23, + 
VK_OBJECT_TYPE_FRAMEBUFFER = 24, + VK_OBJECT_TYPE_COMMAND_POOL = 25, + VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION = 1000156000, + VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE = 1000085000, + VK_OBJECT_TYPE_SURFACE_KHR = 1000000000, + VK_OBJECT_TYPE_SWAPCHAIN_KHR = 1000001000, + VK_OBJECT_TYPE_DISPLAY_KHR = 1000002000, + VK_OBJECT_TYPE_DISPLAY_MODE_KHR = 1000002001, + VK_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT = 1000011000, + VK_OBJECT_TYPE_OBJECT_TABLE_NVX = 1000086000, + VK_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NVX = 1000086001, + VK_OBJECT_TYPE_DEBUG_UTILS_MESSENGER_EXT = 1000128000, + VK_OBJECT_TYPE_VALIDATION_CACHE_EXT = 1000160000, + VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_KHR = VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE, + VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_KHR = VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION, + VK_OBJECT_TYPE_BEGIN_RANGE = VK_OBJECT_TYPE_UNKNOWN, + VK_OBJECT_TYPE_END_RANGE = VK_OBJECT_TYPE_COMMAND_POOL, + VK_OBJECT_TYPE_RANGE_SIZE = (VK_OBJECT_TYPE_COMMAND_POOL - VK_OBJECT_TYPE_UNKNOWN + 1), + VK_OBJECT_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkObjectType; + +typedef VkFlags VkInstanceCreateFlags; + +typedef enum VkFormatFeatureFlagBits { + VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT = 0x00000001, + VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT = 0x00000002, + VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT = 0x00000004, + VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT = 0x00000008, + VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT = 0x00000010, + VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT = 0x00000020, + VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT = 0x00000040, + VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT = 0x00000080, + VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT = 0x00000100, + VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT = 0x00000200, + VK_FORMAT_FEATURE_BLIT_SRC_BIT = 0x00000400, + VK_FORMAT_FEATURE_BLIT_DST_BIT = 0x00000800, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT = 0x00001000, + VK_FORMAT_FEATURE_TRANSFER_SRC_BIT = 0x00004000, + VK_FORMAT_FEATURE_TRANSFER_DST_BIT = 0x00008000, + VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT = 0x00020000, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT = 0x00040000, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT = 0x00080000, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT = 0x00100000, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT = 0x00200000, + VK_FORMAT_FEATURE_DISJOINT_BIT = 0x00400000, + VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT = 0x00800000, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG = 0x00002000, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_MINMAX_BIT_EXT = 0x00010000, + VK_FORMAT_FEATURE_TRANSFER_SRC_BIT_KHR = VK_FORMAT_FEATURE_TRANSFER_SRC_BIT, + VK_FORMAT_FEATURE_TRANSFER_DST_BIT_KHR = VK_FORMAT_FEATURE_TRANSFER_DST_BIT, + VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT_KHR = VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT_KHR = VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT_KHR = VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT_KHR = VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT, + 
VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT_KHR = VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT, + VK_FORMAT_FEATURE_DISJOINT_BIT_KHR = VK_FORMAT_FEATURE_DISJOINT_BIT, + VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT_KHR = VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT, + VK_FORMAT_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkFormatFeatureFlagBits; +typedef VkFlags VkFormatFeatureFlags; + +typedef enum VkImageUsageFlagBits { + VK_IMAGE_USAGE_TRANSFER_SRC_BIT = 0x00000001, + VK_IMAGE_USAGE_TRANSFER_DST_BIT = 0x00000002, + VK_IMAGE_USAGE_SAMPLED_BIT = 0x00000004, + VK_IMAGE_USAGE_STORAGE_BIT = 0x00000008, + VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT = 0x00000010, + VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT = 0x00000020, + VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT = 0x00000040, + VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT = 0x00000080, + VK_IMAGE_USAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkImageUsageFlagBits; +typedef VkFlags VkImageUsageFlags; + +typedef enum VkImageCreateFlagBits { + VK_IMAGE_CREATE_SPARSE_BINDING_BIT = 0x00000001, + VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT = 0x00000002, + VK_IMAGE_CREATE_SPARSE_ALIASED_BIT = 0x00000004, + VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT = 0x00000008, + VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT = 0x00000010, + VK_IMAGE_CREATE_ALIAS_BIT = 0x00000400, + VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT = 0x00000040, + VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT = 0x00000020, + VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT = 0x00000080, + VK_IMAGE_CREATE_EXTENDED_USAGE_BIT = 0x00000100, + VK_IMAGE_CREATE_PROTECTED_BIT = 0x00000800, + VK_IMAGE_CREATE_DISJOINT_BIT = 0x00000200, + VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT = 0x00001000, + VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR = VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT, + VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR = VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT, + VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT_KHR = VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT, + VK_IMAGE_CREATE_EXTENDED_USAGE_BIT_KHR = VK_IMAGE_CREATE_EXTENDED_USAGE_BIT, + VK_IMAGE_CREATE_DISJOINT_BIT_KHR = VK_IMAGE_CREATE_DISJOINT_BIT, + VK_IMAGE_CREATE_ALIAS_BIT_KHR = VK_IMAGE_CREATE_ALIAS_BIT, + VK_IMAGE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkImageCreateFlagBits; +typedef VkFlags VkImageCreateFlags; + +typedef enum VkSampleCountFlagBits { + VK_SAMPLE_COUNT_1_BIT = 0x00000001, + VK_SAMPLE_COUNT_2_BIT = 0x00000002, + VK_SAMPLE_COUNT_4_BIT = 0x00000004, + VK_SAMPLE_COUNT_8_BIT = 0x00000008, + VK_SAMPLE_COUNT_16_BIT = 0x00000010, + VK_SAMPLE_COUNT_32_BIT = 0x00000020, + VK_SAMPLE_COUNT_64_BIT = 0x00000040, + VK_SAMPLE_COUNT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkSampleCountFlagBits; +typedef VkFlags VkSampleCountFlags; + +typedef enum VkQueueFlagBits { + VK_QUEUE_GRAPHICS_BIT = 0x00000001, + VK_QUEUE_COMPUTE_BIT = 0x00000002, + VK_QUEUE_TRANSFER_BIT = 0x00000004, + VK_QUEUE_SPARSE_BINDING_BIT = 0x00000008, + VK_QUEUE_PROTECTED_BIT = 0x00000010, + VK_QUEUE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkQueueFlagBits; +typedef VkFlags VkQueueFlags; + +typedef enum VkMemoryPropertyFlagBits { + VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT = 0x00000001, + VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT = 0x00000002, + VK_MEMORY_PROPERTY_HOST_COHERENT_BIT = 0x00000004, + VK_MEMORY_PROPERTY_HOST_CACHED_BIT = 0x00000008, + VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT = 0x00000010, + VK_MEMORY_PROPERTY_PROTECTED_BIT = 0x00000020, + VK_MEMORY_PROPERTY_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} 
VkMemoryPropertyFlagBits; +typedef VkFlags VkMemoryPropertyFlags; + +typedef enum VkMemoryHeapFlagBits { + VK_MEMORY_HEAP_DEVICE_LOCAL_BIT = 0x00000001, + VK_MEMORY_HEAP_MULTI_INSTANCE_BIT = 0x00000002, + VK_MEMORY_HEAP_MULTI_INSTANCE_BIT_KHR = VK_MEMORY_HEAP_MULTI_INSTANCE_BIT, + VK_MEMORY_HEAP_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkMemoryHeapFlagBits; +typedef VkFlags VkMemoryHeapFlags; +typedef VkFlags VkDeviceCreateFlags; + +typedef enum VkDeviceQueueCreateFlagBits { + VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT = 0x00000001, + VK_DEVICE_QUEUE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkDeviceQueueCreateFlagBits; +typedef VkFlags VkDeviceQueueCreateFlags; + +typedef enum VkPipelineStageFlagBits { + VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT = 0x00000001, + VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT = 0x00000002, + VK_PIPELINE_STAGE_VERTEX_INPUT_BIT = 0x00000004, + VK_PIPELINE_STAGE_VERTEX_SHADER_BIT = 0x00000008, + VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT = 0x00000010, + VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT = 0x00000020, + VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT = 0x00000040, + VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT = 0x00000080, + VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT = 0x00000100, + VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT = 0x00000200, + VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT = 0x00000400, + VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT = 0x00000800, + VK_PIPELINE_STAGE_TRANSFER_BIT = 0x00001000, + VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT = 0x00002000, + VK_PIPELINE_STAGE_HOST_BIT = 0x00004000, + VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT = 0x00008000, + VK_PIPELINE_STAGE_ALL_COMMANDS_BIT = 0x00010000, + VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX = 0x00020000, + VK_PIPELINE_STAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkPipelineStageFlagBits; +typedef VkFlags VkPipelineStageFlags; +typedef VkFlags VkMemoryMapFlags; + +typedef enum VkImageAspectFlagBits { + VK_IMAGE_ASPECT_COLOR_BIT = 0x00000001, + VK_IMAGE_ASPECT_DEPTH_BIT = 0x00000002, + VK_IMAGE_ASPECT_STENCIL_BIT = 0x00000004, + VK_IMAGE_ASPECT_METADATA_BIT = 0x00000008, + VK_IMAGE_ASPECT_PLANE_0_BIT = 0x00000010, + VK_IMAGE_ASPECT_PLANE_1_BIT = 0x00000020, + VK_IMAGE_ASPECT_PLANE_2_BIT = 0x00000040, + VK_IMAGE_ASPECT_PLANE_0_BIT_KHR = VK_IMAGE_ASPECT_PLANE_0_BIT, + VK_IMAGE_ASPECT_PLANE_1_BIT_KHR = VK_IMAGE_ASPECT_PLANE_1_BIT, + VK_IMAGE_ASPECT_PLANE_2_BIT_KHR = VK_IMAGE_ASPECT_PLANE_2_BIT, + VK_IMAGE_ASPECT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkImageAspectFlagBits; +typedef VkFlags VkImageAspectFlags; + +typedef enum VkSparseImageFormatFlagBits { + VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT = 0x00000001, + VK_SPARSE_IMAGE_FORMAT_ALIGNED_MIP_SIZE_BIT = 0x00000002, + VK_SPARSE_IMAGE_FORMAT_NONSTANDARD_BLOCK_SIZE_BIT = 0x00000004, + VK_SPARSE_IMAGE_FORMAT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkSparseImageFormatFlagBits; +typedef VkFlags VkSparseImageFormatFlags; + +typedef enum VkSparseMemoryBindFlagBits { + VK_SPARSE_MEMORY_BIND_METADATA_BIT = 0x00000001, + VK_SPARSE_MEMORY_BIND_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkSparseMemoryBindFlagBits; +typedef VkFlags VkSparseMemoryBindFlags; + +typedef enum VkFenceCreateFlagBits { + VK_FENCE_CREATE_SIGNALED_BIT = 0x00000001, + VK_FENCE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkFenceCreateFlagBits; +typedef VkFlags VkFenceCreateFlags; +typedef VkFlags VkSemaphoreCreateFlags; +typedef VkFlags VkEventCreateFlags; +typedef VkFlags VkQueryPoolCreateFlags; + +typedef enum VkQueryPipelineStatisticFlagBits { + VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_VERTICES_BIT = 0x00000001, + 
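+    /* Editorial note (hedged): these bits select which counters a
+     * VK_QUERY_TYPE_PIPELINE_STATISTICS pool records, via the
+     * pipelineStatistics member of VkQueryPoolCreateInfo (declared further
+     * below); using them presumably requires the pipelineStatisticsQuery
+     * device feature from VkPhysicalDeviceFeatures. */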
VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_PRIMITIVES_BIT = 0x00000002, + VK_QUERY_PIPELINE_STATISTIC_VERTEX_SHADER_INVOCATIONS_BIT = 0x00000004, + VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_INVOCATIONS_BIT = 0x00000008, + VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_PRIMITIVES_BIT = 0x00000010, + VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT = 0x00000020, + VK_QUERY_PIPELINE_STATISTIC_CLIPPING_PRIMITIVES_BIT = 0x00000040, + VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT = 0x00000080, + VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_CONTROL_SHADER_PATCHES_BIT = 0x00000100, + VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_EVALUATION_SHADER_INVOCATIONS_BIT = 0x00000200, + VK_QUERY_PIPELINE_STATISTIC_COMPUTE_SHADER_INVOCATIONS_BIT = 0x00000400, + VK_QUERY_PIPELINE_STATISTIC_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkQueryPipelineStatisticFlagBits; +typedef VkFlags VkQueryPipelineStatisticFlags; + +typedef enum VkQueryResultFlagBits { + VK_QUERY_RESULT_64_BIT = 0x00000001, + VK_QUERY_RESULT_WAIT_BIT = 0x00000002, + VK_QUERY_RESULT_WITH_AVAILABILITY_BIT = 0x00000004, + VK_QUERY_RESULT_PARTIAL_BIT = 0x00000008, + VK_QUERY_RESULT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkQueryResultFlagBits; +typedef VkFlags VkQueryResultFlags; + +typedef enum VkBufferCreateFlagBits { + VK_BUFFER_CREATE_SPARSE_BINDING_BIT = 0x00000001, + VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT = 0x00000002, + VK_BUFFER_CREATE_SPARSE_ALIASED_BIT = 0x00000004, + VK_BUFFER_CREATE_PROTECTED_BIT = 0x00000008, + VK_BUFFER_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkBufferCreateFlagBits; +typedef VkFlags VkBufferCreateFlags; + +typedef enum VkBufferUsageFlagBits { + VK_BUFFER_USAGE_TRANSFER_SRC_BIT = 0x00000001, + VK_BUFFER_USAGE_TRANSFER_DST_BIT = 0x00000002, + VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT = 0x00000004, + VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT = 0x00000008, + VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT = 0x00000010, + VK_BUFFER_USAGE_STORAGE_BUFFER_BIT = 0x00000020, + VK_BUFFER_USAGE_INDEX_BUFFER_BIT = 0x00000040, + VK_BUFFER_USAGE_VERTEX_BUFFER_BIT = 0x00000080, + VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT = 0x00000100, + VK_BUFFER_USAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkBufferUsageFlagBits; +typedef VkFlags VkBufferUsageFlags; +typedef VkFlags VkBufferViewCreateFlags; +typedef VkFlags VkImageViewCreateFlags; +typedef VkFlags VkShaderModuleCreateFlags; +typedef VkFlags VkPipelineCacheCreateFlags; + +typedef enum VkPipelineCreateFlagBits { + VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT = 0x00000001, + VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT = 0x00000002, + VK_PIPELINE_CREATE_DERIVATIVE_BIT = 0x00000004, + VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT = 0x00000008, + VK_PIPELINE_CREATE_DISPATCH_BASE = 0x00000010, + VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT_KHR = VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT, + VK_PIPELINE_CREATE_DISPATCH_BASE_KHR = VK_PIPELINE_CREATE_DISPATCH_BASE, + VK_PIPELINE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkPipelineCreateFlagBits; +typedef VkFlags VkPipelineCreateFlags; +typedef VkFlags VkPipelineShaderStageCreateFlags; + +typedef enum VkShaderStageFlagBits { + VK_SHADER_STAGE_VERTEX_BIT = 0x00000001, + VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT = 0x00000002, + VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT = 0x00000004, + VK_SHADER_STAGE_GEOMETRY_BIT = 0x00000008, + VK_SHADER_STAGE_FRAGMENT_BIT = 0x00000010, + VK_SHADER_STAGE_COMPUTE_BIT = 0x00000020, + VK_SHADER_STAGE_ALL_GRAPHICS = 0x0000001F, + VK_SHADER_STAGE_ALL = 0x7FFFFFFF, + VK_SHADER_STAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} 
VkShaderStageFlagBits; +typedef VkFlags VkPipelineVertexInputStateCreateFlags; +typedef VkFlags VkPipelineInputAssemblyStateCreateFlags; +typedef VkFlags VkPipelineTessellationStateCreateFlags; +typedef VkFlags VkPipelineViewportStateCreateFlags; +typedef VkFlags VkPipelineRasterizationStateCreateFlags; + +typedef enum VkCullModeFlagBits { + VK_CULL_MODE_NONE = 0, + VK_CULL_MODE_FRONT_BIT = 0x00000001, + VK_CULL_MODE_BACK_BIT = 0x00000002, + VK_CULL_MODE_FRONT_AND_BACK = 0x00000003, + VK_CULL_MODE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkCullModeFlagBits; +typedef VkFlags VkCullModeFlags; +typedef VkFlags VkPipelineMultisampleStateCreateFlags; +typedef VkFlags VkPipelineDepthStencilStateCreateFlags; +typedef VkFlags VkPipelineColorBlendStateCreateFlags; + +typedef enum VkColorComponentFlagBits { + VK_COLOR_COMPONENT_R_BIT = 0x00000001, + VK_COLOR_COMPONENT_G_BIT = 0x00000002, + VK_COLOR_COMPONENT_B_BIT = 0x00000004, + VK_COLOR_COMPONENT_A_BIT = 0x00000008, + VK_COLOR_COMPONENT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkColorComponentFlagBits; +typedef VkFlags VkColorComponentFlags; +typedef VkFlags VkPipelineDynamicStateCreateFlags; +typedef VkFlags VkPipelineLayoutCreateFlags; +typedef VkFlags VkShaderStageFlags; +typedef VkFlags VkSamplerCreateFlags; + +typedef enum VkDescriptorSetLayoutCreateFlagBits { + VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR = 0x00000001, + VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT = 0x00000002, + VK_DESCRIPTOR_SET_LAYOUT_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkDescriptorSetLayoutCreateFlagBits; +typedef VkFlags VkDescriptorSetLayoutCreateFlags; + +typedef enum VkDescriptorPoolCreateFlagBits { + VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT = 0x00000001, + VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT = 0x00000002, + VK_DESCRIPTOR_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkDescriptorPoolCreateFlagBits; +typedef VkFlags VkDescriptorPoolCreateFlags; +typedef VkFlags VkDescriptorPoolResetFlags; +typedef VkFlags VkFramebufferCreateFlags; +typedef VkFlags VkRenderPassCreateFlags; + +typedef enum VkAttachmentDescriptionFlagBits { + VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT = 0x00000001, + VK_ATTACHMENT_DESCRIPTION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkAttachmentDescriptionFlagBits; +typedef VkFlags VkAttachmentDescriptionFlags; + +typedef enum VkSubpassDescriptionFlagBits { + VK_SUBPASS_DESCRIPTION_PER_VIEW_ATTRIBUTES_BIT_NVX = 0x00000001, + VK_SUBPASS_DESCRIPTION_PER_VIEW_POSITION_X_ONLY_BIT_NVX = 0x00000002, + VK_SUBPASS_DESCRIPTION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkSubpassDescriptionFlagBits; +typedef VkFlags VkSubpassDescriptionFlags; + +typedef enum VkAccessFlagBits { + VK_ACCESS_INDIRECT_COMMAND_READ_BIT = 0x00000001, + VK_ACCESS_INDEX_READ_BIT = 0x00000002, + VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT = 0x00000004, + VK_ACCESS_UNIFORM_READ_BIT = 0x00000008, + VK_ACCESS_INPUT_ATTACHMENT_READ_BIT = 0x00000010, + VK_ACCESS_SHADER_READ_BIT = 0x00000020, + VK_ACCESS_SHADER_WRITE_BIT = 0x00000040, + VK_ACCESS_COLOR_ATTACHMENT_READ_BIT = 0x00000080, + VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT = 0x00000100, + VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT = 0x00000200, + VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT = 0x00000400, + VK_ACCESS_TRANSFER_READ_BIT = 0x00000800, + VK_ACCESS_TRANSFER_WRITE_BIT = 0x00001000, + VK_ACCESS_HOST_READ_BIT = 0x00002000, + VK_ACCESS_HOST_WRITE_BIT = 0x00004000, + VK_ACCESS_MEMORY_READ_BIT = 0x00008000, + VK_ACCESS_MEMORY_WRITE_BIT = 0x00010000, + VK_ACCESS_COMMAND_PROCESS_READ_BIT_NVX = 0x00020000, + 
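+    /* Editorial sketch (hedged): access masks pair with pipeline stage masks
+     * in barriers. A common post-transfer transition, assuming 'cmd' and a
+     * filled-in VkImageMemoryBarrier 'barrier' (declared further below)
+     * already exist, might look like:
+     *
+     *     barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
+     *     barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
+     *     vkCmdPipelineBarrier(cmd, VK_PIPELINE_STAGE_TRANSFER_BIT,
+     *                          VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
+     *                          0, 0, NULL, 0, NULL, 1, &barrier);
+     */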
VK_ACCESS_COMMAND_PROCESS_WRITE_BIT_NVX = 0x00040000, + VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT = 0x00080000, + VK_ACCESS_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkAccessFlagBits; +typedef VkFlags VkAccessFlags; + +typedef enum VkDependencyFlagBits { + VK_DEPENDENCY_BY_REGION_BIT = 0x00000001, + VK_DEPENDENCY_DEVICE_GROUP_BIT = 0x00000004, + VK_DEPENDENCY_VIEW_LOCAL_BIT = 0x00000002, + VK_DEPENDENCY_VIEW_LOCAL_BIT_KHR = VK_DEPENDENCY_VIEW_LOCAL_BIT, + VK_DEPENDENCY_DEVICE_GROUP_BIT_KHR = VK_DEPENDENCY_DEVICE_GROUP_BIT, + VK_DEPENDENCY_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkDependencyFlagBits; +typedef VkFlags VkDependencyFlags; + +typedef enum VkCommandPoolCreateFlagBits { + VK_COMMAND_POOL_CREATE_TRANSIENT_BIT = 0x00000001, + VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT = 0x00000002, + VK_COMMAND_POOL_CREATE_PROTECTED_BIT = 0x00000004, + VK_COMMAND_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkCommandPoolCreateFlagBits; +typedef VkFlags VkCommandPoolCreateFlags; + +typedef enum VkCommandPoolResetFlagBits { + VK_COMMAND_POOL_RESET_RELEASE_RESOURCES_BIT = 0x00000001, + VK_COMMAND_POOL_RESET_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkCommandPoolResetFlagBits; +typedef VkFlags VkCommandPoolResetFlags; + +typedef enum VkCommandBufferUsageFlagBits { + VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT = 0x00000001, + VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT = 0x00000002, + VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT = 0x00000004, + VK_COMMAND_BUFFER_USAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkCommandBufferUsageFlagBits; +typedef VkFlags VkCommandBufferUsageFlags; + +typedef enum VkQueryControlFlagBits { + VK_QUERY_CONTROL_PRECISE_BIT = 0x00000001, + VK_QUERY_CONTROL_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkQueryControlFlagBits; +typedef VkFlags VkQueryControlFlags; + +typedef enum VkCommandBufferResetFlagBits { + VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT = 0x00000001, + VK_COMMAND_BUFFER_RESET_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkCommandBufferResetFlagBits; +typedef VkFlags VkCommandBufferResetFlags; + +typedef enum VkStencilFaceFlagBits { + VK_STENCIL_FACE_FRONT_BIT = 0x00000001, + VK_STENCIL_FACE_BACK_BIT = 0x00000002, + VK_STENCIL_FRONT_AND_BACK = 0x00000003, + VK_STENCIL_FACE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkStencilFaceFlagBits; +typedef VkFlags VkStencilFaceFlags; + +typedef struct VkApplicationInfo { + VkStructureType sType; + const void* pNext; + const char* pApplicationName; + uint32_t applicationVersion; + const char* pEngineName; + uint32_t engineVersion; + uint32_t apiVersion; +} VkApplicationInfo; + +typedef struct VkInstanceCreateInfo { + VkStructureType sType; + const void* pNext; + VkInstanceCreateFlags flags; + const VkApplicationInfo* pApplicationInfo; + uint32_t enabledLayerCount; + const char* const* ppEnabledLayerNames; + uint32_t enabledExtensionCount; + const char* const* ppEnabledExtensionNames; +} VkInstanceCreateInfo; + +typedef void* (VKAPI_PTR *PFN_vkAllocationFunction)( + void* pUserData, + size_t size, + size_t alignment, + VkSystemAllocationScope allocationScope); + +typedef void* (VKAPI_PTR *PFN_vkReallocationFunction)( + void* pUserData, + void* pOriginal, + size_t size, + size_t alignment, + VkSystemAllocationScope allocationScope); + +typedef void (VKAPI_PTR *PFN_vkFreeFunction)( + void* pUserData, + void* pMemory); + +typedef void (VKAPI_PTR *PFN_vkInternalAllocationNotification)( + void* pUserData, + size_t size, + VkInternalAllocationType allocationType, + VkSystemAllocationScope allocationScope); + +typedef void (VKAPI_PTR 
*PFN_vkInternalFreeNotification)( + void* pUserData, + size_t size, + VkInternalAllocationType allocationType, + VkSystemAllocationScope allocationScope); + +typedef struct VkAllocationCallbacks { + void* pUserData; + PFN_vkAllocationFunction pfnAllocation; + PFN_vkReallocationFunction pfnReallocation; + PFN_vkFreeFunction pfnFree; + PFN_vkInternalAllocationNotification pfnInternalAllocation; + PFN_vkInternalFreeNotification pfnInternalFree; +} VkAllocationCallbacks; + +typedef struct VkPhysicalDeviceFeatures { + VkBool32 robustBufferAccess; + VkBool32 fullDrawIndexUint32; + VkBool32 imageCubeArray; + VkBool32 independentBlend; + VkBool32 geometryShader; + VkBool32 tessellationShader; + VkBool32 sampleRateShading; + VkBool32 dualSrcBlend; + VkBool32 logicOp; + VkBool32 multiDrawIndirect; + VkBool32 drawIndirectFirstInstance; + VkBool32 depthClamp; + VkBool32 depthBiasClamp; + VkBool32 fillModeNonSolid; + VkBool32 depthBounds; + VkBool32 wideLines; + VkBool32 largePoints; + VkBool32 alphaToOne; + VkBool32 multiViewport; + VkBool32 samplerAnisotropy; + VkBool32 textureCompressionETC2; + VkBool32 textureCompressionASTC_LDR; + VkBool32 textureCompressionBC; + VkBool32 occlusionQueryPrecise; + VkBool32 pipelineStatisticsQuery; + VkBool32 vertexPipelineStoresAndAtomics; + VkBool32 fragmentStoresAndAtomics; + VkBool32 shaderTessellationAndGeometryPointSize; + VkBool32 shaderImageGatherExtended; + VkBool32 shaderStorageImageExtendedFormats; + VkBool32 shaderStorageImageMultisample; + VkBool32 shaderStorageImageReadWithoutFormat; + VkBool32 shaderStorageImageWriteWithoutFormat; + VkBool32 shaderUniformBufferArrayDynamicIndexing; + VkBool32 shaderSampledImageArrayDynamicIndexing; + VkBool32 shaderStorageBufferArrayDynamicIndexing; + VkBool32 shaderStorageImageArrayDynamicIndexing; + VkBool32 shaderClipDistance; + VkBool32 shaderCullDistance; + VkBool32 shaderFloat64; + VkBool32 shaderInt64; + VkBool32 shaderInt16; + VkBool32 shaderResourceResidency; + VkBool32 shaderResourceMinLod; + VkBool32 sparseBinding; + VkBool32 sparseResidencyBuffer; + VkBool32 sparseResidencyImage2D; + VkBool32 sparseResidencyImage3D; + VkBool32 sparseResidency2Samples; + VkBool32 sparseResidency4Samples; + VkBool32 sparseResidency8Samples; + VkBool32 sparseResidency16Samples; + VkBool32 sparseResidencyAliased; + VkBool32 variableMultisampleRate; + VkBool32 inheritedQueries; +} VkPhysicalDeviceFeatures; + +typedef struct VkFormatProperties { + VkFormatFeatureFlags linearTilingFeatures; + VkFormatFeatureFlags optimalTilingFeatures; + VkFormatFeatureFlags bufferFeatures; +} VkFormatProperties; + +typedef struct VkExtent3D { + uint32_t width; + uint32_t height; + uint32_t depth; +} VkExtent3D; + +typedef struct VkImageFormatProperties { + VkExtent3D maxExtent; + uint32_t maxMipLevels; + uint32_t maxArrayLayers; + VkSampleCountFlags sampleCounts; + VkDeviceSize maxResourceSize; +} VkImageFormatProperties; + +typedef struct VkPhysicalDeviceLimits { + uint32_t maxImageDimension1D; + uint32_t maxImageDimension2D; + uint32_t maxImageDimension3D; + uint32_t maxImageDimensionCube; + uint32_t maxImageArrayLayers; + uint32_t maxTexelBufferElements; + uint32_t maxUniformBufferRange; + uint32_t maxStorageBufferRange; + uint32_t maxPushConstantsSize; + uint32_t maxMemoryAllocationCount; + uint32_t maxSamplerAllocationCount; + VkDeviceSize bufferImageGranularity; + VkDeviceSize sparseAddressSpaceSize; + uint32_t maxBoundDescriptorSets; + uint32_t maxPerStageDescriptorSamplers; + uint32_t maxPerStageDescriptorUniformBuffers; + uint32_t 
maxPerStageDescriptorStorageBuffers; + uint32_t maxPerStageDescriptorSampledImages; + uint32_t maxPerStageDescriptorStorageImages; + uint32_t maxPerStageDescriptorInputAttachments; + uint32_t maxPerStageResources; + uint32_t maxDescriptorSetSamplers; + uint32_t maxDescriptorSetUniformBuffers; + uint32_t maxDescriptorSetUniformBuffersDynamic; + uint32_t maxDescriptorSetStorageBuffers; + uint32_t maxDescriptorSetStorageBuffersDynamic; + uint32_t maxDescriptorSetSampledImages; + uint32_t maxDescriptorSetStorageImages; + uint32_t maxDescriptorSetInputAttachments; + uint32_t maxVertexInputAttributes; + uint32_t maxVertexInputBindings; + uint32_t maxVertexInputAttributeOffset; + uint32_t maxVertexInputBindingStride; + uint32_t maxVertexOutputComponents; + uint32_t maxTessellationGenerationLevel; + uint32_t maxTessellationPatchSize; + uint32_t maxTessellationControlPerVertexInputComponents; + uint32_t maxTessellationControlPerVertexOutputComponents; + uint32_t maxTessellationControlPerPatchOutputComponents; + uint32_t maxTessellationControlTotalOutputComponents; + uint32_t maxTessellationEvaluationInputComponents; + uint32_t maxTessellationEvaluationOutputComponents; + uint32_t maxGeometryShaderInvocations; + uint32_t maxGeometryInputComponents; + uint32_t maxGeometryOutputComponents; + uint32_t maxGeometryOutputVertices; + uint32_t maxGeometryTotalOutputComponents; + uint32_t maxFragmentInputComponents; + uint32_t maxFragmentOutputAttachments; + uint32_t maxFragmentDualSrcAttachments; + uint32_t maxFragmentCombinedOutputResources; + uint32_t maxComputeSharedMemorySize; + uint32_t maxComputeWorkGroupCount[3]; + uint32_t maxComputeWorkGroupInvocations; + uint32_t maxComputeWorkGroupSize[3]; + uint32_t subPixelPrecisionBits; + uint32_t subTexelPrecisionBits; + uint32_t mipmapPrecisionBits; + uint32_t maxDrawIndexedIndexValue; + uint32_t maxDrawIndirectCount; + float maxSamplerLodBias; + float maxSamplerAnisotropy; + uint32_t maxViewports; + uint32_t maxViewportDimensions[2]; + float viewportBoundsRange[2]; + uint32_t viewportSubPixelBits; + size_t minMemoryMapAlignment; + VkDeviceSize minTexelBufferOffsetAlignment; + VkDeviceSize minUniformBufferOffsetAlignment; + VkDeviceSize minStorageBufferOffsetAlignment; + int32_t minTexelOffset; + uint32_t maxTexelOffset; + int32_t minTexelGatherOffset; + uint32_t maxTexelGatherOffset; + float minInterpolationOffset; + float maxInterpolationOffset; + uint32_t subPixelInterpolationOffsetBits; + uint32_t maxFramebufferWidth; + uint32_t maxFramebufferHeight; + uint32_t maxFramebufferLayers; + VkSampleCountFlags framebufferColorSampleCounts; + VkSampleCountFlags framebufferDepthSampleCounts; + VkSampleCountFlags framebufferStencilSampleCounts; + VkSampleCountFlags framebufferNoAttachmentsSampleCounts; + uint32_t maxColorAttachments; + VkSampleCountFlags sampledImageColorSampleCounts; + VkSampleCountFlags sampledImageIntegerSampleCounts; + VkSampleCountFlags sampledImageDepthSampleCounts; + VkSampleCountFlags sampledImageStencilSampleCounts; + VkSampleCountFlags storageImageSampleCounts; + uint32_t maxSampleMaskWords; + VkBool32 timestampComputeAndGraphics; + float timestampPeriod; + uint32_t maxClipDistances; + uint32_t maxCullDistances; + uint32_t maxCombinedClipAndCullDistances; + uint32_t discreteQueuePriorities; + float pointSizeRange[2]; + float lineWidthRange[2]; + float pointSizeGranularity; + float lineWidthGranularity; + VkBool32 strictLines; + VkBool32 standardSampleLocations; + VkDeviceSize optimalBufferCopyOffsetAlignment; + VkDeviceSize 
optimalBufferCopyRowPitchAlignment; + VkDeviceSize nonCoherentAtomSize; +} VkPhysicalDeviceLimits; + +typedef struct VkPhysicalDeviceSparseProperties { + VkBool32 residencyStandard2DBlockShape; + VkBool32 residencyStandard2DMultisampleBlockShape; + VkBool32 residencyStandard3DBlockShape; + VkBool32 residencyAlignedMipSize; + VkBool32 residencyNonResidentStrict; +} VkPhysicalDeviceSparseProperties; + +typedef struct VkPhysicalDeviceProperties { + uint32_t apiVersion; + uint32_t driverVersion; + uint32_t vendorID; + uint32_t deviceID; + VkPhysicalDeviceType deviceType; + char deviceName[VK_MAX_PHYSICAL_DEVICE_NAME_SIZE]; + uint8_t pipelineCacheUUID[VK_UUID_SIZE]; + VkPhysicalDeviceLimits limits; + VkPhysicalDeviceSparseProperties sparseProperties; +} VkPhysicalDeviceProperties; + +typedef struct VkQueueFamilyProperties { + VkQueueFlags queueFlags; + uint32_t queueCount; + uint32_t timestampValidBits; + VkExtent3D minImageTransferGranularity; +} VkQueueFamilyProperties; + +typedef struct VkMemoryType { + VkMemoryPropertyFlags propertyFlags; + uint32_t heapIndex; +} VkMemoryType; + +typedef struct VkMemoryHeap { + VkDeviceSize size; + VkMemoryHeapFlags flags; +} VkMemoryHeap; + +typedef struct VkPhysicalDeviceMemoryProperties { + uint32_t memoryTypeCount; + VkMemoryType memoryTypes[VK_MAX_MEMORY_TYPES]; + uint32_t memoryHeapCount; + VkMemoryHeap memoryHeaps[VK_MAX_MEMORY_HEAPS]; +} VkPhysicalDeviceMemoryProperties; + +typedef void (VKAPI_PTR *PFN_vkVoidFunction)(void); +typedef struct VkDeviceQueueCreateInfo { + VkStructureType sType; + const void* pNext; + VkDeviceQueueCreateFlags flags; + uint32_t queueFamilyIndex; + uint32_t queueCount; + const float* pQueuePriorities; +} VkDeviceQueueCreateInfo; + +typedef struct VkDeviceCreateInfo { + VkStructureType sType; + const void* pNext; + VkDeviceCreateFlags flags; + uint32_t queueCreateInfoCount; + const VkDeviceQueueCreateInfo* pQueueCreateInfos; + uint32_t enabledLayerCount; + const char* const* ppEnabledLayerNames; + uint32_t enabledExtensionCount; + const char* const* ppEnabledExtensionNames; + const VkPhysicalDeviceFeatures* pEnabledFeatures; +} VkDeviceCreateInfo; + +typedef struct VkExtensionProperties { + char extensionName[VK_MAX_EXTENSION_NAME_SIZE]; + uint32_t specVersion; +} VkExtensionProperties; + +typedef struct VkLayerProperties { + char layerName[VK_MAX_EXTENSION_NAME_SIZE]; + uint32_t specVersion; + uint32_t implementationVersion; + char description[VK_MAX_DESCRIPTION_SIZE]; +} VkLayerProperties; + +typedef struct VkSubmitInfo { + VkStructureType sType; + const void* pNext; + uint32_t waitSemaphoreCount; + const VkSemaphore* pWaitSemaphores; + const VkPipelineStageFlags* pWaitDstStageMask; + uint32_t commandBufferCount; + const VkCommandBuffer* pCommandBuffers; + uint32_t signalSemaphoreCount; + const VkSemaphore* pSignalSemaphores; +} VkSubmitInfo; + +typedef struct VkMemoryAllocateInfo { + VkStructureType sType; + const void* pNext; + VkDeviceSize allocationSize; + uint32_t memoryTypeIndex; +} VkMemoryAllocateInfo; + +typedef struct VkMappedMemoryRange { + VkStructureType sType; + const void* pNext; + VkDeviceMemory memory; + VkDeviceSize offset; + VkDeviceSize size; +} VkMappedMemoryRange; + +typedef struct VkMemoryRequirements { + VkDeviceSize size; + VkDeviceSize alignment; + uint32_t memoryTypeBits; +} VkMemoryRequirements; + +typedef struct VkSparseImageFormatProperties { + VkImageAspectFlags aspectMask; + VkExtent3D imageGranularity; + VkSparseImageFormatFlags flags; +} VkSparseImageFormatProperties; + +typedef struct 
VkSparseImageMemoryRequirements { + VkSparseImageFormatProperties formatProperties; + uint32_t imageMipTailFirstLod; + VkDeviceSize imageMipTailSize; + VkDeviceSize imageMipTailOffset; + VkDeviceSize imageMipTailStride; +} VkSparseImageMemoryRequirements; + +typedef struct VkSparseMemoryBind { + VkDeviceSize resourceOffset; + VkDeviceSize size; + VkDeviceMemory memory; + VkDeviceSize memoryOffset; + VkSparseMemoryBindFlags flags; +} VkSparseMemoryBind; + +typedef struct VkSparseBufferMemoryBindInfo { + VkBuffer buffer; + uint32_t bindCount; + const VkSparseMemoryBind* pBinds; +} VkSparseBufferMemoryBindInfo; + +typedef struct VkSparseImageOpaqueMemoryBindInfo { + VkImage image; + uint32_t bindCount; + const VkSparseMemoryBind* pBinds; +} VkSparseImageOpaqueMemoryBindInfo; + +typedef struct VkImageSubresource { + VkImageAspectFlags aspectMask; + uint32_t mipLevel; + uint32_t arrayLayer; +} VkImageSubresource; + +typedef struct VkOffset3D { + int32_t x; + int32_t y; + int32_t z; +} VkOffset3D; + +typedef struct VkSparseImageMemoryBind { + VkImageSubresource subresource; + VkOffset3D offset; + VkExtent3D extent; + VkDeviceMemory memory; + VkDeviceSize memoryOffset; + VkSparseMemoryBindFlags flags; +} VkSparseImageMemoryBind; + +typedef struct VkSparseImageMemoryBindInfo { + VkImage image; + uint32_t bindCount; + const VkSparseImageMemoryBind* pBinds; +} VkSparseImageMemoryBindInfo; + +typedef struct VkBindSparseInfo { + VkStructureType sType; + const void* pNext; + uint32_t waitSemaphoreCount; + const VkSemaphore* pWaitSemaphores; + uint32_t bufferBindCount; + const VkSparseBufferMemoryBindInfo* pBufferBinds; + uint32_t imageOpaqueBindCount; + const VkSparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds; + uint32_t imageBindCount; + const VkSparseImageMemoryBindInfo* pImageBinds; + uint32_t signalSemaphoreCount; + const VkSemaphore* pSignalSemaphores; +} VkBindSparseInfo; + +typedef struct VkFenceCreateInfo { + VkStructureType sType; + const void* pNext; + VkFenceCreateFlags flags; +} VkFenceCreateInfo; + +typedef struct VkSemaphoreCreateInfo { + VkStructureType sType; + const void* pNext; + VkSemaphoreCreateFlags flags; +} VkSemaphoreCreateInfo; + +typedef struct VkEventCreateInfo { + VkStructureType sType; + const void* pNext; + VkEventCreateFlags flags; +} VkEventCreateInfo; + +typedef struct VkQueryPoolCreateInfo { + VkStructureType sType; + const void* pNext; + VkQueryPoolCreateFlags flags; + VkQueryType queryType; + uint32_t queryCount; + VkQueryPipelineStatisticFlags pipelineStatistics; +} VkQueryPoolCreateInfo; + +typedef struct VkBufferCreateInfo { + VkStructureType sType; + const void* pNext; + VkBufferCreateFlags flags; + VkDeviceSize size; + VkBufferUsageFlags usage; + VkSharingMode sharingMode; + uint32_t queueFamilyIndexCount; + const uint32_t* pQueueFamilyIndices; +} VkBufferCreateInfo; + +typedef struct VkBufferViewCreateInfo { + VkStructureType sType; + const void* pNext; + VkBufferViewCreateFlags flags; + VkBuffer buffer; + VkFormat format; + VkDeviceSize offset; + VkDeviceSize range; +} VkBufferViewCreateInfo; + +typedef struct VkImageCreateInfo { + VkStructureType sType; + const void* pNext; + VkImageCreateFlags flags; + VkImageType imageType; + VkFormat format; + VkExtent3D extent; + uint32_t mipLevels; + uint32_t arrayLayers; + VkSampleCountFlagBits samples; + VkImageTiling tiling; + VkImageUsageFlags usage; + VkSharingMode sharingMode; + uint32_t queueFamilyIndexCount; + const uint32_t* pQueueFamilyIndices; + VkImageLayout initialLayout; +} VkImageCreateInfo; + +typedef 
struct VkSubresourceLayout { + VkDeviceSize offset; + VkDeviceSize size; + VkDeviceSize rowPitch; + VkDeviceSize arrayPitch; + VkDeviceSize depthPitch; +} VkSubresourceLayout; + +typedef struct VkComponentMapping { + VkComponentSwizzle r; + VkComponentSwizzle g; + VkComponentSwizzle b; + VkComponentSwizzle a; +} VkComponentMapping; + +typedef struct VkImageSubresourceRange { + VkImageAspectFlags aspectMask; + uint32_t baseMipLevel; + uint32_t levelCount; + uint32_t baseArrayLayer; + uint32_t layerCount; +} VkImageSubresourceRange; + +typedef struct VkImageViewCreateInfo { + VkStructureType sType; + const void* pNext; + VkImageViewCreateFlags flags; + VkImage image; + VkImageViewType viewType; + VkFormat format; + VkComponentMapping components; + VkImageSubresourceRange subresourceRange; +} VkImageViewCreateInfo; + +typedef struct VkShaderModuleCreateInfo { + VkStructureType sType; + const void* pNext; + VkShaderModuleCreateFlags flags; + size_t codeSize; + const uint32_t* pCode; +} VkShaderModuleCreateInfo; + +typedef struct VkPipelineCacheCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineCacheCreateFlags flags; + size_t initialDataSize; + const void* pInitialData; +} VkPipelineCacheCreateInfo; + +typedef struct VkSpecializationMapEntry { + uint32_t constantID; + uint32_t offset; + size_t size; +} VkSpecializationMapEntry; + +typedef struct VkSpecializationInfo { + uint32_t mapEntryCount; + const VkSpecializationMapEntry* pMapEntries; + size_t dataSize; + const void* pData; +} VkSpecializationInfo; + +typedef struct VkPipelineShaderStageCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineShaderStageCreateFlags flags; + VkShaderStageFlagBits stage; + VkShaderModule module; + const char* pName; + const VkSpecializationInfo* pSpecializationInfo; +} VkPipelineShaderStageCreateInfo; + +typedef struct VkVertexInputBindingDescription { + uint32_t binding; + uint32_t stride; + VkVertexInputRate inputRate; +} VkVertexInputBindingDescription; + +typedef struct VkVertexInputAttributeDescription { + uint32_t location; + uint32_t binding; + VkFormat format; + uint32_t offset; +} VkVertexInputAttributeDescription; + +typedef struct VkPipelineVertexInputStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineVertexInputStateCreateFlags flags; + uint32_t vertexBindingDescriptionCount; + const VkVertexInputBindingDescription* pVertexBindingDescriptions; + uint32_t vertexAttributeDescriptionCount; + const VkVertexInputAttributeDescription* pVertexAttributeDescriptions; +} VkPipelineVertexInputStateCreateInfo; + +typedef struct VkPipelineInputAssemblyStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineInputAssemblyStateCreateFlags flags; + VkPrimitiveTopology topology; + VkBool32 primitiveRestartEnable; +} VkPipelineInputAssemblyStateCreateInfo; + +typedef struct VkPipelineTessellationStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineTessellationStateCreateFlags flags; + uint32_t patchControlPoints; +} VkPipelineTessellationStateCreateInfo; + +typedef struct VkViewport { + float x; + float y; + float width; + float height; + float minDepth; + float maxDepth; +} VkViewport; + +typedef struct VkOffset2D { + int32_t x; + int32_t y; +} VkOffset2D; + +typedef struct VkExtent2D { + uint32_t width; + uint32_t height; +} VkExtent2D; + +typedef struct VkRect2D { + VkOffset2D offset; + VkExtent2D extent; +} VkRect2D; + +typedef struct VkPipelineViewportStateCreateInfo { + VkStructureType sType; + const void* 
pNext; + VkPipelineViewportStateCreateFlags flags; + uint32_t viewportCount; + const VkViewport* pViewports; + uint32_t scissorCount; + const VkRect2D* pScissors; +} VkPipelineViewportStateCreateInfo; + +typedef struct VkPipelineRasterizationStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineRasterizationStateCreateFlags flags; + VkBool32 depthClampEnable; + VkBool32 rasterizerDiscardEnable; + VkPolygonMode polygonMode; + VkCullModeFlags cullMode; + VkFrontFace frontFace; + VkBool32 depthBiasEnable; + float depthBiasConstantFactor; + float depthBiasClamp; + float depthBiasSlopeFactor; + float lineWidth; +} VkPipelineRasterizationStateCreateInfo; + +typedef struct VkPipelineMultisampleStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineMultisampleStateCreateFlags flags; + VkSampleCountFlagBits rasterizationSamples; + VkBool32 sampleShadingEnable; + float minSampleShading; + const VkSampleMask* pSampleMask; + VkBool32 alphaToCoverageEnable; + VkBool32 alphaToOneEnable; +} VkPipelineMultisampleStateCreateInfo; + +typedef struct VkStencilOpState { + VkStencilOp failOp; + VkStencilOp passOp; + VkStencilOp depthFailOp; + VkCompareOp compareOp; + uint32_t compareMask; + uint32_t writeMask; + uint32_t reference; +} VkStencilOpState; + +typedef struct VkPipelineDepthStencilStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineDepthStencilStateCreateFlags flags; + VkBool32 depthTestEnable; + VkBool32 depthWriteEnable; + VkCompareOp depthCompareOp; + VkBool32 depthBoundsTestEnable; + VkBool32 stencilTestEnable; + VkStencilOpState front; + VkStencilOpState back; + float minDepthBounds; + float maxDepthBounds; +} VkPipelineDepthStencilStateCreateInfo; + +typedef struct VkPipelineColorBlendAttachmentState { + VkBool32 blendEnable; + VkBlendFactor srcColorBlendFactor; + VkBlendFactor dstColorBlendFactor; + VkBlendOp colorBlendOp; + VkBlendFactor srcAlphaBlendFactor; + VkBlendFactor dstAlphaBlendFactor; + VkBlendOp alphaBlendOp; + VkColorComponentFlags colorWriteMask; +} VkPipelineColorBlendAttachmentState; + +typedef struct VkPipelineColorBlendStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineColorBlendStateCreateFlags flags; + VkBool32 logicOpEnable; + VkLogicOp logicOp; + uint32_t attachmentCount; + const VkPipelineColorBlendAttachmentState* pAttachments; + float blendConstants[4]; +} VkPipelineColorBlendStateCreateInfo; + +typedef struct VkPipelineDynamicStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineDynamicStateCreateFlags flags; + uint32_t dynamicStateCount; + const VkDynamicState* pDynamicStates; +} VkPipelineDynamicStateCreateInfo; + +typedef struct VkGraphicsPipelineCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineCreateFlags flags; + uint32_t stageCount; + const VkPipelineShaderStageCreateInfo* pStages; + const VkPipelineVertexInputStateCreateInfo* pVertexInputState; + const VkPipelineInputAssemblyStateCreateInfo* pInputAssemblyState; + const VkPipelineTessellationStateCreateInfo* pTessellationState; + const VkPipelineViewportStateCreateInfo* pViewportState; + const VkPipelineRasterizationStateCreateInfo* pRasterizationState; + const VkPipelineMultisampleStateCreateInfo* pMultisampleState; + const VkPipelineDepthStencilStateCreateInfo* pDepthStencilState; + const VkPipelineColorBlendStateCreateInfo* pColorBlendState; + const VkPipelineDynamicStateCreateInfo* pDynamicState; + VkPipelineLayout layout; + VkRenderPass renderPass; + uint32_t subpass; + 
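+    /* Editorial note (hedged): the two members below are only read when
+     * VK_PIPELINE_CREATE_DERIVATIVE_BIT is set in 'flags'; otherwise they
+     * are conventionally VK_NULL_HANDLE and -1. */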
VkPipeline basePipelineHandle; + int32_t basePipelineIndex; +} VkGraphicsPipelineCreateInfo; + +typedef struct VkComputePipelineCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineCreateFlags flags; + VkPipelineShaderStageCreateInfo stage; + VkPipelineLayout layout; + VkPipeline basePipelineHandle; + int32_t basePipelineIndex; +} VkComputePipelineCreateInfo; + +typedef struct VkPushConstantRange { + VkShaderStageFlags stageFlags; + uint32_t offset; + uint32_t size; +} VkPushConstantRange; + +typedef struct VkPipelineLayoutCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineLayoutCreateFlags flags; + uint32_t setLayoutCount; + const VkDescriptorSetLayout* pSetLayouts; + uint32_t pushConstantRangeCount; + const VkPushConstantRange* pPushConstantRanges; +} VkPipelineLayoutCreateInfo; + +typedef struct VkSamplerCreateInfo { + VkStructureType sType; + const void* pNext; + VkSamplerCreateFlags flags; + VkFilter magFilter; + VkFilter minFilter; + VkSamplerMipmapMode mipmapMode; + VkSamplerAddressMode addressModeU; + VkSamplerAddressMode addressModeV; + VkSamplerAddressMode addressModeW; + float mipLodBias; + VkBool32 anisotropyEnable; + float maxAnisotropy; + VkBool32 compareEnable; + VkCompareOp compareOp; + float minLod; + float maxLod; + VkBorderColor borderColor; + VkBool32 unnormalizedCoordinates; +} VkSamplerCreateInfo; + +typedef struct VkDescriptorSetLayoutBinding { + uint32_t binding; + VkDescriptorType descriptorType; + uint32_t descriptorCount; + VkShaderStageFlags stageFlags; + const VkSampler* pImmutableSamplers; +} VkDescriptorSetLayoutBinding; + +typedef struct VkDescriptorSetLayoutCreateInfo { + VkStructureType sType; + const void* pNext; + VkDescriptorSetLayoutCreateFlags flags; + uint32_t bindingCount; + const VkDescriptorSetLayoutBinding* pBindings; +} VkDescriptorSetLayoutCreateInfo; + +typedef struct VkDescriptorPoolSize { + VkDescriptorType type; + uint32_t descriptorCount; +} VkDescriptorPoolSize; + +typedef struct VkDescriptorPoolCreateInfo { + VkStructureType sType; + const void* pNext; + VkDescriptorPoolCreateFlags flags; + uint32_t maxSets; + uint32_t poolSizeCount; + const VkDescriptorPoolSize* pPoolSizes; +} VkDescriptorPoolCreateInfo; + +typedef struct VkDescriptorSetAllocateInfo { + VkStructureType sType; + const void* pNext; + VkDescriptorPool descriptorPool; + uint32_t descriptorSetCount; + const VkDescriptorSetLayout* pSetLayouts; +} VkDescriptorSetAllocateInfo; + +typedef struct VkDescriptorImageInfo { + VkSampler sampler; + VkImageView imageView; + VkImageLayout imageLayout; +} VkDescriptorImageInfo; + +typedef struct VkDescriptorBufferInfo { + VkBuffer buffer; + VkDeviceSize offset; + VkDeviceSize range; +} VkDescriptorBufferInfo; + +typedef struct VkWriteDescriptorSet { + VkStructureType sType; + const void* pNext; + VkDescriptorSet dstSet; + uint32_t dstBinding; + uint32_t dstArrayElement; + uint32_t descriptorCount; + VkDescriptorType descriptorType; + const VkDescriptorImageInfo* pImageInfo; + const VkDescriptorBufferInfo* pBufferInfo; + const VkBufferView* pTexelBufferView; +} VkWriteDescriptorSet; + +typedef struct VkCopyDescriptorSet { + VkStructureType sType; + const void* pNext; + VkDescriptorSet srcSet; + uint32_t srcBinding; + uint32_t srcArrayElement; + VkDescriptorSet dstSet; + uint32_t dstBinding; + uint32_t dstArrayElement; + uint32_t descriptorCount; +} VkCopyDescriptorSet; + +typedef struct VkFramebufferCreateInfo { + VkStructureType sType; + const void* pNext; + VkFramebufferCreateFlags flags; + 
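+    /* Editorial note (hedged): pAttachments below supplies one VkImageView
+     * per attachment declared by 'renderPass', in declaration order, each
+     * large enough to cover 'width' x 'height' with at least 'layers'
+     * layers. */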
VkRenderPass renderPass; + uint32_t attachmentCount; + const VkImageView* pAttachments; + uint32_t width; + uint32_t height; + uint32_t layers; +} VkFramebufferCreateInfo; + +typedef struct VkAttachmentDescription { + VkAttachmentDescriptionFlags flags; + VkFormat format; + VkSampleCountFlagBits samples; + VkAttachmentLoadOp loadOp; + VkAttachmentStoreOp storeOp; + VkAttachmentLoadOp stencilLoadOp; + VkAttachmentStoreOp stencilStoreOp; + VkImageLayout initialLayout; + VkImageLayout finalLayout; +} VkAttachmentDescription; + +typedef struct VkAttachmentReference { + uint32_t attachment; + VkImageLayout layout; +} VkAttachmentReference; + +typedef struct VkSubpassDescription { + VkSubpassDescriptionFlags flags; + VkPipelineBindPoint pipelineBindPoint; + uint32_t inputAttachmentCount; + const VkAttachmentReference* pInputAttachments; + uint32_t colorAttachmentCount; + const VkAttachmentReference* pColorAttachments; + const VkAttachmentReference* pResolveAttachments; + const VkAttachmentReference* pDepthStencilAttachment; + uint32_t preserveAttachmentCount; + const uint32_t* pPreserveAttachments; +} VkSubpassDescription; + +typedef struct VkSubpassDependency { + uint32_t srcSubpass; + uint32_t dstSubpass; + VkPipelineStageFlags srcStageMask; + VkPipelineStageFlags dstStageMask; + VkAccessFlags srcAccessMask; + VkAccessFlags dstAccessMask; + VkDependencyFlags dependencyFlags; +} VkSubpassDependency; + +typedef struct VkRenderPassCreateInfo { + VkStructureType sType; + const void* pNext; + VkRenderPassCreateFlags flags; + uint32_t attachmentCount; + const VkAttachmentDescription* pAttachments; + uint32_t subpassCount; + const VkSubpassDescription* pSubpasses; + uint32_t dependencyCount; + const VkSubpassDependency* pDependencies; +} VkRenderPassCreateInfo; + +typedef struct VkCommandPoolCreateInfo { + VkStructureType sType; + const void* pNext; + VkCommandPoolCreateFlags flags; + uint32_t queueFamilyIndex; +} VkCommandPoolCreateInfo; + +typedef struct VkCommandBufferAllocateInfo { + VkStructureType sType; + const void* pNext; + VkCommandPool commandPool; + VkCommandBufferLevel level; + uint32_t commandBufferCount; +} VkCommandBufferAllocateInfo; + +typedef struct VkCommandBufferInheritanceInfo { + VkStructureType sType; + const void* pNext; + VkRenderPass renderPass; + uint32_t subpass; + VkFramebuffer framebuffer; + VkBool32 occlusionQueryEnable; + VkQueryControlFlags queryFlags; + VkQueryPipelineStatisticFlags pipelineStatistics; +} VkCommandBufferInheritanceInfo; + +typedef struct VkCommandBufferBeginInfo { + VkStructureType sType; + const void* pNext; + VkCommandBufferUsageFlags flags; + const VkCommandBufferInheritanceInfo* pInheritanceInfo; +} VkCommandBufferBeginInfo; + +typedef struct VkBufferCopy { + VkDeviceSize srcOffset; + VkDeviceSize dstOffset; + VkDeviceSize size; +} VkBufferCopy; + +typedef struct VkImageSubresourceLayers { + VkImageAspectFlags aspectMask; + uint32_t mipLevel; + uint32_t baseArrayLayer; + uint32_t layerCount; +} VkImageSubresourceLayers; + +typedef struct VkImageCopy { + VkImageSubresourceLayers srcSubresource; + VkOffset3D srcOffset; + VkImageSubresourceLayers dstSubresource; + VkOffset3D dstOffset; + VkExtent3D extent; +} VkImageCopy; + +typedef struct VkImageBlit { + VkImageSubresourceLayers srcSubresource; + VkOffset3D srcOffsets[2]; + VkImageSubresourceLayers dstSubresource; + VkOffset3D dstOffsets[2]; +} VkImageBlit; + +typedef struct VkBufferImageCopy { + VkDeviceSize bufferOffset; + uint32_t bufferRowLength; + uint32_t bufferImageHeight; + 
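+    /* Editorial sketch (hedged): bufferRowLength/bufferImageHeight of zero
+     * mean "tightly packed to imageExtent". A full-image color copy region
+     * for vkCmdCopyBufferToImage (typedef'd further below) might be built as
+     * follows (illustrative; 'extent' is an assumed VkExtent3D):
+     *
+     *     VkBufferImageCopy region = {0};
+     *     region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+     *     region.imageSubresource.layerCount = 1;
+     *     region.imageExtent = extent;
+     */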
VkImageSubresourceLayers imageSubresource; + VkOffset3D imageOffset; + VkExtent3D imageExtent; +} VkBufferImageCopy; + +typedef union VkClearColorValue { + float float32[4]; + int32_t int32[4]; + uint32_t uint32[4]; +} VkClearColorValue; + +typedef struct VkClearDepthStencilValue { + float depth; + uint32_t stencil; +} VkClearDepthStencilValue; + +typedef union VkClearValue { + VkClearColorValue color; + VkClearDepthStencilValue depthStencil; +} VkClearValue; + +typedef struct VkClearAttachment { + VkImageAspectFlags aspectMask; + uint32_t colorAttachment; + VkClearValue clearValue; +} VkClearAttachment; + +typedef struct VkClearRect { + VkRect2D rect; + uint32_t baseArrayLayer; + uint32_t layerCount; +} VkClearRect; + +typedef struct VkImageResolve { + VkImageSubresourceLayers srcSubresource; + VkOffset3D srcOffset; + VkImageSubresourceLayers dstSubresource; + VkOffset3D dstOffset; + VkExtent3D extent; +} VkImageResolve; + +typedef struct VkMemoryBarrier { + VkStructureType sType; + const void* pNext; + VkAccessFlags srcAccessMask; + VkAccessFlags dstAccessMask; +} VkMemoryBarrier; + +typedef struct VkBufferMemoryBarrier { + VkStructureType sType; + const void* pNext; + VkAccessFlags srcAccessMask; + VkAccessFlags dstAccessMask; + uint32_t srcQueueFamilyIndex; + uint32_t dstQueueFamilyIndex; + VkBuffer buffer; + VkDeviceSize offset; + VkDeviceSize size; +} VkBufferMemoryBarrier; + +typedef struct VkImageMemoryBarrier { + VkStructureType sType; + const void* pNext; + VkAccessFlags srcAccessMask; + VkAccessFlags dstAccessMask; + VkImageLayout oldLayout; + VkImageLayout newLayout; + uint32_t srcQueueFamilyIndex; + uint32_t dstQueueFamilyIndex; + VkImage image; + VkImageSubresourceRange subresourceRange; +} VkImageMemoryBarrier; + +typedef struct VkRenderPassBeginInfo { + VkStructureType sType; + const void* pNext; + VkRenderPass renderPass; + VkFramebuffer framebuffer; + VkRect2D renderArea; + uint32_t clearValueCount; + const VkClearValue* pClearValues; +} VkRenderPassBeginInfo; + +typedef struct VkDispatchIndirectCommand { + uint32_t x; + uint32_t y; + uint32_t z; +} VkDispatchIndirectCommand; + +typedef struct VkDrawIndexedIndirectCommand { + uint32_t indexCount; + uint32_t instanceCount; + uint32_t firstIndex; + int32_t vertexOffset; + uint32_t firstInstance; +} VkDrawIndexedIndirectCommand; + +typedef struct VkDrawIndirectCommand { + uint32_t vertexCount; + uint32_t instanceCount; + uint32_t firstVertex; + uint32_t firstInstance; +} VkDrawIndirectCommand; + + +typedef VkResult (VKAPI_PTR *PFN_vkCreateInstance)(const VkInstanceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkInstance* pInstance); +typedef void (VKAPI_PTR *PFN_vkDestroyInstance)(VkInstance instance, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkEnumeratePhysicalDevices)(VkInstance instance, uint32_t* pPhysicalDeviceCount, VkPhysicalDevice* pPhysicalDevices); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFeatures)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures* pFeatures); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFormatProperties)(VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties* pFormatProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceImageFormatProperties)(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkImageTiling tiling, VkImageUsageFlags usage, VkImageCreateFlags flags, VkImageFormatProperties* pImageFormatProperties); +typedef void (VKAPI_PTR 
*PFN_vkGetPhysicalDeviceProperties)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties* pProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceQueueFamilyProperties)(VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties* pQueueFamilyProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceMemoryProperties)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties* pMemoryProperties); +typedef PFN_vkVoidFunction (VKAPI_PTR *PFN_vkGetInstanceProcAddr)(VkInstance instance, const char* pName); +typedef PFN_vkVoidFunction (VKAPI_PTR *PFN_vkGetDeviceProcAddr)(VkDevice device, const char* pName); +typedef VkResult (VKAPI_PTR *PFN_vkCreateDevice)(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDevice* pDevice); +typedef void (VKAPI_PTR *PFN_vkDestroyDevice)(VkDevice device, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkEnumerateInstanceExtensionProperties)(const char* pLayerName, uint32_t* pPropertyCount, VkExtensionProperties* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkEnumerateDeviceExtensionProperties)(VkPhysicalDevice physicalDevice, const char* pLayerName, uint32_t* pPropertyCount, VkExtensionProperties* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkEnumerateInstanceLayerProperties)(uint32_t* pPropertyCount, VkLayerProperties* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkEnumerateDeviceLayerProperties)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkLayerProperties* pProperties); +typedef void (VKAPI_PTR *PFN_vkGetDeviceQueue)(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue* pQueue); +typedef VkResult (VKAPI_PTR *PFN_vkQueueSubmit)(VkQueue queue, uint32_t submitCount, const VkSubmitInfo* pSubmits, VkFence fence); +typedef VkResult (VKAPI_PTR *PFN_vkQueueWaitIdle)(VkQueue queue); +typedef VkResult (VKAPI_PTR *PFN_vkDeviceWaitIdle)(VkDevice device); +typedef VkResult (VKAPI_PTR *PFN_vkAllocateMemory)(VkDevice device, const VkMemoryAllocateInfo* pAllocateInfo, const VkAllocationCallbacks* pAllocator, VkDeviceMemory* pMemory); +typedef void (VKAPI_PTR *PFN_vkFreeMemory)(VkDevice device, VkDeviceMemory memory, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkMapMemory)(VkDevice device, VkDeviceMemory memory, VkDeviceSize offset, VkDeviceSize size, VkMemoryMapFlags flags, void** ppData); +typedef void (VKAPI_PTR *PFN_vkUnmapMemory)(VkDevice device, VkDeviceMemory memory); +typedef VkResult (VKAPI_PTR *PFN_vkFlushMappedMemoryRanges)(VkDevice device, uint32_t memoryRangeCount, const VkMappedMemoryRange* pMemoryRanges); +typedef VkResult (VKAPI_PTR *PFN_vkInvalidateMappedMemoryRanges)(VkDevice device, uint32_t memoryRangeCount, const VkMappedMemoryRange* pMemoryRanges); +typedef void (VKAPI_PTR *PFN_vkGetDeviceMemoryCommitment)(VkDevice device, VkDeviceMemory memory, VkDeviceSize* pCommittedMemoryInBytes); +typedef VkResult (VKAPI_PTR *PFN_vkBindBufferMemory)(VkDevice device, VkBuffer buffer, VkDeviceMemory memory, VkDeviceSize memoryOffset); +typedef VkResult (VKAPI_PTR *PFN_vkBindImageMemory)(VkDevice device, VkImage image, VkDeviceMemory memory, VkDeviceSize memoryOffset); +typedef void (VKAPI_PTR *PFN_vkGetBufferMemoryRequirements)(VkDevice device, VkBuffer buffer, VkMemoryRequirements* pMemoryRequirements); +typedef void (VKAPI_PTR *PFN_vkGetImageMemoryRequirements)(VkDevice device, VkImage image, VkMemoryRequirements* pMemoryRequirements); +typedef 
void (VKAPI_PTR *PFN_vkGetImageSparseMemoryRequirements)(VkDevice device, VkImage image, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements* pSparseMemoryRequirements); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceSparseImageFormatProperties)(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkSampleCountFlagBits samples, VkImageUsageFlags usage, VkImageTiling tiling, uint32_t* pPropertyCount, VkSparseImageFormatProperties* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkQueueBindSparse)(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo* pBindInfo, VkFence fence); +typedef VkResult (VKAPI_PTR *PFN_vkCreateFence)(VkDevice device, const VkFenceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence); +typedef void (VKAPI_PTR *PFN_vkDestroyFence)(VkDevice device, VkFence fence, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkResetFences)(VkDevice device, uint32_t fenceCount, const VkFence* pFences); +typedef VkResult (VKAPI_PTR *PFN_vkGetFenceStatus)(VkDevice device, VkFence fence); +typedef VkResult (VKAPI_PTR *PFN_vkWaitForFences)(VkDevice device, uint32_t fenceCount, const VkFence* pFences, VkBool32 waitAll, uint64_t timeout); +typedef VkResult (VKAPI_PTR *PFN_vkCreateSemaphore)(VkDevice device, const VkSemaphoreCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSemaphore* pSemaphore); +typedef void (VKAPI_PTR *PFN_vkDestroySemaphore)(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateEvent)(VkDevice device, const VkEventCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkEvent* pEvent); +typedef void (VKAPI_PTR *PFN_vkDestroyEvent)(VkDevice device, VkEvent event, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkGetEventStatus)(VkDevice device, VkEvent event); +typedef VkResult (VKAPI_PTR *PFN_vkSetEvent)(VkDevice device, VkEvent event); +typedef VkResult (VKAPI_PTR *PFN_vkResetEvent)(VkDevice device, VkEvent event); +typedef VkResult (VKAPI_PTR *PFN_vkCreateQueryPool)(VkDevice device, const VkQueryPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkQueryPool* pQueryPool); +typedef void (VKAPI_PTR *PFN_vkDestroyQueryPool)(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkGetQueryPoolResults)(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, size_t dataSize, void* pData, VkDeviceSize stride, VkQueryResultFlags flags); +typedef VkResult (VKAPI_PTR *PFN_vkCreateBuffer)(VkDevice device, const VkBufferCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBuffer* pBuffer); +typedef void (VKAPI_PTR *PFN_vkDestroyBuffer)(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateBufferView)(VkDevice device, const VkBufferViewCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBufferView* pView); +typedef void (VKAPI_PTR *PFN_vkDestroyBufferView)(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateImage)(VkDevice device, const VkImageCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkImage* pImage); +typedef void (VKAPI_PTR *PFN_vkDestroyImage)(VkDevice device, VkImage image, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR 
*PFN_vkGetImageSubresourceLayout)(VkDevice device, VkImage image, const VkImageSubresource* pSubresource, VkSubresourceLayout* pLayout); +typedef VkResult (VKAPI_PTR *PFN_vkCreateImageView)(VkDevice device, const VkImageViewCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkImageView* pView); +typedef void (VKAPI_PTR *PFN_vkDestroyImageView)(VkDevice device, VkImageView imageView, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateShaderModule)(VkDevice device, const VkShaderModuleCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkShaderModule* pShaderModule); +typedef void (VKAPI_PTR *PFN_vkDestroyShaderModule)(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreatePipelineCache)(VkDevice device, const VkPipelineCacheCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineCache* pPipelineCache); +typedef void (VKAPI_PTR *PFN_vkDestroyPipelineCache)(VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkGetPipelineCacheData)(VkDevice device, VkPipelineCache pipelineCache, size_t* pDataSize, void* pData); +typedef VkResult (VKAPI_PTR *PFN_vkMergePipelineCaches)(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache* pSrcCaches); +typedef VkResult (VKAPI_PTR *PFN_vkCreateGraphicsPipelines)(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkGraphicsPipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines); +typedef VkResult (VKAPI_PTR *PFN_vkCreateComputePipelines)(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkComputePipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines); +typedef void (VKAPI_PTR *PFN_vkDestroyPipeline)(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreatePipelineLayout)(VkDevice device, const VkPipelineLayoutCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineLayout* pPipelineLayout); +typedef void (VKAPI_PTR *PFN_vkDestroyPipelineLayout)(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateSampler)(VkDevice device, const VkSamplerCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSampler* pSampler); +typedef void (VKAPI_PTR *PFN_vkDestroySampler)(VkDevice device, VkSampler sampler, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateDescriptorSetLayout)(VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorSetLayout* pSetLayout); +typedef void (VKAPI_PTR *PFN_vkDestroyDescriptorSetLayout)(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateDescriptorPool)(VkDevice device, const VkDescriptorPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorPool* pDescriptorPool); +typedef void (VKAPI_PTR *PFN_vkDestroyDescriptorPool)(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkResetDescriptorPool)(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags); +typedef 
VkResult (VKAPI_PTR *PFN_vkAllocateDescriptorSets)(VkDevice device, const VkDescriptorSetAllocateInfo* pAllocateInfo, VkDescriptorSet* pDescriptorSets); +typedef VkResult (VKAPI_PTR *PFN_vkFreeDescriptorSets)(VkDevice device, VkDescriptorPool descriptorPool, uint32_t descriptorSetCount, const VkDescriptorSet* pDescriptorSets); +typedef void (VKAPI_PTR *PFN_vkUpdateDescriptorSets)(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet* pDescriptorWrites, uint32_t descriptorCopyCount, const VkCopyDescriptorSet* pDescriptorCopies); +typedef VkResult (VKAPI_PTR *PFN_vkCreateFramebuffer)(VkDevice device, const VkFramebufferCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkFramebuffer* pFramebuffer); +typedef void (VKAPI_PTR *PFN_vkDestroyFramebuffer)(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateRenderPass)(VkDevice device, const VkRenderPassCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass); +typedef void (VKAPI_PTR *PFN_vkDestroyRenderPass)(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkGetRenderAreaGranularity)(VkDevice device, VkRenderPass renderPass, VkExtent2D* pGranularity); +typedef VkResult (VKAPI_PTR *PFN_vkCreateCommandPool)(VkDevice device, const VkCommandPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkCommandPool* pCommandPool); +typedef void (VKAPI_PTR *PFN_vkDestroyCommandPool)(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkResetCommandPool)(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags); +typedef VkResult (VKAPI_PTR *PFN_vkAllocateCommandBuffers)(VkDevice device, const VkCommandBufferAllocateInfo* pAllocateInfo, VkCommandBuffer* pCommandBuffers); +typedef void (VKAPI_PTR *PFN_vkFreeCommandBuffers)(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer* pCommandBuffers); +typedef VkResult (VKAPI_PTR *PFN_vkBeginCommandBuffer)(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo* pBeginInfo); +typedef VkResult (VKAPI_PTR *PFN_vkEndCommandBuffer)(VkCommandBuffer commandBuffer); +typedef VkResult (VKAPI_PTR *PFN_vkResetCommandBuffer)(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags); +typedef void (VKAPI_PTR *PFN_vkCmdBindPipeline)(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline); +typedef void (VKAPI_PTR *PFN_vkCmdSetViewport)(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport* pViewports); +typedef void (VKAPI_PTR *PFN_vkCmdSetScissor)(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D* pScissors); +typedef void (VKAPI_PTR *PFN_vkCmdSetLineWidth)(VkCommandBuffer commandBuffer, float lineWidth); +typedef void (VKAPI_PTR *PFN_vkCmdSetDepthBias)(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor); +typedef void (VKAPI_PTR *PFN_vkCmdSetBlendConstants)(VkCommandBuffer commandBuffer, const float blendConstants[4]); +typedef void (VKAPI_PTR *PFN_vkCmdSetDepthBounds)(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds); +typedef void (VKAPI_PTR *PFN_vkCmdSetStencilCompareMask)(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask); +typedef 
void (VKAPI_PTR *PFN_vkCmdSetStencilWriteMask)(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask); +typedef void (VKAPI_PTR *PFN_vkCmdSetStencilReference)(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference); +typedef void (VKAPI_PTR *PFN_vkCmdBindDescriptorSets)(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t firstSet, uint32_t descriptorSetCount, const VkDescriptorSet* pDescriptorSets, uint32_t dynamicOffsetCount, const uint32_t* pDynamicOffsets); +typedef void (VKAPI_PTR *PFN_vkCmdBindIndexBuffer)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType); +typedef void (VKAPI_PTR *PFN_vkCmdBindVertexBuffers)(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer* pBuffers, const VkDeviceSize* pOffsets); +typedef void (VKAPI_PTR *PFN_vkCmdDraw)(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance); +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndexed)(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance); +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndirect)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride); +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndexedIndirect)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride); +typedef void (VKAPI_PTR *PFN_vkCmdDispatch)(VkCommandBuffer commandBuffer, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ); +typedef void (VKAPI_PTR *PFN_vkCmdDispatchIndirect)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset); +typedef void (VKAPI_PTR *PFN_vkCmdCopyBuffer)(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferCopy* pRegions); +typedef void (VKAPI_PTR *PFN_vkCmdCopyImage)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy* pRegions); +typedef void (VKAPI_PTR *PFN_vkCmdBlitImage)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit* pRegions, VkFilter filter); +typedef void (VKAPI_PTR *PFN_vkCmdCopyBufferToImage)(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkBufferImageCopy* pRegions); +typedef void (VKAPI_PTR *PFN_vkCmdCopyImageToBuffer)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy* pRegions); +typedef void (VKAPI_PTR *PFN_vkCmdUpdateBuffer)(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize dataSize, const void* pData); +typedef void (VKAPI_PTR *PFN_vkCmdFillBuffer)(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data); +typedef void (VKAPI_PTR *PFN_vkCmdClearColorImage)(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout, const VkClearColorValue* pColor, uint32_t rangeCount, const VkImageSubresourceRange* pRanges); +typedef void (VKAPI_PTR *PFN_vkCmdClearDepthStencilImage)(VkCommandBuffer commandBuffer, VkImage image, 
VkImageLayout imageLayout, const VkClearDepthStencilValue* pDepthStencil, uint32_t rangeCount, const VkImageSubresourceRange* pRanges); +typedef void (VKAPI_PTR *PFN_vkCmdClearAttachments)(VkCommandBuffer commandBuffer, uint32_t attachmentCount, const VkClearAttachment* pAttachments, uint32_t rectCount, const VkClearRect* pRects); +typedef void (VKAPI_PTR *PFN_vkCmdResolveImage)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve* pRegions); +typedef void (VKAPI_PTR *PFN_vkCmdSetEvent)(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask); +typedef void (VKAPI_PTR *PFN_vkCmdResetEvent)(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask); +typedef void (VKAPI_PTR *PFN_vkCmdWaitEvents)(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent* pEvents, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers); +typedef void (VKAPI_PTR *PFN_vkCmdPipelineBarrier)(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers); +typedef void (VKAPI_PTR *PFN_vkCmdBeginQuery)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query, VkQueryControlFlags flags); +typedef void (VKAPI_PTR *PFN_vkCmdEndQuery)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query); +typedef void (VKAPI_PTR *PFN_vkCmdResetQueryPool)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount); +typedef void (VKAPI_PTR *PFN_vkCmdWriteTimestamp)(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t query); +typedef void (VKAPI_PTR *PFN_vkCmdCopyQueryPoolResults)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags); +typedef void (VKAPI_PTR *PFN_vkCmdPushConstants)(VkCommandBuffer commandBuffer, VkPipelineLayout layout, VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size, const void* pValues); +typedef void (VKAPI_PTR *PFN_vkCmdBeginRenderPass)(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, VkSubpassContents contents); +typedef void (VKAPI_PTR *PFN_vkCmdNextSubpass)(VkCommandBuffer commandBuffer, VkSubpassContents contents); +typedef void (VKAPI_PTR *PFN_vkCmdEndRenderPass)(VkCommandBuffer commandBuffer); +typedef void (VKAPI_PTR *PFN_vkCmdExecuteCommands)(VkCommandBuffer commandBuffer, uint32_t commandBufferCount, const VkCommandBuffer* pCommandBuffers); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateInstance( + const VkInstanceCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkInstance* pInstance); + +VKAPI_ATTR void VKAPI_CALL vkDestroyInstance( + VkInstance instance, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDevices( + 
VkInstance instance, + uint32_t* pPhysicalDeviceCount, + VkPhysicalDevice* pPhysicalDevices); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFeatures( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceFeatures* pFeatures); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFormatProperties( + VkPhysicalDevice physicalDevice, + VkFormat format, + VkFormatProperties* pFormatProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceImageFormatProperties( + VkPhysicalDevice physicalDevice, + VkFormat format, + VkImageType type, + VkImageTiling tiling, + VkImageUsageFlags usage, + VkImageCreateFlags flags, + VkImageFormatProperties* pImageFormatProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceProperties( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceProperties* pProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceQueueFamilyProperties( + VkPhysicalDevice physicalDevice, + uint32_t* pQueueFamilyPropertyCount, + VkQueueFamilyProperties* pQueueFamilyProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMemoryProperties( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceMemoryProperties* pMemoryProperties); + +VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr( + VkInstance instance, + const char* pName); + +VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr( + VkDevice device, + const char* pName); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice( + VkPhysicalDevice physicalDevice, + const VkDeviceCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDevice* pDevice); + +VKAPI_ATTR void VKAPI_CALL vkDestroyDevice( + VkDevice device, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties( + const char* pLayerName, + uint32_t* pPropertyCount, + VkExtensionProperties* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties( + VkPhysicalDevice physicalDevice, + const char* pLayerName, + uint32_t* pPropertyCount, + VkExtensionProperties* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties( + uint32_t* pPropertyCount, + VkLayerProperties* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties( + VkPhysicalDevice physicalDevice, + uint32_t* pPropertyCount, + VkLayerProperties* pProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetDeviceQueue( + VkDevice device, + uint32_t queueFamilyIndex, + uint32_t queueIndex, + VkQueue* pQueue); + +VKAPI_ATTR VkResult VKAPI_CALL vkQueueSubmit( + VkQueue queue, + uint32_t submitCount, + const VkSubmitInfo* pSubmits, + VkFence fence); + +VKAPI_ATTR VkResult VKAPI_CALL vkQueueWaitIdle( + VkQueue queue); + +VKAPI_ATTR VkResult VKAPI_CALL vkDeviceWaitIdle( + VkDevice device); + +VKAPI_ATTR VkResult VKAPI_CALL vkAllocateMemory( + VkDevice device, + const VkMemoryAllocateInfo* pAllocateInfo, + const VkAllocationCallbacks* pAllocator, + VkDeviceMemory* pMemory); + +VKAPI_ATTR void VKAPI_CALL vkFreeMemory( + VkDevice device, + VkDeviceMemory memory, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkMapMemory( + VkDevice device, + VkDeviceMemory memory, + VkDeviceSize offset, + VkDeviceSize size, + VkMemoryMapFlags flags, + void** ppData); + +VKAPI_ATTR void VKAPI_CALL vkUnmapMemory( + VkDevice device, + VkDeviceMemory memory); + +VKAPI_ATTR VkResult VKAPI_CALL vkFlushMappedMemoryRanges( + VkDevice device, + uint32_t memoryRangeCount, + const VkMappedMemoryRange* pMemoryRanges); + +VKAPI_ATTR VkResult VKAPI_CALL 
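+/* Editorial note (not part of the upstream Khronos header): for memory
+ * types allocated without VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, host and
+ * device views of mapped memory are not synchronized automatically: flush
+ * after host writes, invalidate before host reads. A minimal sketch,
+ * assuming a valid `device` and a mapped non-coherent allocation `memory`:
+ *
+ *     VkMappedMemoryRange range = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
+ *     range.memory = memory;
+ *     range.offset = 0;
+ *     range.size   = VK_WHOLE_SIZE;
+ *     vkFlushMappedMemoryRanges(device, 1, &range);      // after CPU writes
+ *     vkInvalidateMappedMemoryRanges(device, 1, &range); // before CPU reads
+ */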
vkInvalidateMappedMemoryRanges( + VkDevice device, + uint32_t memoryRangeCount, + const VkMappedMemoryRange* pMemoryRanges); + +VKAPI_ATTR void VKAPI_CALL vkGetDeviceMemoryCommitment( + VkDevice device, + VkDeviceMemory memory, + VkDeviceSize* pCommittedMemoryInBytes); + +VKAPI_ATTR VkResult VKAPI_CALL vkBindBufferMemory( + VkDevice device, + VkBuffer buffer, + VkDeviceMemory memory, + VkDeviceSize memoryOffset); + +VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory( + VkDevice device, + VkImage image, + VkDeviceMemory memory, + VkDeviceSize memoryOffset); + +VKAPI_ATTR void VKAPI_CALL vkGetBufferMemoryRequirements( + VkDevice device, + VkBuffer buffer, + VkMemoryRequirements* pMemoryRequirements); + +VKAPI_ATTR void VKAPI_CALL vkGetImageMemoryRequirements( + VkDevice device, + VkImage image, + VkMemoryRequirements* pMemoryRequirements); + +VKAPI_ATTR void VKAPI_CALL vkGetImageSparseMemoryRequirements( + VkDevice device, + VkImage image, + uint32_t* pSparseMemoryRequirementCount, + VkSparseImageMemoryRequirements* pSparseMemoryRequirements); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceSparseImageFormatProperties( + VkPhysicalDevice physicalDevice, + VkFormat format, + VkImageType type, + VkSampleCountFlagBits samples, + VkImageUsageFlags usage, + VkImageTiling tiling, + uint32_t* pPropertyCount, + VkSparseImageFormatProperties* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkQueueBindSparse( + VkQueue queue, + uint32_t bindInfoCount, + const VkBindSparseInfo* pBindInfo, + VkFence fence); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateFence( + VkDevice device, + const VkFenceCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkFence* pFence); + +VKAPI_ATTR void VKAPI_CALL vkDestroyFence( + VkDevice device, + VkFence fence, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkResetFences( + VkDevice device, + uint32_t fenceCount, + const VkFence* pFences); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceStatus( + VkDevice device, + VkFence fence); + +VKAPI_ATTR VkResult VKAPI_CALL vkWaitForFences( + VkDevice device, + uint32_t fenceCount, + const VkFence* pFences, + VkBool32 waitAll, + uint64_t timeout); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateSemaphore( + VkDevice device, + const VkSemaphoreCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSemaphore* pSemaphore); + +VKAPI_ATTR void VKAPI_CALL vkDestroySemaphore( + VkDevice device, + VkSemaphore semaphore, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateEvent( + VkDevice device, + const VkEventCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkEvent* pEvent); + +VKAPI_ATTR void VKAPI_CALL vkDestroyEvent( + VkDevice device, + VkEvent event, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetEventStatus( + VkDevice device, + VkEvent event); + +VKAPI_ATTR VkResult VKAPI_CALL vkSetEvent( + VkDevice device, + VkEvent event); + +VKAPI_ATTR VkResult VKAPI_CALL vkResetEvent( + VkDevice device, + VkEvent event); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateQueryPool( + VkDevice device, + const VkQueryPoolCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkQueryPool* pQueryPool); + +VKAPI_ATTR void VKAPI_CALL vkDestroyQueryPool( + VkDevice device, + VkQueryPool queryPool, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetQueryPoolResults( + VkDevice device, + VkQueryPool queryPool, + uint32_t firstQuery, + uint32_t queryCount, + size_t dataSize, + 
void* pData, + VkDeviceSize stride, + VkQueryResultFlags flags); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateBuffer( + VkDevice device, + const VkBufferCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkBuffer* pBuffer); + +VKAPI_ATTR void VKAPI_CALL vkDestroyBuffer( + VkDevice device, + VkBuffer buffer, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateBufferView( + VkDevice device, + const VkBufferViewCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkBufferView* pView); + +VKAPI_ATTR void VKAPI_CALL vkDestroyBufferView( + VkDevice device, + VkBufferView bufferView, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateImage( + VkDevice device, + const VkImageCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkImage* pImage); + +VKAPI_ATTR void VKAPI_CALL vkDestroyImage( + VkDevice device, + VkImage image, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR void VKAPI_CALL vkGetImageSubresourceLayout( + VkDevice device, + VkImage image, + const VkImageSubresource* pSubresource, + VkSubresourceLayout* pLayout); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateImageView( + VkDevice device, + const VkImageViewCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkImageView* pView); + +VKAPI_ATTR void VKAPI_CALL vkDestroyImageView( + VkDevice device, + VkImageView imageView, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateShaderModule( + VkDevice device, + const VkShaderModuleCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkShaderModule* pShaderModule); + +VKAPI_ATTR void VKAPI_CALL vkDestroyShaderModule( + VkDevice device, + VkShaderModule shaderModule, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineCache( + VkDevice device, + const VkPipelineCacheCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkPipelineCache* pPipelineCache); + +VKAPI_ATTR void VKAPI_CALL vkDestroyPipelineCache( + VkDevice device, + VkPipelineCache pipelineCache, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPipelineCacheData( + VkDevice device, + VkPipelineCache pipelineCache, + size_t* pDataSize, + void* pData); + +VKAPI_ATTR VkResult VKAPI_CALL vkMergePipelineCaches( + VkDevice device, + VkPipelineCache dstCache, + uint32_t srcCacheCount, + const VkPipelineCache* pSrcCaches); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateGraphicsPipelines( + VkDevice device, + VkPipelineCache pipelineCache, + uint32_t createInfoCount, + const VkGraphicsPipelineCreateInfo* pCreateInfos, + const VkAllocationCallbacks* pAllocator, + VkPipeline* pPipelines); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateComputePipelines( + VkDevice device, + VkPipelineCache pipelineCache, + uint32_t createInfoCount, + const VkComputePipelineCreateInfo* pCreateInfos, + const VkAllocationCallbacks* pAllocator, + VkPipeline* pPipelines); + +VKAPI_ATTR void VKAPI_CALL vkDestroyPipeline( + VkDevice device, + VkPipeline pipeline, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineLayout( + VkDevice device, + const VkPipelineLayoutCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkPipelineLayout* pPipelineLayout); + +VKAPI_ATTR void VKAPI_CALL vkDestroyPipelineLayout( + VkDevice device, + VkPipelineLayout pipelineLayout, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL 
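+/* Editorial note (not part of the upstream Khronos header): the
+ * vkCreateX/vkDestroyX entry points below all follow one pattern: a const
+ * CreateInfo structure in, an optional VkAllocationCallbacks pointer
+ * (NULL selects the implementation's default allocator), and a handle
+ * out. A minimal sketch, assuming a valid `device`:
+ *
+ *     VkSamplerCreateInfo sci = { VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO };
+ *     sci.magFilter = VK_FILTER_LINEAR;
+ *     sci.minFilter = VK_FILTER_LINEAR;
+ *     VkSampler sampler;
+ *     if (vkCreateSampler(device, &sci, NULL, &sampler) == VK_SUCCESS)
+ *         vkDestroySampler(device, sampler, NULL);
+ */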
vkCreateSampler( + VkDevice device, + const VkSamplerCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSampler* pSampler); + +VKAPI_ATTR void VKAPI_CALL vkDestroySampler( + VkDevice device, + VkSampler sampler, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorSetLayout( + VkDevice device, + const VkDescriptorSetLayoutCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDescriptorSetLayout* pSetLayout); + +VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorSetLayout( + VkDevice device, + VkDescriptorSetLayout descriptorSetLayout, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorPool( + VkDevice device, + const VkDescriptorPoolCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDescriptorPool* pDescriptorPool); + +VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorPool( + VkDevice device, + VkDescriptorPool descriptorPool, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkResetDescriptorPool( + VkDevice device, + VkDescriptorPool descriptorPool, + VkDescriptorPoolResetFlags flags); + +VKAPI_ATTR VkResult VKAPI_CALL vkAllocateDescriptorSets( + VkDevice device, + const VkDescriptorSetAllocateInfo* pAllocateInfo, + VkDescriptorSet* pDescriptorSets); + +VKAPI_ATTR VkResult VKAPI_CALL vkFreeDescriptorSets( + VkDevice device, + VkDescriptorPool descriptorPool, + uint32_t descriptorSetCount, + const VkDescriptorSet* pDescriptorSets); + +VKAPI_ATTR void VKAPI_CALL vkUpdateDescriptorSets( + VkDevice device, + uint32_t descriptorWriteCount, + const VkWriteDescriptorSet* pDescriptorWrites, + uint32_t descriptorCopyCount, + const VkCopyDescriptorSet* pDescriptorCopies); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateFramebuffer( + VkDevice device, + const VkFramebufferCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkFramebuffer* pFramebuffer); + +VKAPI_ATTR void VKAPI_CALL vkDestroyFramebuffer( + VkDevice device, + VkFramebuffer framebuffer, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass( + VkDevice device, + const VkRenderPassCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkRenderPass* pRenderPass); + +VKAPI_ATTR void VKAPI_CALL vkDestroyRenderPass( + VkDevice device, + VkRenderPass renderPass, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR void VKAPI_CALL vkGetRenderAreaGranularity( + VkDevice device, + VkRenderPass renderPass, + VkExtent2D* pGranularity); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateCommandPool( + VkDevice device, + const VkCommandPoolCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkCommandPool* pCommandPool); + +VKAPI_ATTR void VKAPI_CALL vkDestroyCommandPool( + VkDevice device, + VkCommandPool commandPool, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkResetCommandPool( + VkDevice device, + VkCommandPool commandPool, + VkCommandPoolResetFlags flags); + +VKAPI_ATTR VkResult VKAPI_CALL vkAllocateCommandBuffers( + VkDevice device, + const VkCommandBufferAllocateInfo* pAllocateInfo, + VkCommandBuffer* pCommandBuffers); + +VKAPI_ATTR void VKAPI_CALL vkFreeCommandBuffers( + VkDevice device, + VkCommandPool commandPool, + uint32_t commandBufferCount, + const VkCommandBuffer* pCommandBuffers); + +VKAPI_ATTR VkResult VKAPI_CALL vkBeginCommandBuffer( + VkCommandBuffer commandBuffer, + const VkCommandBufferBeginInfo* pBeginInfo); + +VKAPI_ATTR VkResult VKAPI_CALL 
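+/* Editorial note (not part of the upstream Khronos header): command
+ * buffers are recorded between vkBeginCommandBuffer and vkEndCommandBuffer
+ * and only then handed to vkQueueSubmit. A minimal one-time-submit sketch,
+ * assuming `cmd` came from vkAllocateCommandBuffers:
+ *
+ *     VkCommandBufferBeginInfo bi = { VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO };
+ *     bi.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
+ *     vkBeginCommandBuffer(cmd, &bi);
+ *     ...record vkCmd calls here...
+ *     vkEndCommandBuffer(cmd);
+ */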
vkEndCommandBuffer( + VkCommandBuffer commandBuffer); + +VKAPI_ATTR VkResult VKAPI_CALL vkResetCommandBuffer( + VkCommandBuffer commandBuffer, + VkCommandBufferResetFlags flags); + +VKAPI_ATTR void VKAPI_CALL vkCmdBindPipeline( + VkCommandBuffer commandBuffer, + VkPipelineBindPoint pipelineBindPoint, + VkPipeline pipeline); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetViewport( + VkCommandBuffer commandBuffer, + uint32_t firstViewport, + uint32_t viewportCount, + const VkViewport* pViewports); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetScissor( + VkCommandBuffer commandBuffer, + uint32_t firstScissor, + uint32_t scissorCount, + const VkRect2D* pScissors); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetLineWidth( + VkCommandBuffer commandBuffer, + float lineWidth); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthBias( + VkCommandBuffer commandBuffer, + float depthBiasConstantFactor, + float depthBiasClamp, + float depthBiasSlopeFactor); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetBlendConstants( + VkCommandBuffer commandBuffer, + const float blendConstants[4]); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthBounds( + VkCommandBuffer commandBuffer, + float minDepthBounds, + float maxDepthBounds); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilCompareMask( + VkCommandBuffer commandBuffer, + VkStencilFaceFlags faceMask, + uint32_t compareMask); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilWriteMask( + VkCommandBuffer commandBuffer, + VkStencilFaceFlags faceMask, + uint32_t writeMask); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilReference( + VkCommandBuffer commandBuffer, + VkStencilFaceFlags faceMask, + uint32_t reference); + +VKAPI_ATTR void VKAPI_CALL vkCmdBindDescriptorSets( + VkCommandBuffer commandBuffer, + VkPipelineBindPoint pipelineBindPoint, + VkPipelineLayout layout, + uint32_t firstSet, + uint32_t descriptorSetCount, + const VkDescriptorSet* pDescriptorSets, + uint32_t dynamicOffsetCount, + const uint32_t* pDynamicOffsets); + +VKAPI_ATTR void VKAPI_CALL vkCmdBindIndexBuffer( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + VkIndexType indexType); + +VKAPI_ATTR void VKAPI_CALL vkCmdBindVertexBuffers( + VkCommandBuffer commandBuffer, + uint32_t firstBinding, + uint32_t bindingCount, + const VkBuffer* pBuffers, + const VkDeviceSize* pOffsets); + +VKAPI_ATTR void VKAPI_CALL vkCmdDraw( + VkCommandBuffer commandBuffer, + uint32_t vertexCount, + uint32_t instanceCount, + uint32_t firstVertex, + uint32_t firstInstance); + +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexed( + VkCommandBuffer commandBuffer, + uint32_t indexCount, + uint32_t instanceCount, + uint32_t firstIndex, + int32_t vertexOffset, + uint32_t firstInstance); + +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirect( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + uint32_t drawCount, + uint32_t stride); + +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexedIndirect( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + uint32_t drawCount, + uint32_t stride); + +VKAPI_ATTR void VKAPI_CALL vkCmdDispatch( + VkCommandBuffer commandBuffer, + uint32_t groupCountX, + uint32_t groupCountY, + uint32_t groupCountZ); + +VKAPI_ATTR void VKAPI_CALL vkCmdDispatchIndirect( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyBuffer( + VkCommandBuffer commandBuffer, + VkBuffer srcBuffer, + VkBuffer dstBuffer, + uint32_t regionCount, + const VkBufferCopy* pRegions); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyImage( + VkCommandBuffer commandBuffer, + VkImage 
srcImage, + VkImageLayout srcImageLayout, + VkImage dstImage, + VkImageLayout dstImageLayout, + uint32_t regionCount, + const VkImageCopy* pRegions); + +VKAPI_ATTR void VKAPI_CALL vkCmdBlitImage( + VkCommandBuffer commandBuffer, + VkImage srcImage, + VkImageLayout srcImageLayout, + VkImage dstImage, + VkImageLayout dstImageLayout, + uint32_t regionCount, + const VkImageBlit* pRegions, + VkFilter filter); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyBufferToImage( + VkCommandBuffer commandBuffer, + VkBuffer srcBuffer, + VkImage dstImage, + VkImageLayout dstImageLayout, + uint32_t regionCount, + const VkBufferImageCopy* pRegions); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyImageToBuffer( + VkCommandBuffer commandBuffer, + VkImage srcImage, + VkImageLayout srcImageLayout, + VkBuffer dstBuffer, + uint32_t regionCount, + const VkBufferImageCopy* pRegions); + +VKAPI_ATTR void VKAPI_CALL vkCmdUpdateBuffer( + VkCommandBuffer commandBuffer, + VkBuffer dstBuffer, + VkDeviceSize dstOffset, + VkDeviceSize dataSize, + const void* pData); + +VKAPI_ATTR void VKAPI_CALL vkCmdFillBuffer( + VkCommandBuffer commandBuffer, + VkBuffer dstBuffer, + VkDeviceSize dstOffset, + VkDeviceSize size, + uint32_t data); + +VKAPI_ATTR void VKAPI_CALL vkCmdClearColorImage( + VkCommandBuffer commandBuffer, + VkImage image, + VkImageLayout imageLayout, + const VkClearColorValue* pColor, + uint32_t rangeCount, + const VkImageSubresourceRange* pRanges); + +VKAPI_ATTR void VKAPI_CALL vkCmdClearDepthStencilImage( + VkCommandBuffer commandBuffer, + VkImage image, + VkImageLayout imageLayout, + const VkClearDepthStencilValue* pDepthStencil, + uint32_t rangeCount, + const VkImageSubresourceRange* pRanges); + +VKAPI_ATTR void VKAPI_CALL vkCmdClearAttachments( + VkCommandBuffer commandBuffer, + uint32_t attachmentCount, + const VkClearAttachment* pAttachments, + uint32_t rectCount, + const VkClearRect* pRects); + +VKAPI_ATTR void VKAPI_CALL vkCmdResolveImage( + VkCommandBuffer commandBuffer, + VkImage srcImage, + VkImageLayout srcImageLayout, + VkImage dstImage, + VkImageLayout dstImageLayout, + uint32_t regionCount, + const VkImageResolve* pRegions); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetEvent( + VkCommandBuffer commandBuffer, + VkEvent event, + VkPipelineStageFlags stageMask); + +VKAPI_ATTR void VKAPI_CALL vkCmdResetEvent( + VkCommandBuffer commandBuffer, + VkEvent event, + VkPipelineStageFlags stageMask); + +VKAPI_ATTR void VKAPI_CALL vkCmdWaitEvents( + VkCommandBuffer commandBuffer, + uint32_t eventCount, + const VkEvent* pEvents, + VkPipelineStageFlags srcStageMask, + VkPipelineStageFlags dstStageMask, + uint32_t memoryBarrierCount, + const VkMemoryBarrier* pMemoryBarriers, + uint32_t bufferMemoryBarrierCount, + const VkBufferMemoryBarrier* pBufferMemoryBarriers, + uint32_t imageMemoryBarrierCount, + const VkImageMemoryBarrier* pImageMemoryBarriers); + +VKAPI_ATTR void VKAPI_CALL vkCmdPipelineBarrier( + VkCommandBuffer commandBuffer, + VkPipelineStageFlags srcStageMask, + VkPipelineStageFlags dstStageMask, + VkDependencyFlags dependencyFlags, + uint32_t memoryBarrierCount, + const VkMemoryBarrier* pMemoryBarriers, + uint32_t bufferMemoryBarrierCount, + const VkBufferMemoryBarrier* pBufferMemoryBarriers, + uint32_t imageMemoryBarrierCount, + const VkImageMemoryBarrier* pImageMemoryBarriers); + +VKAPI_ATTR void VKAPI_CALL vkCmdBeginQuery( + VkCommandBuffer commandBuffer, + VkQueryPool queryPool, + uint32_t query, + VkQueryControlFlags flags); + +VKAPI_ATTR void VKAPI_CALL vkCmdEndQuery( + VkCommandBuffer commandBuffer, + VkQueryPool 
queryPool,
+    uint32_t query);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdResetQueryPool(
+    VkCommandBuffer commandBuffer,
+    VkQueryPool queryPool,
+    uint32_t firstQuery,
+    uint32_t queryCount);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdWriteTimestamp(
+    VkCommandBuffer commandBuffer,
+    VkPipelineStageFlagBits pipelineStage,
+    VkQueryPool queryPool,
+    uint32_t query);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdCopyQueryPoolResults(
+    VkCommandBuffer commandBuffer,
+    VkQueryPool queryPool,
+    uint32_t firstQuery,
+    uint32_t queryCount,
+    VkBuffer dstBuffer,
+    VkDeviceSize dstOffset,
+    VkDeviceSize stride,
+    VkQueryResultFlags flags);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdPushConstants(
+    VkCommandBuffer commandBuffer,
+    VkPipelineLayout layout,
+    VkShaderStageFlags stageFlags,
+    uint32_t offset,
+    uint32_t size,
+    const void* pValues);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdBeginRenderPass(
+    VkCommandBuffer commandBuffer,
+    const VkRenderPassBeginInfo* pRenderPassBegin,
+    VkSubpassContents contents);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdNextSubpass(
+    VkCommandBuffer commandBuffer,
+    VkSubpassContents contents);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdEndRenderPass(
+    VkCommandBuffer commandBuffer);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdExecuteCommands(
+    VkCommandBuffer commandBuffer,
+    uint32_t commandBufferCount,
+    const VkCommandBuffer* pCommandBuffers);
+#endif
+
+#define VK_VERSION_1_1 1
+// Vulkan 1.1 version number
+#define VK_API_VERSION_1_1 VK_MAKE_VERSION(1, 1, 0)// Patch version should always be set to 0
+
+
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSamplerYcbcrConversion)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorUpdateTemplate)
+
+#define VK_MAX_DEVICE_GROUP_SIZE 32
+#define VK_LUID_SIZE 8
+#define VK_QUEUE_FAMILY_EXTERNAL (~0U-1)
+
+
+typedef enum VkPointClippingBehavior {
+    VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES = 0,
+    VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY = 1,
+    VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES_KHR = VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES,
+    VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY_KHR = VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY,
+    VK_POINT_CLIPPING_BEHAVIOR_BEGIN_RANGE = VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES,
+    VK_POINT_CLIPPING_BEHAVIOR_END_RANGE = VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY,
+    VK_POINT_CLIPPING_BEHAVIOR_RANGE_SIZE = (VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY - VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES + 1),
+    VK_POINT_CLIPPING_BEHAVIOR_MAX_ENUM = 0x7FFFFFFF
+} VkPointClippingBehavior;
+
+typedef enum VkTessellationDomainOrigin {
+    VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT = 0,
+    VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT = 1,
+    VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT_KHR = VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT,
+    VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT_KHR = VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT,
+    VK_TESSELLATION_DOMAIN_ORIGIN_BEGIN_RANGE = VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT,
+    VK_TESSELLATION_DOMAIN_ORIGIN_END_RANGE = VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT,
+    VK_TESSELLATION_DOMAIN_ORIGIN_RANGE_SIZE = (VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT - VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT + 1),
+    VK_TESSELLATION_DOMAIN_ORIGIN_MAX_ENUM = 0x7FFFFFFF
+} VkTessellationDomainOrigin;
+
+typedef enum VkSamplerYcbcrModelConversion {
+    VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY = 0,
+    VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY = 1,
+    VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709 = 2,
+    VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601 = 3,
+    VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020 = 4,
+
VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY, + VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY, + VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709, + VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601, + VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020, + VK_SAMPLER_YCBCR_MODEL_CONVERSION_BEGIN_RANGE = VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY, + VK_SAMPLER_YCBCR_MODEL_CONVERSION_END_RANGE = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020, + VK_SAMPLER_YCBCR_MODEL_CONVERSION_RANGE_SIZE = (VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020 - VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY + 1), + VK_SAMPLER_YCBCR_MODEL_CONVERSION_MAX_ENUM = 0x7FFFFFFF +} VkSamplerYcbcrModelConversion; + +typedef enum VkSamplerYcbcrRange { + VK_SAMPLER_YCBCR_RANGE_ITU_FULL = 0, + VK_SAMPLER_YCBCR_RANGE_ITU_NARROW = 1, + VK_SAMPLER_YCBCR_RANGE_ITU_FULL_KHR = VK_SAMPLER_YCBCR_RANGE_ITU_FULL, + VK_SAMPLER_YCBCR_RANGE_ITU_NARROW_KHR = VK_SAMPLER_YCBCR_RANGE_ITU_NARROW, + VK_SAMPLER_YCBCR_RANGE_BEGIN_RANGE = VK_SAMPLER_YCBCR_RANGE_ITU_FULL, + VK_SAMPLER_YCBCR_RANGE_END_RANGE = VK_SAMPLER_YCBCR_RANGE_ITU_NARROW, + VK_SAMPLER_YCBCR_RANGE_RANGE_SIZE = (VK_SAMPLER_YCBCR_RANGE_ITU_NARROW - VK_SAMPLER_YCBCR_RANGE_ITU_FULL + 1), + VK_SAMPLER_YCBCR_RANGE_MAX_ENUM = 0x7FFFFFFF +} VkSamplerYcbcrRange; + +typedef enum VkChromaLocation { + VK_CHROMA_LOCATION_COSITED_EVEN = 0, + VK_CHROMA_LOCATION_MIDPOINT = 1, + VK_CHROMA_LOCATION_COSITED_EVEN_KHR = VK_CHROMA_LOCATION_COSITED_EVEN, + VK_CHROMA_LOCATION_MIDPOINT_KHR = VK_CHROMA_LOCATION_MIDPOINT, + VK_CHROMA_LOCATION_BEGIN_RANGE = VK_CHROMA_LOCATION_COSITED_EVEN, + VK_CHROMA_LOCATION_END_RANGE = VK_CHROMA_LOCATION_MIDPOINT, + VK_CHROMA_LOCATION_RANGE_SIZE = (VK_CHROMA_LOCATION_MIDPOINT - VK_CHROMA_LOCATION_COSITED_EVEN + 1), + VK_CHROMA_LOCATION_MAX_ENUM = 0x7FFFFFFF +} VkChromaLocation; + +typedef enum VkDescriptorUpdateTemplateType { + VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET = 0, + VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR = 1, + VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET, + VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_BEGIN_RANGE = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET, + VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_END_RANGE = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET, + VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_RANGE_SIZE = (VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET - VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET + 1), + VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkDescriptorUpdateTemplateType; + + +typedef enum VkSubgroupFeatureFlagBits { + VK_SUBGROUP_FEATURE_BASIC_BIT = 0x00000001, + VK_SUBGROUP_FEATURE_VOTE_BIT = 0x00000002, + VK_SUBGROUP_FEATURE_ARITHMETIC_BIT = 0x00000004, + VK_SUBGROUP_FEATURE_BALLOT_BIT = 0x00000008, + VK_SUBGROUP_FEATURE_SHUFFLE_BIT = 0x00000010, + VK_SUBGROUP_FEATURE_SHUFFLE_RELATIVE_BIT = 0x00000020, + VK_SUBGROUP_FEATURE_CLUSTERED_BIT = 0x00000040, + VK_SUBGROUP_FEATURE_QUAD_BIT = 0x00000080, + VK_SUBGROUP_FEATURE_PARTITIONED_BIT_NV = 0x00000100, + VK_SUBGROUP_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkSubgroupFeatureFlagBits; +typedef VkFlags VkSubgroupFeatureFlags; + +typedef enum VkPeerMemoryFeatureFlagBits { + VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT = 0x00000001, + 
VK_PEER_MEMORY_FEATURE_COPY_DST_BIT = 0x00000002, + VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT = 0x00000004, + VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT = 0x00000008, + VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT_KHR = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT, + VK_PEER_MEMORY_FEATURE_COPY_DST_BIT_KHR = VK_PEER_MEMORY_FEATURE_COPY_DST_BIT, + VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT_KHR = VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT, + VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT_KHR = VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT, + VK_PEER_MEMORY_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkPeerMemoryFeatureFlagBits; +typedef VkFlags VkPeerMemoryFeatureFlags; + +typedef enum VkMemoryAllocateFlagBits { + VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT = 0x00000001, + VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT_KHR = VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT, + VK_MEMORY_ALLOCATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkMemoryAllocateFlagBits; +typedef VkFlags VkMemoryAllocateFlags; +typedef VkFlags VkCommandPoolTrimFlags; +typedef VkFlags VkDescriptorUpdateTemplateCreateFlags; + +typedef enum VkExternalMemoryHandleTypeFlagBits { + VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT = 0x00000001, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT = 0x00000002, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT = 0x00000004, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_BIT = 0x00000008, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KMT_BIT = 0x00000010, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP_BIT = 0x00000020, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE_BIT = 0x00000040, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT = 0x00000200, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID = 0x00000400, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT = 0x00000080, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_MAPPED_FOREIGN_MEMORY_BIT_EXT = 0x00000100, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_BIT, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KMT_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KMT_BIT, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP_BIT, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE_BIT, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkExternalMemoryHandleTypeFlagBits; +typedef VkFlags VkExternalMemoryHandleTypeFlags; + +typedef enum VkExternalMemoryFeatureFlagBits { + VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT = 0x00000001, + VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT = 0x00000002, + VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT = 0x00000004, + VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT_KHR = VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT, + VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_KHR = VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT, + VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR = VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT, + VK_EXTERNAL_MEMORY_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkExternalMemoryFeatureFlagBits; +typedef VkFlags VkExternalMemoryFeatureFlags; + +typedef enum VkExternalFenceHandleTypeFlagBits { + VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT = 0x00000001, + VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT = 
0x00000002, + VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT = 0x00000004, + VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT = 0x00000008, + VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT, + VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT, + VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_KHR = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT, + VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT, + VK_EXTERNAL_FENCE_HANDLE_TYPE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkExternalFenceHandleTypeFlagBits; +typedef VkFlags VkExternalFenceHandleTypeFlags; + +typedef enum VkExternalFenceFeatureFlagBits { + VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT = 0x00000001, + VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT = 0x00000002, + VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT_KHR = VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT, + VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT_KHR = VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT, + VK_EXTERNAL_FENCE_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkExternalFenceFeatureFlagBits; +typedef VkFlags VkExternalFenceFeatureFlags; + +typedef enum VkFenceImportFlagBits { + VK_FENCE_IMPORT_TEMPORARY_BIT = 0x00000001, + VK_FENCE_IMPORT_TEMPORARY_BIT_KHR = VK_FENCE_IMPORT_TEMPORARY_BIT, + VK_FENCE_IMPORT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkFenceImportFlagBits; +typedef VkFlags VkFenceImportFlags; + +typedef enum VkSemaphoreImportFlagBits { + VK_SEMAPHORE_IMPORT_TEMPORARY_BIT = 0x00000001, + VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT, + VK_SEMAPHORE_IMPORT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkSemaphoreImportFlagBits; +typedef VkFlags VkSemaphoreImportFlags; + +typedef enum VkExternalSemaphoreHandleTypeFlagBits { + VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT = 0x00000001, + VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT = 0x00000002, + VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT = 0x00000004, + VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT = 0x00000008, + VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT = 0x00000010, + VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT, + VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT, + VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_KHR = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT, + VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT_KHR = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT, + VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT, + VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkExternalSemaphoreHandleTypeFlagBits; +typedef VkFlags VkExternalSemaphoreHandleTypeFlags; + +typedef enum VkExternalSemaphoreFeatureFlagBits { + VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT = 0x00000001, + VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT = 0x00000002, + VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR = VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT, + VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR = VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT, + VK_EXTERNAL_SEMAPHORE_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkExternalSemaphoreFeatureFlagBits; +typedef VkFlags VkExternalSemaphoreFeatureFlags; + +typedef struct VkPhysicalDeviceSubgroupProperties { + VkStructureType sType; + void* pNext; + uint32_t subgroupSize; + VkShaderStageFlags supportedStages; + VkSubgroupFeatureFlags 
supportedOperations; + VkBool32 quadOperationsInAllStages; +} VkPhysicalDeviceSubgroupProperties; + +typedef struct VkBindBufferMemoryInfo { + VkStructureType sType; + const void* pNext; + VkBuffer buffer; + VkDeviceMemory memory; + VkDeviceSize memoryOffset; +} VkBindBufferMemoryInfo; + +typedef struct VkBindImageMemoryInfo { + VkStructureType sType; + const void* pNext; + VkImage image; + VkDeviceMemory memory; + VkDeviceSize memoryOffset; +} VkBindImageMemoryInfo; + +typedef struct VkPhysicalDevice16BitStorageFeatures { + VkStructureType sType; + void* pNext; + VkBool32 storageBuffer16BitAccess; + VkBool32 uniformAndStorageBuffer16BitAccess; + VkBool32 storagePushConstant16; + VkBool32 storageInputOutput16; +} VkPhysicalDevice16BitStorageFeatures; + +typedef struct VkMemoryDedicatedRequirements { + VkStructureType sType; + void* pNext; + VkBool32 prefersDedicatedAllocation; + VkBool32 requiresDedicatedAllocation; +} VkMemoryDedicatedRequirements; + +typedef struct VkMemoryDedicatedAllocateInfo { + VkStructureType sType; + const void* pNext; + VkImage image; + VkBuffer buffer; +} VkMemoryDedicatedAllocateInfo; + +typedef struct VkMemoryAllocateFlagsInfo { + VkStructureType sType; + const void* pNext; + VkMemoryAllocateFlags flags; + uint32_t deviceMask; +} VkMemoryAllocateFlagsInfo; + +typedef struct VkDeviceGroupRenderPassBeginInfo { + VkStructureType sType; + const void* pNext; + uint32_t deviceMask; + uint32_t deviceRenderAreaCount; + const VkRect2D* pDeviceRenderAreas; +} VkDeviceGroupRenderPassBeginInfo; + +typedef struct VkDeviceGroupCommandBufferBeginInfo { + VkStructureType sType; + const void* pNext; + uint32_t deviceMask; +} VkDeviceGroupCommandBufferBeginInfo; + +typedef struct VkDeviceGroupSubmitInfo { + VkStructureType sType; + const void* pNext; + uint32_t waitSemaphoreCount; + const uint32_t* pWaitSemaphoreDeviceIndices; + uint32_t commandBufferCount; + const uint32_t* pCommandBufferDeviceMasks; + uint32_t signalSemaphoreCount; + const uint32_t* pSignalSemaphoreDeviceIndices; +} VkDeviceGroupSubmitInfo; + +typedef struct VkDeviceGroupBindSparseInfo { + VkStructureType sType; + const void* pNext; + uint32_t resourceDeviceIndex; + uint32_t memoryDeviceIndex; +} VkDeviceGroupBindSparseInfo; + +typedef struct VkBindBufferMemoryDeviceGroupInfo { + VkStructureType sType; + const void* pNext; + uint32_t deviceIndexCount; + const uint32_t* pDeviceIndices; +} VkBindBufferMemoryDeviceGroupInfo; + +typedef struct VkBindImageMemoryDeviceGroupInfo { + VkStructureType sType; + const void* pNext; + uint32_t deviceIndexCount; + const uint32_t* pDeviceIndices; + uint32_t splitInstanceBindRegionCount; + const VkRect2D* pSplitInstanceBindRegions; +} VkBindImageMemoryDeviceGroupInfo; + +typedef struct VkPhysicalDeviceGroupProperties { + VkStructureType sType; + void* pNext; + uint32_t physicalDeviceCount; + VkPhysicalDevice physicalDevices[VK_MAX_DEVICE_GROUP_SIZE]; + VkBool32 subsetAllocation; +} VkPhysicalDeviceGroupProperties; + +typedef struct VkDeviceGroupDeviceCreateInfo { + VkStructureType sType; + const void* pNext; + uint32_t physicalDeviceCount; + const VkPhysicalDevice* pPhysicalDevices; +} VkDeviceGroupDeviceCreateInfo; + +typedef struct VkBufferMemoryRequirementsInfo2 { + VkStructureType sType; + const void* pNext; + VkBuffer buffer; +} VkBufferMemoryRequirementsInfo2; + +typedef struct VkImageMemoryRequirementsInfo2 { + VkStructureType sType; + const void* pNext; + VkImage image; +} VkImageMemoryRequirementsInfo2; + +typedef struct VkImageSparseMemoryRequirementsInfo2 { + 
VkStructureType sType; + const void* pNext; + VkImage image; +} VkImageSparseMemoryRequirementsInfo2; + +typedef struct VkMemoryRequirements2 { + VkStructureType sType; + void* pNext; + VkMemoryRequirements memoryRequirements; +} VkMemoryRequirements2; + +typedef struct VkSparseImageMemoryRequirements2 { + VkStructureType sType; + void* pNext; + VkSparseImageMemoryRequirements memoryRequirements; +} VkSparseImageMemoryRequirements2; + +typedef struct VkPhysicalDeviceFeatures2 { + VkStructureType sType; + void* pNext; + VkPhysicalDeviceFeatures features; +} VkPhysicalDeviceFeatures2; + +typedef struct VkPhysicalDeviceProperties2 { + VkStructureType sType; + void* pNext; + VkPhysicalDeviceProperties properties; +} VkPhysicalDeviceProperties2; + +typedef struct VkFormatProperties2 { + VkStructureType sType; + void* pNext; + VkFormatProperties formatProperties; +} VkFormatProperties2; + +typedef struct VkImageFormatProperties2 { + VkStructureType sType; + void* pNext; + VkImageFormatProperties imageFormatProperties; +} VkImageFormatProperties2; + +typedef struct VkPhysicalDeviceImageFormatInfo2 { + VkStructureType sType; + const void* pNext; + VkFormat format; + VkImageType type; + VkImageTiling tiling; + VkImageUsageFlags usage; + VkImageCreateFlags flags; +} VkPhysicalDeviceImageFormatInfo2; + +typedef struct VkQueueFamilyProperties2 { + VkStructureType sType; + void* pNext; + VkQueueFamilyProperties queueFamilyProperties; +} VkQueueFamilyProperties2; + +typedef struct VkPhysicalDeviceMemoryProperties2 { + VkStructureType sType; + void* pNext; + VkPhysicalDeviceMemoryProperties memoryProperties; +} VkPhysicalDeviceMemoryProperties2; + +typedef struct VkSparseImageFormatProperties2 { + VkStructureType sType; + void* pNext; + VkSparseImageFormatProperties properties; +} VkSparseImageFormatProperties2; + +typedef struct VkPhysicalDeviceSparseImageFormatInfo2 { + VkStructureType sType; + const void* pNext; + VkFormat format; + VkImageType type; + VkSampleCountFlagBits samples; + VkImageUsageFlags usage; + VkImageTiling tiling; +} VkPhysicalDeviceSparseImageFormatInfo2; + +typedef struct VkPhysicalDevicePointClippingProperties { + VkStructureType sType; + void* pNext; + VkPointClippingBehavior pointClippingBehavior; +} VkPhysicalDevicePointClippingProperties; + +typedef struct VkInputAttachmentAspectReference { + uint32_t subpass; + uint32_t inputAttachmentIndex; + VkImageAspectFlags aspectMask; +} VkInputAttachmentAspectReference; + +typedef struct VkRenderPassInputAttachmentAspectCreateInfo { + VkStructureType sType; + const void* pNext; + uint32_t aspectReferenceCount; + const VkInputAttachmentAspectReference* pAspectReferences; +} VkRenderPassInputAttachmentAspectCreateInfo; + +typedef struct VkImageViewUsageCreateInfo { + VkStructureType sType; + const void* pNext; + VkImageUsageFlags usage; +} VkImageViewUsageCreateInfo; + +typedef struct VkPipelineTessellationDomainOriginStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkTessellationDomainOrigin domainOrigin; +} VkPipelineTessellationDomainOriginStateCreateInfo; + +typedef struct VkRenderPassMultiviewCreateInfo { + VkStructureType sType; + const void* pNext; + uint32_t subpassCount; + const uint32_t* pViewMasks; + uint32_t dependencyCount; + const int32_t* pViewOffsets; + uint32_t correlationMaskCount; + const uint32_t* pCorrelationMasks; +} VkRenderPassMultiviewCreateInfo; + +typedef struct VkPhysicalDeviceMultiviewFeatures { + VkStructureType sType; + void* pNext; + VkBool32 multiview; + VkBool32 multiviewGeometryShader; 
+ VkBool32 multiviewTessellationShader; +} VkPhysicalDeviceMultiviewFeatures; + +typedef struct VkPhysicalDeviceMultiviewProperties { + VkStructureType sType; + void* pNext; + uint32_t maxMultiviewViewCount; + uint32_t maxMultiviewInstanceIndex; +} VkPhysicalDeviceMultiviewProperties; + +typedef struct VkPhysicalDeviceVariablePointerFeatures { + VkStructureType sType; + void* pNext; + VkBool32 variablePointersStorageBuffer; + VkBool32 variablePointers; +} VkPhysicalDeviceVariablePointerFeatures; + +typedef struct VkPhysicalDeviceProtectedMemoryFeatures { + VkStructureType sType; + void* pNext; + VkBool32 protectedMemory; +} VkPhysicalDeviceProtectedMemoryFeatures; + +typedef struct VkPhysicalDeviceProtectedMemoryProperties { + VkStructureType sType; + void* pNext; + VkBool32 protectedNoFault; +} VkPhysicalDeviceProtectedMemoryProperties; + +typedef struct VkDeviceQueueInfo2 { + VkStructureType sType; + const void* pNext; + VkDeviceQueueCreateFlags flags; + uint32_t queueFamilyIndex; + uint32_t queueIndex; +} VkDeviceQueueInfo2; + +typedef struct VkProtectedSubmitInfo { + VkStructureType sType; + const void* pNext; + VkBool32 protectedSubmit; +} VkProtectedSubmitInfo; + +typedef struct VkSamplerYcbcrConversionCreateInfo { + VkStructureType sType; + const void* pNext; + VkFormat format; + VkSamplerYcbcrModelConversion ycbcrModel; + VkSamplerYcbcrRange ycbcrRange; + VkComponentMapping components; + VkChromaLocation xChromaOffset; + VkChromaLocation yChromaOffset; + VkFilter chromaFilter; + VkBool32 forceExplicitReconstruction; +} VkSamplerYcbcrConversionCreateInfo; + +typedef struct VkSamplerYcbcrConversionInfo { + VkStructureType sType; + const void* pNext; + VkSamplerYcbcrConversion conversion; +} VkSamplerYcbcrConversionInfo; + +typedef struct VkBindImagePlaneMemoryInfo { + VkStructureType sType; + const void* pNext; + VkImageAspectFlagBits planeAspect; +} VkBindImagePlaneMemoryInfo; + +typedef struct VkImagePlaneMemoryRequirementsInfo { + VkStructureType sType; + const void* pNext; + VkImageAspectFlagBits planeAspect; +} VkImagePlaneMemoryRequirementsInfo; + +typedef struct VkPhysicalDeviceSamplerYcbcrConversionFeatures { + VkStructureType sType; + void* pNext; + VkBool32 samplerYcbcrConversion; +} VkPhysicalDeviceSamplerYcbcrConversionFeatures; + +typedef struct VkSamplerYcbcrConversionImageFormatProperties { + VkStructureType sType; + void* pNext; + uint32_t combinedImageSamplerDescriptorCount; +} VkSamplerYcbcrConversionImageFormatProperties; + +typedef struct VkDescriptorUpdateTemplateEntry { + uint32_t dstBinding; + uint32_t dstArrayElement; + uint32_t descriptorCount; + VkDescriptorType descriptorType; + size_t offset; + size_t stride; +} VkDescriptorUpdateTemplateEntry; + +typedef struct VkDescriptorUpdateTemplateCreateInfo { + VkStructureType sType; + void* pNext; + VkDescriptorUpdateTemplateCreateFlags flags; + uint32_t descriptorUpdateEntryCount; + const VkDescriptorUpdateTemplateEntry* pDescriptorUpdateEntries; + VkDescriptorUpdateTemplateType templateType; + VkDescriptorSetLayout descriptorSetLayout; + VkPipelineBindPoint pipelineBindPoint; + VkPipelineLayout pipelineLayout; + uint32_t set; +} VkDescriptorUpdateTemplateCreateInfo; + +typedef struct VkExternalMemoryProperties { + VkExternalMemoryFeatureFlags externalMemoryFeatures; + VkExternalMemoryHandleTypeFlags exportFromImportedHandleTypes; + VkExternalMemoryHandleTypeFlags compatibleHandleTypes; +} VkExternalMemoryProperties; + +typedef struct VkPhysicalDeviceExternalImageFormatInfo { + VkStructureType sType; + const 
void* pNext; + VkExternalMemoryHandleTypeFlagBits handleType; +} VkPhysicalDeviceExternalImageFormatInfo; + +typedef struct VkExternalImageFormatProperties { + VkStructureType sType; + void* pNext; + VkExternalMemoryProperties externalMemoryProperties; +} VkExternalImageFormatProperties; + +typedef struct VkPhysicalDeviceExternalBufferInfo { + VkStructureType sType; + const void* pNext; + VkBufferCreateFlags flags; + VkBufferUsageFlags usage; + VkExternalMemoryHandleTypeFlagBits handleType; +} VkPhysicalDeviceExternalBufferInfo; + +typedef struct VkExternalBufferProperties { + VkStructureType sType; + void* pNext; + VkExternalMemoryProperties externalMemoryProperties; +} VkExternalBufferProperties; + +typedef struct VkPhysicalDeviceIDProperties { + VkStructureType sType; + void* pNext; + uint8_t deviceUUID[VK_UUID_SIZE]; + uint8_t driverUUID[VK_UUID_SIZE]; + uint8_t deviceLUID[VK_LUID_SIZE]; + uint32_t deviceNodeMask; + VkBool32 deviceLUIDValid; +} VkPhysicalDeviceIDProperties; + +typedef struct VkExternalMemoryImageCreateInfo { + VkStructureType sType; + const void* pNext; + VkExternalMemoryHandleTypeFlags handleTypes; +} VkExternalMemoryImageCreateInfo; + +typedef struct VkExternalMemoryBufferCreateInfo { + VkStructureType sType; + const void* pNext; + VkExternalMemoryHandleTypeFlags handleTypes; +} VkExternalMemoryBufferCreateInfo; + +typedef struct VkExportMemoryAllocateInfo { + VkStructureType sType; + const void* pNext; + VkExternalMemoryHandleTypeFlags handleTypes; +} VkExportMemoryAllocateInfo; + +typedef struct VkPhysicalDeviceExternalFenceInfo { + VkStructureType sType; + const void* pNext; + VkExternalFenceHandleTypeFlagBits handleType; +} VkPhysicalDeviceExternalFenceInfo; + +typedef struct VkExternalFenceProperties { + VkStructureType sType; + void* pNext; + VkExternalFenceHandleTypeFlags exportFromImportedHandleTypes; + VkExternalFenceHandleTypeFlags compatibleHandleTypes; + VkExternalFenceFeatureFlags externalFenceFeatures; +} VkExternalFenceProperties; + +typedef struct VkExportFenceCreateInfo { + VkStructureType sType; + const void* pNext; + VkExternalFenceHandleTypeFlags handleTypes; +} VkExportFenceCreateInfo; + +typedef struct VkExportSemaphoreCreateInfo { + VkStructureType sType; + const void* pNext; + VkExternalSemaphoreHandleTypeFlags handleTypes; +} VkExportSemaphoreCreateInfo; + +typedef struct VkPhysicalDeviceExternalSemaphoreInfo { + VkStructureType sType; + const void* pNext; + VkExternalSemaphoreHandleTypeFlagBits handleType; +} VkPhysicalDeviceExternalSemaphoreInfo; + +typedef struct VkExternalSemaphoreProperties { + VkStructureType sType; + void* pNext; + VkExternalSemaphoreHandleTypeFlags exportFromImportedHandleTypes; + VkExternalSemaphoreHandleTypeFlags compatibleHandleTypes; + VkExternalSemaphoreFeatureFlags externalSemaphoreFeatures; +} VkExternalSemaphoreProperties; + +typedef struct VkPhysicalDeviceMaintenance3Properties { + VkStructureType sType; + void* pNext; + uint32_t maxPerSetDescriptors; + VkDeviceSize maxMemoryAllocationSize; +} VkPhysicalDeviceMaintenance3Properties; + +typedef struct VkDescriptorSetLayoutSupport { + VkStructureType sType; + void* pNext; + VkBool32 supported; +} VkDescriptorSetLayoutSupport; + +typedef struct VkPhysicalDeviceShaderDrawParameterFeatures { + VkStructureType sType; + void* pNext; + VkBool32 shaderDrawParameters; +} VkPhysicalDeviceShaderDrawParameterFeatures; + + +typedef VkResult (VKAPI_PTR *PFN_vkEnumerateInstanceVersion)(uint32_t* pApiVersion); +typedef VkResult (VKAPI_PTR *PFN_vkBindBufferMemory2)(VkDevice 
device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo* pBindInfos); +typedef VkResult (VKAPI_PTR *PFN_vkBindImageMemory2)(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfo* pBindInfos); +typedef void (VKAPI_PTR *PFN_vkGetDeviceGroupPeerMemoryFeatures)(VkDevice device, uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, VkPeerMemoryFeatureFlags* pPeerMemoryFeatures); +typedef void (VKAPI_PTR *PFN_vkCmdSetDeviceMask)(VkCommandBuffer commandBuffer, uint32_t deviceMask); +typedef void (VKAPI_PTR *PFN_vkCmdDispatchBase)(VkCommandBuffer commandBuffer, uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ); +typedef VkResult (VKAPI_PTR *PFN_vkEnumeratePhysicalDeviceGroups)(VkInstance instance, uint32_t* pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties); +typedef void (VKAPI_PTR *PFN_vkGetImageMemoryRequirements2)(VkDevice device, const VkImageMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements); +typedef void (VKAPI_PTR *PFN_vkGetBufferMemoryRequirements2)(VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements); +typedef void (VKAPI_PTR *PFN_vkGetImageSparseMemoryRequirements2)(VkDevice device, const VkImageSparseMemoryRequirementsInfo2* pInfo, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements2* pSparseMemoryRequirements); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFeatures2)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures2* pFeatures); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceProperties2)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties2* pProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFormatProperties2)(VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties2* pFormatProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceImageFormatProperties2)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo, VkImageFormatProperties2* pImageFormatProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceQueueFamilyProperties2)(VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties2* pQueueFamilyProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceMemoryProperties2)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties2* pMemoryProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceSparseImageFormatProperties2)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo, uint32_t* pPropertyCount, VkSparseImageFormatProperties2* pProperties); +typedef void (VKAPI_PTR *PFN_vkTrimCommandPool)(VkDevice device, VkCommandPool commandPool, VkCommandPoolTrimFlags flags); +typedef void (VKAPI_PTR *PFN_vkGetDeviceQueue2)(VkDevice device, const VkDeviceQueueInfo2* pQueueInfo, VkQueue* pQueue); +typedef VkResult (VKAPI_PTR *PFN_vkCreateSamplerYcbcrConversion)(VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSamplerYcbcrConversion* pYcbcrConversion); +typedef void (VKAPI_PTR *PFN_vkDestroySamplerYcbcrConversion)(VkDevice device, VkSamplerYcbcrConversion ycbcrConversion, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateDescriptorUpdateTemplate)(VkDevice device, const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo, const VkAllocationCallbacks* 
pAllocator, VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate); +typedef void (VKAPI_PTR *PFN_vkDestroyDescriptorUpdateTemplate)(VkDevice device, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkUpdateDescriptorSetWithTemplate)(VkDevice device, VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalBufferProperties)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo, VkExternalBufferProperties* pExternalBufferProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalFenceProperties)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo, VkExternalFenceProperties* pExternalFenceProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalSemaphoreProperties)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, VkExternalSemaphoreProperties* pExternalSemaphoreProperties); +typedef void (VKAPI_PTR *PFN_vkGetDescriptorSetLayoutSupport)(VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, VkDescriptorSetLayoutSupport* pSupport); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceVersion( + uint32_t* pApiVersion); + +VKAPI_ATTR VkResult VKAPI_CALL vkBindBufferMemory2( + VkDevice device, + uint32_t bindInfoCount, + const VkBindBufferMemoryInfo* pBindInfos); + +VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory2( + VkDevice device, + uint32_t bindInfoCount, + const VkBindImageMemoryInfo* pBindInfos); + +VKAPI_ATTR void VKAPI_CALL vkGetDeviceGroupPeerMemoryFeatures( + VkDevice device, + uint32_t heapIndex, + uint32_t localDeviceIndex, + uint32_t remoteDeviceIndex, + VkPeerMemoryFeatureFlags* pPeerMemoryFeatures); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetDeviceMask( + VkCommandBuffer commandBuffer, + uint32_t deviceMask); + +VKAPI_ATTR void VKAPI_CALL vkCmdDispatchBase( + VkCommandBuffer commandBuffer, + uint32_t baseGroupX, + uint32_t baseGroupY, + uint32_t baseGroupZ, + uint32_t groupCountX, + uint32_t groupCountY, + uint32_t groupCountZ); + +VKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDeviceGroups( + VkInstance instance, + uint32_t* pPhysicalDeviceGroupCount, + VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetImageMemoryRequirements2( + VkDevice device, + const VkImageMemoryRequirementsInfo2* pInfo, + VkMemoryRequirements2* pMemoryRequirements); + +VKAPI_ATTR void VKAPI_CALL vkGetBufferMemoryRequirements2( + VkDevice device, + const VkBufferMemoryRequirementsInfo2* pInfo, + VkMemoryRequirements2* pMemoryRequirements); + +VKAPI_ATTR void VKAPI_CALL vkGetImageSparseMemoryRequirements2( + VkDevice device, + const VkImageSparseMemoryRequirementsInfo2* pInfo, + uint32_t* pSparseMemoryRequirementCount, + VkSparseImageMemoryRequirements2* pSparseMemoryRequirements); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFeatures2( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceFeatures2* pFeatures); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceProperties2( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceProperties2* pProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFormatProperties2( + VkPhysicalDevice physicalDevice, + VkFormat format, + VkFormatProperties2* pFormatProperties); + +VKAPI_ATTR VkResult VKAPI_CALL 
vkGetPhysicalDeviceImageFormatProperties2( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo, + VkImageFormatProperties2* pImageFormatProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceQueueFamilyProperties2( + VkPhysicalDevice physicalDevice, + uint32_t* pQueueFamilyPropertyCount, + VkQueueFamilyProperties2* pQueueFamilyProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMemoryProperties2( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceMemoryProperties2* pMemoryProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceSparseImageFormatProperties2( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo, + uint32_t* pPropertyCount, + VkSparseImageFormatProperties2* pProperties); + +VKAPI_ATTR void VKAPI_CALL vkTrimCommandPool( + VkDevice device, + VkCommandPool commandPool, + VkCommandPoolTrimFlags flags); + +VKAPI_ATTR void VKAPI_CALL vkGetDeviceQueue2( + VkDevice device, + const VkDeviceQueueInfo2* pQueueInfo, + VkQueue* pQueue); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateSamplerYcbcrConversion( + VkDevice device, + const VkSamplerYcbcrConversionCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSamplerYcbcrConversion* pYcbcrConversion); + +VKAPI_ATTR void VKAPI_CALL vkDestroySamplerYcbcrConversion( + VkDevice device, + VkSamplerYcbcrConversion ycbcrConversion, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorUpdateTemplate( + VkDevice device, + const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate); + +VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorUpdateTemplate( + VkDevice device, + VkDescriptorUpdateTemplate descriptorUpdateTemplate, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR void VKAPI_CALL vkUpdateDescriptorSetWithTemplate( + VkDevice device, + VkDescriptorSet descriptorSet, + VkDescriptorUpdateTemplate descriptorUpdateTemplate, + const void* pData); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalBufferProperties( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo, + VkExternalBufferProperties* pExternalBufferProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalFenceProperties( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo, + VkExternalFenceProperties* pExternalFenceProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalSemaphoreProperties( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, + VkExternalSemaphoreProperties* pExternalSemaphoreProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetDescriptorSetLayoutSupport( + VkDevice device, + const VkDescriptorSetLayoutCreateInfo* pCreateInfo, + VkDescriptorSetLayoutSupport* pSupport); +#endif + +#define VK_KHR_surface 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSurfaceKHR) + +#define VK_KHR_SURFACE_SPEC_VERSION 25 +#define VK_KHR_SURFACE_EXTENSION_NAME "VK_KHR_surface" +#define VK_COLORSPACE_SRGB_NONLINEAR_KHR VK_COLOR_SPACE_SRGB_NONLINEAR_KHR + + +typedef enum VkColorSpaceKHR { + VK_COLOR_SPACE_SRGB_NONLINEAR_KHR = 0, + VK_COLOR_SPACE_DISPLAY_P3_NONLINEAR_EXT = 1000104001, + VK_COLOR_SPACE_EXTENDED_SRGB_LINEAR_EXT = 1000104002, + VK_COLOR_SPACE_DCI_P3_LINEAR_EXT = 1000104003, + VK_COLOR_SPACE_DCI_P3_NONLINEAR_EXT = 1000104004, + 
VK_COLOR_SPACE_BT709_LINEAR_EXT = 1000104005, + VK_COLOR_SPACE_BT709_NONLINEAR_EXT = 1000104006, + VK_COLOR_SPACE_BT2020_LINEAR_EXT = 1000104007, + VK_COLOR_SPACE_HDR10_ST2084_EXT = 1000104008, + VK_COLOR_SPACE_DOLBYVISION_EXT = 1000104009, + VK_COLOR_SPACE_HDR10_HLG_EXT = 1000104010, + VK_COLOR_SPACE_ADOBERGB_LINEAR_EXT = 1000104011, + VK_COLOR_SPACE_ADOBERGB_NONLINEAR_EXT = 1000104012, + VK_COLOR_SPACE_PASS_THROUGH_EXT = 1000104013, + VK_COLOR_SPACE_EXTENDED_SRGB_NONLINEAR_EXT = 1000104014, + VK_COLOR_SPACE_BEGIN_RANGE_KHR = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR, + VK_COLOR_SPACE_END_RANGE_KHR = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR, + VK_COLOR_SPACE_RANGE_SIZE_KHR = (VK_COLOR_SPACE_SRGB_NONLINEAR_KHR - VK_COLOR_SPACE_SRGB_NONLINEAR_KHR + 1), + VK_COLOR_SPACE_MAX_ENUM_KHR = 0x7FFFFFFF +} VkColorSpaceKHR; + +typedef enum VkPresentModeKHR { + VK_PRESENT_MODE_IMMEDIATE_KHR = 0, + VK_PRESENT_MODE_MAILBOX_KHR = 1, + VK_PRESENT_MODE_FIFO_KHR = 2, + VK_PRESENT_MODE_FIFO_RELAXED_KHR = 3, + VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR = 1000111000, + VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR = 1000111001, + VK_PRESENT_MODE_BEGIN_RANGE_KHR = VK_PRESENT_MODE_IMMEDIATE_KHR, + VK_PRESENT_MODE_END_RANGE_KHR = VK_PRESENT_MODE_FIFO_RELAXED_KHR, + VK_PRESENT_MODE_RANGE_SIZE_KHR = (VK_PRESENT_MODE_FIFO_RELAXED_KHR - VK_PRESENT_MODE_IMMEDIATE_KHR + 1), + VK_PRESENT_MODE_MAX_ENUM_KHR = 0x7FFFFFFF +} VkPresentModeKHR; + + +typedef enum VkSurfaceTransformFlagBitsKHR { + VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR = 0x00000001, + VK_SURFACE_TRANSFORM_ROTATE_90_BIT_KHR = 0x00000002, + VK_SURFACE_TRANSFORM_ROTATE_180_BIT_KHR = 0x00000004, + VK_SURFACE_TRANSFORM_ROTATE_270_BIT_KHR = 0x00000008, + VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_BIT_KHR = 0x00000010, + VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_90_BIT_KHR = 0x00000020, + VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_180_BIT_KHR = 0x00000040, + VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_270_BIT_KHR = 0x00000080, + VK_SURFACE_TRANSFORM_INHERIT_BIT_KHR = 0x00000100, + VK_SURFACE_TRANSFORM_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkSurfaceTransformFlagBitsKHR; +typedef VkFlags VkSurfaceTransformFlagsKHR; + +typedef enum VkCompositeAlphaFlagBitsKHR { + VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR = 0x00000001, + VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR = 0x00000002, + VK_COMPOSITE_ALPHA_POST_MULTIPLIED_BIT_KHR = 0x00000004, + VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR = 0x00000008, + VK_COMPOSITE_ALPHA_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkCompositeAlphaFlagBitsKHR; +typedef VkFlags VkCompositeAlphaFlagsKHR; + +typedef struct VkSurfaceCapabilitiesKHR { + uint32_t minImageCount; + uint32_t maxImageCount; + VkExtent2D currentExtent; + VkExtent2D minImageExtent; + VkExtent2D maxImageExtent; + uint32_t maxImageArrayLayers; + VkSurfaceTransformFlagsKHR supportedTransforms; + VkSurfaceTransformFlagBitsKHR currentTransform; + VkCompositeAlphaFlagsKHR supportedCompositeAlpha; + VkImageUsageFlags supportedUsageFlags; +} VkSurfaceCapabilitiesKHR; + +typedef struct VkSurfaceFormatKHR { + VkFormat format; + VkColorSpaceKHR colorSpace; +} VkSurfaceFormatKHR; + + +typedef void (VKAPI_PTR *PFN_vkDestroySurfaceKHR)(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, VkSurfaceKHR surface, VkBool32* pSupported); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, 
VkSurfaceCapabilitiesKHR* pSurfaceCapabilities); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceFormatsKHR)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pSurfaceFormatCount, VkSurfaceFormatKHR* pSurfaceFormats); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfacePresentModesKHR)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pPresentModeCount, VkPresentModeKHR* pPresentModes); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkDestroySurfaceKHR( + VkInstance instance, + VkSurfaceKHR surface, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceSupportKHR( + VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex, + VkSurfaceKHR surface, + VkBool32* pSupported); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceCapabilitiesKHR( + VkPhysicalDevice physicalDevice, + VkSurfaceKHR surface, + VkSurfaceCapabilitiesKHR* pSurfaceCapabilities); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceFormatsKHR( + VkPhysicalDevice physicalDevice, + VkSurfaceKHR surface, + uint32_t* pSurfaceFormatCount, + VkSurfaceFormatKHR* pSurfaceFormats); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfacePresentModesKHR( + VkPhysicalDevice physicalDevice, + VkSurfaceKHR surface, + uint32_t* pPresentModeCount, + VkPresentModeKHR* pPresentModes); +#endif + +#define VK_KHR_swapchain 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSwapchainKHR) + +#define VK_KHR_SWAPCHAIN_SPEC_VERSION 70 +#define VK_KHR_SWAPCHAIN_EXTENSION_NAME "VK_KHR_swapchain" + + +typedef enum VkSwapchainCreateFlagBitsKHR { + VK_SWAPCHAIN_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR = 0x00000001, + VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR = 0x00000002, + VK_SWAPCHAIN_CREATE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkSwapchainCreateFlagBitsKHR; +typedef VkFlags VkSwapchainCreateFlagsKHR; + +typedef enum VkDeviceGroupPresentModeFlagBitsKHR { + VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_BIT_KHR = 0x00000001, + VK_DEVICE_GROUP_PRESENT_MODE_REMOTE_BIT_KHR = 0x00000002, + VK_DEVICE_GROUP_PRESENT_MODE_SUM_BIT_KHR = 0x00000004, + VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_MULTI_DEVICE_BIT_KHR = 0x00000008, + VK_DEVICE_GROUP_PRESENT_MODE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkDeviceGroupPresentModeFlagBitsKHR; +typedef VkFlags VkDeviceGroupPresentModeFlagsKHR; + +typedef struct VkSwapchainCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkSwapchainCreateFlagsKHR flags; + VkSurfaceKHR surface; + uint32_t minImageCount; + VkFormat imageFormat; + VkColorSpaceKHR imageColorSpace; + VkExtent2D imageExtent; + uint32_t imageArrayLayers; + VkImageUsageFlags imageUsage; + VkSharingMode imageSharingMode; + uint32_t queueFamilyIndexCount; + const uint32_t* pQueueFamilyIndices; + VkSurfaceTransformFlagBitsKHR preTransform; + VkCompositeAlphaFlagBitsKHR compositeAlpha; + VkPresentModeKHR presentMode; + VkBool32 clipped; + VkSwapchainKHR oldSwapchain; +} VkSwapchainCreateInfoKHR; + +typedef struct VkPresentInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t waitSemaphoreCount; + const VkSemaphore* pWaitSemaphores; + uint32_t swapchainCount; + const VkSwapchainKHR* pSwapchains; + const uint32_t* pImageIndices; + VkResult* pResults; +} VkPresentInfoKHR; + +typedef struct VkImageSwapchainCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkSwapchainKHR swapchain; +} VkImageSwapchainCreateInfoKHR; + +typedef struct VkBindImageMemorySwapchainInfoKHR { + VkStructureType sType; + const void* pNext; + VkSwapchainKHR 
swapchain; + uint32_t imageIndex; +} VkBindImageMemorySwapchainInfoKHR; + +typedef struct VkAcquireNextImageInfoKHR { + VkStructureType sType; + const void* pNext; + VkSwapchainKHR swapchain; + uint64_t timeout; + VkSemaphore semaphore; + VkFence fence; + uint32_t deviceMask; +} VkAcquireNextImageInfoKHR; + +typedef struct VkDeviceGroupPresentCapabilitiesKHR { + VkStructureType sType; + const void* pNext; + uint32_t presentMask[VK_MAX_DEVICE_GROUP_SIZE]; + VkDeviceGroupPresentModeFlagsKHR modes; +} VkDeviceGroupPresentCapabilitiesKHR; + +typedef struct VkDeviceGroupPresentInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t swapchainCount; + const uint32_t* pDeviceMasks; + VkDeviceGroupPresentModeFlagBitsKHR mode; +} VkDeviceGroupPresentInfoKHR; + +typedef struct VkDeviceGroupSwapchainCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkDeviceGroupPresentModeFlagsKHR modes; +} VkDeviceGroupSwapchainCreateInfoKHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkCreateSwapchainKHR)(VkDevice device, const VkSwapchainCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSwapchainKHR* pSwapchain); +typedef void (VKAPI_PTR *PFN_vkDestroySwapchainKHR)(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkGetSwapchainImagesKHR)(VkDevice device, VkSwapchainKHR swapchain, uint32_t* pSwapchainImageCount, VkImage* pSwapchainImages); +typedef VkResult (VKAPI_PTR *PFN_vkAcquireNextImageKHR)(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout, VkSemaphore semaphore, VkFence fence, uint32_t* pImageIndex); +typedef VkResult (VKAPI_PTR *PFN_vkQueuePresentKHR)(VkQueue queue, const VkPresentInfoKHR* pPresentInfo); +typedef VkResult (VKAPI_PTR *PFN_vkGetDeviceGroupPresentCapabilitiesKHR)(VkDevice device, VkDeviceGroupPresentCapabilitiesKHR* pDeviceGroupPresentCapabilities); +typedef VkResult (VKAPI_PTR *PFN_vkGetDeviceGroupSurfacePresentModesKHR)(VkDevice device, VkSurfaceKHR surface, VkDeviceGroupPresentModeFlagsKHR* pModes); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDevicePresentRectanglesKHR)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pRectCount, VkRect2D* pRects); +typedef VkResult (VKAPI_PTR *PFN_vkAcquireNextImage2KHR)(VkDevice device, const VkAcquireNextImageInfoKHR* pAcquireInfo, uint32_t* pImageIndex); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateSwapchainKHR( + VkDevice device, + const VkSwapchainCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSwapchainKHR* pSwapchain); + +VKAPI_ATTR void VKAPI_CALL vkDestroySwapchainKHR( + VkDevice device, + VkSwapchainKHR swapchain, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainImagesKHR( + VkDevice device, + VkSwapchainKHR swapchain, + uint32_t* pSwapchainImageCount, + VkImage* pSwapchainImages); + +VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImageKHR( + VkDevice device, + VkSwapchainKHR swapchain, + uint64_t timeout, + VkSemaphore semaphore, + VkFence fence, + uint32_t* pImageIndex); + +VKAPI_ATTR VkResult VKAPI_CALL vkQueuePresentKHR( + VkQueue queue, + const VkPresentInfoKHR* pPresentInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetDeviceGroupPresentCapabilitiesKHR( + VkDevice device, + VkDeviceGroupPresentCapabilitiesKHR* pDeviceGroupPresentCapabilities); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetDeviceGroupSurfacePresentModesKHR( + VkDevice device, + VkSurfaceKHR surface, + VkDeviceGroupPresentModeFlagsKHR* pModes); + +VKAPI_ATTR 
VkResult VKAPI_CALL vkGetPhysicalDevicePresentRectanglesKHR( + VkPhysicalDevice physicalDevice, + VkSurfaceKHR surface, + uint32_t* pRectCount, + VkRect2D* pRects); + +VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImage2KHR( + VkDevice device, + const VkAcquireNextImageInfoKHR* pAcquireInfo, + uint32_t* pImageIndex); +#endif + +#define VK_KHR_display 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDisplayKHR) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDisplayModeKHR) + +#define VK_KHR_DISPLAY_SPEC_VERSION 21 +#define VK_KHR_DISPLAY_EXTENSION_NAME "VK_KHR_display" + + +typedef enum VkDisplayPlaneAlphaFlagBitsKHR { + VK_DISPLAY_PLANE_ALPHA_OPAQUE_BIT_KHR = 0x00000001, + VK_DISPLAY_PLANE_ALPHA_GLOBAL_BIT_KHR = 0x00000002, + VK_DISPLAY_PLANE_ALPHA_PER_PIXEL_BIT_KHR = 0x00000004, + VK_DISPLAY_PLANE_ALPHA_PER_PIXEL_PREMULTIPLIED_BIT_KHR = 0x00000008, + VK_DISPLAY_PLANE_ALPHA_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkDisplayPlaneAlphaFlagBitsKHR; +typedef VkFlags VkDisplayPlaneAlphaFlagsKHR; +typedef VkFlags VkDisplayModeCreateFlagsKHR; +typedef VkFlags VkDisplaySurfaceCreateFlagsKHR; + +typedef struct VkDisplayPropertiesKHR { + VkDisplayKHR display; + const char* displayName; + VkExtent2D physicalDimensions; + VkExtent2D physicalResolution; + VkSurfaceTransformFlagsKHR supportedTransforms; + VkBool32 planeReorderPossible; + VkBool32 persistentContent; +} VkDisplayPropertiesKHR; + +typedef struct VkDisplayModeParametersKHR { + VkExtent2D visibleRegion; + uint32_t refreshRate; +} VkDisplayModeParametersKHR; + +typedef struct VkDisplayModePropertiesKHR { + VkDisplayModeKHR displayMode; + VkDisplayModeParametersKHR parameters; +} VkDisplayModePropertiesKHR; + +typedef struct VkDisplayModeCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkDisplayModeCreateFlagsKHR flags; + VkDisplayModeParametersKHR parameters; +} VkDisplayModeCreateInfoKHR; + +typedef struct VkDisplayPlaneCapabilitiesKHR { + VkDisplayPlaneAlphaFlagsKHR supportedAlpha; + VkOffset2D minSrcPosition; + VkOffset2D maxSrcPosition; + VkExtent2D minSrcExtent; + VkExtent2D maxSrcExtent; + VkOffset2D minDstPosition; + VkOffset2D maxDstPosition; + VkExtent2D minDstExtent; + VkExtent2D maxDstExtent; +} VkDisplayPlaneCapabilitiesKHR; + +typedef struct VkDisplayPlanePropertiesKHR { + VkDisplayKHR currentDisplay; + uint32_t currentStackIndex; +} VkDisplayPlanePropertiesKHR; + +typedef struct VkDisplaySurfaceCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkDisplaySurfaceCreateFlagsKHR flags; + VkDisplayModeKHR displayMode; + uint32_t planeIndex; + uint32_t planeStackIndex; + VkSurfaceTransformFlagBitsKHR transform; + float globalAlpha; + VkDisplayPlaneAlphaFlagBitsKHR alphaMode; + VkExtent2D imageExtent; +} VkDisplaySurfaceCreateInfoKHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceDisplayPropertiesKHR)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayPropertiesKHR* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceDisplayPlanePropertiesKHR)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayPlanePropertiesKHR* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetDisplayPlaneSupportedDisplaysKHR)(VkPhysicalDevice physicalDevice, uint32_t planeIndex, uint32_t* pDisplayCount, VkDisplayKHR* pDisplays); +typedef VkResult (VKAPI_PTR *PFN_vkGetDisplayModePropertiesKHR)(VkPhysicalDevice physicalDevice, VkDisplayKHR display, uint32_t* pPropertyCount, VkDisplayModePropertiesKHR* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkCreateDisplayModeKHR)(VkPhysicalDevice 
physicalDevice, VkDisplayKHR display, const VkDisplayModeCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDisplayModeKHR* pMode); +typedef VkResult (VKAPI_PTR *PFN_vkGetDisplayPlaneCapabilitiesKHR)(VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode, uint32_t planeIndex, VkDisplayPlaneCapabilitiesKHR* pCapabilities); +typedef VkResult (VKAPI_PTR *PFN_vkCreateDisplayPlaneSurfaceKHR)(VkInstance instance, const VkDisplaySurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceDisplayPropertiesKHR( + VkPhysicalDevice physicalDevice, + uint32_t* pPropertyCount, + VkDisplayPropertiesKHR* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceDisplayPlanePropertiesKHR( + VkPhysicalDevice physicalDevice, + uint32_t* pPropertyCount, + VkDisplayPlanePropertiesKHR* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayPlaneSupportedDisplaysKHR( + VkPhysicalDevice physicalDevice, + uint32_t planeIndex, + uint32_t* pDisplayCount, + VkDisplayKHR* pDisplays); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayModePropertiesKHR( + VkPhysicalDevice physicalDevice, + VkDisplayKHR display, + uint32_t* pPropertyCount, + VkDisplayModePropertiesKHR* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDisplayModeKHR( + VkPhysicalDevice physicalDevice, + VkDisplayKHR display, + const VkDisplayModeCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDisplayModeKHR* pMode); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayPlaneCapabilitiesKHR( + VkPhysicalDevice physicalDevice, + VkDisplayModeKHR mode, + uint32_t planeIndex, + VkDisplayPlaneCapabilitiesKHR* pCapabilities); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDisplayPlaneSurfaceKHR( + VkInstance instance, + const VkDisplaySurfaceCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); +#endif + +#define VK_KHR_display_swapchain 1 +#define VK_KHR_DISPLAY_SWAPCHAIN_SPEC_VERSION 9 +#define VK_KHR_DISPLAY_SWAPCHAIN_EXTENSION_NAME "VK_KHR_display_swapchain" + +typedef struct VkDisplayPresentInfoKHR { + VkStructureType sType; + const void* pNext; + VkRect2D srcRect; + VkRect2D dstRect; + VkBool32 persistent; +} VkDisplayPresentInfoKHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkCreateSharedSwapchainsKHR)(VkDevice device, uint32_t swapchainCount, const VkSwapchainCreateInfoKHR* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkSwapchainKHR* pSwapchains); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateSharedSwapchainsKHR( + VkDevice device, + uint32_t swapchainCount, + const VkSwapchainCreateInfoKHR* pCreateInfos, + const VkAllocationCallbacks* pAllocator, + VkSwapchainKHR* pSwapchains); +#endif + +#define VK_KHR_sampler_mirror_clamp_to_edge 1 +#define VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_SPEC_VERSION 1 +#define VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_EXTENSION_NAME "VK_KHR_sampler_mirror_clamp_to_edge" + + +#define VK_KHR_multiview 1 +#define VK_KHR_MULTIVIEW_SPEC_VERSION 1 +#define VK_KHR_MULTIVIEW_EXTENSION_NAME "VK_KHR_multiview" + +typedef VkRenderPassMultiviewCreateInfo VkRenderPassMultiviewCreateInfoKHR; + +typedef VkPhysicalDeviceMultiviewFeatures VkPhysicalDeviceMultiviewFeaturesKHR; + +typedef VkPhysicalDeviceMultiviewProperties VkPhysicalDeviceMultiviewPropertiesKHR; + + + +#define VK_KHR_get_physical_device_properties2 1 +#define VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_SPEC_VERSION 1 +#define 
VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME "VK_KHR_get_physical_device_properties2" + +typedef VkPhysicalDeviceFeatures2 VkPhysicalDeviceFeatures2KHR; + +typedef VkPhysicalDeviceProperties2 VkPhysicalDeviceProperties2KHR; + +typedef VkFormatProperties2 VkFormatProperties2KHR; + +typedef VkImageFormatProperties2 VkImageFormatProperties2KHR; + +typedef VkPhysicalDeviceImageFormatInfo2 VkPhysicalDeviceImageFormatInfo2KHR; + +typedef VkQueueFamilyProperties2 VkQueueFamilyProperties2KHR; + +typedef VkPhysicalDeviceMemoryProperties2 VkPhysicalDeviceMemoryProperties2KHR; + +typedef VkSparseImageFormatProperties2 VkSparseImageFormatProperties2KHR; + +typedef VkPhysicalDeviceSparseImageFormatInfo2 VkPhysicalDeviceSparseImageFormatInfo2KHR; + + +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFeatures2KHR)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures2* pFeatures); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceProperties2KHR)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties2* pProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFormatProperties2KHR)(VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties2* pFormatProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceImageFormatProperties2KHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo, VkImageFormatProperties2* pImageFormatProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceQueueFamilyProperties2KHR)(VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties2* pQueueFamilyProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceMemoryProperties2KHR)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties2* pMemoryProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceSparseImageFormatProperties2KHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo, uint32_t* pPropertyCount, VkSparseImageFormatProperties2* pProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFeatures2KHR( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceFeatures2* pFeatures); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceProperties2KHR( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceProperties2* pProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFormatProperties2KHR( + VkPhysicalDevice physicalDevice, + VkFormat format, + VkFormatProperties2* pFormatProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceImageFormatProperties2KHR( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo, + VkImageFormatProperties2* pImageFormatProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceQueueFamilyProperties2KHR( + VkPhysicalDevice physicalDevice, + uint32_t* pQueueFamilyPropertyCount, + VkQueueFamilyProperties2* pQueueFamilyProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMemoryProperties2KHR( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceMemoryProperties2* pMemoryProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceSparseImageFormatProperties2KHR( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo, + uint32_t* pPropertyCount, + VkSparseImageFormatProperties2* pProperties); +#endif + +#define VK_KHR_device_group 1 +#define VK_KHR_DEVICE_GROUP_SPEC_VERSION 3 +#define VK_KHR_DEVICE_GROUP_EXTENSION_NAME "VK_KHR_device_group" + +typedef VkPeerMemoryFeatureFlags 
VkPeerMemoryFeatureFlagsKHR; + +typedef VkPeerMemoryFeatureFlagBits VkPeerMemoryFeatureFlagBitsKHR; + +typedef VkMemoryAllocateFlags VkMemoryAllocateFlagsKHR; + +typedef VkMemoryAllocateFlagBits VkMemoryAllocateFlagBitsKHR; + + +typedef VkMemoryAllocateFlagsInfo VkMemoryAllocateFlagsInfoKHR; + +typedef VkDeviceGroupRenderPassBeginInfo VkDeviceGroupRenderPassBeginInfoKHR; + +typedef VkDeviceGroupCommandBufferBeginInfo VkDeviceGroupCommandBufferBeginInfoKHR; + +typedef VkDeviceGroupSubmitInfo VkDeviceGroupSubmitInfoKHR; + +typedef VkDeviceGroupBindSparseInfo VkDeviceGroupBindSparseInfoKHR; + +typedef VkBindBufferMemoryDeviceGroupInfo VkBindBufferMemoryDeviceGroupInfoKHR; + +typedef VkBindImageMemoryDeviceGroupInfo VkBindImageMemoryDeviceGroupInfoKHR; + + +typedef void (VKAPI_PTR *PFN_vkGetDeviceGroupPeerMemoryFeaturesKHR)(VkDevice device, uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, VkPeerMemoryFeatureFlags* pPeerMemoryFeatures); +typedef void (VKAPI_PTR *PFN_vkCmdSetDeviceMaskKHR)(VkCommandBuffer commandBuffer, uint32_t deviceMask); +typedef void (VKAPI_PTR *PFN_vkCmdDispatchBaseKHR)(VkCommandBuffer commandBuffer, uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetDeviceGroupPeerMemoryFeaturesKHR( + VkDevice device, + uint32_t heapIndex, + uint32_t localDeviceIndex, + uint32_t remoteDeviceIndex, + VkPeerMemoryFeatureFlags* pPeerMemoryFeatures); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetDeviceMaskKHR( + VkCommandBuffer commandBuffer, + uint32_t deviceMask); + +VKAPI_ATTR void VKAPI_CALL vkCmdDispatchBaseKHR( + VkCommandBuffer commandBuffer, + uint32_t baseGroupX, + uint32_t baseGroupY, + uint32_t baseGroupZ, + uint32_t groupCountX, + uint32_t groupCountY, + uint32_t groupCountZ); +#endif + +#define VK_KHR_shader_draw_parameters 1 +#define VK_KHR_SHADER_DRAW_PARAMETERS_SPEC_VERSION 1 +#define VK_KHR_SHADER_DRAW_PARAMETERS_EXTENSION_NAME "VK_KHR_shader_draw_parameters" + + +#define VK_KHR_maintenance1 1 +#define VK_KHR_MAINTENANCE1_SPEC_VERSION 2 +#define VK_KHR_MAINTENANCE1_EXTENSION_NAME "VK_KHR_maintenance1" + +typedef VkCommandPoolTrimFlags VkCommandPoolTrimFlagsKHR; + + +typedef void (VKAPI_PTR *PFN_vkTrimCommandPoolKHR)(VkDevice device, VkCommandPool commandPool, VkCommandPoolTrimFlags flags); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkTrimCommandPoolKHR( + VkDevice device, + VkCommandPool commandPool, + VkCommandPoolTrimFlags flags); +#endif + +#define VK_KHR_device_group_creation 1 +#define VK_KHR_DEVICE_GROUP_CREATION_SPEC_VERSION 1 +#define VK_KHR_DEVICE_GROUP_CREATION_EXTENSION_NAME "VK_KHR_device_group_creation" +#define VK_MAX_DEVICE_GROUP_SIZE_KHR VK_MAX_DEVICE_GROUP_SIZE + +typedef VkPhysicalDeviceGroupProperties VkPhysicalDeviceGroupPropertiesKHR; + +typedef VkDeviceGroupDeviceCreateInfo VkDeviceGroupDeviceCreateInfoKHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkEnumeratePhysicalDeviceGroupsKHR)(VkInstance instance, uint32_t* pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDeviceGroupsKHR( + VkInstance instance, + uint32_t* pPhysicalDeviceGroupCount, + VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties); +#endif + +#define VK_KHR_external_memory_capabilities 1 +#define VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_SPEC_VERSION 1 +#define 
VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME "VK_KHR_external_memory_capabilities" +#define VK_LUID_SIZE_KHR VK_LUID_SIZE + +typedef VkExternalMemoryHandleTypeFlags VkExternalMemoryHandleTypeFlagsKHR; + +typedef VkExternalMemoryHandleTypeFlagBits VkExternalMemoryHandleTypeFlagBitsKHR; + +typedef VkExternalMemoryFeatureFlags VkExternalMemoryFeatureFlagsKHR; + +typedef VkExternalMemoryFeatureFlagBits VkExternalMemoryFeatureFlagBitsKHR; + + +typedef VkExternalMemoryProperties VkExternalMemoryPropertiesKHR; + +typedef VkPhysicalDeviceExternalImageFormatInfo VkPhysicalDeviceExternalImageFormatInfoKHR; + +typedef VkExternalImageFormatProperties VkExternalImageFormatPropertiesKHR; + +typedef VkPhysicalDeviceExternalBufferInfo VkPhysicalDeviceExternalBufferInfoKHR; + +typedef VkExternalBufferProperties VkExternalBufferPropertiesKHR; + +typedef VkPhysicalDeviceIDProperties VkPhysicalDeviceIDPropertiesKHR; + + +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalBufferPropertiesKHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo, VkExternalBufferProperties* pExternalBufferProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalBufferPropertiesKHR( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo, + VkExternalBufferProperties* pExternalBufferProperties); +#endif + +#define VK_KHR_external_memory 1 +#define VK_KHR_EXTERNAL_MEMORY_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME "VK_KHR_external_memory" +#define VK_QUEUE_FAMILY_EXTERNAL_KHR VK_QUEUE_FAMILY_EXTERNAL + +typedef VkExternalMemoryImageCreateInfo VkExternalMemoryImageCreateInfoKHR; + +typedef VkExternalMemoryBufferCreateInfo VkExternalMemoryBufferCreateInfoKHR; + +typedef VkExportMemoryAllocateInfo VkExportMemoryAllocateInfoKHR; + + + +#define VK_KHR_external_memory_fd 1 +#define VK_KHR_EXTERNAL_MEMORY_FD_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME "VK_KHR_external_memory_fd" + +typedef struct VkImportMemoryFdInfoKHR { + VkStructureType sType; + const void* pNext; + VkExternalMemoryHandleTypeFlagBits handleType; + int fd; +} VkImportMemoryFdInfoKHR; + +typedef struct VkMemoryFdPropertiesKHR { + VkStructureType sType; + void* pNext; + uint32_t memoryTypeBits; +} VkMemoryFdPropertiesKHR; + +typedef struct VkMemoryGetFdInfoKHR { + VkStructureType sType; + const void* pNext; + VkDeviceMemory memory; + VkExternalMemoryHandleTypeFlagBits handleType; +} VkMemoryGetFdInfoKHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryFdKHR)(VkDevice device, const VkMemoryGetFdInfoKHR* pGetFdInfo, int* pFd); +typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryFdPropertiesKHR)(VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType, int fd, VkMemoryFdPropertiesKHR* pMemoryFdProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryFdKHR( + VkDevice device, + const VkMemoryGetFdInfoKHR* pGetFdInfo, + int* pFd); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryFdPropertiesKHR( + VkDevice device, + VkExternalMemoryHandleTypeFlagBits handleType, + int fd, + VkMemoryFdPropertiesKHR* pMemoryFdProperties); +#endif + +#define VK_KHR_external_semaphore_capabilities 1 +#define VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_EXTENSION_NAME "VK_KHR_external_semaphore_capabilities" + +typedef VkExternalSemaphoreHandleTypeFlags VkExternalSemaphoreHandleTypeFlagsKHR; + +typedef VkExternalSemaphoreHandleTypeFlagBits 
VkExternalSemaphoreHandleTypeFlagBitsKHR; + +typedef VkExternalSemaphoreFeatureFlags VkExternalSemaphoreFeatureFlagsKHR; + +typedef VkExternalSemaphoreFeatureFlagBits VkExternalSemaphoreFeatureFlagBitsKHR; + + +typedef VkPhysicalDeviceExternalSemaphoreInfo VkPhysicalDeviceExternalSemaphoreInfoKHR; + +typedef VkExternalSemaphoreProperties VkExternalSemaphorePropertiesKHR; + + +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, VkExternalSemaphoreProperties* pExternalSemaphoreProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalSemaphorePropertiesKHR( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, + VkExternalSemaphoreProperties* pExternalSemaphoreProperties); +#endif + +#define VK_KHR_external_semaphore 1 +#define VK_KHR_EXTERNAL_SEMAPHORE_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_SEMAPHORE_EXTENSION_NAME "VK_KHR_external_semaphore" + +typedef VkSemaphoreImportFlags VkSemaphoreImportFlagsKHR; + +typedef VkSemaphoreImportFlagBits VkSemaphoreImportFlagBitsKHR; + + +typedef VkExportSemaphoreCreateInfo VkExportSemaphoreCreateInfoKHR; + + + +#define VK_KHR_external_semaphore_fd 1 +#define VK_KHR_EXTERNAL_SEMAPHORE_FD_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME "VK_KHR_external_semaphore_fd" + +typedef struct VkImportSemaphoreFdInfoKHR { + VkStructureType sType; + const void* pNext; + VkSemaphore semaphore; + VkSemaphoreImportFlags flags; + VkExternalSemaphoreHandleTypeFlagBits handleType; + int fd; +} VkImportSemaphoreFdInfoKHR; + +typedef struct VkSemaphoreGetFdInfoKHR { + VkStructureType sType; + const void* pNext; + VkSemaphore semaphore; + VkExternalSemaphoreHandleTypeFlagBits handleType; +} VkSemaphoreGetFdInfoKHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkImportSemaphoreFdKHR)(VkDevice device, const VkImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo); +typedef VkResult (VKAPI_PTR *PFN_vkGetSemaphoreFdKHR)(VkDevice device, const VkSemaphoreGetFdInfoKHR* pGetFdInfo, int* pFd); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkImportSemaphoreFdKHR( + VkDevice device, + const VkImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetSemaphoreFdKHR( + VkDevice device, + const VkSemaphoreGetFdInfoKHR* pGetFdInfo, + int* pFd); +#endif + +#define VK_KHR_push_descriptor 1 +#define VK_KHR_PUSH_DESCRIPTOR_SPEC_VERSION 2 +#define VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME "VK_KHR_push_descriptor" + +typedef struct VkPhysicalDevicePushDescriptorPropertiesKHR { + VkStructureType sType; + void* pNext; + uint32_t maxPushDescriptors; +} VkPhysicalDevicePushDescriptorPropertiesKHR; + + +typedef void (VKAPI_PTR *PFN_vkCmdPushDescriptorSetKHR)(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount, const VkWriteDescriptorSet* pDescriptorWrites); +typedef void (VKAPI_PTR *PFN_vkCmdPushDescriptorSetWithTemplateKHR)(VkCommandBuffer commandBuffer, VkDescriptorUpdateTemplate descriptorUpdateTemplate, VkPipelineLayout layout, uint32_t set, const void* pData); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdPushDescriptorSetKHR( + VkCommandBuffer commandBuffer, + VkPipelineBindPoint pipelineBindPoint, + VkPipelineLayout layout, + uint32_t set, + uint32_t descriptorWriteCount, + const VkWriteDescriptorSet* pDescriptorWrites); + 
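+/*
+ * A minimal usage sketch for vkCmdPushDescriptorSetKHR (illustrative only):
+ * pushing a single uniform-buffer descriptor directly into a command buffer,
+ * with no descriptor set allocated. Assumes a hypothetical `pipelineLayout`
+ * whose set 0 layout was created with
+ * VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR and a valid
+ * `buffer`; dstSet is ignored for push descriptors, and error handling is
+ * omitted.
+ *
+ *     VkDescriptorBufferInfo bufferInfo = { buffer, 0, VK_WHOLE_SIZE };
+ *     VkWriteDescriptorSet write = { VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET };
+ *     write.dstBinding      = 0;
+ *     write.descriptorCount = 1;
+ *     write.descriptorType  = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+ *     write.pBufferInfo     = &bufferInfo;
+ *     vkCmdPushDescriptorSetKHR(commandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS,
+ *                               pipelineLayout, 0, 1, &write);
+ */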
+VKAPI_ATTR void VKAPI_CALL vkCmdPushDescriptorSetWithTemplateKHR( + VkCommandBuffer commandBuffer, + VkDescriptorUpdateTemplate descriptorUpdateTemplate, + VkPipelineLayout layout, + uint32_t set, + const void* pData); +#endif + +#define VK_KHR_16bit_storage 1 +#define VK_KHR_16BIT_STORAGE_SPEC_VERSION 1 +#define VK_KHR_16BIT_STORAGE_EXTENSION_NAME "VK_KHR_16bit_storage" + +typedef VkPhysicalDevice16BitStorageFeatures VkPhysicalDevice16BitStorageFeaturesKHR; + + + +#define VK_KHR_incremental_present 1 +#define VK_KHR_INCREMENTAL_PRESENT_SPEC_VERSION 1 +#define VK_KHR_INCREMENTAL_PRESENT_EXTENSION_NAME "VK_KHR_incremental_present" + +typedef struct VkRectLayerKHR { + VkOffset2D offset; + VkExtent2D extent; + uint32_t layer; +} VkRectLayerKHR; + +typedef struct VkPresentRegionKHR { + uint32_t rectangleCount; + const VkRectLayerKHR* pRectangles; +} VkPresentRegionKHR; + +typedef struct VkPresentRegionsKHR { + VkStructureType sType; + const void* pNext; + uint32_t swapchainCount; + const VkPresentRegionKHR* pRegions; +} VkPresentRegionsKHR; + + + +#define VK_KHR_descriptor_update_template 1 +typedef VkDescriptorUpdateTemplate VkDescriptorUpdateTemplateKHR; + + +#define VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_SPEC_VERSION 1 +#define VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_EXTENSION_NAME "VK_KHR_descriptor_update_template" + +typedef VkDescriptorUpdateTemplateType VkDescriptorUpdateTemplateTypeKHR; + + +typedef VkDescriptorUpdateTemplateCreateFlags VkDescriptorUpdateTemplateCreateFlagsKHR; + + +typedef VkDescriptorUpdateTemplateEntry VkDescriptorUpdateTemplateEntryKHR; + +typedef VkDescriptorUpdateTemplateCreateInfo VkDescriptorUpdateTemplateCreateInfoKHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkCreateDescriptorUpdateTemplateKHR)(VkDevice device, const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate); +typedef void (VKAPI_PTR *PFN_vkDestroyDescriptorUpdateTemplateKHR)(VkDevice device, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkUpdateDescriptorSetWithTemplateKHR)(VkDevice device, VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorUpdateTemplateKHR( + VkDevice device, + const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate); + +VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorUpdateTemplateKHR( + VkDevice device, + VkDescriptorUpdateTemplate descriptorUpdateTemplate, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR void VKAPI_CALL vkUpdateDescriptorSetWithTemplateKHR( + VkDevice device, + VkDescriptorSet descriptorSet, + VkDescriptorUpdateTemplate descriptorUpdateTemplate, + const void* pData); +#endif + +#define VK_KHR_shared_presentable_image 1 +#define VK_KHR_SHARED_PRESENTABLE_IMAGE_SPEC_VERSION 1 +#define VK_KHR_SHARED_PRESENTABLE_IMAGE_EXTENSION_NAME "VK_KHR_shared_presentable_image" + +typedef struct VkSharedPresentSurfaceCapabilitiesKHR { + VkStructureType sType; + void* pNext; + VkImageUsageFlags sharedPresentSupportedUsageFlags; +} VkSharedPresentSurfaceCapabilitiesKHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkGetSwapchainStatusKHR)(VkDevice device, VkSwapchainKHR swapchain); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainStatusKHR( + VkDevice device, + 
VkSwapchainKHR swapchain); +#endif + +#define VK_KHR_external_fence_capabilities 1 +#define VK_KHR_EXTERNAL_FENCE_CAPABILITIES_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_FENCE_CAPABILITIES_EXTENSION_NAME "VK_KHR_external_fence_capabilities" + +typedef VkExternalFenceHandleTypeFlags VkExternalFenceHandleTypeFlagsKHR; + +typedef VkExternalFenceHandleTypeFlagBits VkExternalFenceHandleTypeFlagBitsKHR; + +typedef VkExternalFenceFeatureFlags VkExternalFenceFeatureFlagsKHR; + +typedef VkExternalFenceFeatureFlagBits VkExternalFenceFeatureFlagBitsKHR; + + +typedef VkPhysicalDeviceExternalFenceInfo VkPhysicalDeviceExternalFenceInfoKHR; + +typedef VkExternalFenceProperties VkExternalFencePropertiesKHR; + + +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalFencePropertiesKHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo, VkExternalFenceProperties* pExternalFenceProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalFencePropertiesKHR( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo, + VkExternalFenceProperties* pExternalFenceProperties); +#endif + +#define VK_KHR_external_fence 1 +#define VK_KHR_EXTERNAL_FENCE_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_FENCE_EXTENSION_NAME "VK_KHR_external_fence" + +typedef VkFenceImportFlags VkFenceImportFlagsKHR; + +typedef VkFenceImportFlagBits VkFenceImportFlagBitsKHR; + + +typedef VkExportFenceCreateInfo VkExportFenceCreateInfoKHR; + + + +#define VK_KHR_external_fence_fd 1 +#define VK_KHR_EXTERNAL_FENCE_FD_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_FENCE_FD_EXTENSION_NAME "VK_KHR_external_fence_fd" + +typedef struct VkImportFenceFdInfoKHR { + VkStructureType sType; + const void* pNext; + VkFence fence; + VkFenceImportFlags flags; + VkExternalFenceHandleTypeFlagBits handleType; + int fd; +} VkImportFenceFdInfoKHR; + +typedef struct VkFenceGetFdInfoKHR { + VkStructureType sType; + const void* pNext; + VkFence fence; + VkExternalFenceHandleTypeFlagBits handleType; +} VkFenceGetFdInfoKHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkImportFenceFdKHR)(VkDevice device, const VkImportFenceFdInfoKHR* pImportFenceFdInfo); +typedef VkResult (VKAPI_PTR *PFN_vkGetFenceFdKHR)(VkDevice device, const VkFenceGetFdInfoKHR* pGetFdInfo, int* pFd); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkImportFenceFdKHR( + VkDevice device, + const VkImportFenceFdInfoKHR* pImportFenceFdInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceFdKHR( + VkDevice device, + const VkFenceGetFdInfoKHR* pGetFdInfo, + int* pFd); +#endif + +#define VK_KHR_maintenance2 1 +#define VK_KHR_MAINTENANCE2_SPEC_VERSION 1 +#define VK_KHR_MAINTENANCE2_EXTENSION_NAME "VK_KHR_maintenance2" + +typedef VkPointClippingBehavior VkPointClippingBehaviorKHR; + +typedef VkTessellationDomainOrigin VkTessellationDomainOriginKHR; + + +typedef VkPhysicalDevicePointClippingProperties VkPhysicalDevicePointClippingPropertiesKHR; + +typedef VkRenderPassInputAttachmentAspectCreateInfo VkRenderPassInputAttachmentAspectCreateInfoKHR; + +typedef VkInputAttachmentAspectReference VkInputAttachmentAspectReferenceKHR; + +typedef VkImageViewUsageCreateInfo VkImageViewUsageCreateInfoKHR; + +typedef VkPipelineTessellationDomainOriginStateCreateInfo VkPipelineTessellationDomainOriginStateCreateInfoKHR; + + + +#define VK_KHR_get_surface_capabilities2 1 +#define VK_KHR_GET_SURFACE_CAPABILITIES_2_SPEC_VERSION 1 +#define VK_KHR_GET_SURFACE_CAPABILITIES_2_EXTENSION_NAME "VK_KHR_get_surface_capabilities2" + 
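+/*
+ * The queries in this extension follow Vulkan's two-call enumeration idiom:
+ * call once with a NULL output array to obtain the element count, then call
+ * again to fill the array. A minimal sketch, assuming a valid
+ * `physicalDevice` and `surface` and omitting error handling:
+ *
+ *     VkPhysicalDeviceSurfaceInfo2KHR info = {
+ *         VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SURFACE_INFO_2_KHR, NULL, surface
+ *     };
+ *     uint32_t count = 0;
+ *     vkGetPhysicalDeviceSurfaceFormats2KHR(physicalDevice, &info, &count, NULL);
+ *     VkSurfaceFormat2KHR* formats = malloc(count * sizeof *formats);
+ *     for (uint32_t i = 0; i < count; ++i) {
+ *         formats[i].sType = VK_STRUCTURE_TYPE_SURFACE_FORMAT_2_KHR;
+ *         formats[i].pNext = NULL;
+ *     }
+ *     vkGetPhysicalDeviceSurfaceFormats2KHR(physicalDevice, &info, &count, formats);
+ */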
+typedef struct VkPhysicalDeviceSurfaceInfo2KHR { + VkStructureType sType; + const void* pNext; + VkSurfaceKHR surface; +} VkPhysicalDeviceSurfaceInfo2KHR; + +typedef struct VkSurfaceCapabilities2KHR { + VkStructureType sType; + void* pNext; + VkSurfaceCapabilitiesKHR surfaceCapabilities; +} VkSurfaceCapabilities2KHR; + +typedef struct VkSurfaceFormat2KHR { + VkStructureType sType; + void* pNext; + VkSurfaceFormatKHR surfaceFormat; +} VkSurfaceFormat2KHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceCapabilities2KHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, VkSurfaceCapabilities2KHR* pSurfaceCapabilities); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceFormats2KHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, uint32_t* pSurfaceFormatCount, VkSurfaceFormat2KHR* pSurfaceFormats); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceCapabilities2KHR( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, + VkSurfaceCapabilities2KHR* pSurfaceCapabilities); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceFormats2KHR( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, + uint32_t* pSurfaceFormatCount, + VkSurfaceFormat2KHR* pSurfaceFormats); +#endif + +#define VK_KHR_variable_pointers 1 +#define VK_KHR_VARIABLE_POINTERS_SPEC_VERSION 1 +#define VK_KHR_VARIABLE_POINTERS_EXTENSION_NAME "VK_KHR_variable_pointers" + +typedef VkPhysicalDeviceVariablePointerFeatures VkPhysicalDeviceVariablePointerFeaturesKHR; + + + +#define VK_KHR_dedicated_allocation 1 +#define VK_KHR_DEDICATED_ALLOCATION_SPEC_VERSION 3 +#define VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME "VK_KHR_dedicated_allocation" + +typedef VkMemoryDedicatedRequirements VkMemoryDedicatedRequirementsKHR; + +typedef VkMemoryDedicatedAllocateInfo VkMemoryDedicatedAllocateInfoKHR; + + + +#define VK_KHR_storage_buffer_storage_class 1 +#define VK_KHR_STORAGE_BUFFER_STORAGE_CLASS_SPEC_VERSION 1 +#define VK_KHR_STORAGE_BUFFER_STORAGE_CLASS_EXTENSION_NAME "VK_KHR_storage_buffer_storage_class" + + +#define VK_KHR_relaxed_block_layout 1 +#define VK_KHR_RELAXED_BLOCK_LAYOUT_SPEC_VERSION 1 +#define VK_KHR_RELAXED_BLOCK_LAYOUT_EXTENSION_NAME "VK_KHR_relaxed_block_layout" + + +#define VK_KHR_get_memory_requirements2 1 +#define VK_KHR_GET_MEMORY_REQUIREMENTS_2_SPEC_VERSION 1 +#define VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME "VK_KHR_get_memory_requirements2" + +typedef VkBufferMemoryRequirementsInfo2 VkBufferMemoryRequirementsInfo2KHR; + +typedef VkImageMemoryRequirementsInfo2 VkImageMemoryRequirementsInfo2KHR; + +typedef VkImageSparseMemoryRequirementsInfo2 VkImageSparseMemoryRequirementsInfo2KHR; + +typedef VkMemoryRequirements2 VkMemoryRequirements2KHR; + +typedef VkSparseImageMemoryRequirements2 VkSparseImageMemoryRequirements2KHR; + + +typedef void (VKAPI_PTR *PFN_vkGetImageMemoryRequirements2KHR)(VkDevice device, const VkImageMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements); +typedef void (VKAPI_PTR *PFN_vkGetBufferMemoryRequirements2KHR)(VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements); +typedef void (VKAPI_PTR *PFN_vkGetImageSparseMemoryRequirements2KHR)(VkDevice device, const VkImageSparseMemoryRequirementsInfo2* pInfo, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements2* pSparseMemoryRequirements); + +#ifndef 
VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetImageMemoryRequirements2KHR( + VkDevice device, + const VkImageMemoryRequirementsInfo2* pInfo, + VkMemoryRequirements2* pMemoryRequirements); + +VKAPI_ATTR void VKAPI_CALL vkGetBufferMemoryRequirements2KHR( + VkDevice device, + const VkBufferMemoryRequirementsInfo2* pInfo, + VkMemoryRequirements2* pMemoryRequirements); + +VKAPI_ATTR void VKAPI_CALL vkGetImageSparseMemoryRequirements2KHR( + VkDevice device, + const VkImageSparseMemoryRequirementsInfo2* pInfo, + uint32_t* pSparseMemoryRequirementCount, + VkSparseImageMemoryRequirements2* pSparseMemoryRequirements); +#endif + +#define VK_KHR_image_format_list 1 +#define VK_KHR_IMAGE_FORMAT_LIST_SPEC_VERSION 1 +#define VK_KHR_IMAGE_FORMAT_LIST_EXTENSION_NAME "VK_KHR_image_format_list" + +typedef struct VkImageFormatListCreateInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t viewFormatCount; + const VkFormat* pViewFormats; +} VkImageFormatListCreateInfoKHR; + + + +#define VK_KHR_sampler_ycbcr_conversion 1 +typedef VkSamplerYcbcrConversion VkSamplerYcbcrConversionKHR; + + +#define VK_KHR_SAMPLER_YCBCR_CONVERSION_SPEC_VERSION 1 +#define VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME "VK_KHR_sampler_ycbcr_conversion" + +typedef VkSamplerYcbcrModelConversion VkSamplerYcbcrModelConversionKHR; + +typedef VkSamplerYcbcrRange VkSamplerYcbcrRangeKHR; + +typedef VkChromaLocation VkChromaLocationKHR; + + +typedef VkSamplerYcbcrConversionCreateInfo VkSamplerYcbcrConversionCreateInfoKHR; + +typedef VkSamplerYcbcrConversionInfo VkSamplerYcbcrConversionInfoKHR; + +typedef VkBindImagePlaneMemoryInfo VkBindImagePlaneMemoryInfoKHR; + +typedef VkImagePlaneMemoryRequirementsInfo VkImagePlaneMemoryRequirementsInfoKHR; + +typedef VkPhysicalDeviceSamplerYcbcrConversionFeatures VkPhysicalDeviceSamplerYcbcrConversionFeaturesKHR; + +typedef VkSamplerYcbcrConversionImageFormatProperties VkSamplerYcbcrConversionImageFormatPropertiesKHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkCreateSamplerYcbcrConversionKHR)(VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSamplerYcbcrConversion* pYcbcrConversion); +typedef void (VKAPI_PTR *PFN_vkDestroySamplerYcbcrConversionKHR)(VkDevice device, VkSamplerYcbcrConversion ycbcrConversion, const VkAllocationCallbacks* pAllocator); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateSamplerYcbcrConversionKHR( + VkDevice device, + const VkSamplerYcbcrConversionCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSamplerYcbcrConversion* pYcbcrConversion); + +VKAPI_ATTR void VKAPI_CALL vkDestroySamplerYcbcrConversionKHR( + VkDevice device, + VkSamplerYcbcrConversion ycbcrConversion, + const VkAllocationCallbacks* pAllocator); +#endif + +#define VK_KHR_bind_memory2 1 +#define VK_KHR_BIND_MEMORY_2_SPEC_VERSION 1 +#define VK_KHR_BIND_MEMORY_2_EXTENSION_NAME "VK_KHR_bind_memory2" + +typedef VkBindBufferMemoryInfo VkBindBufferMemoryInfoKHR; + +typedef VkBindImageMemoryInfo VkBindImageMemoryInfoKHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkBindBufferMemory2KHR)(VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo* pBindInfos); +typedef VkResult (VKAPI_PTR *PFN_vkBindImageMemory2KHR)(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfo* pBindInfos); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkBindBufferMemory2KHR( + VkDevice device, + uint32_t bindInfoCount, + const VkBindBufferMemoryInfo* pBindInfos); + +VKAPI_ATTR VkResult VKAPI_CALL 
vkBindImageMemory2KHR( + VkDevice device, + uint32_t bindInfoCount, + const VkBindImageMemoryInfo* pBindInfos); +#endif + +#define VK_KHR_maintenance3 1 +#define VK_KHR_MAINTENANCE3_SPEC_VERSION 1 +#define VK_KHR_MAINTENANCE3_EXTENSION_NAME "VK_KHR_maintenance3" + +typedef VkPhysicalDeviceMaintenance3Properties VkPhysicalDeviceMaintenance3PropertiesKHR; + +typedef VkDescriptorSetLayoutSupport VkDescriptorSetLayoutSupportKHR; + + +typedef void (VKAPI_PTR *PFN_vkGetDescriptorSetLayoutSupportKHR)(VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, VkDescriptorSetLayoutSupport* pSupport); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetDescriptorSetLayoutSupportKHR( + VkDevice device, + const VkDescriptorSetLayoutCreateInfo* pCreateInfo, + VkDescriptorSetLayoutSupport* pSupport); +#endif + +#define VK_EXT_debug_report 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDebugReportCallbackEXT) + +#define VK_EXT_DEBUG_REPORT_SPEC_VERSION 9 +#define VK_EXT_DEBUG_REPORT_EXTENSION_NAME "VK_EXT_debug_report" +#define VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT +#define VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT_EXT + + +typedef enum VkDebugReportObjectTypeEXT { + VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT = 0, + VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT = 1, + VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT = 2, + VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT = 3, + VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT = 4, + VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT = 5, + VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT = 6, + VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT = 7, + VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT = 8, + VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT = 9, + VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT = 10, + VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT = 11, + VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT = 12, + VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT = 13, + VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT = 14, + VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT = 15, + VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT = 16, + VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT = 17, + VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT = 18, + VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT = 19, + VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT = 20, + VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT = 21, + VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT = 22, + VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT = 23, + VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT = 24, + VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT = 25, + VK_DEBUG_REPORT_OBJECT_TYPE_SURFACE_KHR_EXT = 26, + VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT = 27, + VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT_EXT = 28, + VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_KHR_EXT = 29, + VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_MODE_KHR_EXT = 30, + VK_DEBUG_REPORT_OBJECT_TYPE_OBJECT_TABLE_NVX_EXT = 31, + VK_DEBUG_REPORT_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NVX_EXT = 32, + VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT_EXT = 33, + VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT = 1000156000, + VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_EXT = 1000085000, + VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_KHR_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_EXT, + VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_KHR_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT, + VK_DEBUG_REPORT_OBJECT_TYPE_BEGIN_RANGE_EXT = 
VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, + VK_DEBUG_REPORT_OBJECT_TYPE_END_RANGE_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT_EXT, + VK_DEBUG_REPORT_OBJECT_TYPE_RANGE_SIZE_EXT = (VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT_EXT - VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT + 1), + VK_DEBUG_REPORT_OBJECT_TYPE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDebugReportObjectTypeEXT; + + +typedef enum VkDebugReportFlagBitsEXT { + VK_DEBUG_REPORT_INFORMATION_BIT_EXT = 0x00000001, + VK_DEBUG_REPORT_WARNING_BIT_EXT = 0x00000002, + VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT = 0x00000004, + VK_DEBUG_REPORT_ERROR_BIT_EXT = 0x00000008, + VK_DEBUG_REPORT_DEBUG_BIT_EXT = 0x00000010, + VK_DEBUG_REPORT_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDebugReportFlagBitsEXT; +typedef VkFlags VkDebugReportFlagsEXT; + +typedef VkBool32 (VKAPI_PTR *PFN_vkDebugReportCallbackEXT)( + VkDebugReportFlagsEXT flags, + VkDebugReportObjectTypeEXT objectType, + uint64_t object, + size_t location, + int32_t messageCode, + const char* pLayerPrefix, + const char* pMessage, + void* pUserData); + +typedef struct VkDebugReportCallbackCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkDebugReportFlagsEXT flags; + PFN_vkDebugReportCallbackEXT pfnCallback; + void* pUserData; +} VkDebugReportCallbackCreateInfoEXT; + + +typedef VkResult (VKAPI_PTR *PFN_vkCreateDebugReportCallbackEXT)(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDebugReportCallbackEXT* pCallback); +typedef void (VKAPI_PTR *PFN_vkDestroyDebugReportCallbackEXT)(VkInstance instance, VkDebugReportCallbackEXT callback, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkDebugReportMessageEXT)(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objectType, uint64_t object, size_t location, int32_t messageCode, const char* pLayerPrefix, const char* pMessage); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDebugReportCallbackEXT( + VkInstance instance, + const VkDebugReportCallbackCreateInfoEXT* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDebugReportCallbackEXT* pCallback); + +VKAPI_ATTR void VKAPI_CALL vkDestroyDebugReportCallbackEXT( + VkInstance instance, + VkDebugReportCallbackEXT callback, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR void VKAPI_CALL vkDebugReportMessageEXT( + VkInstance instance, + VkDebugReportFlagsEXT flags, + VkDebugReportObjectTypeEXT objectType, + uint64_t object, + size_t location, + int32_t messageCode, + const char* pLayerPrefix, + const char* pMessage); +#endif + +#define VK_NV_glsl_shader 1 +#define VK_NV_GLSL_SHADER_SPEC_VERSION 1 +#define VK_NV_GLSL_SHADER_EXTENSION_NAME "VK_NV_glsl_shader" + + +#define VK_EXT_depth_range_unrestricted 1 +#define VK_EXT_DEPTH_RANGE_UNRESTRICTED_SPEC_VERSION 1 +#define VK_EXT_DEPTH_RANGE_UNRESTRICTED_EXTENSION_NAME "VK_EXT_depth_range_unrestricted" + + +#define VK_IMG_filter_cubic 1 +#define VK_IMG_FILTER_CUBIC_SPEC_VERSION 1 +#define VK_IMG_FILTER_CUBIC_EXTENSION_NAME "VK_IMG_filter_cubic" + + +#define VK_AMD_rasterization_order 1 +#define VK_AMD_RASTERIZATION_ORDER_SPEC_VERSION 1 +#define VK_AMD_RASTERIZATION_ORDER_EXTENSION_NAME "VK_AMD_rasterization_order" + + +typedef enum VkRasterizationOrderAMD { + VK_RASTERIZATION_ORDER_STRICT_AMD = 0, + VK_RASTERIZATION_ORDER_RELAXED_AMD = 1, + VK_RASTERIZATION_ORDER_BEGIN_RANGE_AMD = VK_RASTERIZATION_ORDER_STRICT_AMD, + VK_RASTERIZATION_ORDER_END_RANGE_AMD = VK_RASTERIZATION_ORDER_RELAXED_AMD, 
+ VK_RASTERIZATION_ORDER_RANGE_SIZE_AMD = (VK_RASTERIZATION_ORDER_RELAXED_AMD - VK_RASTERIZATION_ORDER_STRICT_AMD + 1), + VK_RASTERIZATION_ORDER_MAX_ENUM_AMD = 0x7FFFFFFF +} VkRasterizationOrderAMD; + +typedef struct VkPipelineRasterizationStateRasterizationOrderAMD { + VkStructureType sType; + const void* pNext; + VkRasterizationOrderAMD rasterizationOrder; +} VkPipelineRasterizationStateRasterizationOrderAMD; + + + +#define VK_AMD_shader_trinary_minmax 1 +#define VK_AMD_SHADER_TRINARY_MINMAX_SPEC_VERSION 1 +#define VK_AMD_SHADER_TRINARY_MINMAX_EXTENSION_NAME "VK_AMD_shader_trinary_minmax" + + +#define VK_AMD_shader_explicit_vertex_parameter 1 +#define VK_AMD_SHADER_EXPLICIT_VERTEX_PARAMETER_SPEC_VERSION 1 +#define VK_AMD_SHADER_EXPLICIT_VERTEX_PARAMETER_EXTENSION_NAME "VK_AMD_shader_explicit_vertex_parameter" + + +#define VK_EXT_debug_marker 1 +#define VK_EXT_DEBUG_MARKER_SPEC_VERSION 4 +#define VK_EXT_DEBUG_MARKER_EXTENSION_NAME "VK_EXT_debug_marker" + +typedef struct VkDebugMarkerObjectNameInfoEXT { + VkStructureType sType; + const void* pNext; + VkDebugReportObjectTypeEXT objectType; + uint64_t object; + const char* pObjectName; +} VkDebugMarkerObjectNameInfoEXT; + +typedef struct VkDebugMarkerObjectTagInfoEXT { + VkStructureType sType; + const void* pNext; + VkDebugReportObjectTypeEXT objectType; + uint64_t object; + uint64_t tagName; + size_t tagSize; + const void* pTag; +} VkDebugMarkerObjectTagInfoEXT; + +typedef struct VkDebugMarkerMarkerInfoEXT { + VkStructureType sType; + const void* pNext; + const char* pMarkerName; + float color[4]; +} VkDebugMarkerMarkerInfoEXT; + + +typedef VkResult (VKAPI_PTR *PFN_vkDebugMarkerSetObjectTagEXT)(VkDevice device, const VkDebugMarkerObjectTagInfoEXT* pTagInfo); +typedef VkResult (VKAPI_PTR *PFN_vkDebugMarkerSetObjectNameEXT)(VkDevice device, const VkDebugMarkerObjectNameInfoEXT* pNameInfo); +typedef void (VKAPI_PTR *PFN_vkCmdDebugMarkerBeginEXT)(VkCommandBuffer commandBuffer, const VkDebugMarkerMarkerInfoEXT* pMarkerInfo); +typedef void (VKAPI_PTR *PFN_vkCmdDebugMarkerEndEXT)(VkCommandBuffer commandBuffer); +typedef void (VKAPI_PTR *PFN_vkCmdDebugMarkerInsertEXT)(VkCommandBuffer commandBuffer, const VkDebugMarkerMarkerInfoEXT* pMarkerInfo); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkDebugMarkerSetObjectTagEXT( + VkDevice device, + const VkDebugMarkerObjectTagInfoEXT* pTagInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkDebugMarkerSetObjectNameEXT( + VkDevice device, + const VkDebugMarkerObjectNameInfoEXT* pNameInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdDebugMarkerBeginEXT( + VkCommandBuffer commandBuffer, + const VkDebugMarkerMarkerInfoEXT* pMarkerInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdDebugMarkerEndEXT( + VkCommandBuffer commandBuffer); + +VKAPI_ATTR void VKAPI_CALL vkCmdDebugMarkerInsertEXT( + VkCommandBuffer commandBuffer, + const VkDebugMarkerMarkerInfoEXT* pMarkerInfo); +#endif + +#define VK_AMD_gcn_shader 1 +#define VK_AMD_GCN_SHADER_SPEC_VERSION 1 +#define VK_AMD_GCN_SHADER_EXTENSION_NAME "VK_AMD_gcn_shader" + + +#define VK_NV_dedicated_allocation 1 +#define VK_NV_DEDICATED_ALLOCATION_SPEC_VERSION 1 +#define VK_NV_DEDICATED_ALLOCATION_EXTENSION_NAME "VK_NV_dedicated_allocation" + +typedef struct VkDedicatedAllocationImageCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkBool32 dedicatedAllocation; +} VkDedicatedAllocationImageCreateInfoNV; + +typedef struct VkDedicatedAllocationBufferCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkBool32 dedicatedAllocation; +} 
VkDedicatedAllocationBufferCreateInfoNV; + +typedef struct VkDedicatedAllocationMemoryAllocateInfoNV { + VkStructureType sType; + const void* pNext; + VkImage image; + VkBuffer buffer; +} VkDedicatedAllocationMemoryAllocateInfoNV; + + + +#define VK_AMD_draw_indirect_count 1 +#define VK_AMD_DRAW_INDIRECT_COUNT_SPEC_VERSION 1 +#define VK_AMD_DRAW_INDIRECT_COUNT_EXTENSION_NAME "VK_AMD_draw_indirect_count" + +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndirectCountAMD)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride); +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndexedIndirectCountAMD)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirectCountAMD( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + VkBuffer countBuffer, + VkDeviceSize countBufferOffset, + uint32_t maxDrawCount, + uint32_t stride); + +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexedIndirectCountAMD( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + VkBuffer countBuffer, + VkDeviceSize countBufferOffset, + uint32_t maxDrawCount, + uint32_t stride); +#endif + +#define VK_AMD_negative_viewport_height 1 +#define VK_AMD_NEGATIVE_VIEWPORT_HEIGHT_SPEC_VERSION 1 +#define VK_AMD_NEGATIVE_VIEWPORT_HEIGHT_EXTENSION_NAME "VK_AMD_negative_viewport_height" + + +#define VK_AMD_gpu_shader_half_float 1 +#define VK_AMD_GPU_SHADER_HALF_FLOAT_SPEC_VERSION 1 +#define VK_AMD_GPU_SHADER_HALF_FLOAT_EXTENSION_NAME "VK_AMD_gpu_shader_half_float" + + +#define VK_AMD_shader_ballot 1 +#define VK_AMD_SHADER_BALLOT_SPEC_VERSION 1 +#define VK_AMD_SHADER_BALLOT_EXTENSION_NAME "VK_AMD_shader_ballot" + + +#define VK_AMD_texture_gather_bias_lod 1 +#define VK_AMD_TEXTURE_GATHER_BIAS_LOD_SPEC_VERSION 1 +#define VK_AMD_TEXTURE_GATHER_BIAS_LOD_EXTENSION_NAME "VK_AMD_texture_gather_bias_lod" + +typedef struct VkTextureLODGatherFormatPropertiesAMD { + VkStructureType sType; + void* pNext; + VkBool32 supportsTextureGatherLODBiasAMD; +} VkTextureLODGatherFormatPropertiesAMD; + + + +#define VK_AMD_shader_info 1 +#define VK_AMD_SHADER_INFO_SPEC_VERSION 1 +#define VK_AMD_SHADER_INFO_EXTENSION_NAME "VK_AMD_shader_info" + + +typedef enum VkShaderInfoTypeAMD { + VK_SHADER_INFO_TYPE_STATISTICS_AMD = 0, + VK_SHADER_INFO_TYPE_BINARY_AMD = 1, + VK_SHADER_INFO_TYPE_DISASSEMBLY_AMD = 2, + VK_SHADER_INFO_TYPE_BEGIN_RANGE_AMD = VK_SHADER_INFO_TYPE_STATISTICS_AMD, + VK_SHADER_INFO_TYPE_END_RANGE_AMD = VK_SHADER_INFO_TYPE_DISASSEMBLY_AMD, + VK_SHADER_INFO_TYPE_RANGE_SIZE_AMD = (VK_SHADER_INFO_TYPE_DISASSEMBLY_AMD - VK_SHADER_INFO_TYPE_STATISTICS_AMD + 1), + VK_SHADER_INFO_TYPE_MAX_ENUM_AMD = 0x7FFFFFFF +} VkShaderInfoTypeAMD; + +typedef struct VkShaderResourceUsageAMD { + uint32_t numUsedVgprs; + uint32_t numUsedSgprs; + uint32_t ldsSizePerLocalWorkGroup; + size_t ldsUsageSizeInBytes; + size_t scratchMemUsageInBytes; +} VkShaderResourceUsageAMD; + +typedef struct VkShaderStatisticsInfoAMD { + VkShaderStageFlags shaderStageMask; + VkShaderResourceUsageAMD resourceUsage; + uint32_t numPhysicalVgprs; + uint32_t numPhysicalSgprs; + uint32_t numAvailableVgprs; + uint32_t numAvailableSgprs; + uint32_t computeWorkGroupSize[3]; +} VkShaderStatisticsInfoAMD; + + +typedef VkResult (VKAPI_PTR *PFN_vkGetShaderInfoAMD)(VkDevice device, VkPipeline pipeline, 
VkShaderStageFlagBits shaderStage, VkShaderInfoTypeAMD infoType, size_t* pInfoSize, void* pInfo); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetShaderInfoAMD( + VkDevice device, + VkPipeline pipeline, + VkShaderStageFlagBits shaderStage, + VkShaderInfoTypeAMD infoType, + size_t* pInfoSize, + void* pInfo); +#endif + +#define VK_AMD_shader_image_load_store_lod 1 +#define VK_AMD_SHADER_IMAGE_LOAD_STORE_LOD_SPEC_VERSION 1 +#define VK_AMD_SHADER_IMAGE_LOAD_STORE_LOD_EXTENSION_NAME "VK_AMD_shader_image_load_store_lod" + + +#define VK_IMG_format_pvrtc 1 +#define VK_IMG_FORMAT_PVRTC_SPEC_VERSION 1 +#define VK_IMG_FORMAT_PVRTC_EXTENSION_NAME "VK_IMG_format_pvrtc" + + +#define VK_NV_external_memory_capabilities 1 +#define VK_NV_EXTERNAL_MEMORY_CAPABILITIES_SPEC_VERSION 1 +#define VK_NV_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME "VK_NV_external_memory_capabilities" + + +typedef enum VkExternalMemoryHandleTypeFlagBitsNV { + VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT_NV = 0x00000001, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_NV = 0x00000002, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_IMAGE_BIT_NV = 0x00000004, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_IMAGE_KMT_BIT_NV = 0x00000008, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF +} VkExternalMemoryHandleTypeFlagBitsNV; +typedef VkFlags VkExternalMemoryHandleTypeFlagsNV; + +typedef enum VkExternalMemoryFeatureFlagBitsNV { + VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT_NV = 0x00000001, + VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_NV = 0x00000002, + VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_NV = 0x00000004, + VK_EXTERNAL_MEMORY_FEATURE_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF +} VkExternalMemoryFeatureFlagBitsNV; +typedef VkFlags VkExternalMemoryFeatureFlagsNV; + +typedef struct VkExternalImageFormatPropertiesNV { + VkImageFormatProperties imageFormatProperties; + VkExternalMemoryFeatureFlagsNV externalMemoryFeatures; + VkExternalMemoryHandleTypeFlagsNV exportFromImportedHandleTypes; + VkExternalMemoryHandleTypeFlagsNV compatibleHandleTypes; +} VkExternalImageFormatPropertiesNV; + + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalImageFormatPropertiesNV)(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkImageTiling tiling, VkImageUsageFlags usage, VkImageCreateFlags flags, VkExternalMemoryHandleTypeFlagsNV externalHandleType, VkExternalImageFormatPropertiesNV* pExternalImageFormatProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceExternalImageFormatPropertiesNV( + VkPhysicalDevice physicalDevice, + VkFormat format, + VkImageType type, + VkImageTiling tiling, + VkImageUsageFlags usage, + VkImageCreateFlags flags, + VkExternalMemoryHandleTypeFlagsNV externalHandleType, + VkExternalImageFormatPropertiesNV* pExternalImageFormatProperties); +#endif + +#define VK_NV_external_memory 1 +#define VK_NV_EXTERNAL_MEMORY_SPEC_VERSION 1 +#define VK_NV_EXTERNAL_MEMORY_EXTENSION_NAME "VK_NV_external_memory" + +typedef struct VkExternalMemoryImageCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkExternalMemoryHandleTypeFlagsNV handleTypes; +} VkExternalMemoryImageCreateInfoNV; + +typedef struct VkExportMemoryAllocateInfoNV { + VkStructureType sType; + const void* pNext; + VkExternalMemoryHandleTypeFlagsNV handleTypes; +} VkExportMemoryAllocateInfoNV; + + + +#define VK_EXT_validation_flags 1 +#define VK_EXT_VALIDATION_FLAGS_SPEC_VERSION 1 +#define VK_EXT_VALIDATION_FLAGS_EXTENSION_NAME "VK_EXT_validation_flags" + + +typedef enum 
VkValidationCheckEXT { + VK_VALIDATION_CHECK_ALL_EXT = 0, + VK_VALIDATION_CHECK_SHADERS_EXT = 1, + VK_VALIDATION_CHECK_BEGIN_RANGE_EXT = VK_VALIDATION_CHECK_ALL_EXT, + VK_VALIDATION_CHECK_END_RANGE_EXT = VK_VALIDATION_CHECK_SHADERS_EXT, + VK_VALIDATION_CHECK_RANGE_SIZE_EXT = (VK_VALIDATION_CHECK_SHADERS_EXT - VK_VALIDATION_CHECK_ALL_EXT + 1), + VK_VALIDATION_CHECK_MAX_ENUM_EXT = 0x7FFFFFFF +} VkValidationCheckEXT; + +typedef struct VkValidationFlagsEXT { + VkStructureType sType; + const void* pNext; + uint32_t disabledValidationCheckCount; + VkValidationCheckEXT* pDisabledValidationChecks; +} VkValidationFlagsEXT; + + + +#define VK_EXT_shader_subgroup_ballot 1 +#define VK_EXT_SHADER_SUBGROUP_BALLOT_SPEC_VERSION 1 +#define VK_EXT_SHADER_SUBGROUP_BALLOT_EXTENSION_NAME "VK_EXT_shader_subgroup_ballot" + + +#define VK_EXT_shader_subgroup_vote 1 +#define VK_EXT_SHADER_SUBGROUP_VOTE_SPEC_VERSION 1 +#define VK_EXT_SHADER_SUBGROUP_VOTE_EXTENSION_NAME "VK_EXT_shader_subgroup_vote" + + +#define VK_NVX_device_generated_commands 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkObjectTableNVX) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkIndirectCommandsLayoutNVX) + +#define VK_NVX_DEVICE_GENERATED_COMMANDS_SPEC_VERSION 3 +#define VK_NVX_DEVICE_GENERATED_COMMANDS_EXTENSION_NAME "VK_NVX_device_generated_commands" + + +typedef enum VkIndirectCommandsTokenTypeNVX { + VK_INDIRECT_COMMANDS_TOKEN_TYPE_PIPELINE_NVX = 0, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_DESCRIPTOR_SET_NVX = 1, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_INDEX_BUFFER_NVX = 2, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_VERTEX_BUFFER_NVX = 3, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_PUSH_CONSTANT_NVX = 4, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_INDEXED_NVX = 5, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_NVX = 6, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_DISPATCH_NVX = 7, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_BEGIN_RANGE_NVX = VK_INDIRECT_COMMANDS_TOKEN_TYPE_PIPELINE_NVX, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_END_RANGE_NVX = VK_INDIRECT_COMMANDS_TOKEN_TYPE_DISPATCH_NVX, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_RANGE_SIZE_NVX = (VK_INDIRECT_COMMANDS_TOKEN_TYPE_DISPATCH_NVX - VK_INDIRECT_COMMANDS_TOKEN_TYPE_PIPELINE_NVX + 1), + VK_INDIRECT_COMMANDS_TOKEN_TYPE_MAX_ENUM_NVX = 0x7FFFFFFF +} VkIndirectCommandsTokenTypeNVX; + +typedef enum VkObjectEntryTypeNVX { + VK_OBJECT_ENTRY_TYPE_DESCRIPTOR_SET_NVX = 0, + VK_OBJECT_ENTRY_TYPE_PIPELINE_NVX = 1, + VK_OBJECT_ENTRY_TYPE_INDEX_BUFFER_NVX = 2, + VK_OBJECT_ENTRY_TYPE_VERTEX_BUFFER_NVX = 3, + VK_OBJECT_ENTRY_TYPE_PUSH_CONSTANT_NVX = 4, + VK_OBJECT_ENTRY_TYPE_BEGIN_RANGE_NVX = VK_OBJECT_ENTRY_TYPE_DESCRIPTOR_SET_NVX, + VK_OBJECT_ENTRY_TYPE_END_RANGE_NVX = VK_OBJECT_ENTRY_TYPE_PUSH_CONSTANT_NVX, + VK_OBJECT_ENTRY_TYPE_RANGE_SIZE_NVX = (VK_OBJECT_ENTRY_TYPE_PUSH_CONSTANT_NVX - VK_OBJECT_ENTRY_TYPE_DESCRIPTOR_SET_NVX + 1), + VK_OBJECT_ENTRY_TYPE_MAX_ENUM_NVX = 0x7FFFFFFF +} VkObjectEntryTypeNVX; + + +typedef enum VkIndirectCommandsLayoutUsageFlagBitsNVX { + VK_INDIRECT_COMMANDS_LAYOUT_USAGE_UNORDERED_SEQUENCES_BIT_NVX = 0x00000001, + VK_INDIRECT_COMMANDS_LAYOUT_USAGE_SPARSE_SEQUENCES_BIT_NVX = 0x00000002, + VK_INDIRECT_COMMANDS_LAYOUT_USAGE_EMPTY_EXECUTIONS_BIT_NVX = 0x00000004, + VK_INDIRECT_COMMANDS_LAYOUT_USAGE_INDEXED_SEQUENCES_BIT_NVX = 0x00000008, + VK_INDIRECT_COMMANDS_LAYOUT_USAGE_FLAG_BITS_MAX_ENUM_NVX = 0x7FFFFFFF +} VkIndirectCommandsLayoutUsageFlagBitsNVX; +typedef VkFlags VkIndirectCommandsLayoutUsageFlagsNVX; + +typedef enum VkObjectEntryUsageFlagBitsNVX { + VK_OBJECT_ENTRY_USAGE_GRAPHICS_BIT_NVX = 0x00000001, + VK_OBJECT_ENTRY_USAGE_COMPUTE_BIT_NVX = 0x00000002, + 
VK_OBJECT_ENTRY_USAGE_FLAG_BITS_MAX_ENUM_NVX = 0x7FFFFFFF +} VkObjectEntryUsageFlagBitsNVX; +typedef VkFlags VkObjectEntryUsageFlagsNVX; + +typedef struct VkDeviceGeneratedCommandsFeaturesNVX { + VkStructureType sType; + const void* pNext; + VkBool32 computeBindingPointSupport; +} VkDeviceGeneratedCommandsFeaturesNVX; + +typedef struct VkDeviceGeneratedCommandsLimitsNVX { + VkStructureType sType; + const void* pNext; + uint32_t maxIndirectCommandsLayoutTokenCount; + uint32_t maxObjectEntryCounts; + uint32_t minSequenceCountBufferOffsetAlignment; + uint32_t minSequenceIndexBufferOffsetAlignment; + uint32_t minCommandsTokenBufferOffsetAlignment; +} VkDeviceGeneratedCommandsLimitsNVX; + +typedef struct VkIndirectCommandsTokenNVX { + VkIndirectCommandsTokenTypeNVX tokenType; + VkBuffer buffer; + VkDeviceSize offset; +} VkIndirectCommandsTokenNVX; + +typedef struct VkIndirectCommandsLayoutTokenNVX { + VkIndirectCommandsTokenTypeNVX tokenType; + uint32_t bindingUnit; + uint32_t dynamicCount; + uint32_t divisor; +} VkIndirectCommandsLayoutTokenNVX; + +typedef struct VkIndirectCommandsLayoutCreateInfoNVX { + VkStructureType sType; + const void* pNext; + VkPipelineBindPoint pipelineBindPoint; + VkIndirectCommandsLayoutUsageFlagsNVX flags; + uint32_t tokenCount; + const VkIndirectCommandsLayoutTokenNVX* pTokens; +} VkIndirectCommandsLayoutCreateInfoNVX; + +typedef struct VkCmdProcessCommandsInfoNVX { + VkStructureType sType; + const void* pNext; + VkObjectTableNVX objectTable; + VkIndirectCommandsLayoutNVX indirectCommandsLayout; + uint32_t indirectCommandsTokenCount; + const VkIndirectCommandsTokenNVX* pIndirectCommandsTokens; + uint32_t maxSequencesCount; + VkCommandBuffer targetCommandBuffer; + VkBuffer sequencesCountBuffer; + VkDeviceSize sequencesCountOffset; + VkBuffer sequencesIndexBuffer; + VkDeviceSize sequencesIndexOffset; +} VkCmdProcessCommandsInfoNVX; + +typedef struct VkCmdReserveSpaceForCommandsInfoNVX { + VkStructureType sType; + const void* pNext; + VkObjectTableNVX objectTable; + VkIndirectCommandsLayoutNVX indirectCommandsLayout; + uint32_t maxSequencesCount; +} VkCmdReserveSpaceForCommandsInfoNVX; + +typedef struct VkObjectTableCreateInfoNVX { + VkStructureType sType; + const void* pNext; + uint32_t objectCount; + const VkObjectEntryTypeNVX* pObjectEntryTypes; + const uint32_t* pObjectEntryCounts; + const VkObjectEntryUsageFlagsNVX* pObjectEntryUsageFlags; + uint32_t maxUniformBuffersPerDescriptor; + uint32_t maxStorageBuffersPerDescriptor; + uint32_t maxStorageImagesPerDescriptor; + uint32_t maxSampledImagesPerDescriptor; + uint32_t maxPipelineLayouts; +} VkObjectTableCreateInfoNVX; + +typedef struct VkObjectTableEntryNVX { + VkObjectEntryTypeNVX type; + VkObjectEntryUsageFlagsNVX flags; +} VkObjectTableEntryNVX; + +typedef struct VkObjectTablePipelineEntryNVX { + VkObjectEntryTypeNVX type; + VkObjectEntryUsageFlagsNVX flags; + VkPipeline pipeline; +} VkObjectTablePipelineEntryNVX; + +typedef struct VkObjectTableDescriptorSetEntryNVX { + VkObjectEntryTypeNVX type; + VkObjectEntryUsageFlagsNVX flags; + VkPipelineLayout pipelineLayout; + VkDescriptorSet descriptorSet; +} VkObjectTableDescriptorSetEntryNVX; + +typedef struct VkObjectTableVertexBufferEntryNVX { + VkObjectEntryTypeNVX type; + VkObjectEntryUsageFlagsNVX flags; + VkBuffer buffer; +} VkObjectTableVertexBufferEntryNVX; + +typedef struct VkObjectTableIndexBufferEntryNVX { + VkObjectEntryTypeNVX type; + VkObjectEntryUsageFlagsNVX flags; + VkBuffer buffer; + VkIndexType indexType; +} VkObjectTableIndexBufferEntryNVX; + 
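+/*
+ * Illustrative note (editor-added, not from the upstream header): each
+ * VkObjectTable*EntryNVX struct in this group begins with the same
+ * (type, flags) pair as the generic VkObjectTableEntryNVX above. That
+ * common prefix is what lets vkRegisterObjectsNVX() accept a mixed array
+ * through its 'const VkObjectTableEntryNVX* const* ppObjectTableEntries'
+ * parameter: the leading 'type' field of each pointed-to entry tells the
+ * implementation which concrete entry layout the pointer actually refers to.
+ */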
+typedef struct VkObjectTablePushConstantEntryNVX { + VkObjectEntryTypeNVX type; + VkObjectEntryUsageFlagsNVX flags; + VkPipelineLayout pipelineLayout; + VkShaderStageFlags stageFlags; +} VkObjectTablePushConstantEntryNVX; + + +typedef void (VKAPI_PTR *PFN_vkCmdProcessCommandsNVX)(VkCommandBuffer commandBuffer, const VkCmdProcessCommandsInfoNVX* pProcessCommandsInfo); +typedef void (VKAPI_PTR *PFN_vkCmdReserveSpaceForCommandsNVX)(VkCommandBuffer commandBuffer, const VkCmdReserveSpaceForCommandsInfoNVX* pReserveSpaceInfo); +typedef VkResult (VKAPI_PTR *PFN_vkCreateIndirectCommandsLayoutNVX)(VkDevice device, const VkIndirectCommandsLayoutCreateInfoNVX* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkIndirectCommandsLayoutNVX* pIndirectCommandsLayout); +typedef void (VKAPI_PTR *PFN_vkDestroyIndirectCommandsLayoutNVX)(VkDevice device, VkIndirectCommandsLayoutNVX indirectCommandsLayout, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateObjectTableNVX)(VkDevice device, const VkObjectTableCreateInfoNVX* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkObjectTableNVX* pObjectTable); +typedef void (VKAPI_PTR *PFN_vkDestroyObjectTableNVX)(VkDevice device, VkObjectTableNVX objectTable, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkRegisterObjectsNVX)(VkDevice device, VkObjectTableNVX objectTable, uint32_t objectCount, const VkObjectTableEntryNVX* const* ppObjectTableEntries, const uint32_t* pObjectIndices); +typedef VkResult (VKAPI_PTR *PFN_vkUnregisterObjectsNVX)(VkDevice device, VkObjectTableNVX objectTable, uint32_t objectCount, const VkObjectEntryTypeNVX* pObjectEntryTypes, const uint32_t* pObjectIndices); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX)(VkPhysicalDevice physicalDevice, VkDeviceGeneratedCommandsFeaturesNVX* pFeatures, VkDeviceGeneratedCommandsLimitsNVX* pLimits); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdProcessCommandsNVX( + VkCommandBuffer commandBuffer, + const VkCmdProcessCommandsInfoNVX* pProcessCommandsInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdReserveSpaceForCommandsNVX( + VkCommandBuffer commandBuffer, + const VkCmdReserveSpaceForCommandsInfoNVX* pReserveSpaceInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateIndirectCommandsLayoutNVX( + VkDevice device, + const VkIndirectCommandsLayoutCreateInfoNVX* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkIndirectCommandsLayoutNVX* pIndirectCommandsLayout); + +VKAPI_ATTR void VKAPI_CALL vkDestroyIndirectCommandsLayoutNVX( + VkDevice device, + VkIndirectCommandsLayoutNVX indirectCommandsLayout, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateObjectTableNVX( + VkDevice device, + const VkObjectTableCreateInfoNVX* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkObjectTableNVX* pObjectTable); + +VKAPI_ATTR void VKAPI_CALL vkDestroyObjectTableNVX( + VkDevice device, + VkObjectTableNVX objectTable, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkRegisterObjectsNVX( + VkDevice device, + VkObjectTableNVX objectTable, + uint32_t objectCount, + const VkObjectTableEntryNVX* const* ppObjectTableEntries, + const uint32_t* pObjectIndices); + +VKAPI_ATTR VkResult VKAPI_CALL vkUnregisterObjectsNVX( + VkDevice device, + VkObjectTableNVX objectTable, + uint32_t objectCount, + const VkObjectEntryTypeNVX* pObjectEntryTypes, + const uint32_t* pObjectIndices); + +VKAPI_ATTR void VKAPI_CALL 
vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX( + VkPhysicalDevice physicalDevice, + VkDeviceGeneratedCommandsFeaturesNVX* pFeatures, + VkDeviceGeneratedCommandsLimitsNVX* pLimits); +#endif + +#define VK_NV_clip_space_w_scaling 1 +#define VK_NV_CLIP_SPACE_W_SCALING_SPEC_VERSION 1 +#define VK_NV_CLIP_SPACE_W_SCALING_EXTENSION_NAME "VK_NV_clip_space_w_scaling" + +typedef struct VkViewportWScalingNV { + float xcoeff; + float ycoeff; +} VkViewportWScalingNV; + +typedef struct VkPipelineViewportWScalingStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkBool32 viewportWScalingEnable; + uint32_t viewportCount; + const VkViewportWScalingNV* pViewportWScalings; +} VkPipelineViewportWScalingStateCreateInfoNV; + + +typedef void (VKAPI_PTR *PFN_vkCmdSetViewportWScalingNV)(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewportWScalingNV* pViewportWScalings); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetViewportWScalingNV( + VkCommandBuffer commandBuffer, + uint32_t firstViewport, + uint32_t viewportCount, + const VkViewportWScalingNV* pViewportWScalings); +#endif + +#define VK_EXT_direct_mode_display 1 +#define VK_EXT_DIRECT_MODE_DISPLAY_SPEC_VERSION 1 +#define VK_EXT_DIRECT_MODE_DISPLAY_EXTENSION_NAME "VK_EXT_direct_mode_display" + +typedef VkResult (VKAPI_PTR *PFN_vkReleaseDisplayEXT)(VkPhysicalDevice physicalDevice, VkDisplayKHR display); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkReleaseDisplayEXT( + VkPhysicalDevice physicalDevice, + VkDisplayKHR display); +#endif + +#define VK_EXT_display_surface_counter 1 +#define VK_EXT_DISPLAY_SURFACE_COUNTER_SPEC_VERSION 1 +#define VK_EXT_DISPLAY_SURFACE_COUNTER_EXTENSION_NAME "VK_EXT_display_surface_counter" +#define VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES2_EXT VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_EXT + + +typedef enum VkSurfaceCounterFlagBitsEXT { + VK_SURFACE_COUNTER_VBLANK_EXT = 0x00000001, + VK_SURFACE_COUNTER_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF +} VkSurfaceCounterFlagBitsEXT; +typedef VkFlags VkSurfaceCounterFlagsEXT; + +typedef struct VkSurfaceCapabilities2EXT { + VkStructureType sType; + void* pNext; + uint32_t minImageCount; + uint32_t maxImageCount; + VkExtent2D currentExtent; + VkExtent2D minImageExtent; + VkExtent2D maxImageExtent; + uint32_t maxImageArrayLayers; + VkSurfaceTransformFlagsKHR supportedTransforms; + VkSurfaceTransformFlagBitsKHR currentTransform; + VkCompositeAlphaFlagsKHR supportedCompositeAlpha; + VkImageUsageFlags supportedUsageFlags; + VkSurfaceCounterFlagsEXT supportedSurfaceCounters; +} VkSurfaceCapabilities2EXT; + + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceCapabilities2EXT)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, VkSurfaceCapabilities2EXT* pSurfaceCapabilities); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceCapabilities2EXT( + VkPhysicalDevice physicalDevice, + VkSurfaceKHR surface, + VkSurfaceCapabilities2EXT* pSurfaceCapabilities); +#endif + +#define VK_EXT_display_control 1 +#define VK_EXT_DISPLAY_CONTROL_SPEC_VERSION 1 +#define VK_EXT_DISPLAY_CONTROL_EXTENSION_NAME "VK_EXT_display_control" + + +typedef enum VkDisplayPowerStateEXT { + VK_DISPLAY_POWER_STATE_OFF_EXT = 0, + VK_DISPLAY_POWER_STATE_SUSPEND_EXT = 1, + VK_DISPLAY_POWER_STATE_ON_EXT = 2, + VK_DISPLAY_POWER_STATE_BEGIN_RANGE_EXT = VK_DISPLAY_POWER_STATE_OFF_EXT, + VK_DISPLAY_POWER_STATE_END_RANGE_EXT = VK_DISPLAY_POWER_STATE_ON_EXT, + VK_DISPLAY_POWER_STATE_RANGE_SIZE_EXT = 
(VK_DISPLAY_POWER_STATE_ON_EXT - VK_DISPLAY_POWER_STATE_OFF_EXT + 1), + VK_DISPLAY_POWER_STATE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDisplayPowerStateEXT; + +typedef enum VkDeviceEventTypeEXT { + VK_DEVICE_EVENT_TYPE_DISPLAY_HOTPLUG_EXT = 0, + VK_DEVICE_EVENT_TYPE_BEGIN_RANGE_EXT = VK_DEVICE_EVENT_TYPE_DISPLAY_HOTPLUG_EXT, + VK_DEVICE_EVENT_TYPE_END_RANGE_EXT = VK_DEVICE_EVENT_TYPE_DISPLAY_HOTPLUG_EXT, + VK_DEVICE_EVENT_TYPE_RANGE_SIZE_EXT = (VK_DEVICE_EVENT_TYPE_DISPLAY_HOTPLUG_EXT - VK_DEVICE_EVENT_TYPE_DISPLAY_HOTPLUG_EXT + 1), + VK_DEVICE_EVENT_TYPE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDeviceEventTypeEXT; + +typedef enum VkDisplayEventTypeEXT { + VK_DISPLAY_EVENT_TYPE_FIRST_PIXEL_OUT_EXT = 0, + VK_DISPLAY_EVENT_TYPE_BEGIN_RANGE_EXT = VK_DISPLAY_EVENT_TYPE_FIRST_PIXEL_OUT_EXT, + VK_DISPLAY_EVENT_TYPE_END_RANGE_EXT = VK_DISPLAY_EVENT_TYPE_FIRST_PIXEL_OUT_EXT, + VK_DISPLAY_EVENT_TYPE_RANGE_SIZE_EXT = (VK_DISPLAY_EVENT_TYPE_FIRST_PIXEL_OUT_EXT - VK_DISPLAY_EVENT_TYPE_FIRST_PIXEL_OUT_EXT + 1), + VK_DISPLAY_EVENT_TYPE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDisplayEventTypeEXT; + +typedef struct VkDisplayPowerInfoEXT { + VkStructureType sType; + const void* pNext; + VkDisplayPowerStateEXT powerState; +} VkDisplayPowerInfoEXT; + +typedef struct VkDeviceEventInfoEXT { + VkStructureType sType; + const void* pNext; + VkDeviceEventTypeEXT deviceEvent; +} VkDeviceEventInfoEXT; + +typedef struct VkDisplayEventInfoEXT { + VkStructureType sType; + const void* pNext; + VkDisplayEventTypeEXT displayEvent; +} VkDisplayEventInfoEXT; + +typedef struct VkSwapchainCounterCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkSurfaceCounterFlagsEXT surfaceCounters; +} VkSwapchainCounterCreateInfoEXT; + + +typedef VkResult (VKAPI_PTR *PFN_vkDisplayPowerControlEXT)(VkDevice device, VkDisplayKHR display, const VkDisplayPowerInfoEXT* pDisplayPowerInfo); +typedef VkResult (VKAPI_PTR *PFN_vkRegisterDeviceEventEXT)(VkDevice device, const VkDeviceEventInfoEXT* pDeviceEventInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence); +typedef VkResult (VKAPI_PTR *PFN_vkRegisterDisplayEventEXT)(VkDevice device, VkDisplayKHR display, const VkDisplayEventInfoEXT* pDisplayEventInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence); +typedef VkResult (VKAPI_PTR *PFN_vkGetSwapchainCounterEXT)(VkDevice device, VkSwapchainKHR swapchain, VkSurfaceCounterFlagBitsEXT counter, uint64_t* pCounterValue); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkDisplayPowerControlEXT( + VkDevice device, + VkDisplayKHR display, + const VkDisplayPowerInfoEXT* pDisplayPowerInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkRegisterDeviceEventEXT( + VkDevice device, + const VkDeviceEventInfoEXT* pDeviceEventInfo, + const VkAllocationCallbacks* pAllocator, + VkFence* pFence); + +VKAPI_ATTR VkResult VKAPI_CALL vkRegisterDisplayEventEXT( + VkDevice device, + VkDisplayKHR display, + const VkDisplayEventInfoEXT* pDisplayEventInfo, + const VkAllocationCallbacks* pAllocator, + VkFence* pFence); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainCounterEXT( + VkDevice device, + VkSwapchainKHR swapchain, + VkSurfaceCounterFlagBitsEXT counter, + uint64_t* pCounterValue); +#endif + +#define VK_GOOGLE_display_timing 1 +#define VK_GOOGLE_DISPLAY_TIMING_SPEC_VERSION 1 +#define VK_GOOGLE_DISPLAY_TIMING_EXTENSION_NAME "VK_GOOGLE_display_timing" + +typedef struct VkRefreshCycleDurationGOOGLE { + uint64_t refreshDuration; +} VkRefreshCycleDurationGOOGLE; + +typedef struct VkPastPresentationTimingGOOGLE { + uint32_t presentID; + uint64_t desiredPresentTime; + 
uint64_t actualPresentTime; + uint64_t earliestPresentTime; + uint64_t presentMargin; +} VkPastPresentationTimingGOOGLE; + +typedef struct VkPresentTimeGOOGLE { + uint32_t presentID; + uint64_t desiredPresentTime; +} VkPresentTimeGOOGLE; + +typedef struct VkPresentTimesInfoGOOGLE { + VkStructureType sType; + const void* pNext; + uint32_t swapchainCount; + const VkPresentTimeGOOGLE* pTimes; +} VkPresentTimesInfoGOOGLE; + + +typedef VkResult (VKAPI_PTR *PFN_vkGetRefreshCycleDurationGOOGLE)(VkDevice device, VkSwapchainKHR swapchain, VkRefreshCycleDurationGOOGLE* pDisplayTimingProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetPastPresentationTimingGOOGLE)(VkDevice device, VkSwapchainKHR swapchain, uint32_t* pPresentationTimingCount, VkPastPresentationTimingGOOGLE* pPresentationTimings); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetRefreshCycleDurationGOOGLE( + VkDevice device, + VkSwapchainKHR swapchain, + VkRefreshCycleDurationGOOGLE* pDisplayTimingProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPastPresentationTimingGOOGLE( + VkDevice device, + VkSwapchainKHR swapchain, + uint32_t* pPresentationTimingCount, + VkPastPresentationTimingGOOGLE* pPresentationTimings); +#endif + +#define VK_NV_sample_mask_override_coverage 1 +#define VK_NV_SAMPLE_MASK_OVERRIDE_COVERAGE_SPEC_VERSION 1 +#define VK_NV_SAMPLE_MASK_OVERRIDE_COVERAGE_EXTENSION_NAME "VK_NV_sample_mask_override_coverage" + + +#define VK_NV_geometry_shader_passthrough 1 +#define VK_NV_GEOMETRY_SHADER_PASSTHROUGH_SPEC_VERSION 1 +#define VK_NV_GEOMETRY_SHADER_PASSTHROUGH_EXTENSION_NAME "VK_NV_geometry_shader_passthrough" + + +#define VK_NV_viewport_array2 1 +#define VK_NV_VIEWPORT_ARRAY2_SPEC_VERSION 1 +#define VK_NV_VIEWPORT_ARRAY2_EXTENSION_NAME "VK_NV_viewport_array2" + + +#define VK_NVX_multiview_per_view_attributes 1 +#define VK_NVX_MULTIVIEW_PER_VIEW_ATTRIBUTES_SPEC_VERSION 1 +#define VK_NVX_MULTIVIEW_PER_VIEW_ATTRIBUTES_EXTENSION_NAME "VK_NVX_multiview_per_view_attributes" + +typedef struct VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX { + VkStructureType sType; + void* pNext; + VkBool32 perViewPositionAllComponents; +} VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX; + + + +#define VK_NV_viewport_swizzle 1 +#define VK_NV_VIEWPORT_SWIZZLE_SPEC_VERSION 1 +#define VK_NV_VIEWPORT_SWIZZLE_EXTENSION_NAME "VK_NV_viewport_swizzle" + + +typedef enum VkViewportCoordinateSwizzleNV { + VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_X_NV = 0, + VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_X_NV = 1, + VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_Y_NV = 2, + VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_Y_NV = 3, + VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_Z_NV = 4, + VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_Z_NV = 5, + VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_W_NV = 6, + VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_W_NV = 7, + VK_VIEWPORT_COORDINATE_SWIZZLE_BEGIN_RANGE_NV = VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_X_NV, + VK_VIEWPORT_COORDINATE_SWIZZLE_END_RANGE_NV = VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_W_NV, + VK_VIEWPORT_COORDINATE_SWIZZLE_RANGE_SIZE_NV = (VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_W_NV - VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_X_NV + 1), + VK_VIEWPORT_COORDINATE_SWIZZLE_MAX_ENUM_NV = 0x7FFFFFFF +} VkViewportCoordinateSwizzleNV; + +typedef VkFlags VkPipelineViewportSwizzleStateCreateFlagsNV; + +typedef struct VkViewportSwizzleNV { + VkViewportCoordinateSwizzleNV x; + VkViewportCoordinateSwizzleNV y; + VkViewportCoordinateSwizzleNV z; + VkViewportCoordinateSwizzleNV w; +} VkViewportSwizzleNV; + +typedef struct 
VkPipelineViewportSwizzleStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkPipelineViewportSwizzleStateCreateFlagsNV flags; + uint32_t viewportCount; + const VkViewportSwizzleNV* pViewportSwizzles; +} VkPipelineViewportSwizzleStateCreateInfoNV; + + + +#define VK_EXT_discard_rectangles 1 +#define VK_EXT_DISCARD_RECTANGLES_SPEC_VERSION 1 +#define VK_EXT_DISCARD_RECTANGLES_EXTENSION_NAME "VK_EXT_discard_rectangles" + + +typedef enum VkDiscardRectangleModeEXT { + VK_DISCARD_RECTANGLE_MODE_INCLUSIVE_EXT = 0, + VK_DISCARD_RECTANGLE_MODE_EXCLUSIVE_EXT = 1, + VK_DISCARD_RECTANGLE_MODE_BEGIN_RANGE_EXT = VK_DISCARD_RECTANGLE_MODE_INCLUSIVE_EXT, + VK_DISCARD_RECTANGLE_MODE_END_RANGE_EXT = VK_DISCARD_RECTANGLE_MODE_EXCLUSIVE_EXT, + VK_DISCARD_RECTANGLE_MODE_RANGE_SIZE_EXT = (VK_DISCARD_RECTANGLE_MODE_EXCLUSIVE_EXT - VK_DISCARD_RECTANGLE_MODE_INCLUSIVE_EXT + 1), + VK_DISCARD_RECTANGLE_MODE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDiscardRectangleModeEXT; + +typedef VkFlags VkPipelineDiscardRectangleStateCreateFlagsEXT; + +typedef struct VkPhysicalDeviceDiscardRectanglePropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t maxDiscardRectangles; +} VkPhysicalDeviceDiscardRectanglePropertiesEXT; + +typedef struct VkPipelineDiscardRectangleStateCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkPipelineDiscardRectangleStateCreateFlagsEXT flags; + VkDiscardRectangleModeEXT discardRectangleMode; + uint32_t discardRectangleCount; + const VkRect2D* pDiscardRectangles; +} VkPipelineDiscardRectangleStateCreateInfoEXT; + + +typedef void (VKAPI_PTR *PFN_vkCmdSetDiscardRectangleEXT)(VkCommandBuffer commandBuffer, uint32_t firstDiscardRectangle, uint32_t discardRectangleCount, const VkRect2D* pDiscardRectangles); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetDiscardRectangleEXT( + VkCommandBuffer commandBuffer, + uint32_t firstDiscardRectangle, + uint32_t discardRectangleCount, + const VkRect2D* pDiscardRectangles); +#endif + +#define VK_EXT_conservative_rasterization 1 +#define VK_EXT_CONSERVATIVE_RASTERIZATION_SPEC_VERSION 1 +#define VK_EXT_CONSERVATIVE_RASTERIZATION_EXTENSION_NAME "VK_EXT_conservative_rasterization" + + +typedef enum VkConservativeRasterizationModeEXT { + VK_CONSERVATIVE_RASTERIZATION_MODE_DISABLED_EXT = 0, + VK_CONSERVATIVE_RASTERIZATION_MODE_OVERESTIMATE_EXT = 1, + VK_CONSERVATIVE_RASTERIZATION_MODE_UNDERESTIMATE_EXT = 2, + VK_CONSERVATIVE_RASTERIZATION_MODE_BEGIN_RANGE_EXT = VK_CONSERVATIVE_RASTERIZATION_MODE_DISABLED_EXT, + VK_CONSERVATIVE_RASTERIZATION_MODE_END_RANGE_EXT = VK_CONSERVATIVE_RASTERIZATION_MODE_UNDERESTIMATE_EXT, + VK_CONSERVATIVE_RASTERIZATION_MODE_RANGE_SIZE_EXT = (VK_CONSERVATIVE_RASTERIZATION_MODE_UNDERESTIMATE_EXT - VK_CONSERVATIVE_RASTERIZATION_MODE_DISABLED_EXT + 1), + VK_CONSERVATIVE_RASTERIZATION_MODE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkConservativeRasterizationModeEXT; + +typedef VkFlags VkPipelineRasterizationConservativeStateCreateFlagsEXT; + +typedef struct VkPhysicalDeviceConservativeRasterizationPropertiesEXT { + VkStructureType sType; + void* pNext; + float primitiveOverestimationSize; + float maxExtraPrimitiveOverestimationSize; + float extraPrimitiveOverestimationSizeGranularity; + VkBool32 primitiveUnderestimation; + VkBool32 conservativePointAndLineRasterization; + VkBool32 degenerateTrianglesRasterized; + VkBool32 degenerateLinesRasterized; + VkBool32 fullyCoveredFragmentShaderInputVariable; + VkBool32 conservativeRasterizationPostDepthCoverage; +} VkPhysicalDeviceConservativeRasterizationPropertiesEXT; + 
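+/*
+ * Illustrative sketch (editor-added, not from the upstream header; assumes
+ * Vulkan 1.1 or VK_KHR_get_physical_device_properties2 is available, and an
+ * already-selected VkPhysicalDevice named 'physicalDevice'): the limits in
+ * the properties struct above are retrieved by chaining it into
+ * VkPhysicalDeviceProperties2 via pNext:
+ *
+ *     VkPhysicalDeviceConservativeRasterizationPropertiesEXT consRaster = {
+ *         VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONSERVATIVE_RASTERIZATION_PROPERTIES_EXT
+ *     };
+ *     VkPhysicalDeviceProperties2 props2 = {
+ *         VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2, &consRaster
+ *     };
+ *     vkGetPhysicalDeviceProperties2(physicalDevice, &props2);
+ *
+ * After the call, e.g. consRaster.primitiveOverestimationSize holds the
+ * implementation's base overestimation size in pixels.
+ */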
+typedef struct VkPipelineRasterizationConservativeStateCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkPipelineRasterizationConservativeStateCreateFlagsEXT flags; + VkConservativeRasterizationModeEXT conservativeRasterizationMode; + float extraPrimitiveOverestimationSize; +} VkPipelineRasterizationConservativeStateCreateInfoEXT; + + + +#define VK_EXT_swapchain_colorspace 1 +#define VK_EXT_SWAPCHAIN_COLOR_SPACE_SPEC_VERSION 3 +#define VK_EXT_SWAPCHAIN_COLOR_SPACE_EXTENSION_NAME "VK_EXT_swapchain_colorspace" + + +#define VK_EXT_hdr_metadata 1 +#define VK_EXT_HDR_METADATA_SPEC_VERSION 1 +#define VK_EXT_HDR_METADATA_EXTENSION_NAME "VK_EXT_hdr_metadata" + +typedef struct VkXYColorEXT { + float x; + float y; +} VkXYColorEXT; + +typedef struct VkHdrMetadataEXT { + VkStructureType sType; + const void* pNext; + VkXYColorEXT displayPrimaryRed; + VkXYColorEXT displayPrimaryGreen; + VkXYColorEXT displayPrimaryBlue; + VkXYColorEXT whitePoint; + float maxLuminance; + float minLuminance; + float maxContentLightLevel; + float maxFrameAverageLightLevel; +} VkHdrMetadataEXT; + + +typedef void (VKAPI_PTR *PFN_vkSetHdrMetadataEXT)(VkDevice device, uint32_t swapchainCount, const VkSwapchainKHR* pSwapchains, const VkHdrMetadataEXT* pMetadata); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkSetHdrMetadataEXT( + VkDevice device, + uint32_t swapchainCount, + const VkSwapchainKHR* pSwapchains, + const VkHdrMetadataEXT* pMetadata); +#endif + +#define VK_EXT_external_memory_dma_buf 1 +#define VK_EXT_EXTERNAL_MEMORY_DMA_BUF_SPEC_VERSION 1 +#define VK_EXT_EXTERNAL_MEMORY_DMA_BUF_EXTENSION_NAME "VK_EXT_external_memory_dma_buf" + + +#define VK_EXT_queue_family_foreign 1 +#define VK_EXT_QUEUE_FAMILY_FOREIGN_SPEC_VERSION 1 +#define VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME "VK_EXT_queue_family_foreign" +#define VK_QUEUE_FAMILY_FOREIGN_EXT (~0U-2) + + +#define VK_EXT_debug_utils 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDebugUtilsMessengerEXT) + +#define VK_EXT_DEBUG_UTILS_SPEC_VERSION 1 +#define VK_EXT_DEBUG_UTILS_EXTENSION_NAME "VK_EXT_debug_utils" + +typedef VkFlags VkDebugUtilsMessengerCallbackDataFlagsEXT; +typedef VkFlags VkDebugUtilsMessengerCreateFlagsEXT; + +typedef enum VkDebugUtilsMessageSeverityFlagBitsEXT { + VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT = 0x00000001, + VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT = 0x00000010, + VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT = 0x00000100, + VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT = 0x00001000, + VK_DEBUG_UTILS_MESSAGE_SEVERITY_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDebugUtilsMessageSeverityFlagBitsEXT; +typedef VkFlags VkDebugUtilsMessageSeverityFlagsEXT; + +typedef enum VkDebugUtilsMessageTypeFlagBitsEXT { + VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT = 0x00000001, + VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT = 0x00000002, + VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT = 0x00000004, + VK_DEBUG_UTILS_MESSAGE_TYPE_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDebugUtilsMessageTypeFlagBitsEXT; +typedef VkFlags VkDebugUtilsMessageTypeFlagsEXT; + +typedef struct VkDebugUtilsObjectNameInfoEXT { + VkStructureType sType; + const void* pNext; + VkObjectType objectType; + uint64_t objectHandle; + const char* pObjectName; +} VkDebugUtilsObjectNameInfoEXT; + +typedef struct VkDebugUtilsObjectTagInfoEXT { + VkStructureType sType; + const void* pNext; + VkObjectType objectType; + uint64_t objectHandle; + uint64_t tagName; + size_t tagSize; + const void* pTag; +} VkDebugUtilsObjectTagInfoEXT; + +typedef struct VkDebugUtilsLabelEXT { 
+ VkStructureType sType; + const void* pNext; + const char* pLabelName; + float color[4]; +} VkDebugUtilsLabelEXT; + +typedef struct VkDebugUtilsMessengerCallbackDataEXT { + VkStructureType sType; + const void* pNext; + VkDebugUtilsMessengerCallbackDataFlagsEXT flags; + const char* pMessageIdName; + int32_t messageIdNumber; + const char* pMessage; + uint32_t queueLabelCount; + VkDebugUtilsLabelEXT* pQueueLabels; + uint32_t cmdBufLabelCount; + VkDebugUtilsLabelEXT* pCmdBufLabels; + uint32_t objectCount; + VkDebugUtilsObjectNameInfoEXT* pObjects; +} VkDebugUtilsMessengerCallbackDataEXT; + +typedef VkBool32 (VKAPI_PTR *PFN_vkDebugUtilsMessengerCallbackEXT)( + VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, + VkDebugUtilsMessageTypeFlagsEXT messageType, + const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData, + void* pUserData); + +typedef struct VkDebugUtilsMessengerCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkDebugUtilsMessengerCreateFlagsEXT flags; + VkDebugUtilsMessageSeverityFlagsEXT messageSeverity; + VkDebugUtilsMessageTypeFlagsEXT messageType; + PFN_vkDebugUtilsMessengerCallbackEXT pfnUserCallback; + void* pUserData; +} VkDebugUtilsMessengerCreateInfoEXT; + + +typedef VkResult (VKAPI_PTR *PFN_vkSetDebugUtilsObjectNameEXT)(VkDevice device, const VkDebugUtilsObjectNameInfoEXT* pNameInfo); +typedef VkResult (VKAPI_PTR *PFN_vkSetDebugUtilsObjectTagEXT)(VkDevice device, const VkDebugUtilsObjectTagInfoEXT* pTagInfo); +typedef void (VKAPI_PTR *PFN_vkQueueBeginDebugUtilsLabelEXT)(VkQueue queue, const VkDebugUtilsLabelEXT* pLabelInfo); +typedef void (VKAPI_PTR *PFN_vkQueueEndDebugUtilsLabelEXT)(VkQueue queue); +typedef void (VKAPI_PTR *PFN_vkQueueInsertDebugUtilsLabelEXT)(VkQueue queue, const VkDebugUtilsLabelEXT* pLabelInfo); +typedef void (VKAPI_PTR *PFN_vkCmdBeginDebugUtilsLabelEXT)(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT* pLabelInfo); +typedef void (VKAPI_PTR *PFN_vkCmdEndDebugUtilsLabelEXT)(VkCommandBuffer commandBuffer); +typedef void (VKAPI_PTR *PFN_vkCmdInsertDebugUtilsLabelEXT)(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT* pLabelInfo); +typedef VkResult (VKAPI_PTR *PFN_vkCreateDebugUtilsMessengerEXT)(VkInstance instance, const VkDebugUtilsMessengerCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDebugUtilsMessengerEXT* pMessenger); +typedef void (VKAPI_PTR *PFN_vkDestroyDebugUtilsMessengerEXT)(VkInstance instance, VkDebugUtilsMessengerEXT messenger, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkSubmitDebugUtilsMessageEXT)(VkInstance instance, VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, VkDebugUtilsMessageTypeFlagsEXT messageTypes, const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkSetDebugUtilsObjectNameEXT( + VkDevice device, + const VkDebugUtilsObjectNameInfoEXT* pNameInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkSetDebugUtilsObjectTagEXT( + VkDevice device, + const VkDebugUtilsObjectTagInfoEXT* pTagInfo); + +VKAPI_ATTR void VKAPI_CALL vkQueueBeginDebugUtilsLabelEXT( + VkQueue queue, + const VkDebugUtilsLabelEXT* pLabelInfo); + +VKAPI_ATTR void VKAPI_CALL vkQueueEndDebugUtilsLabelEXT( + VkQueue queue); + +VKAPI_ATTR void VKAPI_CALL vkQueueInsertDebugUtilsLabelEXT( + VkQueue queue, + const VkDebugUtilsLabelEXT* pLabelInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdBeginDebugUtilsLabelEXT( + VkCommandBuffer commandBuffer, + const VkDebugUtilsLabelEXT* pLabelInfo); + +VKAPI_ATTR void VKAPI_CALL 
vkCmdEndDebugUtilsLabelEXT( + VkCommandBuffer commandBuffer); + +VKAPI_ATTR void VKAPI_CALL vkCmdInsertDebugUtilsLabelEXT( + VkCommandBuffer commandBuffer, + const VkDebugUtilsLabelEXT* pLabelInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDebugUtilsMessengerEXT( + VkInstance instance, + const VkDebugUtilsMessengerCreateInfoEXT* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDebugUtilsMessengerEXT* pMessenger); + +VKAPI_ATTR void VKAPI_CALL vkDestroyDebugUtilsMessengerEXT( + VkInstance instance, + VkDebugUtilsMessengerEXT messenger, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR void VKAPI_CALL vkSubmitDebugUtilsMessageEXT( + VkInstance instance, + VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, + VkDebugUtilsMessageTypeFlagsEXT messageTypes, + const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData); +#endif + +#define VK_EXT_sampler_filter_minmax 1 +#define VK_EXT_SAMPLER_FILTER_MINMAX_SPEC_VERSION 1 +#define VK_EXT_SAMPLER_FILTER_MINMAX_EXTENSION_NAME "VK_EXT_sampler_filter_minmax" + + +typedef enum VkSamplerReductionModeEXT { + VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE_EXT = 0, + VK_SAMPLER_REDUCTION_MODE_MIN_EXT = 1, + VK_SAMPLER_REDUCTION_MODE_MAX_EXT = 2, + VK_SAMPLER_REDUCTION_MODE_BEGIN_RANGE_EXT = VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE_EXT, + VK_SAMPLER_REDUCTION_MODE_END_RANGE_EXT = VK_SAMPLER_REDUCTION_MODE_MAX_EXT, + VK_SAMPLER_REDUCTION_MODE_RANGE_SIZE_EXT = (VK_SAMPLER_REDUCTION_MODE_MAX_EXT - VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE_EXT + 1), + VK_SAMPLER_REDUCTION_MODE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkSamplerReductionModeEXT; + +typedef struct VkSamplerReductionModeCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkSamplerReductionModeEXT reductionMode; +} VkSamplerReductionModeCreateInfoEXT; + +typedef struct VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT { + VkStructureType sType; + void* pNext; + VkBool32 filterMinmaxSingleComponentFormats; + VkBool32 filterMinmaxImageComponentMapping; +} VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT; + + + +#define VK_AMD_gpu_shader_int16 1 +#define VK_AMD_GPU_SHADER_INT16_SPEC_VERSION 1 +#define VK_AMD_GPU_SHADER_INT16_EXTENSION_NAME "VK_AMD_gpu_shader_int16" + + +#define VK_AMD_mixed_attachment_samples 1 +#define VK_AMD_MIXED_ATTACHMENT_SAMPLES_SPEC_VERSION 1 +#define VK_AMD_MIXED_ATTACHMENT_SAMPLES_EXTENSION_NAME "VK_AMD_mixed_attachment_samples" + + +#define VK_AMD_shader_fragment_mask 1 +#define VK_AMD_SHADER_FRAGMENT_MASK_SPEC_VERSION 1 +#define VK_AMD_SHADER_FRAGMENT_MASK_EXTENSION_NAME "VK_AMD_shader_fragment_mask" + + +#define VK_EXT_shader_stencil_export 1 +#define VK_EXT_SHADER_STENCIL_EXPORT_SPEC_VERSION 1 +#define VK_EXT_SHADER_STENCIL_EXPORT_EXTENSION_NAME "VK_EXT_shader_stencil_export" + + +#define VK_EXT_sample_locations 1 +#define VK_EXT_SAMPLE_LOCATIONS_SPEC_VERSION 1 +#define VK_EXT_SAMPLE_LOCATIONS_EXTENSION_NAME "VK_EXT_sample_locations" + +typedef struct VkSampleLocationEXT { + float x; + float y; +} VkSampleLocationEXT; + +typedef struct VkSampleLocationsInfoEXT { + VkStructureType sType; + const void* pNext; + VkSampleCountFlagBits sampleLocationsPerPixel; + VkExtent2D sampleLocationGridSize; + uint32_t sampleLocationsCount; + const VkSampleLocationEXT* pSampleLocations; +} VkSampleLocationsInfoEXT; + +typedef struct VkAttachmentSampleLocationsEXT { + uint32_t attachmentIndex; + VkSampleLocationsInfoEXT sampleLocationsInfo; +} VkAttachmentSampleLocationsEXT; + +typedef struct VkSubpassSampleLocationsEXT { + uint32_t subpassIndex; + 
VkSampleLocationsInfoEXT sampleLocationsInfo; +} VkSubpassSampleLocationsEXT; + +typedef struct VkRenderPassSampleLocationsBeginInfoEXT { + VkStructureType sType; + const void* pNext; + uint32_t attachmentInitialSampleLocationsCount; + const VkAttachmentSampleLocationsEXT* pAttachmentInitialSampleLocations; + uint32_t postSubpassSampleLocationsCount; + const VkSubpassSampleLocationsEXT* pPostSubpassSampleLocations; +} VkRenderPassSampleLocationsBeginInfoEXT; + +typedef struct VkPipelineSampleLocationsStateCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkBool32 sampleLocationsEnable; + VkSampleLocationsInfoEXT sampleLocationsInfo; +} VkPipelineSampleLocationsStateCreateInfoEXT; + +typedef struct VkPhysicalDeviceSampleLocationsPropertiesEXT { + VkStructureType sType; + void* pNext; + VkSampleCountFlags sampleLocationSampleCounts; + VkExtent2D maxSampleLocationGridSize; + float sampleLocationCoordinateRange[2]; + uint32_t sampleLocationSubPixelBits; + VkBool32 variableSampleLocations; +} VkPhysicalDeviceSampleLocationsPropertiesEXT; + +typedef struct VkMultisamplePropertiesEXT { + VkStructureType sType; + void* pNext; + VkExtent2D maxSampleLocationGridSize; +} VkMultisamplePropertiesEXT; + + +typedef void (VKAPI_PTR *PFN_vkCmdSetSampleLocationsEXT)(VkCommandBuffer commandBuffer, const VkSampleLocationsInfoEXT* pSampleLocationsInfo); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceMultisamplePropertiesEXT)(VkPhysicalDevice physicalDevice, VkSampleCountFlagBits samples, VkMultisamplePropertiesEXT* pMultisampleProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetSampleLocationsEXT( + VkCommandBuffer commandBuffer, + const VkSampleLocationsInfoEXT* pSampleLocationsInfo); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMultisamplePropertiesEXT( + VkPhysicalDevice physicalDevice, + VkSampleCountFlagBits samples, + VkMultisamplePropertiesEXT* pMultisampleProperties); +#endif + +#define VK_EXT_blend_operation_advanced 1 +#define VK_EXT_BLEND_OPERATION_ADVANCED_SPEC_VERSION 2 +#define VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME "VK_EXT_blend_operation_advanced" + + +typedef enum VkBlendOverlapEXT { + VK_BLEND_OVERLAP_UNCORRELATED_EXT = 0, + VK_BLEND_OVERLAP_DISJOINT_EXT = 1, + VK_BLEND_OVERLAP_CONJOINT_EXT = 2, + VK_BLEND_OVERLAP_BEGIN_RANGE_EXT = VK_BLEND_OVERLAP_UNCORRELATED_EXT, + VK_BLEND_OVERLAP_END_RANGE_EXT = VK_BLEND_OVERLAP_CONJOINT_EXT, + VK_BLEND_OVERLAP_RANGE_SIZE_EXT = (VK_BLEND_OVERLAP_CONJOINT_EXT - VK_BLEND_OVERLAP_UNCORRELATED_EXT + 1), + VK_BLEND_OVERLAP_MAX_ENUM_EXT = 0x7FFFFFFF +} VkBlendOverlapEXT; + +typedef struct VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 advancedBlendCoherentOperations; +} VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT; + +typedef struct VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t advancedBlendMaxColorAttachments; + VkBool32 advancedBlendIndependentBlend; + VkBool32 advancedBlendNonPremultipliedSrcColor; + VkBool32 advancedBlendNonPremultipliedDstColor; + VkBool32 advancedBlendCorrelatedOverlap; + VkBool32 advancedBlendAllOperations; +} VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT; + +typedef struct VkPipelineColorBlendAdvancedStateCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkBool32 srcPremultiplied; + VkBool32 dstPremultiplied; + VkBlendOverlapEXT blendOverlap; +} VkPipelineColorBlendAdvancedStateCreateInfoEXT; + + + +#define VK_NV_fragment_coverage_to_color 1 
+#define VK_NV_FRAGMENT_COVERAGE_TO_COLOR_SPEC_VERSION 1 +#define VK_NV_FRAGMENT_COVERAGE_TO_COLOR_EXTENSION_NAME "VK_NV_fragment_coverage_to_color" + +typedef VkFlags VkPipelineCoverageToColorStateCreateFlagsNV; + +typedef struct VkPipelineCoverageToColorStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkPipelineCoverageToColorStateCreateFlagsNV flags; + VkBool32 coverageToColorEnable; + uint32_t coverageToColorLocation; +} VkPipelineCoverageToColorStateCreateInfoNV; + + + +#define VK_NV_framebuffer_mixed_samples 1 +#define VK_NV_FRAMEBUFFER_MIXED_SAMPLES_SPEC_VERSION 1 +#define VK_NV_FRAMEBUFFER_MIXED_SAMPLES_EXTENSION_NAME "VK_NV_framebuffer_mixed_samples" + + +typedef enum VkCoverageModulationModeNV { + VK_COVERAGE_MODULATION_MODE_NONE_NV = 0, + VK_COVERAGE_MODULATION_MODE_RGB_NV = 1, + VK_COVERAGE_MODULATION_MODE_ALPHA_NV = 2, + VK_COVERAGE_MODULATION_MODE_RGBA_NV = 3, + VK_COVERAGE_MODULATION_MODE_BEGIN_RANGE_NV = VK_COVERAGE_MODULATION_MODE_NONE_NV, + VK_COVERAGE_MODULATION_MODE_END_RANGE_NV = VK_COVERAGE_MODULATION_MODE_RGBA_NV, + VK_COVERAGE_MODULATION_MODE_RANGE_SIZE_NV = (VK_COVERAGE_MODULATION_MODE_RGBA_NV - VK_COVERAGE_MODULATION_MODE_NONE_NV + 1), + VK_COVERAGE_MODULATION_MODE_MAX_ENUM_NV = 0x7FFFFFFF +} VkCoverageModulationModeNV; + +typedef VkFlags VkPipelineCoverageModulationStateCreateFlagsNV; + +typedef struct VkPipelineCoverageModulationStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkPipelineCoverageModulationStateCreateFlagsNV flags; + VkCoverageModulationModeNV coverageModulationMode; + VkBool32 coverageModulationTableEnable; + uint32_t coverageModulationTableCount; + const float* pCoverageModulationTable; +} VkPipelineCoverageModulationStateCreateInfoNV; + + + +#define VK_NV_fill_rectangle 1 +#define VK_NV_FILL_RECTANGLE_SPEC_VERSION 1 +#define VK_NV_FILL_RECTANGLE_EXTENSION_NAME "VK_NV_fill_rectangle" + + +#define VK_EXT_post_depth_coverage 1 +#define VK_EXT_POST_DEPTH_COVERAGE_SPEC_VERSION 1 +#define VK_EXT_POST_DEPTH_COVERAGE_EXTENSION_NAME "VK_EXT_post_depth_coverage" + + +#define VK_EXT_validation_cache 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkValidationCacheEXT) + +#define VK_EXT_VALIDATION_CACHE_SPEC_VERSION 1 +#define VK_EXT_VALIDATION_CACHE_EXTENSION_NAME "VK_EXT_validation_cache" +#define VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT_EXT + + +typedef enum VkValidationCacheHeaderVersionEXT { + VK_VALIDATION_CACHE_HEADER_VERSION_ONE_EXT = 1, + VK_VALIDATION_CACHE_HEADER_VERSION_BEGIN_RANGE_EXT = VK_VALIDATION_CACHE_HEADER_VERSION_ONE_EXT, + VK_VALIDATION_CACHE_HEADER_VERSION_END_RANGE_EXT = VK_VALIDATION_CACHE_HEADER_VERSION_ONE_EXT, + VK_VALIDATION_CACHE_HEADER_VERSION_RANGE_SIZE_EXT = (VK_VALIDATION_CACHE_HEADER_VERSION_ONE_EXT - VK_VALIDATION_CACHE_HEADER_VERSION_ONE_EXT + 1), + VK_VALIDATION_CACHE_HEADER_VERSION_MAX_ENUM_EXT = 0x7FFFFFFF +} VkValidationCacheHeaderVersionEXT; + +typedef VkFlags VkValidationCacheCreateFlagsEXT; + +typedef struct VkValidationCacheCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkValidationCacheCreateFlagsEXT flags; + size_t initialDataSize; + const void* pInitialData; +} VkValidationCacheCreateInfoEXT; + +typedef struct VkShaderModuleValidationCacheCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkValidationCacheEXT validationCache; +} VkShaderModuleValidationCacheCreateInfoEXT; + + +typedef VkResult (VKAPI_PTR *PFN_vkCreateValidationCacheEXT)(VkDevice device, const VkValidationCacheCreateInfoEXT* 
pCreateInfo, const VkAllocationCallbacks* pAllocator, VkValidationCacheEXT* pValidationCache); +typedef void (VKAPI_PTR *PFN_vkDestroyValidationCacheEXT)(VkDevice device, VkValidationCacheEXT validationCache, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkMergeValidationCachesEXT)(VkDevice device, VkValidationCacheEXT dstCache, uint32_t srcCacheCount, const VkValidationCacheEXT* pSrcCaches); +typedef VkResult (VKAPI_PTR *PFN_vkGetValidationCacheDataEXT)(VkDevice device, VkValidationCacheEXT validationCache, size_t* pDataSize, void* pData); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateValidationCacheEXT( + VkDevice device, + const VkValidationCacheCreateInfoEXT* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkValidationCacheEXT* pValidationCache); + +VKAPI_ATTR void VKAPI_CALL vkDestroyValidationCacheEXT( + VkDevice device, + VkValidationCacheEXT validationCache, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkMergeValidationCachesEXT( + VkDevice device, + VkValidationCacheEXT dstCache, + uint32_t srcCacheCount, + const VkValidationCacheEXT* pSrcCaches); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetValidationCacheDataEXT( + VkDevice device, + VkValidationCacheEXT validationCache, + size_t* pDataSize, + void* pData); +#endif + +#define VK_EXT_descriptor_indexing 1 +#define VK_EXT_DESCRIPTOR_INDEXING_SPEC_VERSION 2 +#define VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME "VK_EXT_descriptor_indexing" + + +typedef enum VkDescriptorBindingFlagBitsEXT { + VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT = 0x00000001, + VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT_EXT = 0x00000002, + VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT_EXT = 0x00000004, + VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT_EXT = 0x00000008, + VK_DESCRIPTOR_BINDING_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDescriptorBindingFlagBitsEXT; +typedef VkFlags VkDescriptorBindingFlagsEXT; + +typedef struct VkDescriptorSetLayoutBindingFlagsCreateInfoEXT { + VkStructureType sType; + const void* pNext; + uint32_t bindingCount; + const VkDescriptorBindingFlagsEXT* pBindingFlags; +} VkDescriptorSetLayoutBindingFlagsCreateInfoEXT; + +typedef struct VkPhysicalDeviceDescriptorIndexingFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 shaderInputAttachmentArrayDynamicIndexing; + VkBool32 shaderUniformTexelBufferArrayDynamicIndexing; + VkBool32 shaderStorageTexelBufferArrayDynamicIndexing; + VkBool32 shaderUniformBufferArrayNonUniformIndexing; + VkBool32 shaderSampledImageArrayNonUniformIndexing; + VkBool32 shaderStorageBufferArrayNonUniformIndexing; + VkBool32 shaderStorageImageArrayNonUniformIndexing; + VkBool32 shaderInputAttachmentArrayNonUniformIndexing; + VkBool32 shaderUniformTexelBufferArrayNonUniformIndexing; + VkBool32 shaderStorageTexelBufferArrayNonUniformIndexing; + VkBool32 descriptorBindingUniformBufferUpdateAfterBind; + VkBool32 descriptorBindingSampledImageUpdateAfterBind; + VkBool32 descriptorBindingStorageImageUpdateAfterBind; + VkBool32 descriptorBindingStorageBufferUpdateAfterBind; + VkBool32 descriptorBindingUniformTexelBufferUpdateAfterBind; + VkBool32 descriptorBindingStorageTexelBufferUpdateAfterBind; + VkBool32 descriptorBindingUpdateUnusedWhilePending; + VkBool32 descriptorBindingPartiallyBound; + VkBool32 descriptorBindingVariableDescriptorCount; + VkBool32 runtimeDescriptorArray; +} VkPhysicalDeviceDescriptorIndexingFeaturesEXT; + +typedef struct VkPhysicalDeviceDescriptorIndexingPropertiesEXT { + 
VkStructureType sType; + void* pNext; + uint32_t maxUpdateAfterBindDescriptorsInAllPools; + VkBool32 shaderUniformBufferArrayNonUniformIndexingNative; + VkBool32 shaderSampledImageArrayNonUniformIndexingNative; + VkBool32 shaderStorageBufferArrayNonUniformIndexingNative; + VkBool32 shaderStorageImageArrayNonUniformIndexingNative; + VkBool32 shaderInputAttachmentArrayNonUniformIndexingNative; + VkBool32 robustBufferAccessUpdateAfterBind; + VkBool32 quadDivergentImplicitLod; + uint32_t maxPerStageDescriptorUpdateAfterBindSamplers; + uint32_t maxPerStageDescriptorUpdateAfterBindUniformBuffers; + uint32_t maxPerStageDescriptorUpdateAfterBindStorageBuffers; + uint32_t maxPerStageDescriptorUpdateAfterBindSampledImages; + uint32_t maxPerStageDescriptorUpdateAfterBindStorageImages; + uint32_t maxPerStageDescriptorUpdateAfterBindInputAttachments; + uint32_t maxPerStageUpdateAfterBindResources; + uint32_t maxDescriptorSetUpdateAfterBindSamplers; + uint32_t maxDescriptorSetUpdateAfterBindUniformBuffers; + uint32_t maxDescriptorSetUpdateAfterBindUniformBuffersDynamic; + uint32_t maxDescriptorSetUpdateAfterBindStorageBuffers; + uint32_t maxDescriptorSetUpdateAfterBindStorageBuffersDynamic; + uint32_t maxDescriptorSetUpdateAfterBindSampledImages; + uint32_t maxDescriptorSetUpdateAfterBindStorageImages; + uint32_t maxDescriptorSetUpdateAfterBindInputAttachments; +} VkPhysicalDeviceDescriptorIndexingPropertiesEXT; + +typedef struct VkDescriptorSetVariableDescriptorCountAllocateInfoEXT { + VkStructureType sType; + const void* pNext; + uint32_t descriptorSetCount; + const uint32_t* pDescriptorCounts; +} VkDescriptorSetVariableDescriptorCountAllocateInfoEXT; + +typedef struct VkDescriptorSetVariableDescriptorCountLayoutSupportEXT { + VkStructureType sType; + void* pNext; + uint32_t maxVariableDescriptorCount; +} VkDescriptorSetVariableDescriptorCountLayoutSupportEXT; + + + +#define VK_EXT_shader_viewport_index_layer 1 +#define VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_SPEC_VERSION 1 +#define VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_EXTENSION_NAME "VK_EXT_shader_viewport_index_layer" + + +#define VK_EXT_global_priority 1 +#define VK_EXT_GLOBAL_PRIORITY_SPEC_VERSION 2 +#define VK_EXT_GLOBAL_PRIORITY_EXTENSION_NAME "VK_EXT_global_priority" + + +typedef enum VkQueueGlobalPriorityEXT { + VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT = 128, + VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT = 256, + VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT = 512, + VK_QUEUE_GLOBAL_PRIORITY_REALTIME_EXT = 1024, + VK_QUEUE_GLOBAL_PRIORITY_BEGIN_RANGE_EXT = VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT, + VK_QUEUE_GLOBAL_PRIORITY_END_RANGE_EXT = VK_QUEUE_GLOBAL_PRIORITY_REALTIME_EXT, + VK_QUEUE_GLOBAL_PRIORITY_RANGE_SIZE_EXT = (VK_QUEUE_GLOBAL_PRIORITY_REALTIME_EXT - VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT + 1), + VK_QUEUE_GLOBAL_PRIORITY_MAX_ENUM_EXT = 0x7FFFFFFF +} VkQueueGlobalPriorityEXT; + +typedef struct VkDeviceQueueGlobalPriorityCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkQueueGlobalPriorityEXT globalPriority; +} VkDeviceQueueGlobalPriorityCreateInfoEXT; + + + +#define VK_EXT_external_memory_host 1 +#define VK_EXT_EXTERNAL_MEMORY_HOST_SPEC_VERSION 1 +#define VK_EXT_EXTERNAL_MEMORY_HOST_EXTENSION_NAME "VK_EXT_external_memory_host" + +typedef struct VkImportMemoryHostPointerInfoEXT { + VkStructureType sType; + const void* pNext; + VkExternalMemoryHandleTypeFlagBits handleType; + void* pHostPointer; +} VkImportMemoryHostPointerInfoEXT; + +typedef struct VkMemoryHostPointerPropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t memoryTypeBits; +} 
VkMemoryHostPointerPropertiesEXT; + +typedef struct VkPhysicalDeviceExternalMemoryHostPropertiesEXT { + VkStructureType sType; + void* pNext; + VkDeviceSize minImportedHostPointerAlignment; +} VkPhysicalDeviceExternalMemoryHostPropertiesEXT; + + +typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryHostPointerPropertiesEXT)(VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType, const void* pHostPointer, VkMemoryHostPointerPropertiesEXT* pMemoryHostPointerProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryHostPointerPropertiesEXT( + VkDevice device, + VkExternalMemoryHandleTypeFlagBits handleType, + const void* pHostPointer, + VkMemoryHostPointerPropertiesEXT* pMemoryHostPointerProperties); +#endif + +#define VK_AMD_buffer_marker 1 +#define VK_AMD_BUFFER_MARKER_SPEC_VERSION 1 +#define VK_AMD_BUFFER_MARKER_EXTENSION_NAME "VK_AMD_buffer_marker" + +typedef void (VKAPI_PTR *PFN_vkCmdWriteBufferMarkerAMD)(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdWriteBufferMarkerAMD( + VkCommandBuffer commandBuffer, + VkPipelineStageFlagBits pipelineStage, + VkBuffer dstBuffer, + VkDeviceSize dstOffset, + uint32_t marker); +#endif + +#define VK_AMD_shader_core_properties 1 +#define VK_AMD_SHADER_CORE_PROPERTIES_SPEC_VERSION 1 +#define VK_AMD_SHADER_CORE_PROPERTIES_EXTENSION_NAME "VK_AMD_shader_core_properties" + +typedef struct VkPhysicalDeviceShaderCorePropertiesAMD { + VkStructureType sType; + void* pNext; + uint32_t shaderEngineCount; + uint32_t shaderArraysPerEngineCount; + uint32_t computeUnitsPerShaderArray; + uint32_t simdPerComputeUnit; + uint32_t wavefrontsPerSimd; + uint32_t wavefrontSize; + uint32_t sgprsPerSimd; + uint32_t minSgprAllocation; + uint32_t maxSgprAllocation; + uint32_t sgprAllocationGranularity; + uint32_t vgprsPerSimd; + uint32_t minVgprAllocation; + uint32_t maxVgprAllocation; + uint32_t vgprAllocationGranularity; +} VkPhysicalDeviceShaderCorePropertiesAMD; + + + +#define VK_EXT_vertex_attribute_divisor 1 +#define VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_SPEC_VERSION 1 +#define VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME "VK_EXT_vertex_attribute_divisor" + +typedef struct VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t maxVertexAttribDivisor; +} VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT; + +typedef struct VkVertexInputBindingDivisorDescriptionEXT { + uint32_t binding; + uint32_t divisor; +} VkVertexInputBindingDivisorDescriptionEXT; + +typedef struct VkPipelineVertexInputDivisorStateCreateInfoEXT { + VkStructureType sType; + const void* pNext; + uint32_t vertexBindingDivisorCount; + const VkVertexInputBindingDivisorDescriptionEXT* pVertexBindingDivisors; +} VkPipelineVertexInputDivisorStateCreateInfoEXT; + + + +#define VK_NV_shader_subgroup_partitioned 1 +#define VK_NV_SHADER_SUBGROUP_PARTITIONED_SPEC_VERSION 1 +#define VK_NV_SHADER_SUBGROUP_PARTITIONED_EXTENSION_NAME "VK_NV_shader_subgroup_partitioned" + + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/sources/pvrsdk/Include/vulkan/vulkan_ios.h b/sources/pvrsdk/Include/vulkan/vulkan_ios.h new file mode 100755 index 00000000..a0924816 --- /dev/null +++ b/sources/pvrsdk/Include/vulkan/vulkan_ios.h @@ -0,0 +1,58 @@ +#ifndef VULKAN_IOS_H_ +#define VULKAN_IOS_H_ 1 + +#ifdef __cplusplus +extern "C" { +#endif + +/* +** Copyright (c) 2015-2018 The Khronos Group Inc. 
+** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. +** +*/ + + +#define VK_MVK_ios_surface 1 +#define VK_MVK_IOS_SURFACE_SPEC_VERSION 2 +#define VK_MVK_IOS_SURFACE_EXTENSION_NAME "VK_MVK_ios_surface" + +typedef VkFlags VkIOSSurfaceCreateFlagsMVK; + +typedef struct VkIOSSurfaceCreateInfoMVK { + VkStructureType sType; + const void* pNext; + VkIOSSurfaceCreateFlagsMVK flags; + const void* pView; +} VkIOSSurfaceCreateInfoMVK; + + +typedef VkResult (VKAPI_PTR *PFN_vkCreateIOSSurfaceMVK)(VkInstance instance, const VkIOSSurfaceCreateInfoMVK* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateIOSSurfaceMVK( + VkInstance instance, + const VkIOSSurfaceCreateInfoMVK* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/sources/pvrsdk/Include/vulkan/vulkan_macos.h b/sources/pvrsdk/Include/vulkan/vulkan_macos.h new file mode 100755 index 00000000..ff0b7018 --- /dev/null +++ b/sources/pvrsdk/Include/vulkan/vulkan_macos.h @@ -0,0 +1,58 @@ +#ifndef VULKAN_MACOS_H_ +#define VULKAN_MACOS_H_ 1 + +#ifdef __cplusplus +extern "C" { +#endif + +/* +** Copyright (c) 2015-2018 The Khronos Group Inc. +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. 
+** +*/ + + +#define VK_MVK_macos_surface 1 +#define VK_MVK_MACOS_SURFACE_SPEC_VERSION 2 +#define VK_MVK_MACOS_SURFACE_EXTENSION_NAME "VK_MVK_macos_surface" + +typedef VkFlags VkMacOSSurfaceCreateFlagsMVK; + +typedef struct VkMacOSSurfaceCreateInfoMVK { + VkStructureType sType; + const void* pNext; + VkMacOSSurfaceCreateFlagsMVK flags; + const void* pView; +} VkMacOSSurfaceCreateInfoMVK; + + +typedef VkResult (VKAPI_PTR *PFN_vkCreateMacOSSurfaceMVK)(VkInstance instance, const VkMacOSSurfaceCreateInfoMVK* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateMacOSSurfaceMVK( + VkInstance instance, + const VkMacOSSurfaceCreateInfoMVK* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/sources/pvrsdk/Include/vulkan/vulkan_mir.h b/sources/pvrsdk/Include/vulkan/vulkan_mir.h new file mode 100755 index 00000000..7d24ed27 --- /dev/null +++ b/sources/pvrsdk/Include/vulkan/vulkan_mir.h @@ -0,0 +1,65 @@ +#ifndef VULKAN_MIR_H_ +#define VULKAN_MIR_H_ 1 + +#ifdef __cplusplus +extern "C" { +#endif + +/* +** Copyright (c) 2015-2018 The Khronos Group Inc. +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. +** +*/ + + +#define VK_KHR_mir_surface 1 +#define VK_KHR_MIR_SURFACE_SPEC_VERSION 4 +#define VK_KHR_MIR_SURFACE_EXTENSION_NAME "VK_KHR_mir_surface" + +typedef VkFlags VkMirSurfaceCreateFlagsKHR; + +typedef struct VkMirSurfaceCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkMirSurfaceCreateFlagsKHR flags; + MirConnection* connection; + MirSurface* mirSurface; +} VkMirSurfaceCreateInfoKHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkCreateMirSurfaceKHR)(VkInstance instance, const VkMirSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); +typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceMirPresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, MirConnection* connection); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateMirSurfaceKHR( + VkInstance instance, + const VkMirSurfaceCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); + +VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceMirPresentationSupportKHR( + VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex, + MirConnection* connection); +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/sources/pvrsdk/Include/vulkan/vulkan_vi.h b/sources/pvrsdk/Include/vulkan/vulkan_vi.h new file mode 100755 index 00000000..015166bf --- /dev/null +++ b/sources/pvrsdk/Include/vulkan/vulkan_vi.h @@ -0,0 +1,58 @@ +#ifndef VULKAN_VI_H_ +#define VULKAN_VI_H_ 1 + +#ifdef __cplusplus +extern "C" { +#endif + +/* +** Copyright (c) 2015-2018 The Khronos Group Inc. 
+** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. +** +*/ + + +#define VK_NN_vi_surface 1 +#define VK_NN_VI_SURFACE_SPEC_VERSION 1 +#define VK_NN_VI_SURFACE_EXTENSION_NAME "VK_NN_vi_surface" + +typedef VkFlags VkViSurfaceCreateFlagsNN; + +typedef struct VkViSurfaceCreateInfoNN { + VkStructureType sType; + const void* pNext; + VkViSurfaceCreateFlagsNN flags; + void* window; +} VkViSurfaceCreateInfoNN; + + +typedef VkResult (VKAPI_PTR *PFN_vkCreateViSurfaceNN)(VkInstance instance, const VkViSurfaceCreateInfoNN* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateViSurfaceNN( + VkInstance instance, + const VkViSurfaceCreateInfoNN* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/sources/pvrsdk/Include/vulkan/vulkan_wayland.h b/sources/pvrsdk/Include/vulkan/vulkan_wayland.h new file mode 100755 index 00000000..5ba0827a --- /dev/null +++ b/sources/pvrsdk/Include/vulkan/vulkan_wayland.h @@ -0,0 +1,65 @@ +#ifndef VULKAN_WAYLAND_H_ +#define VULKAN_WAYLAND_H_ 1 + +#ifdef __cplusplus +extern "C" { +#endif + +/* +** Copyright (c) 2015-2018 The Khronos Group Inc. +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. 
+** +*/ + + +#define VK_KHR_wayland_surface 1 +#define VK_KHR_WAYLAND_SURFACE_SPEC_VERSION 6 +#define VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME "VK_KHR_wayland_surface" + +typedef VkFlags VkWaylandSurfaceCreateFlagsKHR; + +typedef struct VkWaylandSurfaceCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkWaylandSurfaceCreateFlagsKHR flags; + struct wl_display* display; + struct wl_surface* surface; +} VkWaylandSurfaceCreateInfoKHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkCreateWaylandSurfaceKHR)(VkInstance instance, const VkWaylandSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); +typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, struct wl_display* display); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateWaylandSurfaceKHR( + VkInstance instance, + const VkWaylandSurfaceCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); + +VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceWaylandPresentationSupportKHR( + VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex, + struct wl_display* display); +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/sources/pvrsdk/Include/vulkan/vulkan_win32.h b/sources/pvrsdk/Include/vulkan/vulkan_win32.h new file mode 100755 index 00000000..6a85409e --- /dev/null +++ b/sources/pvrsdk/Include/vulkan/vulkan_win32.h @@ -0,0 +1,276 @@ +#ifndef VULKAN_WIN32_H_ +#define VULKAN_WIN32_H_ 1 + +#ifdef __cplusplus +extern "C" { +#endif + +/* +** Copyright (c) 2015-2018 The Khronos Group Inc. +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. 
+** +*/ + + +#define VK_KHR_win32_surface 1 +#define VK_KHR_WIN32_SURFACE_SPEC_VERSION 6 +#define VK_KHR_WIN32_SURFACE_EXTENSION_NAME "VK_KHR_win32_surface" + +typedef VkFlags VkWin32SurfaceCreateFlagsKHR; + +typedef struct VkWin32SurfaceCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkWin32SurfaceCreateFlagsKHR flags; + HINSTANCE hinstance; + HWND hwnd; +} VkWin32SurfaceCreateInfoKHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkCreateWin32SurfaceKHR)(VkInstance instance, const VkWin32SurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); +typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateWin32SurfaceKHR( + VkInstance instance, + const VkWin32SurfaceCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); + +VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceWin32PresentationSupportKHR( + VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex); +#endif + +#define VK_KHR_external_memory_win32 1 +#define VK_KHR_EXTERNAL_MEMORY_WIN32_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_MEMORY_WIN32_EXTENSION_NAME "VK_KHR_external_memory_win32" + +typedef struct VkImportMemoryWin32HandleInfoKHR { + VkStructureType sType; + const void* pNext; + VkExternalMemoryHandleTypeFlagBits handleType; + HANDLE handle; + LPCWSTR name; +} VkImportMemoryWin32HandleInfoKHR; + +typedef struct VkExportMemoryWin32HandleInfoKHR { + VkStructureType sType; + const void* pNext; + const SECURITY_ATTRIBUTES* pAttributes; + DWORD dwAccess; + LPCWSTR name; +} VkExportMemoryWin32HandleInfoKHR; + +typedef struct VkMemoryWin32HandlePropertiesKHR { + VkStructureType sType; + void* pNext; + uint32_t memoryTypeBits; +} VkMemoryWin32HandlePropertiesKHR; + +typedef struct VkMemoryGetWin32HandleInfoKHR { + VkStructureType sType; + const void* pNext; + VkDeviceMemory memory; + VkExternalMemoryHandleTypeFlagBits handleType; +} VkMemoryGetWin32HandleInfoKHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryWin32HandleKHR)(VkDevice device, const VkMemoryGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle); +typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryWin32HandlePropertiesKHR)(VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType, HANDLE handle, VkMemoryWin32HandlePropertiesKHR* pMemoryWin32HandleProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryWin32HandleKHR( + VkDevice device, + const VkMemoryGetWin32HandleInfoKHR* pGetWin32HandleInfo, + HANDLE* pHandle); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryWin32HandlePropertiesKHR( + VkDevice device, + VkExternalMemoryHandleTypeFlagBits handleType, + HANDLE handle, + VkMemoryWin32HandlePropertiesKHR* pMemoryWin32HandleProperties); +#endif + +#define VK_KHR_win32_keyed_mutex 1 +#define VK_KHR_WIN32_KEYED_MUTEX_SPEC_VERSION 1 +#define VK_KHR_WIN32_KEYED_MUTEX_EXTENSION_NAME "VK_KHR_win32_keyed_mutex" + +typedef struct VkWin32KeyedMutexAcquireReleaseInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t acquireCount; + const VkDeviceMemory* pAcquireSyncs; + const uint64_t* pAcquireKeys; + const uint32_t* pAcquireTimeouts; + uint32_t releaseCount; + const VkDeviceMemory* pReleaseSyncs; + const uint64_t* pReleaseKeys; +} VkWin32KeyedMutexAcquireReleaseInfoKHR; + + + +#define VK_KHR_external_semaphore_win32 1 +#define VK_KHR_EXTERNAL_SEMAPHORE_WIN32_SPEC_VERSION 1 +#define 
VK_KHR_EXTERNAL_SEMAPHORE_WIN32_EXTENSION_NAME "VK_KHR_external_semaphore_win32" + +typedef struct VkImportSemaphoreWin32HandleInfoKHR { + VkStructureType sType; + const void* pNext; + VkSemaphore semaphore; + VkSemaphoreImportFlags flags; + VkExternalSemaphoreHandleTypeFlagBits handleType; + HANDLE handle; + LPCWSTR name; +} VkImportSemaphoreWin32HandleInfoKHR; + +typedef struct VkExportSemaphoreWin32HandleInfoKHR { + VkStructureType sType; + const void* pNext; + const SECURITY_ATTRIBUTES* pAttributes; + DWORD dwAccess; + LPCWSTR name; +} VkExportSemaphoreWin32HandleInfoKHR; + +typedef struct VkD3D12FenceSubmitInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t waitSemaphoreValuesCount; + const uint64_t* pWaitSemaphoreValues; + uint32_t signalSemaphoreValuesCount; + const uint64_t* pSignalSemaphoreValues; +} VkD3D12FenceSubmitInfoKHR; + +typedef struct VkSemaphoreGetWin32HandleInfoKHR { + VkStructureType sType; + const void* pNext; + VkSemaphore semaphore; + VkExternalSemaphoreHandleTypeFlagBits handleType; +} VkSemaphoreGetWin32HandleInfoKHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkImportSemaphoreWin32HandleKHR)(VkDevice device, const VkImportSemaphoreWin32HandleInfoKHR* pImportSemaphoreWin32HandleInfo); +typedef VkResult (VKAPI_PTR *PFN_vkGetSemaphoreWin32HandleKHR)(VkDevice device, const VkSemaphoreGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkImportSemaphoreWin32HandleKHR( + VkDevice device, + const VkImportSemaphoreWin32HandleInfoKHR* pImportSemaphoreWin32HandleInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetSemaphoreWin32HandleKHR( + VkDevice device, + const VkSemaphoreGetWin32HandleInfoKHR* pGetWin32HandleInfo, + HANDLE* pHandle); +#endif + +#define VK_KHR_external_fence_win32 1 +#define VK_KHR_EXTERNAL_FENCE_WIN32_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_FENCE_WIN32_EXTENSION_NAME "VK_KHR_external_fence_win32" + +typedef struct VkImportFenceWin32HandleInfoKHR { + VkStructureType sType; + const void* pNext; + VkFence fence; + VkFenceImportFlags flags; + VkExternalFenceHandleTypeFlagBits handleType; + HANDLE handle; + LPCWSTR name; +} VkImportFenceWin32HandleInfoKHR; + +typedef struct VkExportFenceWin32HandleInfoKHR { + VkStructureType sType; + const void* pNext; + const SECURITY_ATTRIBUTES* pAttributes; + DWORD dwAccess; + LPCWSTR name; +} VkExportFenceWin32HandleInfoKHR; + +typedef struct VkFenceGetWin32HandleInfoKHR { + VkStructureType sType; + const void* pNext; + VkFence fence; + VkExternalFenceHandleTypeFlagBits handleType; +} VkFenceGetWin32HandleInfoKHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkImportFenceWin32HandleKHR)(VkDevice device, const VkImportFenceWin32HandleInfoKHR* pImportFenceWin32HandleInfo); +typedef VkResult (VKAPI_PTR *PFN_vkGetFenceWin32HandleKHR)(VkDevice device, const VkFenceGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkImportFenceWin32HandleKHR( + VkDevice device, + const VkImportFenceWin32HandleInfoKHR* pImportFenceWin32HandleInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceWin32HandleKHR( + VkDevice device, + const VkFenceGetWin32HandleInfoKHR* pGetWin32HandleInfo, + HANDLE* pHandle); +#endif + +#define VK_NV_external_memory_win32 1 +#define VK_NV_EXTERNAL_MEMORY_WIN32_SPEC_VERSION 1 +#define VK_NV_EXTERNAL_MEMORY_WIN32_EXTENSION_NAME "VK_NV_external_memory_win32" + +typedef struct VkImportMemoryWin32HandleInfoNV { + VkStructureType sType; + const void* pNext; + 
VkExternalMemoryHandleTypeFlagsNV handleType; + HANDLE handle; +} VkImportMemoryWin32HandleInfoNV; + +typedef struct VkExportMemoryWin32HandleInfoNV { + VkStructureType sType; + const void* pNext; + const SECURITY_ATTRIBUTES* pAttributes; + DWORD dwAccess; +} VkExportMemoryWin32HandleInfoNV; + + +typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryWin32HandleNV)(VkDevice device, VkDeviceMemory memory, VkExternalMemoryHandleTypeFlagsNV handleType, HANDLE* pHandle); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryWin32HandleNV( + VkDevice device, + VkDeviceMemory memory, + VkExternalMemoryHandleTypeFlagsNV handleType, + HANDLE* pHandle); +#endif + +#define VK_NV_win32_keyed_mutex 1 +#define VK_NV_WIN32_KEYED_MUTEX_SPEC_VERSION 1 +#define VK_NV_WIN32_KEYED_MUTEX_EXTENSION_NAME "VK_NV_win32_keyed_mutex" + +typedef struct VkWin32KeyedMutexAcquireReleaseInfoNV { + VkStructureType sType; + const void* pNext; + uint32_t acquireCount; + const VkDeviceMemory* pAcquireSyncs; + const uint64_t* pAcquireKeys; + const uint32_t* pAcquireTimeoutMilliseconds; + uint32_t releaseCount; + const VkDeviceMemory* pReleaseSyncs; + const uint64_t* pReleaseKeys; +} VkWin32KeyedMutexAcquireReleaseInfoNV; + + + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/sources/pvrsdk/Include/vulkan/vulkan_xcb.h b/sources/pvrsdk/Include/vulkan/vulkan_xcb.h new file mode 100755 index 00000000..ba036006 --- /dev/null +++ b/sources/pvrsdk/Include/vulkan/vulkan_xcb.h @@ -0,0 +1,66 @@ +#ifndef VULKAN_XCB_H_ +#define VULKAN_XCB_H_ 1 + +#ifdef __cplusplus +extern "C" { +#endif + +/* +** Copyright (c) 2015-2018 The Khronos Group Inc. +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. 
+** +*/ + + +#define VK_KHR_xcb_surface 1 +#define VK_KHR_XCB_SURFACE_SPEC_VERSION 6 +#define VK_KHR_XCB_SURFACE_EXTENSION_NAME "VK_KHR_xcb_surface" + +typedef VkFlags VkXcbSurfaceCreateFlagsKHR; + +typedef struct VkXcbSurfaceCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkXcbSurfaceCreateFlagsKHR flags; + xcb_connection_t* connection; + xcb_window_t window; +} VkXcbSurfaceCreateInfoKHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkCreateXcbSurfaceKHR)(VkInstance instance, const VkXcbSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); +typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, xcb_connection_t* connection, xcb_visualid_t visual_id); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateXcbSurfaceKHR( + VkInstance instance, + const VkXcbSurfaceCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); + +VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceXcbPresentationSupportKHR( + VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex, + xcb_connection_t* connection, + xcb_visualid_t visual_id); +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/sources/pvrsdk/Include/vulkan/vulkan_xlib.h b/sources/pvrsdk/Include/vulkan/vulkan_xlib.h new file mode 100755 index 00000000..e1d967e0 --- /dev/null +++ b/sources/pvrsdk/Include/vulkan/vulkan_xlib.h @@ -0,0 +1,66 @@ +#ifndef VULKAN_XLIB_H_ +#define VULKAN_XLIB_H_ 1 + +#ifdef __cplusplus +extern "C" { +#endif + +/* +** Copyright (c) 2015-2018 The Khronos Group Inc. +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. 
+** +*/ + + +#define VK_KHR_xlib_surface 1 +#define VK_KHR_XLIB_SURFACE_SPEC_VERSION 6 +#define VK_KHR_XLIB_SURFACE_EXTENSION_NAME "VK_KHR_xlib_surface" + +typedef VkFlags VkXlibSurfaceCreateFlagsKHR; + +typedef struct VkXlibSurfaceCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkXlibSurfaceCreateFlagsKHR flags; + Display* dpy; + Window window; +} VkXlibSurfaceCreateInfoKHR; + + +typedef VkResult (VKAPI_PTR *PFN_vkCreateXlibSurfaceKHR)(VkInstance instance, const VkXlibSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); +typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, Display* dpy, VisualID visualID); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateXlibSurfaceKHR( + VkInstance instance, + const VkXlibSurfaceCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); + +VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceXlibPresentationSupportKHR( + VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex, + Display* dpy, + VisualID visualID); +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/sources/pvrsdk/Include/vulkan/vulkan_xlib_xrandr.h b/sources/pvrsdk/Include/vulkan/vulkan_xlib_xrandr.h new file mode 100755 index 00000000..117d0179 --- /dev/null +++ b/sources/pvrsdk/Include/vulkan/vulkan_xlib_xrandr.h @@ -0,0 +1,54 @@ +#ifndef VULKAN_XLIB_XRANDR_H_ +#define VULKAN_XLIB_XRANDR_H_ 1 + +#ifdef __cplusplus +extern "C" { +#endif + +/* +** Copyright (c) 2015-2018 The Khronos Group Inc. +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. 
+** +*/ + + +#define VK_EXT_acquire_xlib_display 1 +#define VK_EXT_ACQUIRE_XLIB_DISPLAY_SPEC_VERSION 1 +#define VK_EXT_ACQUIRE_XLIB_DISPLAY_EXTENSION_NAME "VK_EXT_acquire_xlib_display" + +typedef VkResult (VKAPI_PTR *PFN_vkAcquireXlibDisplayEXT)(VkPhysicalDevice physicalDevice, Display* dpy, VkDisplayKHR display); +typedef VkResult (VKAPI_PTR *PFN_vkGetRandROutputDisplayEXT)(VkPhysicalDevice physicalDevice, Display* dpy, RROutput rrOutput, VkDisplayKHR* pDisplay); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkAcquireXlibDisplayEXT( + VkPhysicalDevice physicalDevice, + Display* dpy, + VkDisplayKHR display); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetRandROutputDisplayEXT( + VkPhysicalDevice physicalDevice, + Display* dpy, + RROutput rrOutput, + VkDisplayKHR* pDisplay); +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/sources/pvrsdk/lib/OSX_x86/libEGL.dylib b/sources/pvrsdk/lib/OSX_x86/libEGL.dylib new file mode 100755 index 00000000..768d5f74 Binary files /dev/null and b/sources/pvrsdk/lib/OSX_x86/libEGL.dylib differ diff --git a/sources/pvrsdk/lib/OSX_x86/libGLESv1_CM.dylib b/sources/pvrsdk/lib/OSX_x86/libGLESv1_CM.dylib new file mode 100755 index 00000000..16c6c158 Binary files /dev/null and b/sources/pvrsdk/lib/OSX_x86/libGLESv1_CM.dylib differ diff --git a/sources/pvrsdk/lib/OSX_x86/libGLESv2.dylib b/sources/pvrsdk/lib/OSX_x86/libGLESv2.dylib new file mode 100755 index 00000000..e0857ad4 Binary files /dev/null and b/sources/pvrsdk/lib/OSX_x86/libGLESv2.dylib differ diff --git a/sources/pvrsdk/lib/Windows_x86_32/libEGL.dll b/sources/pvrsdk/lib/Windows_x86_32/libEGL.dll new file mode 100755 index 00000000..0d1604e2 Binary files /dev/null and b/sources/pvrsdk/lib/Windows_x86_32/libEGL.dll differ diff --git a/sources/pvrsdk/lib/Windows_x86_32/libEGL.lib b/sources/pvrsdk/lib/Windows_x86_32/libEGL.lib new file mode 100755 index 00000000..350efa7a Binary files /dev/null and b/sources/pvrsdk/lib/Windows_x86_32/libEGL.lib differ diff --git a/sources/pvrsdk/lib/Windows_x86_32/libGLES_CM.dll b/sources/pvrsdk/lib/Windows_x86_32/libGLES_CM.dll new file mode 100755 index 00000000..b22f7edb Binary files /dev/null and b/sources/pvrsdk/lib/Windows_x86_32/libGLES_CM.dll differ diff --git a/sources/pvrsdk/lib/Windows_x86_32/libGLES_CM.lib b/sources/pvrsdk/lib/Windows_x86_32/libGLES_CM.lib new file mode 100755 index 00000000..9f68ee3d Binary files /dev/null and b/sources/pvrsdk/lib/Windows_x86_32/libGLES_CM.lib differ diff --git a/sources/pvrsdk/lib/Windows_x86_32/libGLESv2.dll b/sources/pvrsdk/lib/Windows_x86_32/libGLESv2.dll new file mode 100755 index 00000000..2057f9ea Binary files /dev/null and b/sources/pvrsdk/lib/Windows_x86_32/libGLESv2.dll differ diff --git a/sources/pvrsdk/lib/Windows_x86_32/libGLESv2.lib b/sources/pvrsdk/lib/Windows_x86_32/libGLESv2.lib new file mode 100755 index 00000000..95621860 Binary files /dev/null and b/sources/pvrsdk/lib/Windows_x86_32/libGLESv2.lib differ diff --git a/sources/pvrsdk/lib/Windows_x86_64/libEGL.dll b/sources/pvrsdk/lib/Windows_x86_64/libEGL.dll new file mode 100755 index 00000000..4a04df2a Binary files /dev/null and b/sources/pvrsdk/lib/Windows_x86_64/libEGL.dll differ diff --git a/sources/pvrsdk/lib/Windows_x86_64/libEGL.lib b/sources/pvrsdk/lib/Windows_x86_64/libEGL.lib new file mode 100755 index 00000000..5f4e0a40 Binary files /dev/null and b/sources/pvrsdk/lib/Windows_x86_64/libEGL.lib differ diff --git a/sources/pvrsdk/lib/Windows_x86_64/libGLES_CM.dll b/sources/pvrsdk/lib/Windows_x86_64/libGLES_CM.dll new file mode 100755 
index 00000000..0141cb97 Binary files /dev/null and b/sources/pvrsdk/lib/Windows_x86_64/libGLES_CM.dll differ diff --git a/sources/pvrsdk/lib/Windows_x86_64/libGLES_CM.lib b/sources/pvrsdk/lib/Windows_x86_64/libGLES_CM.lib new file mode 100755 index 00000000..3484456a Binary files /dev/null and b/sources/pvrsdk/lib/Windows_x86_64/libGLES_CM.lib differ diff --git a/sources/pvrsdk/lib/Windows_x86_64/libGLESv2.dll b/sources/pvrsdk/lib/Windows_x86_64/libGLESv2.dll new file mode 100755 index 00000000..177f5613 Binary files /dev/null and b/sources/pvrsdk/lib/Windows_x86_64/libGLESv2.dll differ diff --git a/sources/pvrsdk/lib/Windows_x86_64/libGLESv2.lib b/sources/pvrsdk/lib/Windows_x86_64/libGLESv2.lib new file mode 100755 index 00000000..d0a7ce91 Binary files /dev/null and b/sources/pvrsdk/lib/Windows_x86_64/libGLESv2.lib differ diff --git a/sources/tommyds/CMakeLists.txt b/sources/tommyds/CMakeLists.txt new file mode 100755 index 00000000..46b3dfb2 --- /dev/null +++ b/sources/tommyds/CMakeLists.txt @@ -0,0 +1,37 @@ +message(STATUS "Add dependence target: tommyds...") + +# Define target name +set(TARGET_NAME tommyds) + +set(INCLUDE_FILES + ${COCOS_EXTERNAL_PATH}/tommyds +) + +include_directories( ${INCLUDE_FILES} ) + +set (HEADER_FILES + tommychain.h + tommyhash.h + tommyhashdyn.h + tommylist.h + tommytypes.h +) + +set (SOURCE_FILES + tommyhash.c + tommyhashdyn.c + tommylist.c +) + +add_library(${TARGET_NAME} STATIC ${HEADER_FILES} ${SOURCE_FILES}) + +if(MSVC) + set_target_properties(${TARGET_NAME} PROPERTIES COMPILE_FLAGS "/wd4018 /wd4244 /wd4819 /wd4996") +endif() + +if(NOT COCOS_PLATFORM_IOS) + set_target_properties(${TARGET_NAME} PROPERTIES OUTPUT_NAME_DEBUG ${TARGET_NAME}_d) +endif() +set_target_properties(${TARGET_NAME} PROPERTIES FOLDER External) + +message(STATUS "${TARGET_NAME} Configuration completed.") diff --git a/sources/tommyds/tommy.c b/sources/tommyds/tommy.c new file mode 100755 index 00000000..a05013ae --- /dev/null +++ b/sources/tommyds/tommy.c @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2010, Andrea Mazzoleni. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "tommyhash.c"
+#include "tommyalloc.c"
+#include "tommyarray.c"
+#include "tommyarrayof.c"
+#include "tommyarrayblk.c"
+#include "tommyarrayblkof.c"
+#include "tommylist.c"
+#include "tommytree.c"
+#include "tommytrie.c"
+#include "tommytrieinp.c"
+#include "tommyhashtbl.c"
+#include "tommyhashdyn.c"
+#include "tommyhashlin.c"
+
diff --git a/sources/tommyds/tommy.h b/sources/tommyds/tommy.h
new file mode 100755
index 00000000..08b0a755
--- /dev/null
+++ b/sources/tommyds/tommy.h
@@ -0,0 +1,763 @@
+/*
+ * Copyright (c) 2010, Andrea Mazzoleni. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \mainpage
+ * \section Introduction
+ * Tommy is a C library of array, hashtable and trie data structures,
+ * designed for high performance and an easy to use interface.
+ *
+ * It's faster than similar libraries like
+ * rbtree, judy, googlebtree, stxbtree, khash, uthash,
+ * nedtrie, judyarray, concurrencykit and others.
+ * Only googledensehash is a real competitor for Tommy.
+ *
+ * The data structures provided are:
+ *
+ * - ::tommy_list - A doubly linked list.
+ * - ::tommy_array, ::tommy_arrayof - A linear array.
+ *   It doesn't fragment the heap.
+ * - ::tommy_arrayblk, ::tommy_arrayblkof - A blocked linear array.
+ *   It doesn't fragment the heap and it minimizes the space occupation.
+ * - ::tommy_hashtable - A fixed size chained hashtable.
+ * - ::tommy_hashdyn - A dynamic chained hashtable.
+ * - ::tommy_hashlin - A linear chained hashtable.
+ *   It doesn't have the resizing delay problem and it doesn't fragment
+ *   the heap.
+ * - ::tommy_trie - A trie optimized for cache utilization.
+ * - ::tommy_trie_inplace - A trie completely inplace.
+ * - ::tommy_tree - A tree to keep elements in order.
+ *
+ * The most interesting are ::tommy_array, ::tommy_hashdyn, ::tommy_hashlin, ::tommy_trie and ::tommy_trie_inplace.
+ *
+ * The official site of TommyDS is http://www.tommyds.it/.
+ *
+ * \section Use
+ *
+ * All the Tommy containers store pointers to generic objects, associated with an
+ * integer value that can be a key or a hash value.
+ *
+ * They are semantically equivalent to the C++ multimap\<void*\>
+ * and unordered_multimap\<void*\>.
+ *
+ * An object, to be inserted in a container, should contain a node of type ::tommy_node.
+ * This node holds a pointer to the object itself in the tommy_node::data field,
+ * the key used to identify the object in the tommy_node::key field, and other fields used
+ * by the containers.
+ *
+ * This is a typical object declaration:
+ * \code
+ * struct object {
+ *     // other fields
+ *     tommy_node node;
+ * };
+ * \endcode
+ *
+ * To insert an object in a container, you have to provide the address of the embedded node,
+ * the address of the object and the value of the key.
+ * \code
+ * int key_to_insert = 1;
+ * struct object* obj = malloc(sizeof(struct object));
+ * ...
+ * tommy_trie_insert(..., &obj->node, obj, key_to_insert);
+ * \endcode
+ *
+ * To search for an object you have to provide the key and call the search function.
+ * \code
+ * int key_to_find = 1;
+ * struct object* obj = tommy_trie_search(..., key_to_find);
+ * if (obj) {
+ *     // found
+ * }
+ * \endcode
+ *
+ * To access all the objects with the same key you have to iterate over the bucket
+ * assigned to that key.
+ * \code
+ * int key_to_find = 1;
+ * tommy_trie_node* i = tommy_trie_bucket(..., key_to_find);
+ *
+ * while (i) {
+ *     struct object* obj = i->data; // gets the object pointer
+ *
+ *     printf("%d\n", obj->value); // process the object
+ *
+ *     i = i->next; // goes to the next element
+ * }
+ * \endcode
+ *
+ * To remove an object you have to provide the key and call the remove function.
+ * \code
+ * int key_to_remove = 1;
+ * struct object* obj = tommy_trie_remove(..., key_to_remove);
+ * if (obj) {
+ *     // found
+ *     free(obj); // frees the object allocated memory
+ * }
+ * \endcode
+ *
+ * When dealing with hashtables, instead of the key you have to provide the hash
+ * value of the object, and a compare function able to differentiate objects with
+ * the same hash value.
+ * To compute the hash value, you can use the generic tommy_hash_u32() function,
+ * or the specialized integer hash function tommy_inthash_u32().
+ * A minimal hashtable usage sketch is shown in the \ref random "Random order"
+ * section of the \ref benchmark page.
+ *
+ * \section Features
+ *
+ * Tommy is fast and easy to use.
+ *
+ * Tommy is portable to all platforms and operating systems.
+ *
+ * Tommy containers support multiple elements with the same key.
+ *
+ * Tommy containers keep the original insertion order of elements with equal keys.
+ *
+ * Tommy is released with the \ref license "2-clause BSD license".
+ *
+ * See the \ref design page for more details and limitations.
+ *
+ * \section Performance
+ * Here you can see some timings comparing Tommy with other notable implementations.
+ * The Hit graph shows the time required for searching random objects
+ * with a key.
+ * The Change graph shows the time required for searching, removing and
+ * reinserting random objects with a different key value.
+ *
+ * Times are expressed in nanoseconds for each element, and lower is better.
+ *
+ * For some reference numbers, you can check Latency numbers every programmer should know.
+ *
+ * A complete analysis is available in the \ref benchmark page.
+ *
+ * (Hit and Change timing graphs appear here in the rendered documentation)
+ *
+ * \page benchmark Tommy Benchmarks
+ *
+ * To evaluate Tommy's performance, an extensive benchmark was done,
+ * comparing it to the best data structure libraries available.
+ *
+ * Specifically we test:
+ * - ::tommy_hashtable - Fixed size chained hashtable.
+ * - ::tommy_hashdyn - Dynamic chained hashtable.
+ * - ::tommy_hashlin - Linear chained hashtable.
+ * - ::tommy_trie - Trie optimized for cache usage.
+ * - ::tommy_trie_inplace - Trie completely inplace.
+ * - rbtree - Red-black tree by Jason Evans.
+ * - nedtrie - Binary trie inplace by Niall Douglas.
+ * - khash - Dynamic open addressing hashtable by Attractive Chaos.
+ * - uthash - Dynamic chaining hashtable by Troy D. Hanson.
+ * - judy - Burst trie (JudyL) by Doug Baskins.
+ * - judyarray - Burst trie by Karl Malbrain.
+ * - googledensehash - Dynamic open addressing hashtable by Craig Silverstein at Google.
+ * - googlebtree - Btree by Google.
+ * - stxbtree - STX Btree by Timo Bingmann.
+ * - c++unordered_map - C++ STL unordered_map<> template.
+ * - c++map - C++ STL map<> template.
+ * - tesseract - Binary Search Tesseract by Gregorius van den Hoven.
+ * - googlelibchash - LibCHash by Craig Silverstein at Google.
+ * - libdynamic - Hash set by Fredrik Widlund.
+ * - concurrencykit - Non-blocking hash set by Samy Al Bahra.
+ *
+ * Note that googlelibchash and concurrencykit are not shown in the graphs
+ * because they present a lot of spikes. See the \ref notes at the end.
+ *
+ * \section thebenchmark The Benchmark
+ *
+ * The benchmark consists of storing a set of N pointers to objects and
+ * searching them using integer keys.
+ *
+ * Compared to the case of mapping integers to integers, mapping pointers to
+ * objects means that the pointers are also dereferenced, to simulate the
+ * object access, resulting in additional cache misses.
+ * This gives an advantage to implementations that store information in the
+ * objects themselves, as the additional cache misses are already implicit.
+ *
+ * The tests done are:
+ * - Insert: Insert all the objects starting with an empty container.
+ * - Change: Find and remove one object and reinsert it with a different key, repeated for all the objects.
+ * - Hit: Find with success all the objects and dereference them.
+ * - Miss: Find with failure all the objects.
+ * - Remove: Remove all the objects and dereference them.
+ *
+ * The Change, Hit and Miss tests always operate with N
+ * objects in the containers.
+ * The Insert test starts with an empty container, and the Remove
+ * test ends with an empty container.
+ * The objects are always dereferenced, as we are supposing to use them. This
+ * happens even in the remove case, as we are supposing to deallocate them.
+ *
+ * All the objects are preallocated in the heap, and the allocation and deallocation
+ * time is not included in the test.
+ *
+ * The objects contain an integer value field used for consistency checks,
+ * an unused payload field of 16 bytes, and any other data required by the
+ * data structure.
+ *
+ * The objects are identified and stored using integer and unique keys.
+ * The key domain used is dense, and it's defined by the set
+ * of N even numbers starting from 0x80000000 to 0x80000000+2*N.
+ *
+ * The use of even numbers leaves missing keys inside the domain for
+ * the Miss and Change tests.
+ * Such tests use the key domain defined by the set of N odd numbers
+ * starting from 0x80000000+1 to 0x80000000+2*N+1.
+ * Note that using additional keys at the corners of the domain would have given
+ * an unfair advantage to tries and trees as they implicitly keep track of the
+ * maximum and minimum key value inserted.
+ *
+ * The use of the 0x80000000 base allows testing a key domain not necessarily
+ * starting at 0. Using a 0 base would have given an unfair advantage to some
+ * implementations handling it as a special case.
+ *
+ * For all the hashtables the keys are hashed using the tommy_inthash_u32()
+ * function that ensures a uniform distribution.
+ * This hash function is also
+ * reversible, meaning that no collision is going to be caused by hashing the
+ * keys. For tries and trees the keys are not hashed, and are used directly.
+ *
+ * The tests are repeated using keys in Random mode and in Forward
+ * mode. In the forward mode the key values are used in order from the lowest
+ * to the highest.
+ * In the random mode the key values are used in a completely random order.
+ * In the Change test in forward mode, each object is reinserted using
+ * the previous key incremented by 1. In random mode each object is reinserted
+ * using a completely different and uncorrelated key.
+ *
+ * The forward order favors tries and trees as they use the key directly
+ * and they have a cache advantage when using consecutive keys.
+ * The random order favors hashtables, as the hash function already
+ * randomizes the key. Usually real use cases are in between, and the random
+ * one is the worst case.
+ *
+ * \section result Results
+ *
+ * The most significant tests depend on your data usage model, but if in doubt,
+ * you can look at Random Hit and Random Change.
+ * They represent the real world worst condition.
+ *
+ * (Random Hit graph appears here in the rendered documentation)
+ *
+ * In the Random Hit graph you can see a vertical split at the 100,000
+ * elements limit. Before this limit the cache of a modern processor is able to
+ * contain most of the data, and it allows very fast access with almost all
+ * data structures.
+ * After this limit, the number of cache misses is the dominant factor, and
+ * the curve depends mainly on the number of cache misses required.
+ *
+ * rbtree and nedtrie grow as log2(N) as they have two branches on each node,
+ * ::tommy_trie_inplace grows as log4(N), ::tommy_trie as log8(N) and hashtables
+ * are almost constant and don't grow.
+ * For ::tommy_trie_inplace and ::tommy_trie you can change the growth curve
+ * by configuring a different number of branches per node.
+ *
+ * (Random Change graph appears here in the rendered documentation)
+ *
+ * The Random Change graph confirms the vertical split at the 100,000
+ * elements limit. It also shows that hashtables are almost unbeatable with
+ * random access.
+ *
+ * \section random Random order
+ * Here you can see the whole Random test results on different platforms.
+ *
+ * In the Random test, hashtables almost always win, tries come
+ * second, and trees last.
+ *
+ * The best choices are ::tommy_hashdyn, ::tommy_hashlin, and googledensehash,
+ * with ::tommy_hashlin having the advantage of being real-time friendly and not
+ * increasing the heap fragmentation.
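+ *
+ * The following is a minimal usage sketch of ::tommy_hashdyn, added here for
+ * illustration (it is not part of the original TommyDS documentation). It shows
+ * the insert, search and remove steps with a hashed key and a compare function,
+ * assuming a struct object with an int value field and the embedded
+ * ::tommy_node, as in the \ref Use section examples:
+ * \code
+ * // compare function: returns 0 when the searched key matches the object
+ * int compare(const void* arg, const void* obj)
+ * {
+ *     return *(const int*)arg != ((const struct object*)obj)->value;
+ * }
+ *
+ * tommy_hashdyn hash;
+ * tommy_hashdyn_init(&hash);
+ *
+ * // insert, using the hash of the key
+ * struct object* obj = malloc(sizeof(struct object));
+ * obj->value = 1;
+ * tommy_hashdyn_insert(&hash, &obj->node, obj, tommy_inthash_u32(obj->value));
+ *
+ * // search, using the same hash and the compare function
+ * int key_to_find = 1;
+ * struct object* found = tommy_hashdyn_search(&hash, compare, &key_to_find, tommy_inthash_u32(key_to_find));
+ *
+ * // remove and deallocate
+ * struct object* removed = tommy_hashdyn_remove(&hash, compare, &key_to_find, tommy_inthash_u32(key_to_find));
+ * if (removed)
+ *     free(removed);
+ *
+ * tommy_hashdyn_done(&hash);
+ * \endcode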
+ *
+ * (per-platform Random test result graphs appear here in the rendered documentation)
+ *
+ * \section forward Forward order
+ * Here you can see the whole Forward test results on different platforms.
+ *
+ * In the Forward test, tries are the winners. Hashtables are competitive
+ * until the cache limit, then they lose against tries. Trees are the slowest.
+ *
+ * The best choices are ::tommy_trie and ::tommy_trie_inplace, where ::tommy_trie is
+ * a bit faster, and ::tommy_trie_inplace doesn't require a custom allocator.
+ *
+ * Note that hashtables too are faster in forward order than in random order. This may
+ * seem a bit surprising as the hash function randomizes the access even with
+ * consecutive keys. It happens because the objects are allocated in consecutive
+ * memory, and accessing them in order improves the cache utilization, even if
+ * the hashed key is random.
+ *
+ * Note that you can make hashtables reach trie performance by tweaking the hash
+ * function to map nearby keys to objects allocated nearby.
+ * This is possible if you know the distribution of keys in advance.
+ * For example, in the benchmark you could use something like:
+ * \code
+ * #define hash(v) tommy_inthash_u32(v & ~0xF) + (v & 0xF)
+ * \endcode
+ * making keys that differ only in the lowest bits have hashes with the same
+ * property, resulting in objects stored nearby and improved cache utilization.
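+ *
+ * As a small illustrative sketch (not in the original text), such a tweaked
+ * hash macro would simply replace the plain tommy_inthash_u32() call at insert
+ * and search time, reusing the compare function from the sketch in the
+ * \ref random "Random order" section:
+ * \code
+ * tommy_hashdyn_insert(&bag, &obj->node, obj, hash(obj->value));
+ * struct object* found = tommy_hashdyn_search(&bag, compare, &key, hash(key));
+ * \endcode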
+ *
+ * (per-platform Forward test result graphs appear here in the rendered documentation)
+ * + * \section size Size + * Here you can see the memory usage of the different data structures. + * + * + *
+ *
+ * (memory usage graphs appear here in the rendered documentation)
+ *
+ * \section code Code
+ *
+ * The compilers used in the benchmark are:
+ * - gcc 4.9.2 in Linux with options: -O3 -march=nehalem
+ * - Visual C 2012 in Windows with options: /Ox /Oy- /GL /GS- /arch:SSE2
+ *
+ * The following is pseudo code of the benchmark used. In this case it's written
+ * for the C++ unordered_map.
+ *
+ * \code
+ * #define N 10000000 // Number of elements
+ * #define PAYLOAD 16 // Size of the object
+ *
+ * // Basic object inserted in the collection
+ * struct obj {
+ *     unsigned value; // Key used for searching
+ *     char payload[PAYLOAD];
+ * };
+ *
+ * // Custom hash function to avoid using the STL one
+ * class custom_hash {
+ * public:
+ *     unsigned operator()(unsigned key) const { return tommy_inthash_u32(key); }
+ * };
+ *
+ * // Map collection from "unsigned" to "pointer to object"
+ * typedef std::unordered_map<unsigned, obj*, custom_hash> bag_t;
+ * bag_t bag;
+ *
+ * // Preallocate objects
+ * obj* OBJ = new obj[N];
+ *
+ * // Keys used for inserting and searching elements
+ * unsigned INSERT[N];
+ * unsigned SEARCH[N];
+ *
+ * // Initialize the keys
+ * for(i=0;i<N;++i) {
+ *     INSERT[i] = 0x80000000 + i * 2;
+ *     SEARCH[i] = 0x80000000 + i * 2;
+ * }
+ *
+ * // In random mode the key orders are shuffled
+ * if (random_mode) {
+ *     std::random_shuffle(INSERT, INSERT + N);
+ *     std::random_shuffle(SEARCH, SEARCH + N);
+ * }
+ * \endcode
+ *
+ * \subsection insert Insert benchmark
+ * \code
+ * for(i=0;i<N;++i) {
+ *     // Create a new element with the key
+ *     key = INSERT[i];
+ *     obj* element = &OBJ[i];
+ *     element->value = key;
+ *
+ *     // Insert it
+ *     bag[key] = element;
+ * }
+ * \endcode
+ *
+ * \subsection change Change benchmark
+ * \code
+ * for(i=0;i<N;++i) {
+ *     // Search the element
+ *     key = SEARCH[i];
+ *     bag_t::iterator j = bag.find(key);
+ *     if (j == bag.end())
+ *         abort();
+ *
+ *     // Remove it
+ *     obj* element = j->second;
+ *     bag.erase(j);
+ *
+ *     // Reinsert the element with a new key
+ *     // Use +1 in the key to ensure that the new key is unique
+ *     key = INSERT[i] + 1;
+ *     element->value = key;
+ *     bag[key] = element;
+ * }
+ * \endcode
+ *
+ * \subsection hit Hit benchmark
+ * \code
+ * for(i=0;i<N;++i) {
+ *     // Search the element
+ *     key = SEARCH[i];
+ *     bag_t::iterator j = bag.find(key);
+ *     if (j == bag.end())
+ *         abort();
+ *
+ *     // Dereference it, checking the consistency value
+ *     obj* element = j->second;
+ *     if (element->value != key)
+ *         abort();
+ * }
+ * \endcode
+ *
+ * \subsection miss Miss benchmark
+ * \code
+ * for(i=0;i<N;++i) {
+ *     // Search with a key not present in the container
+ *     key = SEARCH[i] + 1;
+ *     bag_t::iterator j = bag.find(key);
+ *     if (j != bag.end())
+ *         abort();
+ * }
+ * \endcode
+ *
+ * \section others Other benchmarks
+ * Here are some links to other performance comparisons:
+ *
+ * Comparison of Hash Table Libraries
+ *
+ * Hash Table Benchmarks
+ *
+ * \section notes Notes
+ *
+ * Here are some notes about the tested data structures that are not part of Tommy.
+ *
+ * \subsection googlelibchash Google C libchash
+ * It's the C implementation located in the experimental/ directory of the googlesparsehash archive.
+ * It has very bad performance in the Change test for some N values,
+ * producing a timing graph with a lot of spikes.
+ * The C++ version doesn't suffer from this problem.
+ *
+ * \subsection googledensehash Google C++ densehash
+ * It doesn't release memory on deletion.
+ * To avoid an unfair advantage in the Remove test, we force a periodic
+ * resize by calling resize(0) after any deallocation.
+ * The resize is executed when the load factor is lower than 20%.
+ *
+ * \subsection khash khash
+ * It doesn't release memory on deletion. This gives an unfair advantage on the Remove test.
+ *
+ * \subsection nedtrie nedtrie
+ * I've found a crash bug when inserting keys with the 0 value.
+ * The fix of this issue is now in the nedtries github.
+ * We do not use the C++ implementation as it doesn't compile with gcc 4.4.3.
+ *
+ * \subsection judy Judy
+ * Sometimes it has bad performance on some specific platforms
+ * and for some specific input data sizes.
+ * This makes it difficult to predict the performance, as it is usually good until
+ * you get one of these cases. One graph, for example, shows a big replicable
+ * spike at 50,000 elements.
+ *
+ * \subsection ck Concurrency Kit
+ * It has very bad performance in the Change test for some N values,
+ * producing a timing graph with a lot of spikes.
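+ *
+ * As a companion to the unordered_map pseudo code above, the following sketch
+ * (an illustration added here, not the original benchmark source) shows how the
+ * Insert and Hit loops map to ::tommy_hashdyn. The node required by the container
+ * is embedded in the object itself, so no extra allocation happens on insert:
+ * \code
+ * // The object embeds the tommy_node, as the "any other data
+ * // required by the data structure" mentioned above
+ * struct obj {
+ *     unsigned value;
+ *     char payload[PAYLOAD];
+ *     tommy_node node;
+ * };
+ *
+ * // Compare function, returning 0 on match
+ * int compare(const void* arg, const void* object)
+ * {
+ *     return *(const unsigned*)arg != ((const struct obj*)object)->value;
+ * }
+ *
+ * tommy_hashdyn bag;
+ * tommy_hashdyn_init(&bag);
+ *
+ * // Insert benchmark
+ * for(i=0;i<N;++i) {
+ *     key = INSERT[i];
+ *     OBJ[i].value = key;
+ *     tommy_hashdyn_insert(&bag, &OBJ[i].node, &OBJ[i], tommy_inthash_u32(key));
+ * }
+ *
+ * // Hit benchmark
+ * for(i=0;i<N;++i) {
+ *     key = SEARCH[i];
+ *     struct obj* element = tommy_hashdyn_search(&bag, compare, &key, tommy_inthash_u32(key));
+ *     if (!element || element->value != key)
+ *         abort();
+ * }
+ * \endcode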
+ *
+ * \page multiindex Tommy Multi Indexing
+ *
+ * Tommy provides only partial iterator support with the "foreach" functions.
+ * If you need real iterators you have to insert all the objects also in a ::tommy_list,
+ * and use the list as an iterator.
+ *
+ * This technique allows you to keep track of the insertion order with the list,
+ * and provides more search possibilities, using different data structures for
+ * different search keys.
+ *
+ * See the next example, where objects are inserted in a ::tommy_list and in
+ * two ::tommy_hashdyn tables using different keys.
+ *
+ * \code
+ * struct object {
+ *     // data fields
+ *     int value_0;
+ *     int value_1;
+ *
+ *     // for containers
+ *     tommy_node list_node; // node for the list
+ *     tommy_node hash_node_0; // node for the first hash
+ *     tommy_node hash_node_1; // node for the second hash
+ * };
+ *
+ * // search function for value_1
+ * int search_1(const void* arg, const void* obj)
+ * {
+ *     return *(const int*)arg != ((const struct object*)obj)->value_1;
+ * }
+ *
+ * tommy_hashdyn hash_0;
+ * tommy_hashdyn hash_1;
+ * tommy_list list;
+ *
+ * // initializes the hash tables and the list
+ * tommy_hashdyn_init(&hash_0);
+ * tommy_hashdyn_init(&hash_1);
+ * tommy_list_init(&list);
+ *
+ * ...
+ *
+ * // creates an object and inserts it
+ * struct object* obj = malloc(sizeof(struct object));
+ * obj->value_0 = ...;
+ * obj->value_1 = ...;
+ * // inserts in the first hash table
+ * tommy_hashdyn_insert(&hash_0, &obj->hash_node_0, obj, tommy_inthash_u32(obj->value_0));
+ * // inserts in the second hash table
+ * tommy_hashdyn_insert(&hash_1, &obj->hash_node_1, obj, tommy_inthash_u32(obj->value_1));
+ * // inserts in the list
+ * tommy_list_insert_tail(&list, &obj->list_node, obj);
+ *
+ * ...
+ *
+ * // searches an object by value_1 and deletes it
+ * int value_to_find = ...;
+ * struct object* obj = tommy_hashdyn_search(&hash_1, search_1, &value_to_find, tommy_inthash_u32(value_to_find));
+ * if (obj) {
+ *     // if found removes all the references
+ *     tommy_hashdyn_remove_existing(&hash_0, &obj->hash_node_0);
+ *     tommy_hashdyn_remove_existing(&hash_1, &obj->hash_node_1);
+ *     tommy_list_remove_existing(&list, &obj->list_node);
+ * }
+ *
+ * ...
+ *
+ * // complex iterator logic
+ * tommy_node* i = tommy_list_head(&list);
+ * while (i != 0) {
+ *     // get the object
+ *     struct object* obj = i->data;
+ *     ...
+ *     // go to the next element
+ *     i = i->next;
+ *     ...
+ *     // go to the prev element
+ *     i = i->prev;
+ *     ...
+ * }
+ *
+ * ...
+ *
+ * // deallocates the objects iterating the list
+ * tommy_list_foreach(&list, free);
+ *
+ * // deallocates the hash tables
+ * tommy_hashdyn_done(&hash_0);
+ * tommy_hashdyn_done(&hash_1);
+ * \endcode
+ *
+ * \page design Tommy Design
+ *
+ * Tommy is designed to fulfill the need for generic data structures in the
+ * C language, providing at the same time high performance and a clean
+ * and easy to use interface.
+ *
+ * \section testing Testing
+ *
+ * Extensive automated tests are run with the runtime checker valgrind
+ * and the static analyzer clang
+ * to ensure the correctness of the library.
+ *
+ * The test suite has a code coverage of 100%,
+ * measured with lcov.
+ *
+ * \section limitations Limitations
+ *
+ * Tommy is not thread safe. You always have to provide thread safety using
+ * locks before calling any Tommy function.
+ *
+ * Tommy doesn't provide iterators over the implicit order defined by the data
+ * structures. To iterate over elements you must also insert them into a ::tommy_list,
+ * and use the list as an iterator. See the \ref multiindex example for more details.
+ * Note that this is a real limitation only for ::tommy_trie, as it's the only
+ * data structure defining a usable order.
+ *
+ * Tommy doesn't provide an error reporting mechanism for a malloc() failure.
+ * You have to provide it yourself, redefining malloc(), if you expect it to fail.
+ *
+ * Tommy assumes that a container never holds more than 2^32-1 elements.
+ *
+ * \section compromise Compromises
+ *
+ * Finding the right balance between efficiency and ease of use required some
+ * compromises. Most of them concern memory efficiency, and were made to avoid
+ * crippling the interface.
+ *
+ * The following is a list of such decisions.
+ *
+ * \subsection multi_key Multi key
+ * All the Tommy containers support the insertion of multiple elements with
+ * the same key, adding in each node a list of equal elements.
+ *
+ * They are the equivalent of the C++ associative containers multimap\<Key,T\>
+ * and unordered_multimap\<Key,T\>
+ * that allow duplicates of the same key.
+ *
+ * A more memory conservative approach is to not allow duplicated elements,
+ * removing the need for this list.
+ *
+ * \subsection data_pointer Data pointer
+ * The tommy_node::data field is present to allow search and remove functions to return
+ * directly a pointer to the element stored in the container.
+ *
+ * A more memory conservative approach is to require the user to compute
+ * the element pointer from the embedded node with a fixed displacement.
+ * For an example, see the Linux Kernel declaration of
+ * container_of().
+ *
+ * \subsection insertion_order Insertion order
+ * The list used for collisions is doubly linked to allow
+ * insertion of elements at the end of the list, keeping the
+ * insertion order of equal elements.
+ *
+ * A more memory conservative approach is to use a singly linked list,
+ * inserting elements only at the start of the list, losing the
+ * original insertion order.
+ *
+ * \subsection zero_list Zero terminated list
+ * The 0 terminated format of tommy_node::next is present to provide a forward
+ * iterator terminating in 0. This allows the user to write a simple iteration
+ * loop over the list of elements in the same bucket.
+ *
+ * A more efficient approach is to use a circular list, because operating on nodes
+ * in a circular list doesn't require managing the special terminating case when
+ * adding or removing elements.
+ *
+ * \page license Tommy License
+ * Tommy is released with a 2-clause BSD license.
+ *
+ * \code
+ * Copyright (c) 2010, Andrea Mazzoleni. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ * \endcode
+ */

+/** \file
+ * All in one include for Tommy.
+ */

+#ifdef __cplusplus
+extern "C" {
+#endif

+#include "tommytypes.h"
+#include "tommyhash.h"
+#include "tommyalloc.h"
+#include "tommyarray.h"
+#include "tommyarrayof.h"
+#include "tommyarrayblk.h"
+#include "tommyarrayblkof.h"
+#include "tommylist.h"
+#include "tommytree.h"
+#include "tommytrie.h"
+#include "tommytrieinp.h"
+#include "tommyhashtbl.h"
+#include "tommyhashdyn.h"
+#include "tommyhashlin.h"

+#ifdef __cplusplus
+}
+#endif

diff --git a/sources/tommyds/tommyalloc.c b/sources/tommyds/tommyalloc.c
new file mode 100755
index 00000000..0da604cb
--- /dev/null
+++ b/sources/tommyds/tommyalloc.c
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2010, Andrea Mazzoleni. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */

+#include "tommyalloc.h"

+/******************************************************************************/
+/* allocator */

+/**
+ * Basic allocation segment.
+ * Slightly smaller than a memory page, to leave a little room for the heap
+ * manager overhead, so that the whole segment may fit in a single memory page.
+ */
+#define TOMMY_ALLOCATOR_BLOCK_SIZE (4096 - 64)

+void tommy_allocator_init(tommy_allocator* alloc, tommy_size_t block_size, tommy_size_t align_size)
+{
+	/* setup the minimal alignment */
+	if (align_size < sizeof(void*))
+		align_size = sizeof(void*);

+	/* ensure that the block_size keeps the alignment */
+	if (block_size % align_size != 0)
+		block_size += align_size - block_size % align_size;

+	alloc->block_size = block_size;
+	alloc->align_size = align_size;

+	alloc->count = 0;
+	alloc->free_block = 0;
+	alloc->used_segment = 0;
+}

+/**
+ * Reset the allocator and free all the allocated segments.
+ */ +static void allocator_reset(tommy_allocator* alloc) +{ + tommy_allocator_entry* block = alloc->used_segment; + + while (block) { + tommy_allocator_entry* block_next = block->next; + tommy_free(block); + block = block_next; + } + + alloc->count = 0; + alloc->free_block = 0; + alloc->used_segment = 0; +} + +void tommy_allocator_done(tommy_allocator* alloc) +{ + allocator_reset(alloc); +} + +void* tommy_allocator_alloc(tommy_allocator* alloc) +{ + void* ptr; + + /* if no free block available */ + if (!alloc->free_block) { + tommy_uintptr_t off, mis; + tommy_size_t size; + char* data; + tommy_allocator_entry* segment; + + /* default allocation size */ + size = TOMMY_ALLOCATOR_BLOCK_SIZE; + + /* ensure that we can allocate at least one block */ + if (size < sizeof(tommy_allocator_entry) + alloc->align_size + alloc->block_size) + size = sizeof(tommy_allocator_entry) + alloc->align_size + alloc->block_size; + + data = tommy_cast(char*, tommy_malloc(size)); + segment = (tommy_allocator_entry*)data; + + /* put in the segment list */ + segment->next = alloc->used_segment; + alloc->used_segment = segment; + data += sizeof(tommy_allocator_entry); + + /* align if not aligned */ + off = (tommy_uintptr_t)data; + mis = off % alloc->align_size; + if (mis != 0) { + data += alloc->align_size - mis; + size -= alloc->align_size - mis; + } + + /* insert in free list */ + do { + tommy_allocator_entry* free_block = (tommy_allocator_entry*)data; + free_block->next = alloc->free_block; + alloc->free_block = free_block; + + data += alloc->block_size; + size -= alloc->block_size; + } while (size >= alloc->block_size); + } + + /* remove one from the free list */ + ptr = alloc->free_block; + alloc->free_block = alloc->free_block->next; + + ++alloc->count; + + return ptr; +} + +void tommy_allocator_free(tommy_allocator* alloc, void* ptr) +{ + tommy_allocator_entry* free_block = tommy_cast(tommy_allocator_entry*, ptr); + + /* put it in the free list */ + free_block->next = alloc->free_block; + alloc->free_block = free_block; + + --alloc->count; +} + +tommy_size_t tommy_allocator_memory_usage(tommy_allocator* alloc) +{ + return alloc->count * (tommy_size_t)alloc->block_size; +} + diff --git a/sources/tommyds/tommyalloc.h b/sources/tommyds/tommyalloc.h new file mode 100755 index 00000000..7a9afea7 --- /dev/null +++ b/sources/tommyds/tommyalloc.h @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2010, Andrea Mazzoleni. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/** \file + * Allocator of fixed size blocks. + */ + +#ifndef __TOMMYALLOC_H +#define __TOMMYALLOC_H + +#include "tommytypes.h" + +/******************************************************************************/ +/* allocator */ + +/** \internal + * Allocator entry. + */ +struct tommy_allocator_entry_struct { + struct tommy_allocator_entry_struct* next; /**< Pointer to the next entry. 0 for last. */ +}; +typedef struct tommy_allocator_entry_struct tommy_allocator_entry; + +/** + * Allocator of fixed size blocks. + */ +typedef struct tommy_allocator_struct { + struct tommy_allocator_entry_struct* free_block; /**< List of free blocks. */ + struct tommy_allocator_entry_struct* used_segment; /**< List of allocated segments. */ + tommy_size_t block_size; /**< Block size. */ + tommy_size_t align_size; /**< Alignment size. */ + tommy_count_t count; /**< Number of allocated elements. */ +} tommy_allocator; + +/** + * Initializes the allocator. + * \param alloc Allocator to initialize. + * \param block_size Size of the block to allocate. + * \param align_size Minimum alignment requirement. No less than sizeof(void*). + */ +void tommy_allocator_init(tommy_allocator* alloc, tommy_size_t block_size, tommy_size_t align_size); + +/** + * Deinitialize the allocator. + * It also releases all the allocated memory to the heap. + * \param alloc Allocator to deinitialize. + */ +void tommy_allocator_done(tommy_allocator* alloc); + +/** + * Allocates a block. + * \param alloc Allocator to use. + */ +void* tommy_allocator_alloc(tommy_allocator* alloc); + +/** + * Deallocates a block. + * You must use the same allocator used in the tommy_allocator_alloc() call. + * \param alloc Allocator to use. + * \param ptr Block to free. + */ +void tommy_allocator_free(tommy_allocator* alloc, void* ptr); + +/** + * Gets the size of allocated memory. + * \param alloc Allocator to use. + */ +tommy_size_t tommy_allocator_memory_usage(tommy_allocator* alloc); + +#endif + diff --git a/sources/tommyds/tommyarray.c b/sources/tommyds/tommyarray.c new file mode 100755 index 00000000..a8d946bb --- /dev/null +++ b/sources/tommyds/tommyarray.c @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2011, Andrea Mazzoleni. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "tommyarray.h" + +/******************************************************************************/ +/* array */ + +void tommy_array_init(tommy_array* array) +{ + tommy_uint_t i; + + /* fixed initial size */ + array->bucket_bit = TOMMY_ARRAY_BIT; + array->bucket_max = 1 << array->bucket_bit; + array->bucket[0] = tommy_cast(void**, tommy_calloc(array->bucket_max, sizeof(void*))); + for (i = 1; i < TOMMY_ARRAY_BIT; ++i) + array->bucket[i] = array->bucket[0]; + + array->count = 0; +} + +void tommy_array_done(tommy_array* array) +{ + tommy_uint_t i; + + tommy_free(array->bucket[0]); + for (i = TOMMY_ARRAY_BIT; i < array->bucket_bit; ++i) { + void** segment = array->bucket[i]; + tommy_free(&segment[((tommy_ptrdiff_t)1) << i]); + } +} + +void tommy_array_grow(tommy_array* array, tommy_count_t count) +{ + if (array->count >= count) + return; + array->count = count; + + while (count > array->bucket_max) { + void** segment; + + /* allocate one more segment */ + segment = tommy_cast(void**, tommy_calloc(array->bucket_max, sizeof(void*))); + + /* store it adjusting the offset */ + /* cast to ptrdiff_t to ensure to get a negative value */ + array->bucket[array->bucket_bit] = &segment[-(tommy_ptrdiff_t)array->bucket_max]; + + ++array->bucket_bit; + array->bucket_max = 1 << array->bucket_bit; + } +} + +tommy_size_t tommy_array_memory_usage(tommy_array* array) +{ + return array->bucket_max * (tommy_size_t)sizeof(void*); +} + diff --git a/sources/tommyds/tommyarray.h b/sources/tommyds/tommyarray.h new file mode 100755 index 00000000..c04193f8 --- /dev/null +++ b/sources/tommyds/tommyarray.h @@ -0,0 +1,151 @@ +/* + * Copyright (c) 2011, Andrea Mazzoleni. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */

+/** \file
+ * Dynamic array based on segments of exponentially growing size.
+ *
+ * This array is able to grow dynamically upon request, without any reallocation.
+ *
+ * The grow operation involves an allocation of a new array segment, without reallocating
+ * the already used memory, therefore not increasing the heap fragmentation.
+ * This also implies that the address of the stored elements never changes.
+ *
+ * Allocated segments grow in size exponentially.
+ */

+#ifndef __TOMMYARRAY_H
+#define __TOMMYARRAY_H

+#include "tommytypes.h"

+#include <assert.h> /* for assert */

+/******************************************************************************/
+/* array */

+/**
+ * Initial and minimal size of the array expressed as a power of 2.
+ * The initial size is 2^TOMMY_ARRAY_BIT.
+ */
+#define TOMMY_ARRAY_BIT 6

+/** \internal
+ * Max number of elements as a power of 2.
+ */
+#define TOMMY_ARRAY_BIT_MAX 32

+/**
+ * Array container type.
+ * \note Don't use internal fields directly, but access the container only using functions.
+ */
+typedef struct tommy_array_struct {
+	void** bucket[TOMMY_ARRAY_BIT_MAX]; /**< Dynamic array of buckets. */
+	tommy_uint_t bucket_bit; /**< Bits used in the bit mask. */
+	tommy_count_t bucket_max; /**< Number of buckets. */
+	tommy_count_t count; /**< Number of initialized elements in the array. */
+} tommy_array;

+/**
+ * Initializes the array.
+ */
+void tommy_array_init(tommy_array* array);

+/**
+ * Deinitializes the array.
+ */
+void tommy_array_done(tommy_array* array);

+/**
+ * Grows the size up to the specified value.
+ * All the new elements in the array are initialized with the 0 value.
+ */
+void tommy_array_grow(tommy_array* array, tommy_count_t size);

+/**
+ * Gets a reference to the element at the specified position.
+ * You must be sure that space for this position is already
+ * allocated by calling tommy_array_grow().
+ */
+tommy_inline void** tommy_array_ref(tommy_array* array, tommy_count_t pos)
+{
+	tommy_uint_t bsr;

+	assert(pos < array->count);

+	/* get the highest bit set, in case of all 0, return 0 */
+	bsr = tommy_ilog2_u32(pos | 1);

+	return &array->bucket[bsr][pos];
+}

+/**
+ * Sets the element at the specified position.
+ * You must be sure that space for this position is already
+ * allocated by calling tommy_array_grow().
+ */
+tommy_inline void tommy_array_set(tommy_array* array, tommy_count_t pos, void* element)
+{
+	*tommy_array_ref(array, pos) = element;
+}

+/**
+ * Gets the element at the specified position.
+ * You must be sure that space for this position is already
+ * allocated by calling tommy_array_grow().
+ */
+tommy_inline void* tommy_array_get(tommy_array* array, tommy_count_t pos)
+{
+	return *tommy_array_ref(array, pos);
+}

+/**
+ * Grows and inserts a new element at the end of the array.
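+ *
+ * A minimal usage sketch (assuming a generic element pointer):
+ * \code
+ * tommy_array array;
+ * tommy_array_init(&array);
+ *
+ * tommy_array_insert(&array, element); // stored at position 0
+ * void* ptr = tommy_array_get(&array, 0);
+ *
+ * tommy_array_done(&array);
+ * \endcode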
+ */ +tommy_inline void tommy_array_insert(tommy_array* array, void* element) +{ + tommy_count_t pos = array->count; + + tommy_array_grow(array, pos + 1); + + tommy_array_set(array, pos, element); +} + +/** + * Gets the initialized size of the array. + */ +tommy_inline tommy_count_t tommy_array_size(tommy_array* array) +{ + return array->count; +} + +/** + * Gets the size of allocated memory. + */ +tommy_size_t tommy_array_memory_usage(tommy_array* array); + +#endif + diff --git a/sources/tommyds/tommyarrayblk.c b/sources/tommyds/tommyarrayblk.c new file mode 100755 index 00000000..d4dda763 --- /dev/null +++ b/sources/tommyds/tommyarrayblk.c @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2013, Andrea Mazzoleni. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */

+#include "tommyarrayblk.h"

+/******************************************************************************/
+/* array */

+void tommy_arrayblk_init(tommy_arrayblk* array)
+{
+	tommy_array_init(&array->block);

+	array->count = 0;
+}

+void tommy_arrayblk_done(tommy_arrayblk* array)
+{
+	tommy_count_t i;

+	for (i = 0; i < tommy_array_size(&array->block); ++i)
+		tommy_free(tommy_array_get(&array->block, i));

+	tommy_array_done(&array->block);
+}

+void tommy_arrayblk_grow(tommy_arrayblk* array, tommy_count_t count)
+{
+	tommy_count_t block_max;
+	tommy_count_t block_mac;

+	if (array->count >= count)
+		return;
+	array->count = count;

+	block_max = (count + TOMMY_ARRAYBLK_SIZE - 1) / TOMMY_ARRAYBLK_SIZE;
+	block_mac = tommy_array_size(&array->block);

+	if (block_mac < block_max) {
+		/* grow the block array */
+		tommy_array_grow(&array->block, block_max);

+		/* allocate new blocks */
+		while (block_mac < block_max) {
+			void** ptr = tommy_cast(void**, tommy_calloc(TOMMY_ARRAYBLK_SIZE, sizeof(void*)));

+			/* set the new block */
+			tommy_array_set(&array->block, block_mac, ptr);

+			++block_mac;
+		}
+	}
+}

+tommy_size_t tommy_arrayblk_memory_usage(tommy_arrayblk* array)
+{
+	return tommy_array_memory_usage(&array->block) + tommy_array_size(&array->block) * TOMMY_ARRAYBLK_SIZE * sizeof(void*);
+}

diff --git a/sources/tommyds/tommyarrayblk.h b/sources/tommyds/tommyarrayblk.h
new file mode 100755
index 00000000..20d05258
--- /dev/null
+++ b/sources/tommyds/tommyarrayblk.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2013, Andrea Mazzoleni. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */

+/** \file
+ * Dynamic array based on blocks of fixed size.
+ *
+ * This array is able to grow dynamically upon request, without any reallocation.
+ *
+ * The grow operation involves an allocation of a new array block, without reallocating
+ * the already used memory, therefore not increasing the heap fragmentation,
+ * and minimizing the space occupation.
+ * This also implies that the address of the stored elements never changes.
+ *
+ * Allocated blocks are always of the same fixed size of 4 Ki pointers.
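+ *
+ * A minimal usage sketch (assuming a generic element pointer):
+ * \code
+ * tommy_arrayblk array;
+ * tommy_arrayblk_init(&array);
+ *
+ * tommy_arrayblk_insert(&array, element); // stored at position 0
+ * void* ptr = tommy_arrayblk_get(&array, 0);
+ *
+ * tommy_arrayblk_done(&array);
+ * \endcode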
+ */

+#ifndef __TOMMYARRAYBLK_H
+#define __TOMMYARRAYBLK_H

+#include "tommytypes.h"
+#include "tommyarray.h"

+#include <assert.h> /* for assert */

+/******************************************************************************/
+/* array */

+/**
+ * Elements for each block.
+ */
+#define TOMMY_ARRAYBLK_SIZE (4 * 1024)

+/**
+ * Array container type.
+ * \note Don't use internal fields directly, but access the container only using functions.
+ */
+typedef struct tommy_arrayblk_struct {
+	tommy_array block; /**< Array of blocks. */
+	tommy_count_t count; /**< Number of initialized elements in the array. */
+} tommy_arrayblk;

+/**
+ * Initializes the array.
+ */
+void tommy_arrayblk_init(tommy_arrayblk* array);

+/**
+ * Deinitializes the array.
+ */
+void tommy_arrayblk_done(tommy_arrayblk* array);

+/**
+ * Grows the size up to the specified value.
+ * All the new elements in the array are initialized with the 0 value.
+ */
+void tommy_arrayblk_grow(tommy_arrayblk* array, tommy_count_t size);

+/**
+ * Gets a reference to the element at the specified position.
+ * You must be sure that space for this position is already
+ * allocated by calling tommy_arrayblk_grow().
+ */
+tommy_inline void** tommy_arrayblk_ref(tommy_arrayblk* array, tommy_count_t pos)
+{
+	void** ptr;

+	assert(pos < array->count);

+	ptr = tommy_cast(void**, tommy_array_get(&array->block, pos / TOMMY_ARRAYBLK_SIZE));

+	return &ptr[pos % TOMMY_ARRAYBLK_SIZE];
+}

+/**
+ * Sets the element at the specified position.
+ * You must be sure that space for this position is already
+ * allocated by calling tommy_arrayblk_grow().
+ */
+tommy_inline void tommy_arrayblk_set(tommy_arrayblk* array, tommy_count_t pos, void* element)
+{
+	*tommy_arrayblk_ref(array, pos) = element;
+}

+/**
+ * Gets the element at the specified position.
+ * You must be sure that space for this position is already
+ * allocated by calling tommy_arrayblk_grow().
+ */
+tommy_inline void* tommy_arrayblk_get(tommy_arrayblk* array, tommy_count_t pos)
+{
+	return *tommy_arrayblk_ref(array, pos);
+}

+/**
+ * Grows and inserts a new element at the end of the array.
+ */
+tommy_inline void tommy_arrayblk_insert(tommy_arrayblk* array, void* element)
+{
+	tommy_count_t pos = array->count;

+	tommy_arrayblk_grow(array, pos + 1);

+	tommy_arrayblk_set(array, pos, element);
+}

+/**
+ * Gets the initialized size of the array.
+ */
+tommy_inline tommy_count_t tommy_arrayblk_size(tommy_arrayblk* array)
+{
+	return array->count;
+}

+/**
+ * Gets the size of allocated memory.
+ */
+tommy_size_t tommy_arrayblk_memory_usage(tommy_arrayblk* array);

+#endif

diff --git a/sources/tommyds/tommyarrayblkof.c b/sources/tommyds/tommyarrayblkof.c
new file mode 100755
index 00000000..8a491a0f
--- /dev/null
+++ b/sources/tommyds/tommyarrayblkof.c
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2013, Andrea Mazzoleni. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "tommyarrayblkof.h" + +/******************************************************************************/ +/* array */ + +void tommy_arrayblkof_init(tommy_arrayblkof* array, tommy_size_t element_size) +{ + tommy_array_init(&array->block); + + array->element_size = element_size; + array->count = 0; +} + +void tommy_arrayblkof_done(tommy_arrayblkof* array) +{ + tommy_count_t i; + + for (i = 0; i < tommy_array_size(&array->block); ++i) + tommy_free(tommy_array_get(&array->block, i)); + + tommy_array_done(&array->block); +} + +void tommy_arrayblkof_grow(tommy_arrayblkof* array, tommy_count_t count) +{ + tommy_count_t block_max; + tommy_count_t block_mac; + + if (array->count >= count) + return; + array->count = count; + + block_max = (count + TOMMY_ARRAYBLKOF_SIZE - 1) / TOMMY_ARRAYBLKOF_SIZE; + block_mac = tommy_array_size(&array->block); + + if (block_mac < block_max) { + /* grow the block array */ + tommy_array_grow(&array->block, block_max); + + /* allocate new blocks */ + while (block_mac < block_max) { + void** ptr = tommy_cast(void**, tommy_calloc(TOMMY_ARRAYBLKOF_SIZE, array->element_size)); + + /* set the new block */ + tommy_array_set(&array->block, block_mac, ptr); + + ++block_mac; + } + } +} + +tommy_size_t tommy_arrayblkof_memory_usage(tommy_arrayblkof* array) +{ + return tommy_array_memory_usage(&array->block) + tommy_array_size(&array->block) * TOMMY_ARRAYBLKOF_SIZE * array->element_size; +} + diff --git a/sources/tommyds/tommyarrayblkof.h b/sources/tommyds/tommyarrayblkof.h new file mode 100755 index 00000000..fce6840c --- /dev/null +++ b/sources/tommyds/tommyarrayblkof.h @@ -0,0 +1,114 @@ +/* + * Copyright (c) 2013, Andrea Mazzoleni. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */

+/** \file
+ * Dynamic array based on blocks of fixed size.
+ *
+ * This array is able to grow dynamically upon request, without any reallocation.
+ *
+ * This is very similar to ::tommy_arrayblk, but it allows storing elements of any
+ * size and not just pointers.
+ *
+ * Note that in this case tommy_arrayblkof_ref() returns a pointer to the element,
+ * which should be used for getting and setting elements in the array,
+ * as generic getter and setter are not available.
+ */

+#ifndef __TOMMYARRAYBLKOF_H
+#define __TOMMYARRAYBLKOF_H

+#include "tommytypes.h"
+#include "tommyarray.h"

+#include <assert.h> /* for assert */

+/******************************************************************************/
+/* array */

+/**
+ * Elements for each block.
+ */
+#define TOMMY_ARRAYBLKOF_SIZE (4 * 1024)

+/**
+ * Array container type.
+ * \note Don't use internal fields directly, but access the container only using functions.
+ */
+typedef struct tommy_arrayblkof_struct {
+	tommy_array block; /**< Array of blocks. */
+	tommy_size_t element_size; /**< Size of the stored element in bytes. */
+	tommy_count_t count; /**< Number of initialized elements in the array. */
+} tommy_arrayblkof;

+/**
+ * Initializes the array.
+ * \param element_size Size in bytes of the element to store in the array.
+ */
+void tommy_arrayblkof_init(tommy_arrayblkof* array, tommy_size_t element_size);

+/**
+ * Deinitializes the array.
+ */
+void tommy_arrayblkof_done(tommy_arrayblkof* array);

+/**
+ * Grows the size up to the specified value.
+ * All the new elements in the array are initialized with the 0 value.
+ */
+void tommy_arrayblkof_grow(tommy_arrayblkof* array, tommy_count_t size);

+/**
+ * Gets a reference to the element at the specified position.
+ * You must be sure that space for this position is already
+ * allocated by calling tommy_arrayblkof_grow().
+ */
+tommy_inline void* tommy_arrayblkof_ref(tommy_arrayblkof* array, tommy_count_t pos)
+{
+	unsigned char* base;

+	assert(pos < array->count);

+	base = tommy_cast(unsigned char*, tommy_array_get(&array->block, pos / TOMMY_ARRAYBLKOF_SIZE));

+	return base + (pos % TOMMY_ARRAYBLKOF_SIZE) * array->element_size;
+}

+/**
+ * Gets the initialized size of the array.
+ */
+tommy_inline tommy_count_t tommy_arrayblkof_size(tommy_arrayblkof* array)
+{
+	return array->count;
+}

+/**
+ * Gets the size of allocated memory.
+ */
+tommy_size_t tommy_arrayblkof_memory_usage(tommy_arrayblkof* array);

+#endif

diff --git a/sources/tommyds/tommyarrayof.c b/sources/tommyds/tommyarrayof.c
new file mode 100755
index 00000000..80aede79
--- /dev/null
+++ b/sources/tommyds/tommyarrayof.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2013, Andrea Mazzoleni. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "tommyarrayof.h" + +/******************************************************************************/ +/* array */ + +void tommy_arrayof_init(tommy_arrayof* array, tommy_size_t element_size) +{ + tommy_uint_t i; + + /* fixed initial size */ + array->element_size = element_size; + array->bucket_bit = TOMMY_ARRAYOF_BIT; + array->bucket_max = 1 << array->bucket_bit; + array->bucket[0] = tommy_calloc(array->bucket_max, array->element_size); + for (i = 1; i < TOMMY_ARRAYOF_BIT; ++i) + array->bucket[i] = array->bucket[0]; + + array->count = 0; +} + +void tommy_arrayof_done(tommy_arrayof* array) +{ + tommy_uint_t i; + + tommy_free(array->bucket[0]); + for (i = TOMMY_ARRAYOF_BIT; i < array->bucket_bit; ++i) { + unsigned char* segment = tommy_cast(unsigned char*, array->bucket[i]); + tommy_free(segment + (((tommy_ptrdiff_t)1) << i) * array->element_size); + } +} + +void tommy_arrayof_grow(tommy_arrayof* array, tommy_count_t count) +{ + if (array->count >= count) + return; + array->count = count; + + while (count > array->bucket_max) { + unsigned char* segment; + + /* allocate one more segment */ + segment = tommy_cast(unsigned char*, tommy_calloc(array->bucket_max, array->element_size)); + + /* store it adjusting the offset */ + /* cast to ptrdiff_t to ensure to get a negative value */ + array->bucket[array->bucket_bit] = segment - (tommy_ptrdiff_t)array->bucket_max * array->element_size; + + ++array->bucket_bit; + array->bucket_max = 1 << array->bucket_bit; + } +} + +tommy_size_t tommy_arrayof_memory_usage(tommy_arrayof* array) +{ + return array->bucket_max * (tommy_size_t)array->element_size; +} + diff --git a/sources/tommyds/tommyarrayof.h b/sources/tommyds/tommyarrayof.h new file mode 100755 index 00000000..8454838f --- /dev/null +++ b/sources/tommyds/tommyarrayof.h @@ -0,0 +1,125 @@ +/* + * Copyright (c) 2013, Andrea Mazzoleni. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */

+/** \file
+ * Dynamic array based on segments of exponentially growing size.
+ *
+ * This array is able to grow dynamically upon request, without any reallocation.
+ *
+ * This is very similar to ::tommy_array, but it allows storing elements of any
+ * size and not just pointers.
+ *
+ * Note that in this case tommy_arrayof_ref() returns a pointer to the element,
+ * which should be used for getting and setting elements in the array,
+ * as generic getter and setter are not available.
+ */

+#ifndef __TOMMYARRAYOF_H
+#define __TOMMYARRAYOF_H

+#include "tommytypes.h"

+#include <assert.h> /* for assert */

+/******************************************************************************/
+/* array */

+/**
+ * Initial and minimal size of the array expressed as a power of 2.
+ * The initial size is 2^TOMMY_ARRAYOF_BIT.
+ */
+#define TOMMY_ARRAYOF_BIT 6

+/** \internal
+ * Max number of elements as a power of 2.
+ */
+#define TOMMY_ARRAYOF_BIT_MAX 32

+/**
+ * Array container type.
+ * \note Don't use internal fields directly, but access the container only using functions.
+ */
+typedef struct tommy_arrayof_struct {
+	void* bucket[TOMMY_ARRAYOF_BIT_MAX]; /**< Dynamic array of buckets. */
+	tommy_size_t element_size; /**< Size of the stored element in bytes. */
+	tommy_uint_t bucket_bit; /**< Bits used in the bit mask. */
+	tommy_count_t bucket_max; /**< Number of buckets. */
+	tommy_count_t count; /**< Number of initialized elements in the array. */
+} tommy_arrayof;

+/**
+ * Initializes the array.
+ * \param element_size Size in bytes of the element to store in the array.
+ */
+void tommy_arrayof_init(tommy_arrayof* array, tommy_size_t element_size);

+/**
+ * Deinitializes the array.
+ */
+void tommy_arrayof_done(tommy_arrayof* array);

+/**
+ * Grows the size up to the specified value.
+ * All the new elements in the array are initialized with the 0 value.
+ */
+void tommy_arrayof_grow(tommy_arrayof* array, tommy_count_t size);

+/**
+ * Gets a reference to the element at the specified position.
+ * You must be sure that space for this position is already
+ * allocated by calling tommy_arrayof_grow().
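+ *
+ * Since generic getter and setter are not available, the returned
+ * reference is used directly, as in this sketch assuming int elements:
+ * \code
+ * tommy_arrayof array;
+ * tommy_arrayof_init(&array, sizeof(int));
+ * tommy_arrayof_grow(&array, 1);
+ *
+ * *(int*)tommy_arrayof_ref(&array, 0) = 42; // set
+ * int value = *(int*)tommy_arrayof_ref(&array, 0); // get
+ *
+ * tommy_arrayof_done(&array);
+ * \endcode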
+ */
+tommy_inline void* tommy_arrayof_ref(tommy_arrayof* array, tommy_count_t pos)
+{
+	unsigned char* ptr;
+	tommy_uint_t bsr;

+	assert(pos < array->count);

+	/* get the highest bit set, in case of all 0, return 0 */
+	bsr = tommy_ilog2_u32(pos | 1);

+	ptr = tommy_cast(unsigned char*, array->bucket[bsr]);

+	return ptr + pos * array->element_size;
+}

+/**
+ * Gets the initialized size of the array.
+ */
+tommy_inline tommy_count_t tommy_arrayof_size(tommy_arrayof* array)
+{
+	return array->count;
+}

+/**
+ * Gets the size of allocated memory.
+ */
+tommy_size_t tommy_arrayof_memory_usage(tommy_arrayof* array);

+#endif

diff --git a/sources/tommyds/tommychain.h b/sources/tommyds/tommychain.h
new file mode 100755
index 00000000..b15bc6ae
--- /dev/null
+++ b/sources/tommyds/tommychain.h
@@ -0,0 +1,224 @@
+/*
+ * Copyright (c) 2010, Andrea Mazzoleni. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */

+/** \file
+ * Chain of nodes.
+ * A chain of nodes is an abstraction used to implement complex list operations
+ * like sorting.
+ *
+ * Do not use this directly. Use lists instead.
+ */

+#ifndef __TOMMYCHAIN_H
+#define __TOMMYCHAIN_H

+#include "tommytypes.h"

+/******************************************************************************/
+/* chain */

+/**
+ * Chain of nodes.
+ * A chain of nodes is a sequence of nodes with the following properties:
+ * - It contains at least one node. A chain of zero nodes cannot exist.
+ * - The next field of the tail is of *undefined* value.
+ * - The prev field of the head is of *undefined* value.
+ * - All the other inner prev and next fields are correctly set.
+ */
+typedef struct tommy_chain_struct {
+	tommy_node* head; /**< Pointer to the head of the chain. */
+	tommy_node* tail; /**< Pointer to the tail of the chain. */
+} tommy_chain;

+/**
+ * Splices a chain in the middle of another chain.
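+ * The chain of nodes second_head..second_tail is inserted between the two
+ * adjacent nodes first_before and first_after of the other chain:
+ *
+ *   first_before <-> second_head ... second_tail <-> first_after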
+ */
+tommy_inline void tommy_chain_splice(tommy_node* first_before, tommy_node* first_after, tommy_node* second_head, tommy_node* second_tail)
+{
+	/* set the prev list */
+	first_after->prev = second_tail;
+	second_head->prev = first_before;

+	/* set the next list */
+	first_before->next = second_head;
+	second_tail->next = first_after;
+}

+/**
+ * Concatenates two chains.
+ */
+tommy_inline void tommy_chain_concat(tommy_node* first_tail, tommy_node* second_head)
+{
+	/* set the prev list */
+	second_head->prev = first_tail;

+	/* set the next list */
+	first_tail->next = second_head;
+}

+/**
+ * Merges two chains.
+ */
+tommy_inline void tommy_chain_merge(tommy_chain* first, tommy_chain* second, tommy_compare_func* cmp)
+{
+	tommy_node* first_i = first->head;
+	tommy_node* second_i = second->head;

+	/* merge */
+	while (1) {
+		if (cmp(first_i->data, second_i->data) > 0) {
+			tommy_node* next = second_i->next;
+			if (first_i == first->head) {
+				tommy_chain_concat(second_i, first_i);
+				first->head = second_i;
+			} else {
+				tommy_chain_splice(first_i->prev, first_i, second_i, second_i);
+			}
+			if (second_i == second->tail)
+				break;
+			second_i = next;
+		} else {
+			if (first_i == first->tail) {
+				tommy_chain_concat(first_i, second_i);
+				first->tail = second->tail;
+				break;
+			}
+			first_i = first_i->next;
+		}
+	}
+}

+/**
+ * Merges two chains managing special degenerate cases.
+ * It's functionally equivalent to tommy_chain_merge() but faster with already ordered chains.
+ */
+tommy_inline void tommy_chain_merge_degenerated(tommy_chain* first, tommy_chain* second, tommy_compare_func* cmp)
+{
+	/* identify the condition first <= second */
+	if (cmp(first->tail->data, second->head->data) <= 0) {
+		tommy_chain_concat(first->tail, second->head);
+		first->tail = second->tail;
+		return;
+	}

+	/* identify the condition second < first */
+	/* here we must be strict on comparison to keep the sort stable */
+	if (cmp(second->tail->data, first->head->data) < 0) {
+		tommy_chain_concat(second->tail, first->head);
+		first->head = second->head;
+		return;
+	}

+	tommy_chain_merge(first, second, cmp);
+}

+/**
+ * Max number of elements as a power of 2.
+ */
+#define TOMMY_CHAIN_BIT_MAX 32

+/**
+ * Sorts a chain.
+ * It's a stable merge sort using power of 2 buckets, with O(N*log(N)) complexity,
+ * similar to the one used in the SGI STL libraries and in the Linux Kernel,
+ * but faster on degenerate cases like already ordered lists.
+ *
+ * SGI STL stl_list.h
+ * http://www.sgi.com/tech/stl/stl_list.h
+ *
+ * Linux Kernel lib/list_sort.c
+ * http://lxr.linux.no/#linux+v2.6.36/lib/list_sort.c
+ */
+tommy_inline void tommy_chain_mergesort(tommy_chain* chain, tommy_compare_func* cmp)
+{
+	/*
+	 * Bit buckets of chains.
+	 * Each bucket contains 2^i nodes or it's empty.
+	 * The chain at address TOMMY_CHAIN_BIT_MAX is an independent variable operating as "carry".
+	 * We keep it in the same "bit" vector to avoid reports from the valgrind tool sgcheck.
+	 */
+	tommy_chain bit[TOMMY_CHAIN_BIT_MAX + 1];

+	/**
+	 * Value stored inside the bit bucket.
+	 * It's used to know which bucket is empty or full.
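+	 * It works like a binary counter incremented once per node:
+	 * bit i set means that bucket i holds exactly 2^i nodes,
+	 * bit i clear means that bucket i is empty.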
+ */ + tommy_count_t counter; + tommy_node* node = chain->head; + tommy_node* tail = chain->tail; + tommy_count_t mask; + tommy_count_t i; + + counter = 0; + while (1) { + tommy_node* next; + tommy_chain* last; + + /* carry bit to add */ + last = &bit[TOMMY_CHAIN_BIT_MAX]; + bit[TOMMY_CHAIN_BIT_MAX].head = node; + bit[TOMMY_CHAIN_BIT_MAX].tail = node; + next = node->next; + + /* add the bit, propagating the carry */ + i = 0; + mask = counter; + while ((mask & 1) != 0) { + tommy_chain_merge_degenerated(&bit[i], last, cmp); + mask >>= 1; + last = &bit[i]; + ++i; + } + + /* copy the carry in the first empty bit */ + bit[i] = *last; + + /* add the carry in the counter */ + ++counter; + + if (node == tail) + break; + node = next; + } + + /* merge the buckets */ + i = tommy_ctz_u32(counter); + mask = counter >> i; + while (mask != 1) { + mask >>= 1; + if (mask & 1) + tommy_chain_merge_degenerated(&bit[i + 1], &bit[i], cmp); + else + bit[i + 1] = bit[i]; + ++i; + } + + *chain = bit[i]; +} + +#endif + diff --git a/sources/tommyds/tommyhash.c b/sources/tommyds/tommyhash.c new file mode 100755 index 00000000..05d78d25 --- /dev/null +++ b/sources/tommyds/tommyhash.c @@ -0,0 +1,241 @@ +/* + * Copyright (c) 2010, Andrea Mazzoleni. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "tommyhash.h" + +/******************************************************************************/ +/* hash */ + +tommy_inline tommy_uint32_t tommy_le_uint32_read(const void* ptr) +{ + /* allow unaligned read on Intel x86 and x86_64 platforms */ +#if defined(__i386__) || defined(_M_IX86) || defined(_X86_) || defined(__x86_64__) || defined(_M_X64) + /* defines from http://predef.sourceforge.net/ */ + return *(const tommy_uint32_t*)ptr; +#else + const unsigned char* ptr8 = tommy_cast(const unsigned char*, ptr); + return ptr8[0] + ((tommy_uint32_t)ptr8[1] << 8) + ((tommy_uint32_t)ptr8[2] << 16) + ((tommy_uint32_t)ptr8[3] << 24); +#endif +} + +#define tommy_rot(x, k) \ + (((x) << (k)) | ((x) >> (32 - (k)))) + +#define tommy_mix(a, b, c) \ + do { \ + a -= c; a ^= tommy_rot(c, 4); c += b; \ + b -= a; b ^= tommy_rot(a, 6); a += c; \ + c -= b; c ^= tommy_rot(b, 8); b += a; \ + a -= c; a ^= tommy_rot(c, 16); c += b; \ + b -= a; b ^= tommy_rot(a, 19); a += c; \ + c -= b; c ^= tommy_rot(b, 4); b += a; \ + } while (0) + +#define tommy_final(a, b, c) \ + do { \ + c ^= b; c -= tommy_rot(b, 14); \ + a ^= c; a -= tommy_rot(c, 11); \ + b ^= a; b -= tommy_rot(a, 25); \ + c ^= b; c -= tommy_rot(b, 16); \ + a ^= c; a -= tommy_rot(c, 4); \ + b ^= a; b -= tommy_rot(a, 14); \ + c ^= b; c -= tommy_rot(b, 24); \ + } while (0) + +tommy_uint32_t tommy_hash_u32(tommy_uint32_t init_val, const void* void_key, tommy_size_t key_len) +{ + const unsigned char* key = tommy_cast(const unsigned char*, void_key); + tommy_uint32_t a, b, c; + + a = b = c = 0xdeadbeef + ((tommy_uint32_t)key_len) + init_val; + + while (key_len > 12) { + a += tommy_le_uint32_read(key + 0); + b += tommy_le_uint32_read(key + 4); + c += tommy_le_uint32_read(key + 8); + + tommy_mix(a, b, c); + + key_len -= 12; + key += 12; + } + + switch (key_len) { + case 0 : + return c; /* used only when called with a zero length */ + case 12 : + c += tommy_le_uint32_read(key + 8); + b += tommy_le_uint32_read(key + 4); + a += tommy_le_uint32_read(key + 0); + break; + case 11 : c += ((tommy_uint32_t)key[10]) << 16; + case 10 : c += ((tommy_uint32_t)key[9]) << 8; + case 9 : c += key[8]; + case 8 : + b += tommy_le_uint32_read(key + 4); + a += tommy_le_uint32_read(key + 0); + break; + case 7 : b += ((tommy_uint32_t)key[6]) << 16; + case 6 : b += ((tommy_uint32_t)key[5]) << 8; + case 5 : b += key[4]; + case 4 : + a += tommy_le_uint32_read(key + 0); + break; + case 3 : a += ((tommy_uint32_t)key[2]) << 16; + case 2 : a += ((tommy_uint32_t)key[1]) << 8; + case 1 : a += key[0]; + } + + tommy_final(a, b, c); + + return c; +} + +tommy_uint64_t tommy_hash_u64(tommy_uint64_t init_val, const void* void_key, tommy_size_t key_len) +{ + const unsigned char* key = tommy_cast(const unsigned char*, void_key); + tommy_uint32_t a, b, c; + + a = b = c = 0xdeadbeef + ((tommy_uint32_t)key_len) + (init_val & 0xffffffff); + c += init_val >> 32; + + while (key_len > 12) { + a += tommy_le_uint32_read(key + 0); + b += tommy_le_uint32_read(key + 4); + c += tommy_le_uint32_read(key + 8); + + tommy_mix(a, b, c); + + key_len -= 12; + key += 12; + } + + switch (key_len) { + case 0 : + return c + ((tommy_uint64_t)b << 32); /* used only when called with a zero length */ + case 12 : + c += tommy_le_uint32_read(key + 8); + b += tommy_le_uint32_read(key + 4); + a += tommy_le_uint32_read(key + 0); + break; + case 11 : c += ((tommy_uint32_t)key[10]) << 16; + case 10 : c += ((tommy_uint32_t)key[9]) << 8; + case 9 : c += key[8]; + case 8 : + b += tommy_le_uint32_read(key + 4); + a += 
tommy_le_uint32_read(key + 0);
+		break;
+	case 7 : b += ((tommy_uint32_t)key[6]) << 16;
+	case 6 : b += ((tommy_uint32_t)key[5]) << 8;
+	case 5 : b += key[4];
+	case 4 :
+		a += tommy_le_uint32_read(key + 0);
+		break;
+	case 3 : a += ((tommy_uint32_t)key[2]) << 16;
+	case 2 : a += ((tommy_uint32_t)key[1]) << 8;
+	case 1 : a += key[0];
+	}
+
+	tommy_final(a, b, c);
+
+	return c + ((tommy_uint64_t)b << 32);
+}
+
+tommy_uint32_t tommy_strhash_u32(tommy_uint64_t init_val, const void* void_key)
+{
+	const unsigned char* key = tommy_cast(const unsigned char*, void_key);
+	tommy_uint32_t a, b, c;
+	tommy_uint32_t m[3] = { 0xff, 0xff00, 0xff0000 };
+
+	a = b = c = 0xdeadbeef + init_val;
+	/* this is different from the original lookup3 and the result won't match */
+
+	while (1) {
+		tommy_uint32_t v = tommy_le_uint32_read(key);
+
+		if (tommy_haszero_u32(v)) {
+			if (v & m[0]) {
+				a += v & m[0];
+				if (v & m[1]) {
+					a += v & m[1];
+					if (v & m[2])
+						a += v & m[2];
+				}
+			}
+
+			break;
+		}
+
+		a += v;
+
+		v = tommy_le_uint32_read(key + 4);
+
+		if (tommy_haszero_u32(v)) {
+			if (v & m[0]) {
+				b += v & m[0];
+				if (v & m[1]) {
+					b += v & m[1];
+					if (v & m[2])
+						b += v & m[2];
+				}
+			}
+
+			break;
+		}
+
+		b += v;
+
+		v = tommy_le_uint32_read(key + 8);
+
+		if (tommy_haszero_u32(v)) {
+			if (v & m[0]) {
+				c += v & m[0];
+				if (v & m[1]) {
+					c += v & m[1];
+					if (v & m[2])
+						c += v & m[2];
+				}
+			}
+
+			break;
+		}
+
+		c += v;
+
+		tommy_mix(a, b, c);
+
+		key += 12;
+	}
+
+	/* for lengths that are multiples of 12 we have already called mix */
+	/* this is different from the original lookup3 and the result won't match */
+
+	tommy_final(a, b, c);
+
+	return c;
+}
+
diff --git a/sources/tommyds/tommyhash.h b/sources/tommyds/tommyhash.h
new file mode 100755
index 00000000..738e33d2
--- /dev/null
+++ b/sources/tommyds/tommyhash.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2010, Andrea Mazzoleni. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file
+ * Hash functions for use with ::tommy_hashtable, ::tommy_hashdyn and ::tommy_hashlin.
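+ *
+ * For example (an illustrative sketch; the buffer and values are
+ * hypothetical, not taken from the library documentation):
+ *
+ * \code
+ * const char* msg = "hello";
+ * tommy_uint32_t h32 = tommy_hash_u32(0, msg, 5);  // hash with explicit length
+ * tommy_uint64_t h64 = tommy_hash_u64(0, msg, 5);  // 64 bits variant
+ * tommy_uint32_t hs = tommy_strhash_u32(0, msg);   // 0 terminated string
+ * \endcode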
+ */
+
+#ifndef __TOMMYHASH_H
+#define __TOMMYHASH_H
+
+#include "tommytypes.h"
+
+/******************************************************************************/
+/* hash */
+
+/**
+ * Hash type used in hashtables.
+ */
+typedef tommy_key_t tommy_hash_t;
+
+/**
+ * Hash function with a 32 bits result.
+ * Implementation of the Robert Jenkins "lookup3" hash 32 bits version,
+ * from http://www.burtleburtle.net/bob/hash/doobs.html, function hashlittle().
+ *
+ * This hash is designed to provide good overall performance on all platforms,
+ * including 32 bits. If you target only 64 bits, you can use faster hashes,
+ * like SpookyHash or FarmHash.
+ *
+ * \param init_val Initialization value.
+ * Using a different initialization value, you can generate a completely different set of hash values.
+ * Use 0 if not relevant.
+ * \param void_key Pointer to the data to hash.
+ * \param key_len Size of the data to hash.
+ * \note
+ * This function is endianness independent.
+ * \return The hash value of 32 bits.
+ */
+tommy_uint32_t tommy_hash_u32(tommy_uint32_t init_val, const void* void_key, tommy_size_t key_len);
+
+/**
+ * Hash function with a 64 bits result.
+ * Implementation of the Robert Jenkins "lookup3" hash 64 bits version,
+ * from http://www.burtleburtle.net/bob/hash/doobs.html, function hashlittle2().
+ *
+ * This hash is designed to provide good overall performance on all platforms,
+ * including 32 bits. If you target only 64 bits, you can use faster hashes,
+ * like SpookyHash or FarmHash.
+ *
+ * \param init_val Initialization value.
+ * Using a different initialization value, you can generate a completely different set of hash values.
+ * Use 0 if not relevant.
+ * \param void_key Pointer to the data to hash.
+ * \param key_len Size of the data to hash.
+ * \note
+ * This function is endianness independent.
+ * \return The hash value of 64 bits.
+ */
+tommy_uint64_t tommy_hash_u64(tommy_uint64_t init_val, const void* void_key, tommy_size_t key_len);
+
+/**
+ * String hash function with a 32 bits result.
+ * Implementation is based on the Robert Jenkins "lookup3" hash 32 bits version,
+ * from http://www.burtleburtle.net/bob/hash/doobs.html, function hashlittle().
+ *
+ * This hash is designed to handle strings with an unknown length. If you
+ * know the string length, the other hash functions are surely faster.
+ *
+ * \param init_val Initialization value.
+ * Using a different initialization value, you can generate a completely different set of hash values.
+ * Use 0 if not relevant.
+ * \param void_key Pointer to the string to hash. It has to be 0 terminated.
+ * \note
+ * This function is endianness independent.
+ * \return The hash value of 32 bits.
+ */
+tommy_uint32_t tommy_strhash_u32(tommy_uint64_t init_val, const void* void_key);
+
+/**
+ * Integer reversible hash function for 32 bits.
+ * Implementation of the Robert Jenkins "4-byte Integer Hashing",
+ * from http://burtleburtle.net/bob/hash/integer.html
+ */
+tommy_inline tommy_uint32_t tommy_inthash_u32(tommy_uint32_t key)
+{
+	key -= key << 6;
+	key ^= key >> 17;
+	key -= key << 9;
+	key ^= key << 4;
+	key -= key << 3;
+	key ^= key << 10;
+	key ^= key >> 15;
+
+	return key;
+}
+
+/**
+ * Integer reversible hash function for 64 bits.
+ * Implementation of the Thomas Wang "Integer Hash Function", + * from http://web.archive.org/web/20071223173210/http://www.concentric.net/~Ttwang/tech/inthash.htm + */ +tommy_inline tommy_uint64_t tommy_inthash_u64(tommy_uint64_t key) +{ + key = ~key + (key << 21); + key = key ^ (key >> 24); + key = key + (key << 3) + (key << 8); + key = key ^ (key >> 14); + key = key + (key << 2) + (key << 4); + key = key ^ (key >> 28); + key = key + (key << 31); + + return key; +} + +#endif + diff --git a/sources/tommyds/tommyhashdyn.c b/sources/tommyds/tommyhashdyn.c new file mode 100755 index 00000000..02ef4049 --- /dev/null +++ b/sources/tommyds/tommyhashdyn.c @@ -0,0 +1,224 @@ +/* + * Copyright (c) 2010, Andrea Mazzoleni. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "tommyhashdyn.h" +#include "tommylist.h" + +/******************************************************************************/ +/* hashdyn */ + +void tommy_hashdyn_init(tommy_hashdyn* hashdyn) +{ + /* fixed initial size */ + hashdyn->bucket_bit = TOMMY_HASHDYN_BIT; + hashdyn->bucket_max = 1 << hashdyn->bucket_bit; + hashdyn->bucket_mask = hashdyn->bucket_max - 1; + hashdyn->bucket = tommy_cast(tommy_hashdyn_node**, tommy_calloc(hashdyn->bucket_max, sizeof(tommy_hashdyn_node*))); + + hashdyn->count = 0; +} + +void tommy_hashdyn_done(tommy_hashdyn* hashdyn) +{ + tommy_free(hashdyn->bucket); +} + +/** + * Resize the bucket vector. 
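+ *
+ * A worked example of the thresholds that drive this resize, assuming
+ * the initial TOMMY_HASHDYN_BIT of 4 (the numbers are illustrative):
+ *
+ * \code
+ * // 16 buckets: grow to 32 once count reaches 8 (load factor 0.5)
+ * // 32 buckets: shrink back to 16 once count drops to 4 (load factor 0.125)
+ * \endcode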
+ */ +static void tommy_hashdyn_resize(tommy_hashdyn* hashdyn, tommy_count_t new_bucket_bit) +{ + tommy_count_t bucket_bit; + tommy_count_t bucket_max; + tommy_count_t new_bucket_max; + tommy_count_t new_bucket_mask; + tommy_hashdyn_node** new_bucket; + + bucket_bit = hashdyn->bucket_bit; + bucket_max = hashdyn->bucket_max; + + new_bucket_max = 1 << new_bucket_bit; + new_bucket_mask = new_bucket_max - 1; + + /* allocate the new vector using malloc() and not calloc() */ + /* because data is fully initialized in the update process */ + new_bucket = tommy_cast(tommy_hashdyn_node**, tommy_malloc(new_bucket_max * sizeof(tommy_hashdyn_node*))); + + /* reinsert all the elements */ + if (new_bucket_bit > bucket_bit) { + tommy_count_t i; + + /* grow */ + for (i = 0; i < bucket_max; ++i) { + tommy_hashdyn_node* j; + + /* setup the new two buckets */ + new_bucket[i] = 0; + new_bucket[i + bucket_max] = 0; + + /* reinsert the bucket */ + j = hashdyn->bucket[i]; + while (j) { + tommy_hashdyn_node* j_next = j->next; + tommy_count_t pos = j->key & new_bucket_mask; + if (new_bucket[pos]) + tommy_list_insert_tail_not_empty(new_bucket[pos], j); + else + tommy_list_insert_first(&new_bucket[pos], j); + j = j_next; + } + } + } else { + tommy_count_t i; + + /* shrink */ + for (i = 0; i < new_bucket_max; ++i) { + /* setup the new bucket with the lower bucket*/ + new_bucket[i] = hashdyn->bucket[i]; + + /* concat the upper bucket */ + tommy_list_concat(&new_bucket[i], &hashdyn->bucket[i + new_bucket_max]); + } + } + + tommy_free(hashdyn->bucket); + + /* setup */ + hashdyn->bucket_bit = new_bucket_bit; + hashdyn->bucket_max = new_bucket_max; + hashdyn->bucket_mask = new_bucket_mask; + hashdyn->bucket = new_bucket; +} + +/** + * Grow. + */ +tommy_inline void hashdyn_grow_step(tommy_hashdyn* hashdyn) +{ + /* grow if more than 50% full */ + if (hashdyn->count >= hashdyn->bucket_max / 2) + tommy_hashdyn_resize(hashdyn, hashdyn->bucket_bit + 1); +} + +/** + * Shrink. 
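+ *
+ * Note that the grow (0.5) and shrink (0.125) thresholds are far apart:
+ * right after any resize the load factor is about 0.25, so a workload
+ * mixing inserts and removes near one threshold cannot resize back and
+ * forth. A sketch with illustrative numbers:
+ *
+ * \code
+ * // grow at count = 16 (32 -> 64 buckets): new load factor 16/64 = 0.25
+ * // shrink at count = 8 (64 -> 32 buckets): new load factor 8/32 = 0.25
+ * \endcode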
+ */ +tommy_inline void hashdyn_shrink_step(tommy_hashdyn* hashdyn) +{ + /* shrink if less than 12.5% full */ + if (hashdyn->count <= hashdyn->bucket_max / 8 && hashdyn->bucket_bit > TOMMY_HASHDYN_BIT) + tommy_hashdyn_resize(hashdyn, hashdyn->bucket_bit - 1); +} + +void tommy_hashdyn_insert(tommy_hashdyn* hashdyn, tommy_hashdyn_node* node, void* data, tommy_hash_t hash) +{ + tommy_count_t pos = hash & hashdyn->bucket_mask; + + tommy_list_insert_tail(&hashdyn->bucket[pos], node, data); + + node->key = hash; + + ++hashdyn->count; + + hashdyn_grow_step(hashdyn); +} + +void* tommy_hashdyn_remove_existing(tommy_hashdyn* hashdyn, tommy_hashdyn_node* node) +{ + tommy_count_t pos = node->key & hashdyn->bucket_mask; + + tommy_list_remove_existing(&hashdyn->bucket[pos], node); + + --hashdyn->count; + + hashdyn_shrink_step(hashdyn); + + return node->data; +} + +void* tommy_hashdyn_remove(tommy_hashdyn* hashdyn, tommy_search_func* cmp, const void* cmp_arg, tommy_hash_t hash) +{ + tommy_count_t pos = hash & hashdyn->bucket_mask; + tommy_hashdyn_node* node = hashdyn->bucket[pos]; + + while (node) { + /* we first check if the hash matches, as in the same bucket we may have multiples hash values */ + if (node->key == hash && cmp(cmp_arg, node->data) == 0) { + tommy_list_remove_existing(&hashdyn->bucket[pos], node); + + --hashdyn->count; + + hashdyn_shrink_step(hashdyn); + + return node->data; + } + node = node->next; + } + + return 0; +} + +void tommy_hashdyn_foreach(tommy_hashdyn* hashdyn, tommy_foreach_func* func) +{ + tommy_count_t bucket_max = hashdyn->bucket_max; + tommy_hashdyn_node** bucket = hashdyn->bucket; + tommy_count_t pos; + + for (pos = 0; pos < bucket_max; ++pos) { + tommy_hashdyn_node* node = bucket[pos]; + + while (node) { + void* data = node->data; + node = node->next; + func(data); + } + } +} + +void tommy_hashdyn_foreach_arg(tommy_hashdyn* hashdyn, tommy_foreach_arg_func* func, void* arg) +{ + tommy_count_t bucket_max = hashdyn->bucket_max; + tommy_hashdyn_node** bucket = hashdyn->bucket; + tommy_count_t pos; + + for (pos = 0; pos < bucket_max; ++pos) { + tommy_hashdyn_node* node = bucket[pos]; + + while (node) { + void* data = node->data; + node = node->next; + func(arg, data); + } + } +} + +tommy_size_t tommy_hashdyn_memory_usage(tommy_hashdyn* hashdyn) +{ + return hashdyn->bucket_max * (tommy_size_t)sizeof(hashdyn->bucket[0]) + + tommy_hashdyn_count(hashdyn) * (tommy_size_t)sizeof(tommy_hashdyn_node); +} + diff --git a/sources/tommyds/tommyhashdyn.h b/sources/tommyds/tommyhashdyn.h new file mode 100755 index 00000000..ed4a6074 --- /dev/null +++ b/sources/tommyds/tommyhashdyn.h @@ -0,0 +1,296 @@ +/* + * Copyright (c) 2010, Andrea Mazzoleni. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file
+ * Dynamic chained hashtable.
+ *
+ * This hashtable resizes dynamically. It starts with the minimal size of 16 buckets,
+ * doubles the size when it reaches a load factor greater than 0.5, and halves the
+ * size when the load factor drops below 0.125.
+ *
+ * All the elements are reallocated in a single resize operation done inside
+ * tommy_hashdyn_insert() or tommy_hashdyn_remove().
+ *
+ * Note that the resize operation takes approximately 100 [ms] with 1 million elements,
+ * and 1 [second] with 10 million. This could be a problem in real-time applications.
+ *
+ * The resize also fragments the heap, as it involves allocating a double-sized table,
+ * copying the elements, and deallocating the older table, leaving a big hole in the heap.
+ *
+ * The ::tommy_hashlin hashtable fixes both problems.
+ *
+ * To initialize the hashtable you have to call tommy_hashdyn_init().
+ *
+ * \code
+ * tommy_hashdyn hashdyn;
+ *
+ * tommy_hashdyn_init(&hashdyn);
+ * \endcode
+ *
+ * To insert elements in the hashtable you have to call tommy_hashdyn_insert() for
+ * each element.
+ * In the insertion call you have to specify the address of the node, the
+ * address of the object, and the hash value of the key to use.
+ * The address of the object is used to initialize the tommy_node::data field
+ * of the node, and the hash to initialize the tommy_node::key field.
+ *
+ * \code
+ * struct object {
+ *     int value;
+ *     // other fields
+ *     tommy_node node;
+ * };
+ *
+ * struct object* obj = malloc(sizeof(struct object)); // creates the object
+ *
+ * obj->value = ...; // initializes the object
+ *
+ * tommy_hashdyn_insert(&hashdyn, &obj->node, obj, tommy_inthash_u32(obj->value)); // inserts the object
+ * \endcode
+ *
+ * To find an element in the hashtable you have to call tommy_hashdyn_search()
+ * providing a comparison function, its argument, and the hash of the key to search.
+ *
+ * \code
+ * int compare(const void* arg, const void* obj)
+ * {
+ *     return *(const int*)arg != ((const struct object*)obj)->value;
+ * }
+ *
+ * int value_to_find = 1;
+ * struct object* obj = tommy_hashdyn_search(&hashdyn, compare, &value_to_find, tommy_inthash_u32(value_to_find));
+ * if (!obj) {
+ *     // not found
+ * } else {
+ *     // found
+ * }
+ * \endcode
+ *
+ * To iterate over all the elements in the hashtable with the same key, you have to
+ * use tommy_hashdyn_bucket() and follow the tommy_node::next pointer until NULL.
+ * You also have to check explicitly for the key, as the bucket may contain
+ * different keys.
+ * + * \code + * int value_to_find = 1; + * tommy_node* i = tommy_hashdyn_bucket(&hashdyn, tommy_inthash_u32(value_to_find)); + * while (i) { + * struct object* obj = i->data; // gets the object pointer + * + * if (obj->value == value_to_find) { + * printf("%d\n", obj->value); // process the object + * } + * + * i = i->next; // goes to the next element + * } + * \endcode + * + * To remove an element from the hashtable you have to call tommy_hashdyn_remove() + * providing a comparison function, its argument, and the hash of the key to search + * and remove. + * + * \code + * struct object* obj = tommy_hashdyn_remove(&hashdyn, compare, &value_to_remove, tommy_inthash_u32(value_to_remove)); + * if (obj) { + * free(obj); // frees the object allocated memory + * } + * \endcode + * + * To destroy the hashtable you have to remove all the elements, and deinitialize + * the hashtable calling tommy_hashdyn_done(). + * + * \code + * tommy_hashdyn_done(&hashdyn); + * \endcode + * + * If you need to iterate over all the elements in the hashtable, you can use + * tommy_hashdyn_foreach() or tommy_hashdyn_foreach_arg(). + * If you need a more precise control with a real iteration, you have to insert + * all the elements also in a ::tommy_list, and use the list to iterate. + * See the \ref multiindex example for more detail. + */ + +#ifndef __TOMMYHASHDYN_H +#define __TOMMYHASHDYN_H + +#include "tommyhash.h" + +/******************************************************************************/ +/* hashdyn */ + +/** \internal + * Initial and minimal size of the hashtable expressed as a power of 2. + * The initial size is 2^TOMMY_HASHDYN_BIT. + */ +#define TOMMY_HASHDYN_BIT 4 + +/** + * Hashtable node. + * This is the node that you have to include inside your objects. + */ +typedef tommy_node tommy_hashdyn_node; + +/** + * Hashtable container type. + * \note Don't use internal fields directly, but access the container only using functions. + */ +typedef struct tommy_hashdyn_struct { + tommy_hashdyn_node** bucket; /**< Hash buckets. One list for each hash modulus. */ + tommy_uint_t bucket_bit; /**< Bits used in the bit mask. */ + tommy_count_t bucket_max; /**< Number of buckets. */ + tommy_count_t bucket_mask; /**< Bit mask to access the buckets. */ + tommy_count_t count; /**< Number of elements. */ +} tommy_hashdyn; + +/** + * Initializes the hashtable. + */ +void tommy_hashdyn_init(tommy_hashdyn* hashdyn); + +/** + * Deinitializes the hashtable. + * + * You can call this function with elements still contained, + * but such elements are not going to be freed by this call. + */ +void tommy_hashdyn_done(tommy_hashdyn* hashdyn); + +/** + * Inserts an element in the hashtable. + */ +void tommy_hashdyn_insert(tommy_hashdyn* hashdyn, tommy_hashdyn_node* node, void* data, tommy_hash_t hash); + +/** + * Searches and removes an element from the hashtable. + * You have to provide a compare function and the hash of the element you want to remove. + * If the element is not found, 0 is returned. + * If more equal elements are present, the first one is removed. + * \param cmp Compare function called with cmp_arg as first argument and with the element to compare as a second one. + * The function should return 0 for equal elements, anything other for different elements. + * \param cmp_arg Compare argument passed as first argument of the compare function. + * \param hash Hash of the element to find and remove. + * \return The removed element, or 0 if not found. 
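+ *
+ * For example, reusing the compare function from the file documentation
+ * above (a sketch; value_to_remove is a hypothetical variable):
+ *
+ * \code
+ * int value_to_remove = 1;
+ * struct object* obj = tommy_hashdyn_remove(&hashdyn, compare, &value_to_remove, tommy_inthash_u32(value_to_remove));
+ * if (obj)
+ *     free(obj); // frees the object allocated memory
+ * \endcode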
+ */ +void* tommy_hashdyn_remove(tommy_hashdyn* hashdyn, tommy_search_func* cmp, const void* cmp_arg, tommy_hash_t hash); + +/** + * Gets the bucket of the specified hash. + * The bucket is guaranteed to contain ALL the elements with the specified hash, + * but it can contain also others. + * You can access elements in the bucket following the ::next pointer until 0. + * \param hash Hash of the element to find. + * \return The head of the bucket, or 0 if empty. + */ +tommy_inline tommy_hashdyn_node* tommy_hashdyn_bucket(tommy_hashdyn* hashdyn, tommy_hash_t hash) +{ + return hashdyn->bucket[hash & hashdyn->bucket_mask]; +} + +/** + * Searches an element in the hashtable. + * You have to provide a compare function and the hash of the element you want to find. + * If more equal elements are present, the first one is returned. + * \param cmp Compare function called with cmp_arg as first argument and with the element to compare as a second one. + * The function should return 0 for equal elements, anything other for different elements. + * \param cmp_arg Compare argument passed as first argument of the compare function. + * \param hash Hash of the element to find. + * \return The first element found, or 0 if none. + */ +tommy_inline void* tommy_hashdyn_search(tommy_hashdyn* hashdyn, tommy_search_func* cmp, const void* cmp_arg, tommy_hash_t hash) +{ + tommy_hashdyn_node* i = tommy_hashdyn_bucket(hashdyn, hash); + + while (i) { + /* we first check if the hash matches, as in the same bucket we may have multiples hash values */ + if (i->key == hash && cmp(cmp_arg, i->data) == 0) + return i->data; + i = i->next; + } + return 0; +} + +/** + * Removes an element from the hashtable. + * You must already have the address of the element to remove. + * \return The tommy_node::data field of the node removed. + */ +void* tommy_hashdyn_remove_existing(tommy_hashdyn* hashdyn, tommy_hashdyn_node* node); + +/** + * Calls the specified function for each element in the hashtable. + * + * You cannot add or remove elements from the inside of the callback, + * but can use it to deallocate them. + * + * \code + * tommy_hashdyn hashdyn; + * + * // initializes the hashtable + * tommy_hashdyn_init(&hashdyn); + * + * ... + * + * // creates an object + * struct object* obj = malloc(sizeof(struct object)); + * + * ... + * + * // insert it in the hashtable + * tommy_hashdyn_insert(&hashdyn, &obj->node, obj, tommy_inthash_u32(obj->value)); + * + * ... + * + * // deallocates all the objects iterating the hashtable + * tommy_hashdyn_foreach(&hashdyn, free); + * + * // deallocates the hashtable + * tommy_hashdyn_done(&hashdyn); + * \endcode + */ +void tommy_hashdyn_foreach(tommy_hashdyn* hashdyn, tommy_foreach_func* func); + +/** + * Calls the specified function with an argument for each element in the hashtable. + */ +void tommy_hashdyn_foreach_arg(tommy_hashdyn* hashdyn, tommy_foreach_arg_func* func, void* arg); + +/** + * Gets the number of elements. + */ +tommy_inline tommy_count_t tommy_hashdyn_count(tommy_hashdyn* hashdyn) +{ + return hashdyn->count; +} + +/** + * Gets the size of allocated memory. + * It includes the size of the ::tommy_hashdyn_node of the stored elements. + */ +tommy_size_t tommy_hashdyn_memory_usage(tommy_hashdyn* hashdyn); + +#endif + diff --git a/sources/tommyds/tommyhashlin.c b/sources/tommyds/tommyhashlin.c new file mode 100755 index 00000000..bcf7d986 --- /dev/null +++ b/sources/tommyds/tommyhashlin.c @@ -0,0 +1,334 @@ +/* + * Copyright (c) 2010, Andrea Mazzoleni. All rights reserved. 
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "tommyhashlin.h"
+#include "tommylist.h"
+
+#include <assert.h> /* for assert */
+
+/******************************************************************************/
+/* hashlin */
+
+/**
+ * Reallocation states.
+ */
+#define TOMMY_HASHLIN_STATE_STABLE 0
+#define TOMMY_HASHLIN_STATE_GROW 1
+#define TOMMY_HASHLIN_STATE_SHRINK 2
+
+/**
+ * Set the hashtable in stable state.
+ */
+tommy_inline void tommy_hashlin_stable(tommy_hashlin* hashlin)
+{
+	hashlin->state = TOMMY_HASHLIN_STATE_STABLE;
+
+	/* setup low_mask/max/split to allow tommy_hashlin_bucket_ref() */
+	/* and tommy_hashlin_foreach() to work regardless of whether we are in the stable state */
+	hashlin->low_max = hashlin->bucket_max;
+	hashlin->low_mask = hashlin->bucket_mask;
+	hashlin->split = 0;
+}
+
+void tommy_hashlin_init(tommy_hashlin* hashlin)
+{
+	tommy_uint_t i;
+
+	/* fixed initial size */
+	hashlin->bucket_bit = TOMMY_HASHLIN_BIT;
+	hashlin->bucket_max = 1 << hashlin->bucket_bit;
+	hashlin->bucket_mask = hashlin->bucket_max - 1;
+	hashlin->bucket[0] = tommy_cast(tommy_hashlin_node**, tommy_calloc(hashlin->bucket_max, sizeof(tommy_hashlin_node*)));
+	for (i = 1; i < TOMMY_HASHLIN_BIT; ++i)
+		hashlin->bucket[i] = hashlin->bucket[0];
+
+	/* stable state */
+	tommy_hashlin_stable(hashlin);
+
+	hashlin->count = 0;
+}
+
+void tommy_hashlin_done(tommy_hashlin* hashlin)
+{
+	tommy_uint_t i;
+
+	tommy_free(hashlin->bucket[0]);
+	for (i = TOMMY_HASHLIN_BIT; i < hashlin->bucket_bit; ++i) {
+		tommy_hashlin_node** segment = hashlin->bucket[i];
+		tommy_free(&segment[((tommy_ptrdiff_t)1) << i]);
+	}
+}
+
+/**
+ * Grow one step.
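+ *
+ * A worked example of the pacing (a sketch with illustrative numbers):
+ *
+ * \code
+ * // 64 buckets, count reaches 33 -> enter the grow state, low_max = 64.
+ * // With split_target = 2 * count, each further insertion splits about
+ * // two low buckets, so all 64 are split well before count can reach
+ * // 64 and request the next doubling.
+ * \endcode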
+ */ +tommy_inline void hashlin_grow_step(tommy_hashlin* hashlin) +{ + /* grow if more than 50% full */ + if (hashlin->state != TOMMY_HASHLIN_STATE_GROW + && hashlin->count > hashlin->bucket_max / 2 + ) { + /* if we are stable, setup a new grow state */ + /* otherwise continue with the already setup shrink one */ + /* but in backward direction */ + if (hashlin->state == TOMMY_HASHLIN_STATE_STABLE) { + tommy_hashlin_node** segment; + + /* set the lower size */ + hashlin->low_max = hashlin->bucket_max; + hashlin->low_mask = hashlin->bucket_mask; + + /* allocate the new vector using malloc() and not calloc() */ + /* because data is fully initialized in the split process */ + segment = tommy_cast(tommy_hashlin_node**, tommy_malloc(hashlin->low_max * sizeof(tommy_hashlin_node*))); + + /* store it adjusting the offset */ + /* cast to ptrdiff_t to ensure to get a negative value */ + hashlin->bucket[hashlin->bucket_bit] = &segment[-(tommy_ptrdiff_t)hashlin->low_max]; + + /* grow the hash size */ + ++hashlin->bucket_bit; + hashlin->bucket_max = 1 << hashlin->bucket_bit; + hashlin->bucket_mask = hashlin->bucket_max - 1; + + /* start from the beginning going forward */ + hashlin->split = 0; + } + + /* grow state */ + hashlin->state = TOMMY_HASHLIN_STATE_GROW; + } + + /* if we are growing */ + if (hashlin->state == TOMMY_HASHLIN_STATE_GROW) { + /* compute the split target required to finish the reallocation before the next resize */ + tommy_count_t split_target = 2 * hashlin->count; + + /* reallocate buckets until the split target */ + while (hashlin->split + hashlin->low_max < split_target) { + tommy_hashlin_node** split[2]; + tommy_hashlin_node* j; + tommy_count_t mask; + + /* get the low bucket */ + split[0] = tommy_hashlin_pos(hashlin, hashlin->split); + + /* get the high bucket */ + split[1] = tommy_hashlin_pos(hashlin, hashlin->split + hashlin->low_max); + + /* save the low bucket */ + j = *split[0]; + + /* reinitialize the buckets */ + *split[0] = 0; + *split[1] = 0; + + /* the bit used to identify the bucket */ + mask = hashlin->low_max; + + /* flush the bucket */ + while (j) { + tommy_hashlin_node* j_next = j->next; + tommy_count_t pos = (j->key & mask) != 0; + if (*split[pos]) + tommy_list_insert_tail_not_empty(*split[pos], j); + else + tommy_list_insert_first(split[pos], j); + j = j_next; + } + + /* go forward */ + ++hashlin->split; + + /* if we have finished, change the state */ + if (hashlin->split == hashlin->low_max) { + /* go in stable mode */ + tommy_hashlin_stable(hashlin); + break; + } + } + } +} + +/** + * Shrink one step. 
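+ *
+ * The shrink pacing mirrors the grow one (a sketch with illustrative numbers):
+ *
+ * \code
+ * // 128 buckets, count drops to 15 -> enter the shrink state, split = 64.
+ * // With split_target = 8 * count, each further removal merges about
+ * // eight bucket pairs, so the table is fully compacted before count
+ * // can drop low enough to request a further halving.
+ * \endcode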
+ */ +tommy_inline void hashlin_shrink_step(tommy_hashlin* hashlin) +{ + /* shrink if less than 12.5% full */ + if (hashlin->state != TOMMY_HASHLIN_STATE_SHRINK + && hashlin->count < hashlin->bucket_max / 8 + ) { + /* avoid to shrink the first bucket */ + if (hashlin->bucket_bit > TOMMY_HASHLIN_BIT) { + /* if we are stable, setup a new shrink state */ + /* otherwise continue with the already setup grow one */ + /* but in backward direction */ + if (hashlin->state == TOMMY_HASHLIN_STATE_STABLE) { + /* set the lower size */ + hashlin->low_max = hashlin->bucket_max / 2; + hashlin->low_mask = hashlin->bucket_mask / 2; + + /* start from the half going backward */ + hashlin->split = hashlin->low_max; + } + + /* start reallocation */ + hashlin->state = TOMMY_HASHLIN_STATE_SHRINK; + } + } + + /* if we are shrinking */ + if (hashlin->state == TOMMY_HASHLIN_STATE_SHRINK) { + /* compute the split target required to finish the reallocation before the next resize */ + tommy_count_t split_target = 8 * hashlin->count; + + /* reallocate buckets until the split target */ + while (hashlin->split + hashlin->low_max > split_target) { + tommy_hashlin_node** split[2]; + + /* go backward position */ + --hashlin->split; + + /* get the low bucket */ + split[0] = tommy_hashlin_pos(hashlin, hashlin->split); + + /* get the high bucket */ + split[1] = tommy_hashlin_pos(hashlin, hashlin->split + hashlin->low_max); + + /* concat the high bucket into the low one */ + tommy_list_concat(split[0], split[1]); + + /* if we have finished, clean up and change the state */ + if (hashlin->split == 0) { + tommy_hashlin_node** segment; + + /* shrink the hash size */ + --hashlin->bucket_bit; + hashlin->bucket_max = 1 << hashlin->bucket_bit; + hashlin->bucket_mask = hashlin->bucket_max - 1; + + /* free the last segment */ + segment = hashlin->bucket[hashlin->bucket_bit]; + tommy_free(&segment[((tommy_ptrdiff_t)1) << hashlin->bucket_bit]); + + /* go in stable mode */ + tommy_hashlin_stable(hashlin); + break; + } + } + } +} + +void tommy_hashlin_insert(tommy_hashlin* hashlin, tommy_hashlin_node* node, void* data, tommy_hash_t hash) +{ + tommy_list_insert_tail(tommy_hashlin_bucket_ref(hashlin, hash), node, data); + + node->key = hash; + + ++hashlin->count; + + hashlin_grow_step(hashlin); +} + +void* tommy_hashlin_remove_existing(tommy_hashlin* hashlin, tommy_hashlin_node* node) +{ + tommy_list_remove_existing(tommy_hashlin_bucket_ref(hashlin, node->key), node); + + --hashlin->count; + + hashlin_shrink_step(hashlin); + + return node->data; +} + +void* tommy_hashlin_remove(tommy_hashlin* hashlin, tommy_search_func* cmp, const void* cmp_arg, tommy_hash_t hash) +{ + tommy_hashlin_node** let_ptr = tommy_hashlin_bucket_ref(hashlin, hash); + tommy_hashlin_node* node = *let_ptr; + + while (node) { + /* we first check if the hash matches, as in the same bucket we may have multiples hash values */ + if (node->key == hash && cmp(cmp_arg, node->data) == 0) { + tommy_list_remove_existing(let_ptr, node); + + --hashlin->count; + + hashlin_shrink_step(hashlin); + + return node->data; + } + node = node->next; + } + + return 0; +} + +void tommy_hashlin_foreach(tommy_hashlin* hashlin, tommy_foreach_func* func) +{ + tommy_count_t bucket_max; + tommy_count_t pos; + + /* number of valid buckets */ + bucket_max = hashlin->low_max + hashlin->split; + + for (pos = 0; pos < bucket_max; ++pos) { + tommy_hashlin_node* node = *tommy_hashlin_pos(hashlin, pos); + + while (node) { + void* data = node->data; + node = node->next; + func(data); + } + } +} + +void 
tommy_hashlin_foreach_arg(tommy_hashlin* hashlin, tommy_foreach_arg_func* func, void* arg)
+{
+	tommy_count_t bucket_max;
+	tommy_count_t pos;
+
+	/* number of valid buckets */
+	bucket_max = hashlin->low_max + hashlin->split;
+
+	for (pos = 0; pos < bucket_max; ++pos) {
+		tommy_hashlin_node* node = *tommy_hashlin_pos(hashlin, pos);
+
+		while (node) {
+			void* data = node->data;
+			node = node->next;
+			func(arg, data);
+		}
+	}
+}
+
+tommy_size_t tommy_hashlin_memory_usage(tommy_hashlin* hashlin)
+{
+	return hashlin->bucket_max * (tommy_size_t)sizeof(hashlin->bucket[0][0])
+	       + hashlin->count * (tommy_size_t)sizeof(tommy_hashlin_node);
+}
+
diff --git a/sources/tommyds/tommyhashlin.h b/sources/tommyds/tommyhashlin.h
new file mode 100755
index 00000000..eaf59e67
--- /dev/null
+++ b/sources/tommyds/tommyhashlin.h
@@ -0,0 +1,350 @@
+/*
+ * Copyright (c) 2010, Andrea Mazzoleni. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file
+ * Linear chained hashtable.
+ *
+ * This hashtable resizes dynamically and progressively using a variation of the
+ * linear hashing algorithm described in http://en.wikipedia.org/wiki/Linear_hashing
+ *
+ * It starts with the minimal size of 64 buckets, doubles the size when it
+ * reaches a load factor greater than 0.5, and halves the size when the load
+ * factor drops below 0.125.
+ *
+ * The progressive resize is good for real-time and interactive applications,
+ * as insert and delete operations always take almost the same time.
+ *
+ * Resizing uses a dynamic array that supports access to non-contiguous
+ * segments.
+ * In this way only additional table segments are allocated on the heap, without
+ * freeing the previous table, and thus without increasing heap fragmentation.
+ *
+ * The resize takes place inside tommy_hashlin_insert() and tommy_hashlin_remove().
+ * No resize is done in the tommy_hashlin_search() operation.
+ *
+ * To initialize the hashtable you have to call tommy_hashlin_init().
+ *
+ * \code
+ * tommy_hashlin hashlin;
+ *
+ * tommy_hashlin_init(&hashlin);
+ * \endcode
+ *
+ * To insert elements in the hashtable you have to call tommy_hashlin_insert() for
+ * each element.
+ * In the insertion call you have to specify the address of the node, the
+ * address of the object, and the hash value of the key to use.
+ * The address of the object is used to initialize the tommy_node::data field
+ * of the node, and the hash to initialize the tommy_node::key field.
+ *
+ * \code
+ * struct object {
+ *     int value;
+ *     // other fields
+ *     tommy_node node;
+ * };
+ *
+ * struct object* obj = malloc(sizeof(struct object)); // creates the object
+ *
+ * obj->value = ...; // initializes the object
+ *
+ * tommy_hashlin_insert(&hashlin, &obj->node, obj, tommy_inthash_u32(obj->value)); // inserts the object
+ * \endcode
+ *
+ * To find an element in the hashtable you have to call tommy_hashlin_search()
+ * providing a comparison function, its argument, and the hash of the key to search.
+ *
+ * \code
+ * int compare(const void* arg, const void* obj)
+ * {
+ *     return *(const int*)arg != ((const struct object*)obj)->value;
+ * }
+ *
+ * int value_to_find = 1;
+ * struct object* obj = tommy_hashlin_search(&hashlin, compare, &value_to_find, tommy_inthash_u32(value_to_find));
+ * if (!obj) {
+ *     // not found
+ * } else {
+ *     // found
+ * }
+ * \endcode
+ *
+ * To iterate over all the elements in the hashtable with the same key, you have to
+ * use tommy_hashlin_bucket() and follow the tommy_node::next pointer until NULL.
+ * You also have to check explicitly for the key, as the bucket may contain
+ * different keys.
+ *
+ * \code
+ * int value_to_find = 1;
+ * tommy_node* i = tommy_hashlin_bucket(&hashlin, tommy_inthash_u32(value_to_find));
+ * while (i) {
+ *     struct object* obj = i->data; // gets the object pointer
+ *
+ *     if (obj->value == value_to_find) {
+ *         printf("%d\n", obj->value); // process the object
+ *     }
+ *
+ *     i = i->next; // goes to the next element
+ * }
+ * \endcode
+ *
+ * To remove an element from the hashtable you have to call tommy_hashlin_remove()
+ * providing a comparison function, its argument, and the hash of the key to search
+ * and remove.
+ *
+ * \code
+ * struct object* obj = tommy_hashlin_remove(&hashlin, compare, &value_to_remove, tommy_inthash_u32(value_to_remove));
+ * if (obj) {
+ *     free(obj); // frees the object allocated memory
+ * }
+ * \endcode
+ *
+ * To destroy the hashtable you have to remove all the elements, and deinitialize
+ * the hashtable calling tommy_hashlin_done().
+ *
+ * \code
+ * tommy_hashlin_done(&hashlin);
+ * \endcode
+ *
+ * If you need to iterate over all the elements in the hashtable, you can use
+ * tommy_hashlin_foreach() or tommy_hashlin_foreach_arg().
+ * If you need more precise control with a real iteration, you have to insert
+ * all the elements also in a ::tommy_list, and use the list to iterate.
+ * See the \ref multiindex example for more detail.
+ */
+
+#ifndef __TOMMYHASHLIN_H
+#define __TOMMYHASHLIN_H
+
+#include "tommyhash.h"
+
+/******************************************************************************/
+/* hashlin */
+
+/** \internal
+ * Initial and minimal size of the hashtable expressed as a power of 2.
+ * The initial size is 2^TOMMY_HASHLIN_BIT.
+ */
+#define TOMMY_HASHLIN_BIT 6
+
+/**
+ * Hashtable node.
+ * This is the node that you have to include inside your objects.
+ */
+typedef tommy_node tommy_hashlin_node;
+
+/** \internal
+ * Max number of elements as a power of 2.
+ */
+#define TOMMY_HASHLIN_BIT_MAX 32
+
+/**
+ * Hashtable container type.
+ * \note Don't use internal fields directly, but access the container only using functions.
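+ *
+ * How the bucket[] segments are laid out (a sketch deduced from the
+ * implementation in this patch; the numbers are illustrative):
+ *
+ * \code
+ * // bucket[0..5] all point at the first allocation of 64 entries,
+ * // covering positions 0..63; every later bucket[i] is stored with a
+ * // negative offset so that bucket[i][pos] is valid for positions
+ * // 2^i .. 2^(i+1)-1. For instance, tommy_hashlin_pos() maps pos = 75
+ * // to bucket[6][75], which is entry 11 of the 64-entry segment.
+ * \endcode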
+ */
+typedef struct tommy_hashlin_struct {
+	tommy_hashlin_node** bucket[TOMMY_HASHLIN_BIT_MAX]; /**< Dynamic array of hash buckets. One list for each hash modulus. */
+	tommy_uint_t bucket_bit; /**< Bits used in the bit mask. */
+	tommy_count_t bucket_max; /**< Number of buckets. */
+	tommy_count_t bucket_mask; /**< Bit mask to access the buckets. */
+	tommy_count_t low_max; /**< Low order max value. */
+	tommy_count_t low_mask; /**< Low order mask value. */
+	tommy_count_t split; /**< Split position. */
+	tommy_count_t count; /**< Number of elements. */
+	tommy_uint_t state; /**< Reallocation state. */
+} tommy_hashlin;
+
+/**
+ * Initializes the hashtable.
+ */
+void tommy_hashlin_init(tommy_hashlin* hashlin);
+
+/**
+ * Deinitializes the hashtable.
+ *
+ * You can call this function with elements still contained,
+ * but such elements are not going to be freed by this call.
+ */
+void tommy_hashlin_done(tommy_hashlin* hashlin);
+
+/**
+ * Inserts an element in the hashtable.
+ */
+void tommy_hashlin_insert(tommy_hashlin* hashlin, tommy_hashlin_node* node, void* data, tommy_hash_t hash);
+
+/**
+ * Searches and removes an element from the hashtable.
+ * You have to provide a compare function and the hash of the element you want to remove.
+ * If the element is not found, 0 is returned.
+ * If more equal elements are present, the first one is removed.
+ * \param cmp Compare function called with cmp_arg as first argument and with the element to compare as a second one.
+ * The function should return 0 for equal elements, anything other for different elements.
+ * \param cmp_arg Compare argument passed as first argument of the compare function.
+ * \param hash Hash of the element to find and remove.
+ * \return The removed element, or 0 if not found.
+ */
+void* tommy_hashlin_remove(tommy_hashlin* hashlin, tommy_search_func* cmp, const void* cmp_arg, tommy_hash_t hash);
+
+/** \internal
+ * Returns the bucket at the specified position.
+ */
+tommy_inline tommy_hashlin_node** tommy_hashlin_pos(tommy_hashlin* hashlin, tommy_hash_t pos)
+{
+	tommy_uint_t bsr;
+
+	/* get the highest bit set, in case of all 0, return 0 */
+	bsr = tommy_ilog2_u32(pos | 1);
+
+	return &hashlin->bucket[bsr][pos];
+}
+
+/** \internal
+ * Returns a pointer to the bucket of the specified hash.
+ */
+tommy_inline tommy_hashlin_node** tommy_hashlin_bucket_ref(tommy_hashlin* hashlin, tommy_hash_t hash)
+{
+	tommy_count_t pos;
+	tommy_count_t high_pos;
+
+	pos = hash & hashlin->low_mask;
+	high_pos = hash & hashlin->bucket_mask;
+
+	/* if this position is already allocated in the high half */
+	if (pos < hashlin->split) {
+		/* The following assignment is expected to be implemented */
+		/* with a conditional move instruction */
+		/* that results in a little better and constant performance */
+		/* regardless of the split position. */
+		/* This affects mostly the worst case, when the split value */
+		/* is near its half, resulting in a totally unpredictable */
+		/* condition for the CPU. */
+		/* In such a case the use of the conditional move is generally faster. */
+
+		/* use also the high bit */
+		pos = high_pos;
+	}
+
+	return tommy_hashlin_pos(hashlin, pos);
+}
+
+/**
+ * Gets the bucket of the specified hash.
+ * The bucket is guaranteed to contain ALL the elements with the specified hash,
+ * but it can contain also others.
+ * You can access elements in the bucket following the ::next pointer until 0.
+ * \param hash Hash of the element to find.
+ * \return The head of the bucket, or 0 if empty.
+ */ +tommy_inline tommy_hashlin_node* tommy_hashlin_bucket(tommy_hashlin* hashlin, tommy_hash_t hash) +{ + return *tommy_hashlin_bucket_ref(hashlin, hash); +} + +/** + * Searches an element in the hashtable. + * You have to provide a compare function and the hash of the element you want to find. + * If more equal elements are present, the first one is returned. + * \param cmp Compare function called with cmp_arg as first argument and with the element to compare as a second one. + * The function should return 0 for equal elements, anything other for different elements. + * \param cmp_arg Compare argument passed as first argument of the compare function. + * \param hash Hash of the element to find. + * \return The first element found, or 0 if none. + */ +tommy_inline void* tommy_hashlin_search(tommy_hashlin* hashlin, tommy_search_func* cmp, const void* cmp_arg, tommy_hash_t hash) +{ + tommy_hashlin_node* i = tommy_hashlin_bucket(hashlin, hash); + + while (i) { + /* we first check if the hash matches, as in the same bucket we may have multiples hash values */ + if (i->key == hash && cmp(cmp_arg, i->data) == 0) + return i->data; + i = i->next; + } + return 0; +} + +/** + * Removes an element from the hashtable. + * You must already have the address of the element to remove. + * \return The tommy_node::data field of the node removed. + */ +void* tommy_hashlin_remove_existing(tommy_hashlin* hashlin, tommy_hashlin_node* node); + +/** + * Calls the specified function for each element in the hashtable. + * + * You cannot add or remove elements from the inside of the callback, + * but can use it to deallocate them. + * + * \code + * tommy_hashlin hashlin; + * + * // initializes the hashtable + * tommy_hashlin_init(&hashlin); + * + * ... + * + * // creates an object + * struct object* obj = malloc(sizeof(struct object)); + * + * ... + * + * // insert it in the hashtable + * tommy_hashlin_insert(&hashlin, &obj->node, obj, tommy_inthash_u32(obj->value)); + * + * ... + * + * // deallocates all the objects iterating the hashtable + * tommy_hashlin_foreach(&hashlin, free); + * + * // deallocates the hashtable + * tommy_hashlin_done(&hashlin); + * \endcode + */ +void tommy_hashlin_foreach(tommy_hashlin* hashlin, tommy_foreach_func* func); + +/** + * Calls the specified function with an argument for each element in the hashtable. + */ +void tommy_hashlin_foreach_arg(tommy_hashlin* hashlin, tommy_foreach_arg_func* func, void* arg); + +/** + * Gets the number of elements. + */ +tommy_inline tommy_count_t tommy_hashlin_count(tommy_hashlin* hashlin) +{ + return hashlin->count; +} + +/** + * Gets the size of allocated memory. + * It includes the size of the ::tommy_hashlin_node of the stored elements. + */ +tommy_size_t tommy_hashlin_memory_usage(tommy_hashlin* hashlin); + +#endif + diff --git a/sources/tommyds/tommyhashtbl.c b/sources/tommyds/tommyhashtbl.c new file mode 100755 index 00000000..007b8ece --- /dev/null +++ b/sources/tommyds/tommyhashtbl.c @@ -0,0 +1,142 @@ +/* + * Copyright (c) 2010, Andrea Mazzoleni. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "tommyhashtbl.h"
+#include "tommylist.h"
+
+#include <string.h> /* for memset */
+
+/******************************************************************************/
+/* hashtable */
+
+void tommy_hashtable_init(tommy_hashtable* hashtable, tommy_count_t bucket_max)
+{
+	if (bucket_max < 16)
+		bucket_max = 16;
+	else
+		bucket_max = tommy_roundup_pow2_u32(bucket_max);
+
+	hashtable->bucket_max = bucket_max;
+	hashtable->bucket_mask = hashtable->bucket_max - 1;
+
+	/* initialize the vector using malloc()+memset() instead of calloc() */
+	/* to ensure that all the memory is really allocated immediately */
+	/* by the OS, and not deferred to a later time. */
+	/* this improves performance, because we start with a fully initialized hashtable. */
+	hashtable->bucket = tommy_cast(tommy_hashtable_node**, tommy_malloc(hashtable->bucket_max * sizeof(tommy_hashtable_node*)));
+	memset(hashtable->bucket, 0, hashtable->bucket_max * sizeof(tommy_hashtable_node*));
+
+	hashtable->count = 0;
+}
+
+void tommy_hashtable_done(tommy_hashtable* hashtable)
+{
+	tommy_free(hashtable->bucket);
+}
+
+void tommy_hashtable_insert(tommy_hashtable* hashtable, tommy_hashtable_node* node, void* data, tommy_hash_t hash)
+{
+	tommy_count_t pos = hash & hashtable->bucket_mask;
+
+	tommy_list_insert_tail(&hashtable->bucket[pos], node, data);
+
+	node->key = hash;
+
+	++hashtable->count;
+}
+
+void* tommy_hashtable_remove_existing(tommy_hashtable* hashtable, tommy_hashtable_node* node)
+{
+	tommy_count_t pos = node->key & hashtable->bucket_mask;
+
+	tommy_list_remove_existing(&hashtable->bucket[pos], node);
+
+	--hashtable->count;
+
+	return node->data;
+}
+
+void* tommy_hashtable_remove(tommy_hashtable* hashtable, tommy_search_func* cmp, const void* cmp_arg, tommy_hash_t hash)
+{
+	tommy_count_t pos = hash & hashtable->bucket_mask;
+	tommy_hashtable_node* node = hashtable->bucket[pos];
+
+	while (node) {
+		/* we first check if the hash matches, as in the same bucket we may have multiple hash values */
+		if (node->key == hash && cmp(cmp_arg, node->data) == 0) {
+			tommy_list_remove_existing(&hashtable->bucket[pos], node);
+
+			--hashtable->count;
+
+			return node->data;
+		}
+		node = node->next;
+	}
+
+	return 0;
+}
+
+void tommy_hashtable_foreach(tommy_hashtable* hashtable, tommy_foreach_func* func)
+{
+	tommy_count_t bucket_max = hashtable->bucket_max;
+	tommy_hashtable_node** bucket = hashtable->bucket;
+	tommy_count_t pos;
+
+	for (pos = 0; pos < bucket_max; ++pos) {
+		tommy_hashtable_node* node = bucket[pos];
+
+		while (node)
{
+			void* data = node->data;
+			node = node->next;
+			func(data);
+		}
+	}
+}
+
+void tommy_hashtable_foreach_arg(tommy_hashtable* hashtable, tommy_foreach_arg_func* func, void* arg)
+{
+	tommy_count_t bucket_max = hashtable->bucket_max;
+	tommy_hashtable_node** bucket = hashtable->bucket;
+	tommy_count_t pos;
+
+	for (pos = 0; pos < bucket_max; ++pos) {
+		tommy_hashtable_node* node = bucket[pos];
+
+		while (node) {
+			void* data = node->data;
+			node = node->next;
+			func(arg, data);
+		}
+	}
+}
+
+tommy_size_t tommy_hashtable_memory_usage(tommy_hashtable* hashtable)
+{
+	return hashtable->bucket_max * (tommy_size_t)sizeof(hashtable->bucket[0])
+	       + tommy_hashtable_count(hashtable) * (tommy_size_t)sizeof(tommy_hashtable_node);
+}
+
diff --git a/sources/tommyds/tommyhashtbl.h b/sources/tommyds/tommyhashtbl.h
new file mode 100755
index 00000000..0cb63322
--- /dev/null
+++ b/sources/tommyds/tommyhashtbl.h
@@ -0,0 +1,280 @@
+/*
+ * Copyright (c) 2010, Andrea Mazzoleni. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file
+ * Fixed size chained hashtable.
+ *
+ * This hashtable is a standard implementation of a chained hashtable with a fixed size.
+ *
+ * Note that performance starts to degrade after reaching a load factor greater than 0.75.
+ * The ::tommy_hashdyn and ::tommy_hashlin hashtables fix this problem by growing dynamically.
+ *
+ * To initialize the hashtable you have to call tommy_hashtable_init() specifying
+ * the fixed bucket size.
+ *
+ * \code
+ * tommy_hashtable hashtable;
+ *
+ * tommy_hashtable_init(&hashtable, 1024);
+ * \endcode
+ *
+ * To insert elements in the hashtable you have to call tommy_hashtable_insert() for
+ * each element.
+ * In the insertion call you have to specify the address of the node, the
+ * address of the object, and the hash value of the key to use.
+ * The address of the object is used to initialize the tommy_node::data field
+ * of the node, and the hash to initialize the tommy_node::key field.
+ *
+ * \code
+ * struct object {
+ *     int value;
+ *     // other fields
+ *     tommy_node node;
+ * };
+ *
+ * struct object* obj = malloc(sizeof(struct object)); // creates the object
+ *
+ * obj->value = ...; // initializes the object
+ *
+ * tommy_hashtable_insert(&hashtable, &obj->node, obj, tommy_inthash_u32(obj->value)); // inserts the object
+ * \endcode
+ *
+ * To find an element in the hashtable you have to call tommy_hashtable_search()
+ * providing a comparison function, its argument, and the hash of the key to search.
+ *
+ * \code
+ * int compare(const void* arg, const void* obj)
+ * {
+ *     return *(const int*)arg != ((const struct object*)obj)->value;
+ * }
+ *
+ * int value_to_find = 1;
+ * struct object* obj = tommy_hashtable_search(&hashtable, compare, &value_to_find, tommy_inthash_u32(value_to_find));
+ * if (!obj) {
+ *     // not found
+ * } else {
+ *     // found
+ * }
+ * \endcode
+ *
+ * To iterate over all the elements in the hashtable with the same key, you have to
+ * use tommy_hashtable_bucket() and follow the tommy_node::next pointer until NULL.
+ * You also have to check explicitly for the key, as the bucket may contain
+ * different keys.
+ *
+ * \code
+ * tommy_node* i = tommy_hashtable_bucket(&hashtable, tommy_inthash_u32(value_to_find));
+ * while (i) {
+ *     struct object* obj = i->data; // gets the object pointer
+ *
+ *     if (obj->value == value_to_find) {
+ *         printf("%d\n", obj->value); // process the object
+ *     }
+ *
+ *     i = i->next; // goes to the next element
+ * }
+ * \endcode
+ *
+ * To remove an element from the hashtable you have to call tommy_hashtable_remove()
+ * providing a comparison function, its argument, and the hash of the key to search
+ * and remove.
+ *
+ * \code
+ * struct object* obj = tommy_hashtable_remove(&hashtable, compare, &value_to_remove, tommy_inthash_u32(value_to_remove));
+ * if (obj) {
+ *     free(obj); // frees the object allocated memory
+ * }
+ * \endcode
+ *
+ * To destroy the hashtable you have to remove all the elements, and deinitialize
+ * the hashtable calling tommy_hashtable_done().
+ *
+ * \code
+ * tommy_hashtable_done(&hashtable);
+ * \endcode
+ *
+ * If you need to iterate over all the elements in the hashtable, you can use
+ * tommy_hashtable_foreach() or tommy_hashtable_foreach_arg().
+ * If you need more precise control with a real iteration, you have to insert
+ * all the elements also in a ::tommy_list, and use the list to iterate.
+ * See the \ref multiindex example for more detail.
+ */
+
+#ifndef __TOMMYHASHTBL_H
+#define __TOMMYHASHTBL_H
+
+#include "tommyhash.h"
+
+/******************************************************************************/
+/* hashtable */
+
+/**
+ * Hashtable node.
+ * This is the node that you have to include inside your objects.
+ */
+typedef tommy_node tommy_hashtable_node;
+
+/**
+ * Hashtable container type.
+ * \note Don't use internal fields directly, but access the container only using functions.
+ */
+typedef struct tommy_hashtable_struct {
+	tommy_hashtable_node** bucket; /**< Hash buckets. One list for each hash modulus. */
+	tommy_count_t bucket_max; /**< Number of buckets. */
+	tommy_count_t bucket_mask; /**< Bit mask to access the buckets. */
+	tommy_count_t count; /**< Number of elements. */
+} tommy_hashtable;
+
+/**
+ * Initializes the hashtable.
+ * \param bucket_max Minimum number of buckets to allocate. The effective number used is the next power of 2.
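+ *
+ * For example (a sketch; the requested size is illustrative):
+ *
+ * \code
+ * tommy_hashtable hashtable;
+ *
+ * tommy_hashtable_init(&hashtable, 1000); // rounds up to 1024 buckets
+ * \endcode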
+ */ +void tommy_hashtable_init(tommy_hashtable* hashtable, tommy_count_t bucket_max); + +/** + * Deinitializes the hashtable. + * + * You can call this function with elements still contained, + * but such elements are not going to be freed by this call. + */ +void tommy_hashtable_done(tommy_hashtable* hashtable); + +/** + * Inserts an element in the hashtable. + */ +void tommy_hashtable_insert(tommy_hashtable* hashtable, tommy_hashtable_node* node, void* data, tommy_hash_t hash); + +/** + * Searches and removes an element from the hashtable. + * You have to provide a compare function and the hash of the element you want to remove. + * If the element is not found, 0 is returned. + * If more equal elements are present, the first one is removed. + * \param cmp Compare function called with cmp_arg as first argument and with the element to compare as a second one. + * The function should return 0 for equal elements, anything other for different elements. + * \param cmp_arg Compare argument passed as first argument of the compare function. + * \param hash Hash of the element to find and remove. + * \return The removed element, or 0 if not found. + */ +void* tommy_hashtable_remove(tommy_hashtable* hashtable, tommy_search_func* cmp, const void* cmp_arg, tommy_hash_t hash); + +/** + * Gets the bucket of the specified hash. + * The bucket is guaranteed to contain ALL the elements with the specified hash, + * but it can contain also others. + * You can access elements in the bucket following the ::next pointer until 0. + * \param hash Hash of the element to find. + * \return The head of the bucket, or 0 if empty. + */ +tommy_inline tommy_hashtable_node* tommy_hashtable_bucket(tommy_hashtable* hashtable, tommy_hash_t hash) +{ + return hashtable->bucket[hash & hashtable->bucket_mask]; +} + +/** + * Searches an element in the hashtable. + * You have to provide a compare function and the hash of the element you want to find. + * If more equal elements are present, the first one is returned. + * \param cmp Compare function called with cmp_arg as first argument and with the element to compare as a second one. + * The function should return 0 for equal elements, anything other for different elements. + * \param cmp_arg Compare argument passed as first argument of the compare function. + * \param hash Hash of the element to find. + * \return The first element found, or 0 if none. + */ +tommy_inline void* tommy_hashtable_search(tommy_hashtable* hashtable, tommy_search_func* cmp, const void* cmp_arg, tommy_hash_t hash) +{ + tommy_hashtable_node* i = tommy_hashtable_bucket(hashtable, hash); + + while (i) { + /* we first check if the hash matches, as in the same bucket we may have multiples hash values */ + if (i->key == hash && cmp(cmp_arg, i->data) == 0) + return i->data; + i = i->next; + } + return 0; +} + +/** + * Removes an element from the hashtable. + * You must already have the address of the element to remove. + * \return The tommy_node::data field of the node removed. + */ +void* tommy_hashtable_remove_existing(tommy_hashtable* hashtable, tommy_hashtable_node* node); + +/** + * Calls the specified function for each element in the hashtable. + * + * You cannot add or remove elements from the inside of the callback, + * but can use it to deallocate them. + * + * \code + * tommy_hashtable hashtable; + * + * // initializes the hashtable + * tommy_hashtable_init(&hashtable, ...); + * + * ... + * + * // creates an object + * struct object* obj = malloc(sizeof(struct object)); + * + * ... 
+ *
+ * // insert it in the hashtable
+ * tommy_hashtable_insert(&hashtable, &obj->node, obj, tommy_inthash_u32(obj->value));
+ *
+ * ...
+ *
+ * // deallocates all the objects iterating the hashtable
+ * tommy_hashtable_foreach(&hashtable, free);
+ *
+ * // deallocates the hashtable
+ * tommy_hashtable_done(&hashtable);
+ * \endcode
+ */
+void tommy_hashtable_foreach(tommy_hashtable* hashtable, tommy_foreach_func* func);
+
+/**
+ * Calls the specified function with an argument for each element in the hashtable.
+ */
+void tommy_hashtable_foreach_arg(tommy_hashtable* hashtable, tommy_foreach_arg_func* func, void* arg);
+
+/**
+ * Gets the number of elements.
+ */
+tommy_inline tommy_count_t tommy_hashtable_count(tommy_hashtable* hashtable)
+{
+	return hashtable->count;
+}
+
+/**
+ * Gets the size of allocated memory.
+ * It includes the size of the ::tommy_hashtable_node of the stored elements.
+ */
+tommy_size_t tommy_hashtable_memory_usage(tommy_hashtable* hashtable);
+
+#endif
+
diff --git a/sources/tommyds/tommylist.c b/sources/tommyds/tommylist.c
new file mode 100755
index 00000000..1fa1f243
--- /dev/null
+++ b/sources/tommyds/tommylist.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2010, Andrea Mazzoleni. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "tommylist.h"
+#include "tommychain.h"
+
+/** \internal
+ * Setup a list.
+ */
+tommy_inline void tommy_list_set(tommy_list* list, tommy_node* head, tommy_node* tail)
+{
+	head->prev = tail;
+	tail->next = 0;
+	*list = head;
+}
+
+void tommy_list_sort(tommy_list* list, tommy_compare_func* cmp)
+{
+	tommy_chain chain;
+	tommy_node* head;
+
+	if (tommy_list_empty(list))
+		return;
+
+	head = tommy_list_head(list);
+
+	/* create a chain from the list */
+	chain.head = head;
+	chain.tail = head->prev;
+
+	tommy_chain_mergesort(&chain, cmp);
+
+	/* restore the list */
+	tommy_list_set(list, chain.head, chain.tail);
+}
+
diff --git a/sources/tommyds/tommylist.h b/sources/tommyds/tommylist.h
new file mode 100755
index 00000000..d88cee7b
--- /dev/null
+++ b/sources/tommyds/tommylist.h
@@ -0,0 +1,399 @@
+/*
+ * Copyright (c) 2010, Andrea Mazzoleni. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file
+ * Double linked list for collisions into hashtables.
+ *
+ * This list is a double linked list mainly targeted at handling collisions
+ * inside hashtables, but it is also usable as a generic list.
+ *
+ * The main feature of this list is to require only one pointer to represent the
+ * list, compared to a classic implementation requiring both a head and a tail pointer.
+ * This reduces the memory usage in hashtables.
+ *
+ * Another feature is the support for insertion at the end of the list. This allows storing
+ * collisions in a stable order, where stable order means that equal elements keep
+ * their insertion order.
+ *
+ * To initialize the list, you have to call tommy_list_init(), or simply assign
+ * NULL to it, as an empty list is represented by the NULL value.
+ *
+ * \code
+ * tommy_list list;
+ *
+ * tommy_list_init(&list); // initializes the list
+ * \endcode
+ *
+ * To insert elements in the list you have to call tommy_list_insert_tail()
+ * or tommy_list_insert_head() for each element.
+ * In the insertion call you have to specify the address of the node and the
+ * address of the object.
+ * The address of the object is used to initialize the tommy_node::data field
+ * of the node.
+ *
+ * \code
+ * struct object {
+ *     int value;
+ *     // other fields
+ *     tommy_node node;
+ * };
+ *
+ * struct object* obj = malloc(sizeof(struct object)); // creates the object
+ *
+ * obj->value = ...; // initializes the object
+ *
+ * tommy_list_insert_tail(&list, &obj->node, obj); // inserts the object
+ * \endcode
+ *
+ * To iterate over all the elements in the list you have to call
+ * tommy_list_head() to get the head of the list and follow the
+ * tommy_node::next pointer until NULL.
+ *
+ * \code
+ * tommy_node* i = tommy_list_head(&list);
+ * while (i) {
+ *     struct object* obj = i->data; // gets the object pointer
+ *
+ *     printf("%d\n", obj->value); // process the object
+ *
+ *     i = i->next; // go to the next element
+ * }
+ * \endcode
+ *
+ * To destroy the list you have to remove all the elements,
+ * as the list is completely inplace and it doesn't allocate memory.
+ * This can be done with the tommy_list_foreach() function.
+ * + * \code + * // deallocates all the objects iterating the list + * tommy_list_foreach(&list, free); + * \endcode + */ + +#ifndef __TOMMYLIST_H +#define __TOMMYLIST_H + +#include "tommytypes.h" + +/******************************************************************************/ +/* list */ + +/** + * Double linked list type. + */ +typedef tommy_node* tommy_list; + +/** + * Initializes the list. + * The list is completely inplace, so it doesn't need to be deinitialized. + */ +tommy_inline void tommy_list_init(tommy_list* list) +{ + *list = 0; +} + +/** + * Gets the head of the list. + * \return The head node. For empty lists 0 is returned. + */ +tommy_inline tommy_node* tommy_list_head(tommy_list* list) +{ + return *list; +} + +/** + * Gets the tail of the list. + * \return The tail node. For empty lists 0 is returned. + */ +tommy_inline tommy_node* tommy_list_tail(tommy_list* list) +{ + tommy_node* head = tommy_list_head(list); + + if (!head) + return 0; + + return head->prev; +} + +/** \internal + * Creates a new list with a single element. + * \param list The list to initialize. + * \param node The node to insert. + */ +tommy_inline void tommy_list_insert_first(tommy_list* list, tommy_node* node) +{ + /* one element "circular" prev list */ + node->prev = node; + + /* one element "0 terminated" next list */ + node->next = 0; + + *list = node; +} + +/** \internal + * Inserts an element at the head of a not empty list. + * The element is inserted at the head of the list. The list cannot be empty. + * \param list The list. The list cannot be empty. + * \param node The node to insert. + */ +tommy_inline void tommy_list_insert_head_not_empty(tommy_list* list, tommy_node* node) +{ + tommy_node* head = tommy_list_head(list); + + /* insert in the "circular" prev list */ + node->prev = head->prev; + head->prev = node; + + /* insert in the "0 terminated" next list */ + node->next = head; + + *list = node; +} + +/** \internal + * Inserts an element at the tail of a not empty list. + * The element is inserted at the tail of the list. The list cannot be empty. + * \param head The node at the list head. It cannot be 0. + * \param node The node to insert. + */ +tommy_inline void tommy_list_insert_tail_not_empty(tommy_node* head, tommy_node* node) +{ + /* insert in the "circular" prev list */ + node->prev = head->prev; + head->prev = node; + + /* insert in the "0 terminated" next list */ + node->next = 0; + node->prev->next = node; +} + +/** + * Inserts an element at the head of a list. + * \param node The node to insert. + * \param data The object containing the node. It's used to set the tommy_node::data field of the node. + */ +tommy_inline void tommy_list_insert_head(tommy_list* list, tommy_node* node, void* data) +{ + tommy_node* head = tommy_list_head(list); + + if (head) + tommy_list_insert_head_not_empty(list, node); + else + tommy_list_insert_first(list, node); + + node->data = data; +} + +/** + * Inserts an element at the tail of a list. + * \param node The node to insert. + * \param data The object containing the node. It's used to set the tommy_node::data field of the node. + */ +tommy_inline void tommy_list_insert_tail(tommy_list* list, tommy_node* node, void* data) +{ + tommy_node* head = tommy_list_head(list); + + if (head) + tommy_list_insert_tail_not_empty(head, node); + else + tommy_list_insert_first(list, node); + + node->data = data; +} + +/** \internal + * Removes an element from the head of a not empty list. + * \param list The list. The list cannot be empty. 
+ * \return The node removed.
+ */
+tommy_inline tommy_node* tommy_list_remove_head_not_empty(tommy_list* list)
+{
+	tommy_node* head = tommy_list_head(list);
+
+	/* remove from the "circular" prev list */
+	head->next->prev = head->prev;
+
+	/* remove from the "0 terminated" next list */
+	*list = head->next; /* the new head, in case 0 */
+
+	return head;
+}
+
+/**
+ * Removes an element from the list.
+ * You must already have the address of the element to remove.
+ * \note The node content is left unchanged, including the tommy_node::next
+ * and tommy_node::prev fields that still contain pointers at the list.
+ * \param node The node to remove. The node must be in the list.
+ * \return The tommy_node::data field of the node removed.
+ */
+tommy_inline void* tommy_list_remove_existing(tommy_list* list, tommy_node* node)
+{
+	tommy_node* head = tommy_list_head(list);
+
+	/* remove from the "circular" prev list */
+	if (node->next)
+		node->next->prev = node->prev;
+	else
+		head->prev = node->prev; /* the last */
+
+	/* remove from the "0 terminated" next list */
+	if (head == node)
+		*list = node->next; /* the new head, in case 0 */
+	else
+		node->prev->next = node->next;
+
+	return node->data;
+}
+
+/**
+ * Concatenates two lists.
+ * The second list is appended at the end of the first list.
+ * \param first The first list.
+ * \param second The second list. After this call the list content is undefined,
+ * and you should not use it anymore.
+ */
+tommy_inline void tommy_list_concat(tommy_list* first, tommy_list* second)
+{
+	tommy_node* first_head;
+	tommy_node* first_tail;
+	tommy_node* second_head;
+
+	/* if the second is empty, nothing to do */
+	second_head = tommy_list_head(second);
+	if (second_head == 0)
+		return;
+
+	/* if the first is empty, copy the second */
+	first_head = tommy_list_head(first);
+	if (first_head == 0) {
+		*first = *second;
+		return;
+	}
+
+	/* tail of the first list */
+	first_tail = first_head->prev;
+
+	/* set the "circular" prev list */
+	first_head->prev = second_head->prev;
+	second_head->prev = first_tail;
+
+	/* set the "0 terminated" next list */
+	first_tail->next = second_head;
+}
+
+/**
+ * Sorts a list.
+ * It's a stable merge sort with O(N*log(N)) worst-case complexity.
+ * It's faster on degenerate cases like partially ordered lists.
+ * \param cmp Compare function called with two elements.
+ * The function should return <0 if the first element is less than the second, ==0 if equal, and >0 if greater.
+ */
+void tommy_list_sort(tommy_list* list, tommy_compare_func* cmp);
+
+/**
+ * Checks if the list is empty.
+ * \return Whether the list is empty.
+ */
+tommy_inline tommy_bool_t tommy_list_empty(tommy_list* list)
+{
+	return tommy_list_head(list) == 0;
+}
+
+/**
+ * Gets the number of elements.
+ * \note This operation is O(n).
+ */
+tommy_inline tommy_count_t tommy_list_count(tommy_list* list)
+{
+	tommy_count_t count = 0;
+	tommy_node* i = tommy_list_head(list);
+
+	while (i) {
+		++count;
+		i = i->next;
+	}
+
+	return count;
+}
+
+/**
+ * Calls the specified function for each element in the list.
+ *
+ * You cannot add or remove elements from the inside of the callback,
+ * but can use it to deallocate them.
+ *
+ * \code
+ * tommy_list list;
+ *
+ * // initializes the list
+ * tommy_list_init(&list);
+ *
+ * ...
+ *
+ * // creates an object
+ * struct object* obj = malloc(sizeof(struct object));
+ *
+ * ...
+ *
+ * // insert it in the list
+ * tommy_list_insert_tail(&list, &obj->node, obj);
+ *
+ * ...
+ *
+ * // deallocates all the objects iterating the list
+ * tommy_list_foreach(&list, free);
+ * \endcode
+ */
+tommy_inline void tommy_list_foreach(tommy_list* list, tommy_foreach_func* func)
+{
+	tommy_node* node = tommy_list_head(list);
+
+	while (node) {
+		void* data = node->data;
+		node = node->next;
+		func(data);
+	}
+}
+
+/**
+ * Calls the specified function with an argument for each element in the list.
+ */
+tommy_inline void tommy_list_foreach_arg(tommy_list* list, tommy_foreach_arg_func* func, void* arg)
+{
+	tommy_node* node = tommy_list_head(list);
+
+	while (node) {
+		void* data = node->data;
+		node = node->next;
+		func(arg, data);
+	}
+}
+
+#endif
+
diff --git a/sources/tommyds/tommytree.c b/sources/tommyds/tommytree.c
new file mode 100755
index 00000000..6b2f1a99
--- /dev/null
+++ b/sources/tommyds/tommytree.c
@@ -0,0 +1,292 @@
+/*
+ * Copyright (c) 2015, Andrea Mazzoleni. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "tommytree.h"
+
+#include <assert.h> /* for assert */
+
+/******************************************************************************/
+/* tree */
+
+void tommy_tree_init(tommy_tree* tree, tommy_compare_func* cmp)
+{
+	tree->root = 0;
+	tree->count = 0;
+	tree->cmp = cmp;
+}
+
+static int tommy_tree_delta(tommy_tree_node* root)
+{
+	int left_height = root->prev ? root->prev->key : 0;
+	int right_height = root->next ?
root->next->key : 0; + + return left_height - right_height; +} + +/* AVL tree operations */ +static tommy_tree_node* tommy_tree_balance(tommy_tree_node*); + +static tommy_tree_node* tommy_tree_rotate_left(tommy_tree_node* root) +{ + tommy_tree_node* next = root->next; + + root->next = next->prev; + + next->prev = tommy_tree_balance(root); + + return tommy_tree_balance(next); +} + +static tommy_tree_node* tommy_tree_rotate_right(tommy_tree_node* root) +{ + tommy_tree_node* prev = root->prev; + + root->prev = prev->next; + + prev->next = tommy_tree_balance(root); + + return tommy_tree_balance(prev); +} + +static tommy_tree_node* tommy_tree_move_right(tommy_tree_node* root, tommy_tree_node* node) +{ + if (!root) + return node; + + root->next = tommy_tree_move_right(root->next, node); + + return tommy_tree_balance(root); +} + +static tommy_tree_node* tommy_tree_balance(tommy_tree_node* root) +{ + int delta = tommy_tree_delta(root); + + if (delta < -1) { + if (tommy_tree_delta(root->next) > 0) + root->next = tommy_tree_rotate_right(root->next); + return tommy_tree_rotate_left(root); + } + + if (delta > 1) { + if (tommy_tree_delta(root->prev) < 0) + root->prev = tommy_tree_rotate_left(root->prev); + return tommy_tree_rotate_right(root); + } + + /* recompute key */ + root->key = 0; + + if (root->prev && root->prev->key > root->key) + root->key = root->prev->key; + + if (root->next && root->next->key > root->key) + root->key = root->next->key; + + /* count itself */ + root->key += 1; + + return root; +} + +static tommy_tree_node* tommy_tree_insert_node(tommy_compare_func* cmp, tommy_tree_node* root, tommy_tree_node** let) +{ + int c; + + if (!root) + return *let; + + c = cmp((*let)->data, root->data); + + if (c < 0) { + root->prev = tommy_tree_insert_node(cmp, root->prev, let); + return tommy_tree_balance(root); + } + + if (c > 0) { + root->next = tommy_tree_insert_node(cmp, root->next, let); + return tommy_tree_balance(root); + } + + /* already present, set the return pointer */ + *let = root; + + return root; +} + +void* tommy_tree_insert(tommy_tree* tree, tommy_tree_node* node, void* data) +{ + tommy_tree_node* insert = node; + + insert->data = data; + insert->prev = 0; + insert->next = 0; + insert->key = 0; + + tree->root = tommy_tree_insert_node(tree->cmp, tree->root, &insert); + + if (insert == node) + ++tree->count; + + return insert->data; +} + +static tommy_tree_node* tommy_tree_remove_node(tommy_compare_func* cmp, tommy_tree_node* root, void* data, tommy_tree_node** let) +{ + int c; + + if (!root) + return 0; + + c = cmp(data, root->data); + + if (c < 0) { + root->prev = tommy_tree_remove_node(cmp, root->prev, data, let); + return tommy_tree_balance(root); + } + + if (c > 0) { + root->next = tommy_tree_remove_node(cmp, root->next, data, let); + return tommy_tree_balance(root); + } + + /* found */ + *let = root; + + return tommy_tree_move_right(root->prev, root->next); +} + +void* tommy_tree_remove(tommy_tree* tree, void* data) +{ + tommy_tree_node* node = 0; + + tree->root = tommy_tree_remove_node(tree->cmp, tree->root, data, &node); + + if (!node) + return 0; + + --tree->count; + + return node->data; +} + +static tommy_tree_node* tommy_tree_search_node(tommy_compare_func* cmp, tommy_tree_node* root, void* data) +{ + int c; + + if (!root) + return 0; + + c = cmp(data, root->data); + + if (c < 0) + return tommy_tree_search_node(cmp, root->prev, data); + + if (c > 0) + return tommy_tree_search_node(cmp, root->next, data); + + return root; +} + +void* tommy_tree_search(tommy_tree* tree, 
void* data)
+{
+	tommy_tree_node* node = tommy_tree_search_node(tree->cmp, tree->root, data);
+
+	if (!node)
+		return 0;
+
+	return node->data;
+}
+
+void* tommy_tree_search_compare(tommy_tree* tree, tommy_compare_func* cmp, void* cmp_arg)
+{
+	tommy_tree_node* node = tommy_tree_search_node(cmp, tree->root, cmp_arg);
+
+	if (!node)
+		return 0;
+
+	return node->data;
+}
+
+void* tommy_tree_remove_existing(tommy_tree* tree, tommy_tree_node* node)
+{
+	void* data = tommy_tree_remove(tree, node->data);
+
+	assert(data != 0);
+
+	return data;
+}
+
+static void tommy_tree_foreach_node(tommy_tree_node* root, tommy_foreach_func* func)
+{
+	tommy_tree_node* next;
+
+	if (!root)
+		return;
+
+	tommy_tree_foreach_node(root->prev, func);
+
+	/* make a copy in case func is free() */
+	next = root->next;
+
+	func(root->data);
+
+	tommy_tree_foreach_node(next, func);
+}
+
+void tommy_tree_foreach(tommy_tree* tree, tommy_foreach_func* func)
+{
+	tommy_tree_foreach_node(tree->root, func);
+}
+
+static void tommy_tree_foreach_arg_node(tommy_tree_node* root, tommy_foreach_arg_func* func, void* arg)
+{
+	tommy_tree_node* next;
+
+	if (!root)
+		return;
+
+	tommy_tree_foreach_arg_node(root->prev, func, arg);
+
+	/* make a copy in case func is free() */
+	next = root->next;
+
+	func(arg, root->data);
+
+	tommy_tree_foreach_arg_node(next, func, arg);
+}
+
+void tommy_tree_foreach_arg(tommy_tree* tree, tommy_foreach_arg_func* func, void* arg)
+{
+	tommy_tree_foreach_arg_node(tree->root, func, arg);
+}
+
+tommy_size_t tommy_tree_memory_usage(tommy_tree* tree)
+{
+	return tommy_tree_count(tree) * (tommy_size_t)sizeof(tommy_tree_node);
+}
+
diff --git a/sources/tommyds/tommytree.h b/sources/tommyds/tommytree.h
new file mode 100755
index 00000000..55e40060
--- /dev/null
+++ b/sources/tommyds/tommytree.h
@@ -0,0 +1,228 @@
+/*
+ * Copyright (c) 2015, Andrea Mazzoleni. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file
+ * AVL tree.
+ *
+ * This tree is a standard AVL tree implementation that stores elements in the
+ * order defined by the comparison function.
+ *
+ * Unlike other tommy containers, duplicate elements cannot be inserted.
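+ *
+ * Since duplicates are rejected, a minimal sketch for detecting them (using
+ * the same obj/node pattern as the examples below) is to compare the return
+ * value of tommy_tree_insert() with the object you tried to insert:
+ *
+ * \code
+ * struct object* ret = tommy_tree_insert(&tree, &obj->node, obj);
+ * if (ret != obj) {
+ *     // an equal element was already present; obj was not inserted
+ * }
+ * \endcode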
+ *
+ * To initialize a tree you have to call tommy_tree_init() specifying a comparison
+ * function that will define the order in the tree.
+ *
+ * \code
+ * tommy_tree tree;
+ *
+ * tommy_tree_init(&tree, cmp);
+ * \endcode
+ *
+ * To insert elements in the tree you have to call tommy_tree_insert() for
+ * each element.
+ * In the insertion call you have to specify the address of the node and the
+ * address of the object.
+ * The address of the object is used to initialize the tommy_node::data field
+ * of the node.
+ *
+ * \code
+ * struct object {
+ *     int value;
+ *     // other fields
+ *     tommy_tree_node node;
+ * };
+ *
+ * struct object* obj = malloc(sizeof(struct object)); // creates the object
+ *
+ * obj->value = ...; // initializes the object
+ *
+ * tommy_tree_insert(&tree, &obj->node, obj); // inserts the object
+ * \endcode
+ *
+ * To find an element in the tree you have to call tommy_tree_search() providing
+ * the key to search.
+ *
+ * \code
+ * struct object value_to_find = { 1 };
+ * struct object* obj = tommy_tree_search(&tree, &value_to_find);
+ * if (!obj) {
+ *     // not found
+ * } else {
+ *     // found
+ * }
+ * \endcode
+ *
+ * To remove an element from the tree you have to call tommy_tree_remove()
+ * providing the key to search and remove.
+ *
+ * \code
+ * struct object value_to_remove = { 1 };
+ * struct object* obj = tommy_tree_remove(&tree, &value_to_remove);
+ * if (obj) {
+ *     free(obj); // frees the object allocated memory
+ * }
+ * \endcode
+ *
+ * To destroy the tree you have to remove or destroy all the contained elements.
+ * The tree itself doesn't have or need a deallocation function.
+ *
+ * If you need to iterate over all the elements in the tree, you can use
+ * tommy_tree_foreach() or tommy_tree_foreach_arg().
+ * If you need more precise control with a real iteration, you have to insert
+ * all the elements also in a ::tommy_list, and use the list to iterate.
+ * See the \ref multiindex example for more detail.
+ */
+
+#ifndef __TOMMYTREE_H
+#define __TOMMYTREE_H
+
+#include "tommytypes.h"
+
+/******************************************************************************/
+/* tree */
+
+/**
+ * Tree node.
+ * This is the node that you have to include inside your objects.
+ */
+typedef tommy_node tommy_tree_node;
+
+/**
+ * Tree container type.
+ * \note Don't use internal fields directly, but access the container only using functions.
+ */
+typedef struct tommy_tree_struct {
+	tommy_tree_node* root; /**< Root node. */
+	tommy_count_t count; /**< Number of elements. */
+	tommy_compare_func* cmp; /**< Comparison function. */
+} tommy_tree;
+
+/**
+ * Initializes the tree.
+ * \param cmp The comparison function that defines the order in the tree.
+ */
+void tommy_tree_init(tommy_tree* tree, tommy_compare_func* cmp);
+
+/**
+ * Inserts an element in the tree.
+ * If the element is already present, it's not inserted again.
+ * Check the return value to identify if the element was already present or not.
+ * You have to provide the pointer of the node embedded into the object and
+ * the pointer to the object.
+ * \param node Pointer to the node embedded into the object to insert.
+ * \param data Pointer to the object to insert.
+ * \return The element in the tree. Either the already existing one, or the one just inserted.
+ */
+void* tommy_tree_insert(tommy_tree* tree, tommy_tree_node* node, void* data);
+
+/**
+ * Searches and removes an element.
+ * If the element is not found, 0 is returned.
+ * \param data Element used for comparison.
+ * \return The removed element, or 0 if not found. + */ +void* tommy_tree_remove(tommy_tree* tree, void* data); + +/** + * Searches an element in the tree. + * If the element is not found, 0 is returned. + * \param data Element used for comparison. + * \return The first element found, or 0 if none. + */ +void* tommy_tree_search(tommy_tree* tree, void* data); + +/** + * Searches an element in the tree with a specific comparison function. + * + * Like tommy_tree_search() but you can specify a different comparison function. + * Note that this function must define a suborder of the original one. + * + * The ::data argument will be the first argument of the comparison function, + * and it can be of a different type of the objects in the tree. + */ +void* tommy_tree_search_compare(tommy_tree* tree, tommy_compare_func* cmp, void* cmp_arg); + +/** + * Removes an element from the tree. + * You must already have the address of the element to remove. + * \return The tommy_node::data field of the node removed. + */ +void* tommy_tree_remove_existing(tommy_tree* tree, tommy_tree_node* node); + +/** + * Calls the specified function for each element in the tree. + * + * The elements are processed in order. + * + * You cannot add or remove elements from the inside of the callback, + * but can use it to deallocate them. + * + * \code + * tommy_tree tree; + * + * // initializes the tree + * tommy_tree_init(&tree, cmp); + * + * ... + * + * // creates an object + * struct object* obj = malloc(sizeof(struct object)); + * + * ... + * + * // insert it in the tree + * tommy_tree_insert(&tree, &obj->node, obj); + * + * ... + * + * // deallocates all the objects iterating the tree + * tommy_tree_foreach(&tree, free); + * \endcode + */ +void tommy_tree_foreach(tommy_tree* tree, tommy_foreach_func* func); + +/** + * Calls the specified function with an argument for each element in the tree. + */ +void tommy_tree_foreach_arg(tommy_tree* tree, tommy_foreach_arg_func* func, void* arg); + +/** + * Gets the number of elements. + */ +tommy_inline tommy_count_t tommy_tree_count(tommy_tree* tree) +{ + return tree->count; +} + +/** + * Gets the size of allocated memory. + * It includes the size of the ::tommy_tree_node of the stored elements. + */ +tommy_size_t tommy_tree_memory_usage(tommy_tree* tree); + +#endif + diff --git a/sources/tommyds/tommytrie.c b/sources/tommyds/tommytrie.c new file mode 100755 index 00000000..35ac2198 --- /dev/null +++ b/sources/tommyds/tommytrie.c @@ -0,0 +1,323 @@ +/* + * Copyright (c) 2010, Andrea Mazzoleni. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "tommytrie.h"
+#include "tommylist.h"
+
+#include <assert.h> /* for assert */
+
+/******************************************************************************/
+/* trie */
+
+/**
+ * Mask for the inner branches.
+ */
+#define TOMMY_TRIE_TREE_MASK (TOMMY_TRIE_TREE_MAX - 1)
+
+/**
+ * Shift for the first level of branches.
+ */
+#define TOMMY_TRIE_BUCKET_SHIFT (TOMMY_KEY_BIT - TOMMY_TRIE_BUCKET_BIT)
+
+/**
+ * Max number of levels.
+ */
+#define TOMMY_TRIE_LEVEL_MAX ((TOMMY_KEY_BIT - TOMMY_TRIE_BUCKET_BIT) / TOMMY_TRIE_TREE_BIT)
+
+/**
+ * Hashtrie tree.
+ * A tree contains TOMMY_TRIE_TREE_MAX ordered pointers to null/node/tree.
+ *
+ * Each tree level uses exactly TOMMY_TRIE_TREE_BIT bits from the key.
+ */
+struct tommy_trie_tree_struct {
+	tommy_trie_node* map[TOMMY_TRIE_TREE_MAX];
+};
+typedef struct tommy_trie_tree_struct tommy_trie_tree;
+
+/**
+ * Kinds of a trie node.
+ */
+#define TOMMY_TRIE_TYPE_NODE 0 /**< The node is of type ::tommy_trie_node. */
+#define TOMMY_TRIE_TYPE_TREE 1 /**< The node is of type ::tommy_trie_tree. */
+
+/**
+ * Get and set the pointer type of trie nodes.
+ *
+ * The pointer type is stored in the lower bit.
+ */
+#define trie_get_type(ptr) (((tommy_uintptr_t)(ptr)) & 1)
+#define trie_get_tree(ptr) ((tommy_trie_tree*)(((tommy_uintptr_t)(ptr)) - TOMMY_TRIE_TYPE_TREE))
+#define trie_set_tree(ptr) (void*)(((tommy_uintptr_t)(ptr)) + TOMMY_TRIE_TYPE_TREE)
+
+void tommy_trie_init(tommy_trie* trie, tommy_allocator* alloc)
+{
+	tommy_uint_t i;
+
+	for (i = 0; i < TOMMY_TRIE_BUCKET_MAX; ++i)
+		trie->bucket[i] = 0;
+
+	trie->count = 0;
+	trie->node_count = 0;
+
+	trie->alloc = alloc;
+}
+
+static void trie_bucket_insert(tommy_trie* trie, tommy_uint_t shift, tommy_trie_node** let_ptr, tommy_trie_node* insert, tommy_key_t key)
+{
+	tommy_trie_tree* tree;
+	tommy_trie_node* node;
+	void* ptr;
+	tommy_uint_t i;
+	tommy_uint_t j;
+
+recurse:
+	ptr = *let_ptr;
+
+	/* if null, just insert the node */
+	if (!ptr) {
+		/* setup the node as a list */
+		tommy_list_insert_first(let_ptr, insert);
+		return;
+	}
+
+	if (trie_get_type(ptr) == TOMMY_TRIE_TYPE_TREE) {
+		/* repeat the process one level down */
+		let_ptr = &trie_get_tree(ptr)->map[(key >> shift) & TOMMY_TRIE_TREE_MASK];
+		shift -= TOMMY_TRIE_TREE_BIT;
+		goto recurse;
+	}
+
+	node = tommy_cast(tommy_trie_node*, ptr);
+
+	/* if it's the same key, insert in the list */
+	if (node->key == key) {
+		tommy_list_insert_tail_not_empty(node, insert);
+		return;
+	}
+
+expand:
+	/* convert to a tree */
+	tree = tommy_cast(tommy_trie_tree*, tommy_allocator_alloc(trie->alloc));
+	++trie->node_count;
+	*let_ptr = tommy_cast(tommy_trie_node*, trie_set_tree(tree));
+
+	/* initialize it */
+	for (i = 0; i < TOMMY_TRIE_TREE_MAX; ++i)
+		tree->map[i] = 0;
+
+	/* get the position of the two elements */
+	i = (node->key >> shift) & TOMMY_TRIE_TREE_MASK;
+	j = (key >> shift) & TOMMY_TRIE_TREE_MASK;
+
+	/* if they don't collide */
+	if (i != j) {
+		/* insert the already existing element */
+		tree->map[i] = node;
+
+		/* insert
the new node */ + tommy_list_insert_first(&tree->map[j], insert); + return; + } + + /* expand one more level */ + let_ptr = &tree->map[i]; + shift -= TOMMY_TRIE_TREE_BIT; + goto expand; +} + +void tommy_trie_insert(tommy_trie* trie, tommy_trie_node* node, void* data, tommy_key_t key) +{ + tommy_trie_node** let_ptr; + + node->data = data; + node->key = key; + + let_ptr = &trie->bucket[key >> TOMMY_TRIE_BUCKET_SHIFT]; + + trie_bucket_insert(trie, TOMMY_TRIE_BUCKET_SHIFT, let_ptr, node, key); + + ++trie->count; +} + +static tommy_trie_node* trie_bucket_remove_existing(tommy_trie* trie, tommy_uint_t shift, tommy_trie_node** let_ptr, tommy_trie_node* remove, tommy_key_t key) +{ + tommy_trie_node* node; + tommy_trie_tree* tree; + void* ptr; + tommy_trie_node** let_back[TOMMY_TRIE_LEVEL_MAX + 1]; + tommy_uint_t level; + tommy_uint_t i; + tommy_uint_t count; + tommy_uint_t last; + + level = 0; +recurse: + ptr = *let_ptr; + + if (!ptr) + return 0; + + if (trie_get_type(ptr) == TOMMY_TRIE_TYPE_TREE) { + tree = trie_get_tree(ptr); + + /* save the path */ + let_back[level++] = let_ptr; + + /* go down one level */ + let_ptr = &tree->map[(key >> shift) & TOMMY_TRIE_TREE_MASK]; + shift -= TOMMY_TRIE_TREE_BIT; + + goto recurse; + } + + node = tommy_cast(tommy_trie_node*, ptr); + + /* if the node to remove is not specified */ + if (!remove) { + /* remove the first */ + remove = node; + + /* check if it's really the element to remove */ + if (remove->key != key) + return 0; + } + + tommy_list_remove_existing(let_ptr, remove); + + /* if the list is not empty, try to reduce */ + if (*let_ptr || !level) + return remove; + +reduce: + /* go one level up */ + let_ptr = let_back[--level]; + + tree = trie_get_tree(*let_ptr); + + /* check if there is only one child node */ + count = 0; + last = 0; + for (i = 0; i < TOMMY_TRIE_TREE_MAX; ++i) { + if (tree->map[i]) { + /* if we have a sub tree, we cannot reduce */ + if (trie_get_type(tree->map[i]) != TOMMY_TRIE_TYPE_NODE) + return remove; + /* if more than one node, we cannot reduce */ + if (++count > 1) + return remove; + last = i; + } + } + + /* here count is never 0, as we cannot have a tree with only one sub node */ + assert(count == 1); + + *let_ptr = tree->map[last]; + + tommy_allocator_free(trie->alloc, tree); + --trie->node_count; + + /* repeat until more level */ + if (level) + goto reduce; + + return remove; +} + +void* tommy_trie_remove(tommy_trie* trie, tommy_key_t key) +{ + tommy_trie_node* ret; + tommy_trie_node** let_ptr; + + let_ptr = &trie->bucket[key >> TOMMY_TRIE_BUCKET_SHIFT]; + + ret = trie_bucket_remove_existing(trie, TOMMY_TRIE_BUCKET_SHIFT, let_ptr, 0, key); + + if (!ret) + return 0; + + --trie->count; + + return ret->data; +} + +void* tommy_trie_remove_existing(tommy_trie* trie, tommy_trie_node* node) +{ + tommy_trie_node* ret; + tommy_key_t key = node->key; + tommy_trie_node** let_ptr; + + let_ptr = &trie->bucket[key >> TOMMY_TRIE_BUCKET_SHIFT]; + + ret = trie_bucket_remove_existing(trie, TOMMY_TRIE_BUCKET_SHIFT, let_ptr, node, key); + + /* the element removed must match the one passed */ + assert(ret == node); + + --trie->count; + + return ret->data; +} + +tommy_trie_node* tommy_trie_bucket(tommy_trie* trie, tommy_key_t key) +{ + tommy_trie_node* node; + void* ptr; + tommy_uint_t type; + tommy_uint_t shift; + + ptr = trie->bucket[key >> TOMMY_TRIE_BUCKET_SHIFT]; + + shift = TOMMY_TRIE_BUCKET_SHIFT; + +recurse: + if (!ptr) + return 0; + + type = trie_get_type(ptr); + + switch (type) { + case TOMMY_TRIE_TYPE_NODE : + node = 
tommy_cast(tommy_trie_node*, ptr);
+		if (node->key != key)
+			return 0;
+		return node;
+	default :
+	case TOMMY_TRIE_TYPE_TREE :
+		ptr = trie_get_tree(ptr)->map[(key >> shift) & TOMMY_TRIE_TREE_MASK];
+		shift -= TOMMY_TRIE_TREE_BIT;
+		goto recurse;
+	}
+}
+
+tommy_size_t tommy_trie_memory_usage(tommy_trie* trie)
+{
+	return tommy_trie_count(trie) * (tommy_size_t)sizeof(tommy_trie_node)
+	       + trie->node_count * (tommy_size_t)TOMMY_TRIE_BLOCK_SIZE;
+}
+
diff --git a/sources/tommyds/tommytrie.h b/sources/tommyds/tommytrie.h
new file mode 100755
index 00000000..d82a5f29
--- /dev/null
+++ b/sources/tommyds/tommytrie.h
@@ -0,0 +1,260 @@
+/*
+ * Copyright (c) 2010, Andrea Mazzoleni. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file
+ * Trie optimized for cache utilization.
+ *
+ * This trie is a standard implementation that stores elements in the order defined
+ * by the key.
+ *
+ * It needs an external allocator for the inner nodes in the trie.
+ *
+ * You can control the number of branches of each node using the ::TOMMY_TRIE_TREE_MAX
+ * define. More branches imply more speed, but a bigger memory occupation.
+ *
+ * Compared to ::tommy_trie_inplace you have to provide a ::tommy_allocator allocator.
+ * Note that the C malloc() is too slow to fulfill this role.
+ *
+ * To initialize the trie you have to call tommy_allocator_init() to initialize
+ * the allocator, and tommy_trie_init() for the trie.
+ *
+ * \code
+ * tommy_allocator alloc;
+ * tommy_trie trie;
+ *
+ * tommy_allocator_init(&alloc, TOMMY_TRIE_BLOCK_SIZE, TOMMY_TRIE_BLOCK_SIZE);
+ *
+ * tommy_trie_init(&trie, &alloc);
+ * \endcode
+ *
+ * To insert elements in the trie you have to call tommy_trie_insert() for
+ * each element.
+ * In the insertion call you have to specify the address of the node, the
+ * address of the object, and the key value to use.
+ * The address of the object is used to initialize the tommy_node::data field
+ * of the node, and the key to initialize the tommy_node::key field.
+ *
+ * \code
+ * struct object {
+ *     int value;
+ *     // other fields
+ *     tommy_node node;
+ * };
+ *
+ * struct object* obj = malloc(sizeof(struct object)); // creates the object
+ *
+ * obj->value = ...; // initializes the object
+ *
+ * tommy_trie_insert(&trie, &obj->node, obj, obj->value); // inserts the object
+ * \endcode
+ *
+ * To find an element in the trie you have to call tommy_trie_search() providing
+ * the key to search.
+ *
+ * \code
+ * int value_to_find = 1;
+ * struct object* obj = tommy_trie_search(&trie, value_to_find);
+ * if (!obj) {
+ *     // not found
+ * } else {
+ *     // found
+ * }
+ * \endcode
+ *
+ * To iterate over all the elements in the trie with the same key, you have to
+ * use tommy_trie_bucket() and follow the tommy_node::next pointer until NULL.
+ *
+ * \code
+ * int value_to_find = 1;
+ * tommy_node* i = tommy_trie_bucket(&trie, value_to_find);
+ * while (i) {
+ *     struct object* obj = i->data; // gets the object pointer
+ *
+ *     printf("%d\n", obj->value); // process the object
+ *
+ *     i = i->next; // goes to the next element
+ * }
+ * \endcode
+ *
+ * To remove an element from the trie you have to call tommy_trie_remove()
+ * providing the key to search and remove.
+ *
+ * \code
+ * struct object* obj = tommy_trie_remove(&trie, value_to_remove);
+ * if (obj) {
+ *     free(obj); // frees the object allocated memory
+ * }
+ * \endcode
+ *
+ * To destroy the trie you have to remove all the elements, and deinitialize
+ * the allocator using tommy_allocator_done().
+ *
+ * \code
+ * tommy_allocator_done(&alloc);
+ * \endcode
+ *
+ * Note that you cannot iterate over all the elements in the trie using the
+ * trie itself. You have to insert all the elements also in a ::tommy_list,
+ * and use the list to iterate. See the \ref multiindex example for more detail.
+ */
+
+#ifndef __TOMMYTRIE_H
+#define __TOMMYTRIE_H
+
+#include "tommytypes.h"
+#include "tommyalloc.h"
+
+/******************************************************************************/
+/* trie */
+
+/**
+ * Number of branches on each inner node. It must be a power of 2.
+ * Suggested values are 8, 16 and 32.
+ * Any inner node, excluding leaves, contains a pointer to each branch.
+ *
+ * The default size is chosen to exactly fit a typical cache line of 64 bytes.
+ */
+#define TOMMY_TRIE_TREE_MAX (64 / sizeof(void*))
+
+/**
+ * Trie block size.
+ * You must use this value to initialize the allocator.
+ */
+#define TOMMY_TRIE_BLOCK_SIZE (TOMMY_TRIE_TREE_MAX * sizeof(void*))
+
+/** \internal
+ * Number of bits for each branch.
+ */
+#define TOMMY_TRIE_TREE_BIT TOMMY_ILOG2(TOMMY_TRIE_TREE_MAX)
+
+/** \internal
+ * Number of bits of the first level.
+ */
+#define TOMMY_TRIE_BUCKET_BIT ((TOMMY_KEY_BIT % TOMMY_TRIE_TREE_BIT) + TOMMY_TRIE_TREE_BIT)
+
+/** \internal
+ * Number of branches of the first level.
+ * It's like an inner branch, but bigger to get any remainder bits.
+ */
+#define TOMMY_TRIE_BUCKET_MAX (1 << TOMMY_TRIE_BUCKET_BIT)
+
+/**
+ * Trie node.
+ * This is the node that you have to include inside your objects.
+ */
+typedef tommy_node tommy_trie_node;
+
+/**
+ * Trie container type.
+ * \note Don't use internal fields directly, but access the container only using functions.
+ */
+typedef struct tommy_trie_struct {
+	tommy_trie_node* bucket[TOMMY_TRIE_BUCKET_MAX]; /**< First tree level. */
+	tommy_count_t count; /**< Number of elements. */
+	tommy_count_t node_count; /**< Number of nodes. */
+	tommy_allocator* alloc; /**< Allocator for internal nodes.
*/ +} tommy_trie; + +/** + * Initializes the trie. + * You have to provide an allocator initialized with *both* the size and align with TOMMY_TRIE_BLOCK_SIZE. + * You can share this allocator with other tries. + * + * The tries is completely allocated through the allocator, and it doesn't need to be deinitialized. + * \param alloc Allocator initialized with *both* the size and align with TOMMY_TRIE_BLOCK_SIZE. + */ +void tommy_trie_init(tommy_trie* trie, tommy_allocator* alloc); + +/** + * Inserts an element in the trie. + * You have to provide the pointer of the node embedded into the object, + * the pointer to the object and the key to use. + * \param node Pointer to the node embedded into the object to insert. + * \param data Pointer to the object to insert. + * \param key Key to use to insert the object. + */ +void tommy_trie_insert(tommy_trie* trie, tommy_trie_node* node, void* data, tommy_key_t key); + +/** + * Searches and removes the first element with the specified key. + * If the element is not found, 0 is returned. + * If more equal elements are present, the first one is removed. + * This operation is faster than calling tommy_trie_bucket() and tommy_trie_remove_existing() separately. + * \param key Key of the element to find and remove. + * \return The removed element, or 0 if not found. + */ +void* tommy_trie_remove(tommy_trie* trie, tommy_key_t key); + +/** + * Gets the bucket of the specified key. + * The bucket is guaranteed to contain ALL and ONLY the elements with the specified key. + * You can access elements in the bucket following the ::next pointer until 0. + * \param key Key of the element to find. + * \return The head of the bucket, or 0 if empty. + */ +tommy_trie_node* tommy_trie_bucket(tommy_trie* trie, tommy_key_t key); + +/** + * Searches an element in the trie. + * You have to provide the key of the element you want to find. + * If more elements with the same key are present, the first one is returned. + * \param key Key of the element to find. + * \return The first element found, or 0 if none. + */ +tommy_inline void* tommy_trie_search(tommy_trie* trie, tommy_key_t key) +{ + tommy_trie_node* i = tommy_trie_bucket(trie, key); + + if (!i) + return 0; + + return i->data; +} + +/** + * Removes an element from the trie. + * You must already have the address of the element to remove. + * \return The tommy_node::data field of the node removed. + */ +void* tommy_trie_remove_existing(tommy_trie* trie, tommy_trie_node* node); + +/** + * Gets the number of elements. + */ +tommy_inline tommy_count_t tommy_trie_count(tommy_trie* trie) +{ + return trie->count; +} + +/** + * Gets the size of allocated memory. + * It includes the size of the ::tommy_trie_node of the stored elements. + */ +tommy_size_t tommy_trie_memory_usage(tommy_trie* trie); + +#endif + diff --git a/sources/tommyds/tommytrieinp.c b/sources/tommyds/tommytrieinp.c new file mode 100755 index 00000000..26ec2c39 --- /dev/null +++ b/sources/tommyds/tommytrieinp.c @@ -0,0 +1,270 @@ +/* + * Copyright (c) 2010, Andrea Mazzoleni. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "tommytrieinp.h"
+
+#include <assert.h> /* for assert */
+
+/******************************************************************************/
+/* trie_inplace */
+
+/**
+ * Mask for the inner branches.
+ */
+#define TOMMY_TRIE_INPLACE_TREE_MASK (TOMMY_TRIE_INPLACE_TREE_MAX - 1)
+
+/**
+ * Shift for the first level of branches.
+ */
+#define TOMMY_TRIE_INPLACE_BUCKET_SHIFT (TOMMY_KEY_BIT - TOMMY_TRIE_INPLACE_BUCKET_BIT)
+
+/**
+ * Create a new list with a single element.
+ */
+tommy_inline tommy_trie_inplace_node* tommy_trie_inplace_list_insert_first(tommy_trie_inplace_node* node)
+{
+	/* one element "circular" prev list */
+	node->prev = node;
+
+	/* one element "0 terminated" next list */
+	node->next = 0;
+
+	return node;
+}
+
+/**
+ * Add an element to an existing list.
+ * \note The element is inserted at the end of the list.
+ */
+tommy_inline void tommy_trie_inplace_list_insert_tail_not_empty(tommy_trie_inplace_node* head, tommy_trie_inplace_node* node)
+{
+	/* insert in the list in the last position */
+
+	/* insert in the "circular" prev list */
+	node->prev = head->prev;
+	head->prev = node;
+
+	/* insert in the "0 terminated" next list */
+	node->next = 0;
+	node->prev->next = node;
+}
+
+/**
+ * Remove an element from the list.
+ */ +tommy_inline void tommy_trie_inplace_list_remove(tommy_trie_inplace_node** let_ptr, tommy_trie_inplace_node* node) +{ + tommy_trie_inplace_node* head = *let_ptr; + + /* remove from the "circular" prev list */ + if (node->next) + node->next->prev = node->prev; + else + head->prev = node->prev; /* the last */ + + /* remove from the "0 terminated" next list */ + if (head == node) + *let_ptr = node->next; /* the new first */ + else + node->prev->next = node->next; +} + +void tommy_trie_inplace_init(tommy_trie_inplace* trie_inplace) +{ + tommy_uint_t i; + + for (i = 0; i < TOMMY_TRIE_INPLACE_BUCKET_MAX; ++i) + trie_inplace->bucket[i] = 0; + + trie_inplace->count = 0; +} + +static void trie_inplace_bucket_insert(tommy_uint_t shift, tommy_trie_inplace_node** let_ptr, tommy_trie_inplace_node* insert, tommy_key_t key) +{ + tommy_trie_inplace_node* node; + + node = *let_ptr; + while (node && node->key != key) { + let_ptr = &node->map[(key >> shift) & TOMMY_TRIE_INPLACE_TREE_MASK]; + node = *let_ptr; + shift -= TOMMY_TRIE_INPLACE_TREE_BIT; + } + + /* if null, just insert the node */ + if (!node) { + /* setup the node as a list */ + *let_ptr = tommy_trie_inplace_list_insert_first(insert); + } else { + /* if it's the same key, insert in the list */ + tommy_trie_inplace_list_insert_tail_not_empty(node, insert); + } +} + +void tommy_trie_inplace_insert(tommy_trie_inplace* trie_inplace, tommy_trie_inplace_node* node, void* data, tommy_key_t key) +{ + tommy_trie_inplace_node** let_ptr; + tommy_uint_t i; + + node->data = data; + node->key = key; + /* clear the child pointers */ + for (i = 0; i < TOMMY_TRIE_INPLACE_TREE_MAX; ++i) + node->map[i] = 0; + + let_ptr = &trie_inplace->bucket[key >> TOMMY_TRIE_INPLACE_BUCKET_SHIFT]; + + trie_inplace_bucket_insert(TOMMY_TRIE_INPLACE_BUCKET_SHIFT, let_ptr, node, key); + + ++trie_inplace->count; +} + +static tommy_trie_inplace_node* trie_inplace_bucket_remove(tommy_uint_t shift, tommy_trie_inplace_node** let_ptr, tommy_trie_inplace_node* remove, tommy_key_t key) +{ + tommy_trie_inplace_node* node; + int i; + tommy_trie_inplace_node** leaf_let_ptr; + tommy_trie_inplace_node* leaf; + + node = *let_ptr; + while (node && node->key != key) { + let_ptr = &node->map[(key >> shift) & TOMMY_TRIE_INPLACE_TREE_MASK]; + node = *let_ptr; + shift -= TOMMY_TRIE_INPLACE_TREE_BIT; + } + + if (!node) + return 0; + + /* if the node to remove is not specified */ + if (!remove) + remove = node; /* remove the first */ + + tommy_trie_inplace_list_remove(let_ptr, remove); + + /* if not change in the node, nothing more to do */ + if (*let_ptr == node) + return remove; + + /* if we have a substitute */ + if (*let_ptr != 0) { + /* copy the child pointers to the new one */ + node = *let_ptr; + for (i = 0; i < TOMMY_TRIE_INPLACE_TREE_MAX; ++i) + node->map[i] = remove->map[i]; + + return remove; + } + + /* find a leaf */ + leaf_let_ptr = 0; + leaf = remove; + + /* search backward, statistically we have more zeros than ones */ + i = TOMMY_TRIE_INPLACE_TREE_MAX - 1; + while (i >= 0) { + if (leaf->map[i]) { + leaf_let_ptr = &leaf->map[i]; + leaf = *leaf_let_ptr; + i = TOMMY_TRIE_INPLACE_TREE_MAX - 1; + continue; + } + --i; + } + + /* if it's itself a leaf */ + if (!leaf_let_ptr) + return remove; + + /* remove the leaf */ + *leaf_let_ptr = 0; + + /* copy the child pointers */ + for (i = 0; i < TOMMY_TRIE_INPLACE_TREE_MAX; ++i) + leaf->map[i] = remove->map[i]; + + /* put it in place */ + *let_ptr = leaf; + + return remove; +} + +void* tommy_trie_inplace_remove(tommy_trie_inplace* trie_inplace, 
+
+void* tommy_trie_inplace_remove(tommy_trie_inplace* trie_inplace, tommy_key_t key)
+{
+	tommy_trie_inplace_node* ret;
+	tommy_trie_inplace_node** let_ptr;
+
+	let_ptr = &trie_inplace->bucket[key >> TOMMY_TRIE_INPLACE_BUCKET_SHIFT];
+
+	ret = trie_inplace_bucket_remove(TOMMY_TRIE_INPLACE_BUCKET_SHIFT, let_ptr, 0, key);
+
+	if (!ret)
+		return 0;
+
+	--trie_inplace->count;
+
+	return ret->data;
+}
+
+void* tommy_trie_inplace_remove_existing(tommy_trie_inplace* trie_inplace, tommy_trie_inplace_node* node)
+{
+	tommy_trie_inplace_node* ret;
+	tommy_key_t key = node->key;
+	tommy_trie_inplace_node** let_ptr;
+
+	let_ptr = &trie_inplace->bucket[key >> TOMMY_TRIE_INPLACE_BUCKET_SHIFT];
+
+	ret = trie_inplace_bucket_remove(TOMMY_TRIE_INPLACE_BUCKET_SHIFT, let_ptr, node, key);
+
+	/* the element removed must match the one passed */
+	assert(ret == node);
+
+	--trie_inplace->count;
+
+	return ret->data;
+}
+
+tommy_trie_inplace_node* tommy_trie_inplace_bucket(tommy_trie_inplace* trie_inplace, tommy_key_t key)
+{
+	tommy_trie_inplace_node* node;
+	tommy_uint_t shift;
+
+	node = trie_inplace->bucket[key >> TOMMY_TRIE_INPLACE_BUCKET_SHIFT];
+	shift = TOMMY_TRIE_INPLACE_BUCKET_SHIFT;
+
+	while (node && node->key != key) {
+		node = node->map[(key >> shift) & TOMMY_TRIE_INPLACE_TREE_MASK];
+		shift -= TOMMY_TRIE_INPLACE_TREE_BIT;
+	}
+
+	return node;
+}
+
+tommy_size_t tommy_trie_inplace_memory_usage(tommy_trie_inplace* trie_inplace)
+{
+	return tommy_trie_inplace_count(trie_inplace) * (tommy_size_t)sizeof(tommy_trie_inplace_node);
+}
+
diff --git a/sources/tommyds/tommytrieinp.h b/sources/tommyds/tommytrieinp.h
new file mode 100755
index 00000000..9448a506
--- /dev/null
+++ b/sources/tommyds/tommytrieinp.h
@@ -0,0 +1,239 @@
+/*
+ * Copyright (c) 2010, Andrea Mazzoleni. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file
+ * Inplace trie.
+ *
+ * This trie is an inplace implementation that doesn't need any external allocation.
+ *
+ * Elements are not stored in order, unlike ::tommy_trie, because some elements
+ * are used to represent the inner nodes of the trie.
+ *
+ * You can control the number of branches of each node using the ::TOMMY_TRIE_INPLACE_TREE_MAX define.
+ * More branches mean more speed, but also a bigger memory occupation.
+ *
+ * Compared to ::tommy_trie you should use a lower number of branches to limit the unused memory
+ * occupation of the leaf nodes. This implies a lower speed, but removes the need for an external allocator.
+ *
+ * To initialize the trie you have to call tommy_trie_inplace_init().
+ *
+ * \code
+ * tommy_trie_inplace trie_inplace;
+ *
+ * tommy_trie_inplace_init(&trie_inplace);
+ * \endcode
+ *
+ * To insert elements in the trie you have to call tommy_trie_inplace_insert() for
+ * each element.
+ * In the insertion call you have to specify the address of the node, the
+ * address of the object, and the key value to use.
+ * The address of the object is used to initialize the tommy_trie_inplace_node::data field
+ * of the node, and the key to initialize the tommy_trie_inplace_node::key field.
+ *
+ * \code
+ * struct object {
+ *     int value;
+ *     // other fields
+ *     tommy_trie_inplace_node node;
+ * };
+ *
+ * struct object* obj = malloc(sizeof(struct object)); // creates the object
+ *
+ * obj->value = ...; // initializes the object
+ *
+ * tommy_trie_inplace_insert(&trie_inplace, &obj->node, obj, obj->value); // inserts the object
+ * \endcode
+ *
+ * To find an element in the trie you have to call tommy_trie_inplace_search() providing
+ * the key to search.
+ *
+ * \code
+ * int value_to_find = 1;
+ * struct object* obj = tommy_trie_inplace_search(&trie_inplace, value_to_find);
+ * if (!obj) {
+ *     // not found
+ * } else {
+ *     // found
+ * }
+ * \endcode
+ *
+ * To iterate over all the elements in the trie with the same key, you have to
+ * use tommy_trie_inplace_bucket() and follow the tommy_trie_inplace_node::next pointer until NULL.
+ *
+ * \code
+ * int value_to_find = 1;
+ * tommy_trie_inplace_node* i = tommy_trie_inplace_bucket(&trie_inplace, value_to_find);
+ * while (i) {
+ *     struct object* obj = i->data; // gets the object pointer
+ *
+ *     printf("%d\n", obj->value); // process the object
+ *
+ *     i = i->next; // goes to the next element
+ * }
+ * \endcode
+ *
+ * To remove an element from the trie you have to call tommy_trie_inplace_remove()
+ * providing the key to search and remove.
+ *
+ * \code
+ * struct object* obj = tommy_trie_inplace_remove(&trie_inplace, value_to_remove);
+ * if (obj) {
+ *     free(obj); // frees the object allocated memory
+ * }
+ * \endcode
+ *
+ * To destroy the trie you only have to remove all the elements, as the trie is
+ * completely inplace and doesn't allocate memory.
+ *
+ * Note that you cannot iterate over all the elements in the trie using the
+ * trie itself. You have to insert all the elements also in a ::tommy_list,
+ * and use the list to iterate. See the \ref multiindex example for more detail.
+ */
+
+#ifndef __TOMMYTRIEINP_H
+#define __TOMMYTRIEINP_H
+
+#include "tommytypes.h"
+
+/******************************************************************************/
+/* trie_inplace */
+
+/**
+ * Number of branches on each node. It must be a power of 2.
+ * Suggested values are 2, 4 and 8.
+ * Any node, including leaves, contains a pointer to each branch.
+ */
+#define TOMMY_TRIE_INPLACE_TREE_MAX 4
+
+/** \internal
+ * Number of bits for each branch.
+ */
+#define TOMMY_TRIE_INPLACE_TREE_BIT TOMMY_ILOG2(TOMMY_TRIE_INPLACE_TREE_MAX)
+
+/** \internal
+ * Number of bits of the first level.
+ */
+#define TOMMY_TRIE_INPLACE_BUCKET_BIT ((TOMMY_KEY_BIT % TOMMY_TRIE_INPLACE_TREE_BIT) + 3 * TOMMY_TRIE_INPLACE_TREE_BIT)
+
+/** \internal
+ * Number of branches of the first level.
+ * It's like an inner branch, but bigger, to absorb any remainder bits.
+ */
+#define TOMMY_TRIE_INPLACE_BUCKET_MAX (1 << TOMMY_TRIE_INPLACE_BUCKET_BIT)
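+
+/*
+ * Worked example of the derived constants (illustrative), with the default
+ * TOMMY_TRIE_INPLACE_TREE_MAX of 4 and 32 bit keys:
+ *
+ *   TOMMY_TRIE_INPLACE_TREE_BIT     = ilog2(4)         = 2
+ *   TOMMY_TRIE_INPLACE_BUCKET_BIT   = (32 % 2) + 3 * 2 = 6
+ *   TOMMY_TRIE_INPLACE_BUCKET_MAX   = 1 << 6           = 64
+ *   TOMMY_TRIE_INPLACE_BUCKET_SHIFT = 32 - 6           = 26 (in tommytrieinp.c)
+ *
+ * so the first level consumes 6 bits of the key and every deeper level
+ * consumes TREE_BIT more bits.
+ */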
+
+/**
+ * Trie node.
+ * This is the node that you have to include inside your objects.
+ */
+typedef struct tommy_trie_inplace_node_struct {
+	struct tommy_trie_inplace_node_struct* next; /**< Next element. 0 if it's the last. */
+	struct tommy_trie_inplace_node_struct* prev; /**< Circular previous element. */
+	void* data; /**< Pointer to the data. */
+	tommy_key_t key; /**< Used to store the key or the hash. */
+	struct tommy_trie_inplace_node_struct* map[TOMMY_TRIE_INPLACE_TREE_MAX]; /**< Branches of the node. */
+} tommy_trie_inplace_node;
+
+/**
+ * Trie container type.
+ * \note Don't use internal fields directly, but access the container only using functions.
+ */
+typedef struct tommy_trie_inplace_struct {
+	tommy_trie_inplace_node* bucket[TOMMY_TRIE_INPLACE_BUCKET_MAX]; /**< First tree level. */
+	tommy_count_t count; /**< Number of elements. */
+} tommy_trie_inplace;
+
+/**
+ * Initializes the trie.
+ *
+ * The trie is completely inplace, so it doesn't need to be deinitialized.
+ */
+void tommy_trie_inplace_init(tommy_trie_inplace* trie_inplace);
+
+/**
+ * Inserts an element in the trie.
+ */
+void tommy_trie_inplace_insert(tommy_trie_inplace* trie_inplace, tommy_trie_inplace_node* node, void* data, tommy_key_t key);
+
+/**
+ * Searches and removes the first element with the specified key.
+ * If the element is not found, 0 is returned.
+ * If multiple elements with the same key are present, the first one is removed.
+ * This operation is faster than calling tommy_trie_inplace_bucket() and tommy_trie_inplace_remove_existing() separately.
+ * \param key Key of the element to find and remove.
+ * \return The removed element, or 0 if not found.
+ */
+void* tommy_trie_inplace_remove(tommy_trie_inplace* trie_inplace, tommy_key_t key);
+
+/**
+ * Gets the bucket of the specified key.
+ * The bucket is guaranteed to contain ALL and ONLY the elements with the specified key.
+ * You can access the elements in the bucket by following the ::next pointer until 0.
+ * \param key Key of the element to find.
+ * \return The head of the bucket, or 0 if empty.
+ */
+tommy_trie_inplace_node* tommy_trie_inplace_bucket(tommy_trie_inplace* trie_inplace, tommy_key_t key);
+
+/**
+ * Searches for an element in the trie.
+ * You have to provide the key of the element you want to find.
+ * If multiple elements with the same key are present, the first one is returned.
+ * \param key Key of the element to find.
+ * \return The first element found, or 0 if none.
+ */
+tommy_inline void* tommy_trie_inplace_search(tommy_trie_inplace* trie_inplace, tommy_key_t key)
+{
+	tommy_trie_inplace_node* i = tommy_trie_inplace_bucket(trie_inplace, key);
+
+	if (!i)
+		return 0;
+
+	return i->data;
+}
+
+/**
+ * Removes an element from the trie.
+ * You must already have the address of the element to remove.
+ * \return The tommy_trie_inplace_node::data field of the node removed.
+ */
+void* tommy_trie_inplace_remove_existing(tommy_trie_inplace* trie_inplace, tommy_trie_inplace_node* node);
+
+/**
+ * Gets the number of elements.
+ */
+tommy_inline tommy_count_t tommy_trie_inplace_count(tommy_trie_inplace* trie_inplace)
+{
+	return trie_inplace->count;
+}
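+
+/*
+ * Example use of tommy_trie_inplace_remove_existing() (illustrative): when you
+ * already hold a pointer to the exact object to remove, you can remove that
+ * specific node even if other objects share the same key.
+ *
+ *   struct object* obj = ...; // an object previously inserted
+ *   tommy_trie_inplace_remove_existing(&trie_inplace, &obj->node);
+ *   free(obj);
+ */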
+
+/**
+ * Gets the size of allocated memory.
+ * It includes the size of the ::tommy_trie_inplace_node of the stored elements.
+ */
+tommy_size_t tommy_trie_inplace_memory_usage(tommy_trie_inplace* trie_inplace);
+
+#endif
+
diff --git a/sources/tommyds/tommytypes.h b/sources/tommyds/tommytypes.h
new file mode 100755
index 00000000..aff74647
--- /dev/null
+++ b/sources/tommyds/tommytypes.h
@@ -0,0 +1,442 @@
+/*
+ * Copyright (c) 2010, Andrea Mazzoleni. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file
+ * Generic types.
+ */
+
+#ifndef __TOMMYTYPES_H
+#define __TOMMYTYPES_H
+
+/******************************************************************************/
+/* types */
+
+#include <stddef.h>
+
+#if defined(_MSC_VER)
+typedef unsigned tommy_uint32_t; /**< Generic uint32_t type. */
+typedef unsigned _int64 tommy_uint64_t; /**< Generic uint64_t type. */
+typedef size_t tommy_uintptr_t; /**< Generic uintptr_t type. */
+#else
+#include <stdint.h>
+typedef uint32_t tommy_uint32_t; /**< Generic uint32_t type. */
+typedef uint64_t tommy_uint64_t; /**< Generic uint64_t type. */
+typedef uintptr_t tommy_uintptr_t; /**< Generic uintptr_t type. */
+#endif
+typedef size_t tommy_size_t; /**< Generic size_t type. */
+typedef ptrdiff_t tommy_ptrdiff_t; /**< Generic ptrdiff_t type. */
+typedef int tommy_bool_t; /**< Generic boolean type. */
+
+/**
+ * Generic unsigned integer type.
+ *
+ * It has no specific size, as it is used to store only small values.
+ * To make the code more efficient, a full 32 bit integer is used.
+ */
+typedef tommy_uint32_t tommy_uint_t;
+
+/**
+ * Generic unsigned integer for counting objects.
+ *
+ * TommyDS doesn't support more than 2^32-1 objects.
+ */
+typedef tommy_uint32_t tommy_count_t;
+
+/** \internal
+ * Type cast required for C++ compilation.
+ * When compiling in C++ we cannot implicitly convert a void* pointer to another
+ * pointer type, so in that case we need an explicit cast.
+ */
+#ifdef __cplusplus
+#define tommy_cast(type, value) static_cast<type>(value)
+#else
+#define tommy_cast(type, value) (value)
+#endif
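+
+/*
+ * Example use of tommy_cast() (illustrative): it lets code that assigns a
+ * void* to a typed pointer compile both as C and as C++.
+ *
+ *   struct object* obj = tommy_cast(struct object*, node->data);
+ */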
+
+/******************************************************************************/
+/* heap */
+
+/* by default uses malloc/calloc/realloc/free */
+
+/**
+ * Generic malloc(), calloc(), realloc() and free() functions.
+ * Redefine them to what you need. By default they map to the C malloc(), calloc(), realloc() and free().
+ */
+#if !defined(tommy_malloc) || !defined(tommy_calloc) || !defined(tommy_realloc) || !defined(tommy_free)
+#include <stdlib.h>
+#endif
+#if !defined(tommy_malloc)
+#define tommy_malloc malloc
+#endif
+#if !defined(tommy_calloc)
+#define tommy_calloc calloc
+#endif
+#if !defined(tommy_realloc)
+#define tommy_realloc realloc
+#endif
+#if !defined(tommy_free)
+#define tommy_free free
+#endif
+
+/******************************************************************************/
+/* modifiers */
+
+/** \internal
+ * Definition of the inline keyword if available.
+ */
+#if !defined(tommy_inline)
+#if defined(_MSC_VER) || defined(__GNUC__)
+#define tommy_inline static __inline
+#else
+#define tommy_inline static
+#endif
+#endif
+
+/** \internal
+ * Definition of the restrict keyword if available.
+ */
+#if !defined(tommy_restrict)
+#if __STDC_VERSION__ >= 199901L
+#define tommy_restrict restrict
+#elif defined(_MSC_VER) || defined(__GNUC__)
+#define tommy_restrict __restrict
+#else
+#define tommy_restrict
+#endif
+#endif
+
+/** \internal
+ * Hints the compiler that a condition is likely true.
+ */
+#if !defined(tommy_likely)
+#if defined(__GNUC__)
+#define tommy_likely(x) __builtin_expect(!!(x), 1)
+#else
+#define tommy_likely(x) (x)
+#endif
+#endif
+
+/** \internal
+ * Hints the compiler that a condition is likely false.
+ */
+#if !defined(tommy_unlikely)
+#if defined(__GNUC__)
+#define tommy_unlikely(x) __builtin_expect(!!(x), 0)
+#else
+#define tommy_unlikely(x) (x)
+#endif
+#endif
+
+/******************************************************************************/
+/* key */
+
+/**
+ * Key type used in indexed data structures to store the key or the hash value.
+ */
+typedef tommy_uint32_t tommy_key_t;
+
+/**
+ * Number of bits in the ::tommy_key_t type.
+ */
+#define TOMMY_KEY_BIT (sizeof(tommy_key_t) * 8)
+
+/******************************************************************************/
+/* node */
+
+/**
+ * Data structure node.
+ * This node type is shared between all the data structures and is used to store
+ * some bookkeeping info directly inside the objects you want to store.
+ *
+ * A typical declaration is:
+ * \code
+ * struct object {
+ *     tommy_node node;
+ *     // other fields
+ * };
+ * \endcode
+ */
+typedef struct tommy_node_struct {
+	/**
+	 * Next node.
+	 * The tail node has it at 0, like a 0 terminated list.
+	 */
+	struct tommy_node_struct* next;
+
+	/**
+	 * Previous node.
+	 * The head node points to the tail node, like a circular list.
+	 */
+	struct tommy_node_struct* prev;
+
+	/**
+	 * Pointer to the object containing the node.
+	 * This field is initialized when inserting nodes into a data structure.
+	 */
+	void* data;
+
+	/**
+	 * Key used to store the node.
+	 * With hashtables this field is used to store the hash value.
+	 * With lists this field is not used.
+	 */
+	tommy_key_t key;
+} tommy_node;
+
+/******************************************************************************/
+/* compare */
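+
+/*
+ * Example declaration (illustrative): the same object can live in more than
+ * one container at a time by embedding one node for each of them, as done in
+ * the \ref multiindex example.
+ *
+ *   struct object {
+ *       tommy_node list_node; // node for a tommy_list
+ *       tommy_node hash_node; // node for a tommy_hashtable
+ *       int value;
+ *   };
+ */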
+
+/**
+ * Compare function for elements.
+ * \param obj_a Pointer to the first object to compare.
+ * \param obj_b Pointer to the second object to compare.
+ * \return <0 if the first element is less than the second, ==0 if equal, >0 if greater.
+ *
+ * This function works like the C strcmp().
+ *
+ * \code
+ * struct object {
+ *     tommy_node node;
+ *     int value;
+ * };
+ *
+ * int compare(const void* obj_a, const void* obj_b)
+ * {
+ *     if (((const struct object*)obj_a)->value < ((const struct object*)obj_b)->value)
+ *         return -1;
+ *     if (((const struct object*)obj_a)->value > ((const struct object*)obj_b)->value)
+ *         return 1;
+ *     return 0;
+ * }
+ *
+ * tommy_list_sort(&list, compare);
+ * \endcode
+ */
+typedef int tommy_compare_func(const void* obj_a, const void* obj_b);
+
+/**
+ * Search function for elements.
+ * \param arg Pointer to the value to search, as passed to the search function.
+ * \param obj Pointer to the object to compare to.
+ * \return ==0 if the value matches the element. !=0 if different.
+ *
+ * The first argument is a pointer to the value to search, exactly
+ * as it was passed to the called search function.
+ * The second argument is a pointer to the object inside the hashtable to compare against.
+ *
+ * The return value has to be 0 if the values are equal, and nonzero if they are different.
+ *
+ * \code
+ * struct object {
+ *     tommy_node node;
+ *     int value;
+ * };
+ *
+ * int compare(const void* arg, const void* obj)
+ * {
+ *     const int* value_to_find = arg;
+ *     const struct object* object_to_compare = obj;
+ *
+ *     return *value_to_find != object_to_compare->value;
+ * }
+ *
+ * int value_to_find = 1;
+ * struct object* obj = tommy_hashtable_search(&hashtable, compare, &value_to_find, tommy_inthash_u32(value_to_find));
+ * if (!obj) {
+ *     // not found
+ * } else {
+ *     // found
+ * }
+ * \endcode
+ */
+typedef int tommy_search_func(const void* arg, const void* obj);
+
+/**
+ * Foreach function.
+ * \param obj Pointer to the object to iterate.
+ *
+ * A typical example is to use free() to deallocate all the objects in a list.
+ * \code
+ * tommy_list_foreach(&list, (tommy_foreach_func*)free);
+ * \endcode
+ */
+typedef void tommy_foreach_func(void* obj);
+
+/**
+ * Foreach function with an argument.
+ * \param arg Pointer to a generic argument.
+ * \param obj Pointer to the object to iterate.
+ */
+typedef void tommy_foreach_arg_func(void* arg, void* obj);
+
+/******************************************************************************/
+/* bit hacks */
+
+#if defined(_MSC_VER) && !defined(__cplusplus)
+#include <intrin.h>
+#pragma intrinsic(_BitScanReverse)
+#pragma intrinsic(_BitScanForward)
+#endif
+
+#if defined(_MSC_VER) && _MSC_VER>=1300
+#ifndef BitScanForward /* Try to avoid pulling in WinNT.h */
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+	unsigned char _BitScanForward(unsigned long *index, unsigned long mask);
+	unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#define BitScanForward _BitScanForward
+#define BitScanReverse _BitScanReverse
+#pragma intrinsic(_BitScanForward)
+#pragma intrinsic(_BitScanReverse)
+#endif /* BitScanForward */
+#endif /* defined(_MSC_VER) && _MSC_VER>=1300 */
+
+/** \internal
+ * Integer log2 for constants.
+ * You can use it only for exact powers of 2 up to 256.
+ */
+#define TOMMY_ILOG2(value) ((value) == 256 ? 8 : (value) == 128 ? 7 : (value) == 64 ? 6 : (value) == 32 ? 5 : (value) == 16 ? 4 : (value) == 8 ? 3 : (value) == 4 ? 2 : (value) == 2 ? 1 : 0)
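+
+/*
+ * For instance, TOMMY_ILOG2(4) expands to the constant 2, which is how
+ * TOMMY_TRIE_INPLACE_TREE_BIT is derived from TOMMY_TRIE_INPLACE_TREE_MAX
+ * in tommytrieinp.h. Being a plain constant expression, it can be used
+ * anywhere an integer constant is required, such as array sizes.
+ */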
+
+/**
+ * Bit scan reverse or integer log2.
+ * Return the bit index of the most significant 1 bit.
+ *
+ * If no bit is set, the result is undefined.
+ * To force a result of 0 in this case, you can use tommy_ilog2_u32(value | 1).
+ *
+ * Other interesting bit scan techniques are described at:
+ *
+ * Bit Twiddling Hacks
+ * http://graphics.stanford.edu/~seander/bithacks.html
+ *
+ * Chess Programming BitScan
+ * http://chessprogramming.wikispaces.com/BitScan
+ *
+ * \param value Value to scan. 0 is not allowed.
+ * \return The index of the most significant bit set.
+ */
+tommy_inline tommy_uint_t tommy_ilog2_u32(tommy_uint32_t value)
+{
+#if defined(_MSC_VER)
+	unsigned long count;
+	_BitScanReverse(&count, value);
+	return count;
+#elif defined(__GNUC__)
+	/*
+	 * GCC implements __builtin_clz(x) as "__builtin_clz(x) = bsr(x) ^ 31"
+	 *
+	 * Where "x ^ 31 = 31 - x", but gcc does not optimize "31 - __builtin_clz(x)" to bsr(x),
+	 * and generates 31 - (bsr(x) xor 31) instead.
+	 *
+	 * So we write "__builtin_clz(x) ^ 31" instead of "31 - __builtin_clz(x)",
+	 * to allow the double xor to be optimized out.
+	 */
+	return __builtin_clz(value) ^ 31;
+#else
+	/* Find the log base 2 of an N-bit integer in O(lg(N)) operations with multiply and lookup */
+	/* from http://graphics.stanford.edu/~seander/bithacks.html */
+	static unsigned char TOMMY_DE_BRUIJN_INDEX_ILOG2[32] = {
+		0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30,
+		8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31
+	};
+
+	value |= value >> 1;
+	value |= value >> 2;
+	value |= value >> 4;
+	value |= value >> 8;
+	value |= value >> 16;
+
+	return TOMMY_DE_BRUIJN_INDEX_ILOG2[(tommy_uint32_t)(value * 0x07C4ACDDU) >> 27];
+#endif
+}
+
+/**
+ * Bit scan forward or trailing zero count.
+ * Return the bit index of the least significant 1 bit.
+ *
+ * If no bit is set, the result is undefined.
+ * \param value Value to scan. 0 is not allowed.
+ * \return The index of the least significant bit set.
+ */
+tommy_inline tommy_uint_t tommy_ctz_u32(tommy_uint32_t value)
+{
+#if defined(_MSC_VER)
+	unsigned long count;
+	_BitScanForward(&count, value);
+	return count;
+#elif defined(__GNUC__)
+	return __builtin_ctz(value);
+#else
+	/* Count the consecutive zero bits (trailing) on the right with multiply and lookup */
+	/* from http://graphics.stanford.edu/~seander/bithacks.html */
+	static const unsigned char TOMMY_DE_BRUIJN_INDEX_CTZ[32] = {
+		0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
+		31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9
+	};
+
+	return TOMMY_DE_BRUIJN_INDEX_CTZ[(tommy_uint32_t)(((value & -value) * 0x077CB531U)) >> 27];
+#endif
+}
+
+/**
+ * Rounds up to the next power of 2.
+ * For the value 0, the result is undefined.
+ * \return The smallest power of 2 not less than the specified value.
+ */
+tommy_inline tommy_uint32_t tommy_roundup_pow2_u32(tommy_uint32_t value)
+{
+	/* Round up to the next highest power of 2 */
+	/* from http://graphics.stanford.edu/~seander/bithacks.html */
+
+	--value;
+	value |= value >> 1;
+	value |= value >> 2;
+	value |= value >> 4;
+	value |= value >> 8;
+	value |= value >> 16;
+	++value;
+
+	return value;
+}
+
+/**
+ * Checks whether the specified word contains a byte equal to 0.
+ * \return 0 or 1.
+ */
+tommy_inline int tommy_haszero_u32(tommy_uint32_t value)
+{
+	return ((value - 0x01010101) & ~value & 0x80808080) != 0;
+}
+#endif
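+
+/*
+ * A few spot checks of the helpers above (illustrative):
+ *
+ *   tommy_ilog2_u32(32)           == 5
+ *   tommy_ctz_u32(8)              == 3
+ *   tommy_roundup_pow2_u32(5)     == 8
+ *   tommy_haszero_u32(0x12003456) == 1
+ */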